The following are code examples showing how to use numpy.full_like. They are extracted from open source Python projects.
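Before the extracted examples, here is a minimal sketch of the basic call, written for this page rather than taken from any of the projects below: np.full_like(a, fill_value) returns a new array with the shape and dtype of a, filled with fill_value; pass dtype= explicitly when the fill value is not representable in a's dtype (for example, np.nan in an integer array).

import numpy as np

x = np.arange(4)                               # [0 1 2 3], integer dtype
a = np.full_like(x, 7)                         # [7 7 7 7], shape and dtype taken from x
b = np.full_like(x, np.nan, dtype=np.float64)  # [nan nan nan nan], dtype overridden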
Example 1
def _compute(self, windows, dates, assets, mask):
    """
    Call the user's `compute` function on each window with a pre-built
    output array.
    """
    # TODO: Make mask available to user's `compute`.
    compute = self.compute
    missing_value = self.missing_value
    params = self.params
    out = full_like(mask, missing_value, dtype=self.dtype)
    with self.ctx:
        # TODO: Consider pre-filtering columns that are all-nan at each
        # time-step?
        for idx, date in enumerate(dates):
            compute(
                date,
                assets,
                out[idx],
                *(next(w) for w in windows),
                **params
            )
    out[~mask] = missing_value
    return out
Example 2
def _is_feasible(kind, enforce_feasibility, f0):
    keyword = kind[0]
    if keyword == "equals":
        lb = np.asarray(kind[1], dtype=float)
        ub = np.asarray(kind[1], dtype=float)
    elif keyword == "greater":
        lb = np.asarray(kind[1], dtype=float)
        ub = np.full_like(lb, np.inf, dtype=float)
    elif keyword == "less":
        ub = np.asarray(kind[1], dtype=float)
        lb = np.full_like(ub, -np.inf, dtype=float)
    elif keyword == "interval":
        lb = np.asarray(kind[1], dtype=float)
        ub = np.asarray(kind[2], dtype=float)
    else:
        raise RuntimeError("Never be here.")
    return ((lb[enforce_feasibility] <= f0[enforce_feasibility]).all()
            and (f0[enforce_feasibility] <= ub[enforce_feasibility]).all())
Example 3
def as_strided_writeable():
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable)
Example 4
def draw_axes(self, ax=None):
    # concatenate lklhd_pot_diff and lklhd_pot_diff_root
    lpd = self.__lklhd_pot_diff
    lpdr = self.lklhd_pot_diff_root[np.newaxis, :]
    pad = np.full_like(lpdr, np.nan)
    data = np.concatenate((lpd, pad, lpdr), axis=0)
    lpds = self.lklhd_pot_diff_siblings

    if ax is None:
        ax = self._graph.get_axes(self.id_axes)
    assert len(ax) == self.required_axes

    # imshow lklhd_pot_diff
    ax[0].set_anchor('N')
    imshow_values(data, ax[0], show_value_text=self.show_value_text)

    # imshow lklhd_pot_diff_siblings
    ax[1].set_anchor('N')
    imshow_values(lpds, ax[1], show_value_text=self.show_value_text)
Example 5
def __f(self, x):
    '''State update: computes x(k+1) = A * x(k) + B * u(k).

    Args:
        x: current state x(k)
    Returns:
        x_next: next state x(k+1)
    '''
    yaw = x[2, :]
    a = self.__DT_s * np.cos(yaw)
    b = self.__DT_s * np.sin(yaw)
    c = np.full_like(a, self.__DT_s)
    u = np.array([a, b, c])
    x_next = (self.__A @ x) + (self.__B @ u)
    for i in range(x_next.shape[1]):
        x_next[2, i] = limit.limit_angle(x_next[2, i])
    return x_next
Example 6
def test_ignore_nans(self):
    """ Test that NaNs are ignored. """
    source = [np.ones((16,), dtype=np.float) for _ in range(10)]
    source.append(np.full_like(source[0], np.nan))
    product = cprod(source, ignore_nan=True)
    self.assertTrue(np.allclose(product, np.ones_like(product)))
Example 7
def test_ignore_nans(self):
    """ Test that NaNs are ignored. """
    source = [np.ones((16,), dtype=np.float) for _ in range(10)]
    source.append(np.full_like(source[0], np.nan))
    product = last(iprod(source, ignore_nan=True))
    self.assertTrue(np.allclose(product, np.ones_like(product)))
Example 8
def test_filled_like(self):
    self.check_like_function(np.full_like, 0, True)
    self.check_like_function(np.full_like, 1, True)
    self.check_like_function(np.full_like, 1000, True)
    self.check_like_function(np.full_like, 123.456, True)
    self.check_like_function(np.full_like, np.inf, True)
Example 9
def input_generator():
    for dtype in [np.float64]:
        for nsamples in [1000, 10000]:
            sigma = 5.0
            samples = np.random.normal(loc=0.0, scale=sigma,
                                       size=nsamples).astype(dtype)
            # For simplicity, initialize bandwidth array with constant
            # using 1D rule of thumb
            bandwidths = np.full_like(samples, 1.06 * nsamples**0.2 * sigma)
            for neval in [10, 1000, 10000]:
                category = ('samples%d' % nsamples, np.dtype(dtype).name)
                eval_points = np.random.normal(loc=0.0, scale=5.0,
                                               size=neval).astype(dtype)
                yield dict(category=category,
                           x=neval,
                           input_args=(eval_points, samples, bandwidths),
                           input_kwargs={})
Example 10
def test_map_can_only_return_none_if_missing_value_is_none(self):
    # Should work.
    la = LabelArray(self.strs, missing_value=None)
    result = la.map(lambda x: None)
    check_arrays(
        result,
        LabelArray(np.full_like(self.strs, None), missing_value=None),
    )

    la = LabelArray(self.strs, missing_value="__MISSING__")
    with self.assertRaises(TypeError):
        la.map(lambda x: None)
Example 11
def color(self, data, alpha=255):
    """Maps your data values to the palette with linear interpolation"""
    red = np.interp(data, self.range, self.r)
    blue = np.interp(data, self.range, self.b)
    green = np.interp(data, self.range, self.g)
    # Style plot to return a grey color when value is 'nan'
    red[np.isnan(red)] = 240
    blue[np.isnan(blue)] = 240
    green[np.isnan(green)] = 240
    colors = np.dstack([red.astype(np.uint8),
                        green.astype(np.uint8),
                        blue.astype(np.uint8),
                        np.full_like(data, alpha, dtype=np.uint8)])
    return colors.view(dtype=np.uint32).reshape(data.shape)
Example 12
def estimate_hyperplane(dbf, comps, phases, current_statevars, comp_dicts, phase_models, parameters):
    region_chemical_potentials = []
    parameters = OrderedDict(sorted(parameters.items(), key=str))
    for cond_dict, phase_flag in comp_dicts:
        # We are now considering a particular tie vertex
        for key, val in cond_dict.items():
            if val is None:
                cond_dict[key] = np.nan
        cond_dict.update(current_statevars)
        if np.any(np.isnan(list(cond_dict.values()))):
            # This composition is unknown -- it doesn't contribute to hyperplane estimation
            pass
        else:
            # Extract chemical potential hyperplane from multi-phase calculation
            # Note that we consider all phases in the system, not just ones in this tie region
            multi_eqdata = equilibrium(dbf, comps, phases, cond_dict, verbose=False,
                                       model=phase_models, scheduler=dask.local.get_sync,
                                       parameters=parameters)
            # Does there exist only a single phase in the result with zero internal degrees of freedom?
            # We should exclude those chemical potentials from the average because they are meaningless.
            num_phases = len(np.squeeze(multi_eqdata['Phase'].values != ''))
            zero_dof = np.all((multi_eqdata['Y'].values == 1.) | np.isnan(multi_eqdata['Y'].values))
            if (num_phases == 1) and zero_dof:
                region_chemical_potentials.append(
                    np.full_like(np.squeeze(multi_eqdata['MU'].values), np.nan))
            else:
                region_chemical_potentials.append(np.squeeze(multi_eqdata['MU'].values))
    region_chemical_potentials = np.nanmean(region_chemical_potentials, axis=0, dtype=np.float)
    return region_chemical_potentials
Example 13
def rcosfir(beta, sps, span=None):
    """Generates a raised cosine FIR filter.

    :param beta: shape of the raised cosine filter (0-1)
    :param sps: number of samples per symbol
    :param span: length of the filter in symbols (None => automatic selection)

    >>> import arlpy
    >>> rc = arlpy.comms.rcosfir(0.25, 6)
    >>> bb = arlpy.comms.modulate(arlpy.comms.random_data(100), arlpy.comms.psk())
    >>> pb = arlpy.comms.upconvert(bb, 6, 27000, 18000, rc)
    """
    if beta < 0 or beta > 1:
        raise ValueError('Beta must be between 0 and 1')
    if span is None:
        # from http://www.commsys.isy.liu.se/TSKS04/lectures/3/MichaelZoltowski_SquareRootRaisedCosine.pdf
        # since this recommendation is for root raised cosine filter, it is conservative for a raised cosine filter
        span = 33-int(44*beta) if beta < 0.68 else 4
    delay = int(span*sps/2)
    t = _np.arange(-delay, delay+1, dtype=_np.float)/sps
    denom = 1 - (2*beta*t)**2
    eps = _np.finfo(float).eps
    idx1 = _np.nonzero(_np.abs(denom) > _sqrt(eps))
    b = _np.full_like(t, beta*_sin(_pi/(2*beta))/(2*sps))
    b[idx1] = _np.sinc(t[idx1]) * _cos(_pi*beta*t[idx1])/denom[idx1] / sps
    b /= _sqrt(_np.sum(b**2))
    return b
Example 14
def mask_to_output_target(mask):
    target = np.full_like(mask, CONFIG.model.v_false, dtype=np.float32)
    target[mask] = CONFIG.model.v_true
    return target
Example 15
def __init__(self, orig_file, image_dataset, label_dataset, mask_dataset, mask_bounds=None):
    logging.debug('Loading HDF5 file "{}"'.format(orig_file))
    self.file = h5py.File(orig_file, 'r')
    self.resolution = None
    self._mask_bounds = tuple(map(np.asarray, mask_bounds)) if mask_bounds is not None else None

    if image_dataset is None and label_dataset is None:
        raise ValueError('HDF5 volume must have either an image or label dataset: {}'.format(orig_file))

    if image_dataset is not None:
        self.image_data = self.file[image_dataset]
        if 'resolution' in self.file[image_dataset].attrs:
            self.resolution = np.array(self.file[image_dataset].attrs['resolution'])

    if label_dataset is not None:
        self.label_data = self.file[label_dataset]
        if 'resolution' in self.file[label_dataset].attrs:
            resolution = np.array(self.file[label_dataset].attrs['resolution'])
            if self.resolution is not None and not np.array_equal(self.resolution, resolution):
                logging.warning('HDF5 image and label dataset resolutions differ in %s: %s, %s',
                                orig_file, self.resolution, resolution)
            else:
                self.resolution = resolution
    else:
        self.label_data = None

    if mask_dataset is not None:
        self.mask_data = self.file[mask_dataset]
    else:
        self.mask_data = None

    if image_dataset is None:
        self.image_data = np.full_like(self.label_data, np.NaN, dtype=np.float32)

    if self.resolution is None:
        self.resolution = np.ones(3)
Example 16
def test_predict_f(self):
    with self.test_context():
        ms, Xs, _rng = self.prepare()
        for m in ms:
            mf, vf = m.predict_f(Xs)
            assert_array_equal(mf.shape, vf.shape)
            assert_array_equal(mf.shape, (10, 1))
            assert_array_less(np.full_like(vf, -1e-6), vf)
Example 17
def test_predict_y(self):
    with self.test_context():
        ms, Xs, _rng = self.prepare()
        for m in ms:
            mf, vf = m.predict_y(Xs)
            assert_array_equal(mf.shape, vf.shape)
            assert_array_equal(mf.shape, (10, 1))
            assert_array_less(np.full_like(vf, -1e-6), vf)
Example 18
def maskedFilter(arr, mask, ksize=30, fill_mask=True, fn='median'):
    '''
    fn : 'mean' or 'median'
    fill_mask=True: replace masked areas with filtered results
    fill_mask=False: masked areas are ignored
    '''
    if fill_mask:
        mask1 = mask
        out = arr
    else:
        mask1 = ~mask
        out = np.full_like(arr, fill_value=np.nan)
    mask2 = ~mask
    if fn == 'mean':
        _calcMean(arr, mask1, mask2, out, ksize // 2)
    else:
        buff = np.empty(shape=(ksize * ksize), dtype=arr.dtype)
        _calcMedian(arr, mask1, mask2, out, ksize // 2, buff)
    return out

# TODO: only the filter method differs
# find a better way to replace it than making n extra defs
Example 19
def setUp(self):
    parser = argparse.ArgumentParser()
    self.args = parser.parse_args([])
    self.args.init_alpha = 1.0
    self.args.tolerance = 0.0001
    self.args.max_iter = 1000
    self.args.n_multi = 1
    self.args.verbose = False
    phy_in = ['I, A1G ,,',
              ',H, A3T A5T ,,',
              ',,F, A6T ,,',
              ',,,B, A8T ,,',
              ',,,C, T5A ,,',
              ',,G, A7T ,,',
              ',,,D, A9T ,,',
              ',,,E, A4T ,,',
              ',A, A2T A4T ,,']
    phy = phylotree.Phylotree(phy_in)
    ref = "AAAAAAAAA"
    reads = list(["1:A,2:T,3:A", "2:T,3:A", "3:A,4:T,5:T", "5:T,6:A",
                  "6:A,7:T", "6:A,7:T,8:A", "7:T,8:A", "4:T,5:T",
                  "1:A,2:T,3:T,4:T", "5:A,6:T,7:A,8:A"])
    haps = list('ABCDEFGHI')
    self.input_mat = preprocess.build_em_matrix(ref, phy, reads, haps, self.args)
    self.wts = numpy.ones(len(reads))
    self.true_props = numpy.array(
        [0.0, 0.8, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0])
    inf = float('Inf')
    self.true_haps = numpy.full_like(self.input_mat, -inf)
    self.true_haps[0:8, 1] = 0.0
    self.true_haps[8:10, 4] = 0.0
Example 20
def _initialize(self, flat_size, fill_value, dtype):
    if self.nans:
        # For avoiding branches
        flat_size += 1

    if self.forced_fill_value is None:
        ret = np.full(flat_size, fill_value, dtype=dtype)
    else:
        ret = np.full(flat_size, self.forced_fill_value, dtype=dtype)

    counter = np.full_like(ret, self.counter_fill_value,
                           dtype=self.counter_dtype)
    if self.mean_fill_value is not None:
        mean = np.full_like(ret, self.mean_fill_value, dtype=ret.dtype)
    else:
        mean = None
    return ret, counter, mean
Example 21
def from_lib(name, cell, pad=0):
    blocks = np.asarray(cell["blocks"], dtype=np.uint8)
    _, width, length = blocks.shape
    data = np.asarray(cell["data"], dtype=np.uint8)
    mask = np.full_like(blocks, True, dtype=np.bool)
    delay = cell["delay"]

    if pad != 0:
        pad_out = (pad,)
        blocks = np.pad(blocks, pad_out, "constant")
        data = np.pad(data, pad_out, "constant")
        mask = np.pad(mask, pad_out, "constant")

        # create a padded base immediately below it
        stone = block_names.index("stone")
        y = pad - 1
        xs = pad
        zs = pad
        xe = xs + length
        ze = zs + width
        blocks[y, zs:ze, xs:xe] = stone

    # build ports
    ports = {}
    for pin, d in cell["pins"].iteritems():
        y, z, x = d["coordinates"]
        coord = (y + pad, z + pad, x + pad)
        facing = d["facing"]
        direction = d["direction"]
        level = d["level"]
        ports[pin] = {"coordinates": coord,
                      "facing": facing,
                      "direction": direction,
                      "level": level}

    return Cell(blocks, data, mask, name, ports, delay)
Example 22
def _reinforce_box_constraint(kind, enforce_feasibility, x0,
                              relative_tolerance=0.01,
                              absolute_tolerance=0.01):
    """Reinforce box constraint"""
    x0 = np.copy(np.asarray(x0, dtype=float))
    keyword = kind[0]
    if keyword == "greater":
        lb = np.asarray(kind[1], dtype=float)
        ub = np.full_like(lb, np.inf, dtype=float)
    elif keyword == "less":
        ub = np.asarray(kind[1], dtype=float)
        lb = np.full_like(ub, -np.inf, dtype=float)
    elif keyword == "interval":
        lb = np.asarray(kind[1], dtype=float)
        ub = np.asarray(kind[2], dtype=float)
    x0_new = np.copy(x0)
    for i in range(np.size(x0)):
        if enforce_feasibility[i]:
            if not np.isinf(lb[i]):
                lower_bound = min(lb[i] + absolute_tolerance,
                                  lb[i] + relative_tolerance*(ub[i] - lb[i]))
                x0_new[i] = max(x0_new[i], lower_bound)
            if not np.isinf(ub[i]):
                upper_bound = max(ub[i] - absolute_tolerance,
                                  ub[i] - relative_tolerance*(ub[i] - lb[i]))
                x0_new[i] = min(x0_new[i], upper_bound)
    return x0_new
Example 23
def test_threshold_boundingzero(self):
    """Test fuzzy threshold of zero."""
    bounds = (-1.0, 1.0)
    plugin = Threshold(0.0, fuzzy_bounds=bounds)
    result = plugin.process(self.cube)
    expected_result_array = np.full_like(
        self.cube.data, fill_value=0.5).reshape(1, 1, 5, 5)
    expected_result_array[0][0][2][2] = 0.75
    self.assertArrayAlmostEqual(result.data, expected_result_array)
Example 24
def test_threshold_boundingzero_above(self):
    """Test fuzzy threshold of zero where data are above upper-bound."""
    bounds = (-0.1, 0.1)
    plugin = Threshold(0.0, fuzzy_bounds=bounds)
    result = plugin.process(self.cube)
    expected_result_array = np.full_like(
        self.cube.data, fill_value=0.5).reshape(1, 1, 5, 5)
    expected_result_array[0][0][2][2] = 1.
    self.assertArrayAlmostEqual(result.data, expected_result_array)
Example 25
def test_threshold_boundingbelowzero(self):
    """Test fuzzy threshold of below-zero."""
    bounds = (-1.0, 1.0)
    plugin = Threshold(0.0, fuzzy_bounds=bounds, below_thresh_ok=True)
    result = plugin.process(self.cube)
    expected_result_array = np.full_like(
        self.cube.data, fill_value=0.5).reshape(1, 1, 5, 5)
    expected_result_array[0][0][2][2] = 0.25
    self.assertArrayAlmostEqual(result.data, expected_result_array)
Example 26
def comp_diff_weights(weights, quantize_vals):
    diff_weights = np.full_like(weights, np.inf)
    for q in quantize_vals:
        d2 = (weights - q)**2.0
        midx = np.where(d2 < diff_weights)[0]
        diff_weights[midx] = d2[midx]
    return diff_weights
Example 27
def generate_rrab_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={
            'period': sps.uniform(loc=0.45, scale=0.35),
            'fourierorder': [8, 11],
            'amplitude': sps.uniform(loc=0.4, scale=0.5),
            'phioffset': np.pi,
        },
        magsarefluxes=False
):
    '''This generates fake RRab light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is
    None, np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    transitparams, in order: {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:
    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'RRab'
    return modeldict
Example 28
def generate_rrc_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={
            'period': sps.uniform(loc=0.10, scale=0.30),
            'fourierorder': [2, 3],
            'amplitude': sps.uniform(loc=0.1, scale=0.3),
            'phioffset': 1.5*np.pi,
        },
        magsarefluxes=False
):
    '''This generates fake RRc light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is
    None, np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    transitparams, in order: {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:
    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'RRc'
    return modeldict
Example 29
def generate_hads_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={
            'period': sps.uniform(loc=0.04, scale=0.06),
            'fourierorder': [5, 10],
            'amplitude': sps.uniform(loc=0.1, scale=0.6),
            'phioffset': np.pi,
        },
        magsarefluxes=False
):
    '''This generates fake HADS light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is
    None, np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    transitparams, in order: {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:
    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'HADS'
    return modeldict
Example 30
def generate_rotator_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={
            'period': sps.uniform(loc=0.80, scale=119.20),
            'fourierorder': [2, 3],
            'amplitude': sps.uniform(loc=0.01, scale=0.7),
            'phioffset': 1.5*np.pi,
        },
        magsarefluxes=False
):
    '''This generates fake rotator light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is
    None, np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    transitparams, in order: {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:
    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'rotator'
    return modeldict
Example 31
def generate_lpv_lightcurve(
        times,
        mags=None,
        errs=None,
        paramdists={
            'period': sps.uniform(loc=250.0, scale=250.0),
            'fourierorder': [2, 3],
            'amplitude': sps.uniform(loc=0.1, scale=0.8),
            'phioffset': 1.5*np.pi,
        },
        magsarefluxes=False
):
    '''This generates fake LPV light curves.

    times is an array of time values that will be used as the time base.

    mags and errs will have the model mags applied to them. If either is
    None, np.full_like(times, 0.0) will be used as a substitute.

    paramdists is a dict containing parameter distributions to use for the
    transitparams, in order: {'period', 'fourierorder', 'amplitude'}

    These are all 'frozen' scipy.stats distribution objects, e.g.:
    https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions

    The minimum light curve epoch will be automatically chosen from a uniform
    distribution between times.min() and times.max().

    The amplitude will be flipped automatically as appropriate if
    magsarefluxes=True.
    '''
    modeldict = generate_sinusoidal_lightcurve(times,
                                               mags=mags,
                                               errs=errs,
                                               paramdists=paramdists,
                                               magsarefluxes=magsarefluxes)
    modeldict['vartype'] = 'LPV'
    return modeldict
Example 32
def process(self, **kwargs):
    """Process module."""
    old_observations = tuple(
        getattr(self, '_' + x) for x in self._obs_keys)
    if (kwargs.get('root', 'output') == 'output' and
            'extra_times' in kwargs):
        obslist = (list(
            zip(*([kwargs.get(k) for k in self._okeys] +
                  [[True for x in range(len(kwargs['times']))]]))
        ) + list(
            zip(*([kwargs.get('extra_' + k) for k in self._okeys] +
                  [[False for x in range(len(kwargs['extra_times']))]]))))
        obslist.sort()

        self._all_observations = np.concatenate([
            np.atleast_2d(np.array(x, dtype=object))
            for x in obslist], axis=0).T
        for ki, key in enumerate(self._obs_keys):
            setattr(self, '_' + key, self._all_observations[ki])
    else:
        for key in list(
                set(self._obs_keys) - set(['frequencies', 'observed'])):
            setattr(self, '_' + key, kwargs[key])
        self._frequencies = np.array([
            x / frequency_unit(y) if x is not None else None
            for x, y in zip(kwargs['frequencies'], kwargs['u_frequencies'])
        ])
        self._observed = np.full_like(kwargs['times'], True, dtype=bool)
        self._all_observations = tuple(
            getattr(self, '_' + x) for x in self._obs_keys)

    outputs = OrderedDict(
        [('all_' + x, getattr(self, '_' + x))
         for x in list(set(self._obs_keys) - set(['observed']))])
    if any(not np.array_equal(x, y) for x, y in zip(
            old_observations, self._all_observations)):
        self._all_band_indices = np.array([
            (self._photometry.find_band_index(
                b, telescope=t, instrument=i, mode=m, bandset=bs, system=s)
             if f is None else -1)
            for ti, b, t, s, i, m, bs, f, uf, o in
            zip(*self._all_observations)
        ])
        self._observation_types = np.array([
            self._photometry._band_kinds[bi] if bi >= 0 else 'fluxdensity'
            for bi in self._all_band_indices
        ], dtype=object)
    outputs['all_band_indices'] = self._all_band_indices
    outputs['observation_types'] = self._observation_types
    outputs['observed'] = np.array(self._observed, dtype=bool)
    return outputs
Example 33
def __init__(self, seed=42):
    np.random.seed(seed)
    EPOCH = np.random.uniform(0., 40)

    self.data = OrderedDict()
    self.joker_params = OrderedDict()
    self.truths = OrderedDict()

    P = np.random.uniform(40, 80) * u.day
    mjd = np.random.uniform(0, 300., 8)
    _genmjd = mjd + (EPOCH % P.value)

    # First just a binary
    truth = dict()
    truth['P'] = P
    truth['K'] = np.random.uniform(5, 15) * u.km/u.s
    truth['phi0'] = np.random.uniform(0., 2*np.pi) * u.radian
    truth['omega'] = np.random.uniform(0., 2*np.pi) * u.radian
    truth['ecc'] = np.random.uniform()
    self.v0 = np.random.uniform(-100, 100) * u.km/u.s

    orbit = RVOrbit(**truth)
    rv = orbit.generate_rv_curve(mjd) + self.v0
    err = np.full_like(rv.value, 0.01) * u.km/u.s
    data = RVData(mjd, rv, stddev=err)
    self.data['binary'] = data
    self.joker_params['binary'] = JokerParams(P_min=8*u.day, P_max=1024*u.day)
    self.truths['binary'] = truth.copy()
    self.truths['binary']['phi0'] = self.truths['binary']['phi0'] - \
        ((2*np.pi*data.t_offset/P.value))*u.radian

    # hierarchical triple - long term velocity trend
    self.v1 = np.random.uniform(-1, 1) * u.km/u.s/u.day
    orbit = RVOrbit(**truth)
    rv = orbit.generate_rv_curve(mjd) + self.v0 + self.v1*(mjd-mjd.min())*u.day
    err = np.full_like(rv.value, 0.01) * u.km/u.s
    data = RVData(mjd, rv, stddev=err, t_offset=mjd.min())
    self.data['triple'] = data
    self.joker_params['triple'] = JokerParams(P_min=8*u.day, P_max=1024*u.day,
                                              trend_cls=VelocityTrend2)
    self.truths['triple'] = truth.copy()
    self.truths['triple']['phi0'] = self.truths['triple']['phi0'] - \
        ((2*np.pi*data.t_offset/P.value))*u.radian

    # Binary on circular orbit
    truth = dict()
    truth['P'] = P
    truth['K'] = np.random.uniform(5, 15) * u.km/u.s
    truth['phi0'] = np.random.uniform(0., 2*np.pi) * u.radian
    truth['omega'] = 0*u.radian
    truth['ecc'] = 0.

    orbit = RVOrbit(**truth)
    rv = orbit.generate_rv_curve(_genmjd) + self.v0
    err = np.full_like(rv.value, 0.1) * u.km/u.s
    data = RVData(mjd+EPOCH, rv, stddev=err)
    self.data['circ_binary'] = data
    self.joker_params['circ_binary'] = JokerParams(P_min=8*u.day, P_max=1024*u.day)
    self.truths['circ_binary'] = truth.copy()
    self.truths['circ_binary']['phi0'] = self.truths['circ_binary']['phi0'] - \
        (2*np.pi*data.t_offset/P.value)*u.radian
Example 34
def test_compare_to_str_array(self, missing_value):
    strs = self.strs
    shape = strs.shape
    arr = LabelArray(strs, missing_value=missing_value)

    if missing_value is None:
        # As of numpy 1.9.2, object array != None returns just False
        # instead of an array, with a deprecation warning saying the
        # behavior will change in the future. Work around that by just
        # using the ufunc.
        notmissing = np.not_equal(strs, missing_value)
    else:
        notmissing = (strs != missing_value)

    check_arrays(arr.not_missing(), notmissing)
    check_arrays(arr.is_missing(), ~notmissing)

    # The arrays are equal everywhere, but comparisons against the
    # missing_value should always produce False
    check_arrays(strs == arr, notmissing)
    check_arrays(strs != arr, np.zeros_like(strs, dtype=bool))

    def broadcastable_row(value, dtype):
        return np.full((shape[0], 1), value, dtype=strs.dtype)

    def broadcastable_col(value, dtype):
        return np.full((1, shape[1]), value, dtype=strs.dtype)

    # Test comparison between arr and a like-shaped 2D array, a column
    # vector, and a row vector.
    for comparator, dtype, value in product((eq, ne),
                                            (bytes, unicode, object),
                                            set(self.rowvalues)):
        check_arrays(
            comparator(arr, np.full_like(strs, value)),
            comparator(strs, value) & notmissing,
        )
        check_arrays(
            comparator(arr, broadcastable_row(value, dtype=dtype)),
            comparator(strs, value) & notmissing,
        )
        check_arrays(
            comparator(arr, broadcastable_col(value, dtype=dtype)),
            comparator(strs, value) & notmissing,
        )
Example 35
def _gen_init_reduce(self, reduce_var, reduce_op):
    """generate code to initialize reduction variables on non-root
    processors.
    """
    red_var_typ = self.typemap[reduce_var.name]
    el_typ = red_var_typ
    if self._isarray(reduce_var.name):
        el_typ = red_var_typ.dtype
    init_val = None
    pre_init_val = ""

    if reduce_op == Reduce_Type.Sum:
        init_val = str(el_typ(0))
    if reduce_op == Reduce_Type.Prod:
        init_val = str(el_typ(1))
    if reduce_op == Reduce_Type.Min:
        init_val = "numba.targets.builtins.get_type_max_value(np.ones(1,dtype=np.{}).dtype)".format(el_typ)
    if reduce_op == Reduce_Type.Max:
        init_val = "numba.targets.builtins.get_type_min_value(np.ones(1,dtype=np.{}).dtype)".format(el_typ)
    if reduce_op in [Reduce_Type.Argmin, Reduce_Type.Argmax]:
        # don't generate initialization for argmin/argmax since they are not
        # initialized by user and correct initialization is already there
        return []

    assert init_val is not None

    if self._isarray(reduce_var.name):
        pre_init_val = "v = np.full_like(s, {}, s.dtype)".format(init_val)
        init_val = "v"
    f_text = "def f(s):\n {}\n s = hpat.distributed_lower._root_rank_select(s, {})".format(pre_init_val, init_val)
    loc_vars = {}
    exec(f_text, {}, loc_vars)
    f = loc_vars['f']

    f_block = compile_to_numba_ir(f,
                                  {'hpat': hpat, 'numba': numba, 'np': np},
                                  self.typingctx,
                                  (red_var_typ,),
                                  self.typemap,
                                  self.calltypes).blocks.popitem()[1]
    replace_arg_nodes(f_block, [reduce_var])
    nodes = f_block.body[:-3]
    nodes[-1].target = reduce_var
    return nodes
Example 36
def __init__(self, img, bg=None, maxDev=1e-4, maxIter=10, remove_border_size=0,
             # feature_size=5,
             cameraMatrix=None, distortionCoeffs=None):
    """
    Args:
        img (path or array): Reference image

    Kwargs:
        bg (path or array): background image - same for all given images
        maxDev (float): Relative deviation between the last two iteration steps
            Stop iterative refinement, if deviation is smaller
        maxIter (int): Stop iterative refinement after maxIter steps
    """
    self.lens = None
    if cameraMatrix is not None:
        self.lens = LensDistortion()
        self.lens._coeffs['distortionCoeffs'] = distortionCoeffs
        self.lens._coeffs['cameraMatrix'] = cameraMatrix

    self.maxDev = maxDev
    self.maxIter = maxIter
    self.remove_border_size = remove_border_size
    # self.feature_size = feature_size

    img = imread(img, 'gray')

    self.bg = bg
    if bg is not None:
        self.bg = getBackground(bg)
        if not isinstance(self.bg, np.ndarray):
            self.bg = np.full_like(img, self.bg, dtype=img.dtype)
        else:
            self.bg = self.bg.astype(img.dtype)
        img = cv2.subtract(img, self.bg)

    if self.lens is not None:
        img = self.lens.correct(img, keepSize=True)

    # CREATE TEMPLATE FOR PATTERN COMPARISON:
    pos = self._findObject(img)
    self.obj_shape = img[pos].shape

    PatternRecognition.__init__(self, img[pos])

    self._ff_mma = MaskedMovingAverage(shape=img.shape, dtype=np.float64)

    self.object = None

    self.Hs = []      # Homography matrices of all fitted images
    self.Hinvs = []   # same, but inverse
    self.fits = []    # all images, fitted to reference
    self._fit_masks = []

    self._refined = False
    # TODO: remove that property?
Example 37
def quantile_mapping(input_data, data_to_match, mask=None, alpha=0.4, beta=0.4):
    '''quantile mapping'''
    assert input_data.get_axis_num('time') == 0
    assert data_to_match.get_axis_num('time') == 0
    assert input_data.shape[1:] == data_to_match.shape[1:]

    # Make a mask if one was not provided
    if mask is None:
        d0 = input_data.isel(time=0, drop=True)
        mask = xr.Variable(d0.dims, ~da.isnull(d0))

    # quantiles for the input data
    n = len(input_data['time'])
    x1 = (np.arange(1, n + 1) - alpha) / (n + 1. - alpha - beta)

    # quantiles for the obs
    n = len(data_to_match['time'])
    x0 = (np.arange(1, n + 1) - alpha) / (n + 1. - alpha - beta)

    def qmap(data, like, mask):
        # Use numpy to sort these arrays before we loop through each variable
        sort_inds_all = np.argsort(data, axis=0)
        sorted_all = np.sort(like, axis=0)

        ii, jj = mask.nonzero()

        new = np.full_like(data, np.nan)

        for i, j in zip(ii, jj):
            # Sorted Observations
            y0 = sorted_all[:, i, j]
            # Indices that would sort the input data
            sort_inds = sort_inds_all[:, i, j]
            new[sort_inds, i, j] = np.interp(x1, x0, y0)  # TODO: handle edges
        return new

    if isinstance(input_data.data, da.Array):
        # dask arrays
        new = da.map_blocks(qmap, input_data.data, data_to_match.data,
                            mask.data, chunks=input_data.data.chunks,
                            name='qmap')
    else:
        # numpy arrays
        new = qmap(input_data.data, data_to_match.data, mask.data)

    return xr.DataArray(new, dims=input_data.dims, coords=input_data.coords,
                        attrs=input_data.attrs, name=input_data.name)
Example 38
def _round_hitcounts(self, accuracy, count_miss=None):
    """Round the accuracy to the nearest hit counts.

    Parameters
    ----------
    accuracy : np.ndarray[float]
        The accuracy to round in the range [0, 1]
    count_miss : np.ndarray[int], optional
        The number of misses to fix.

    Returns
    -------
    count_300 : np.ndarray[int]
        The number of 300s.
    count_100 : np.ndarray[int]
        The number of 100s.
    count_50 : np.ndarray[int]
        The number of 50s.
    count_miss : np.ndarray[int]
        The number of misses.
    """
    if count_miss is None:
        count_miss = np.full_like(accuracy, 0)

    max_300 = len(self.hit_objects) - count_miss

    accuracy = np.maximum(
        0.0,
        np.minimum(
            calculate_accuracy(max_300, 0, 0, count_miss) * 100.0,
            accuracy * 100,
        ),
    )

    count_50 = np.full_like(accuracy, 0)
    count_100 = np.round(
        -3.0 *
        ((accuracy * 0.01 - 1.0) * len(self.hit_objects) + count_miss) *
        0.5,
    )

    mask = count_100 > len(self.hit_objects) - count_miss
    count_100[mask] = 0
    count_50[mask] = np.round(
        -6.0 *
        ((accuracy[mask] * 0.01 - 1.0) * len(self.hit_objects) +
         count_miss[mask]) *
        0.2,
    )
    count_50[mask] = np.minimum(max_300[mask], count_50[mask])
    count_100[~mask] = np.minimum(max_300[~mask], count_100[~mask])

    count_300 = (
        len(self.hit_objects) -
        count_100 -
        count_50 -
        count_miss
    )

    return count_300, count_100, count_50, count_miss
Example 39
def full(shape, fill_value, dtype=None, order='C'):
    """
    Return a new array of given shape and type, filled with `fill_value`.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    fill_value : scalar
        Fill value.
    dtype : data-type, optional
        The desired data-type for the array. The default, `None`, means
        `np.array(fill_value).dtype`.
    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the given shape, dtype, and order.

    See Also
    --------
    zeros_like : Return an array of zeros with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    empty_like : Return an empty array with shape and type of input.
    full_like : Fill an array with shape and type of input.
    zeros : Return a new array setting values to zero.
    ones : Return a new array setting values to one.
    empty : Return a new uninitialized array.

    Examples
    --------
    >>> np.full((2, 2), np.inf)
    array([[ inf,  inf],
           [ inf,  inf]])
    >>> np.full((2, 2), 10)
    array([[10, 10],
           [10, 10]])

    """
    if dtype is None:
        dtype = array(fill_value).dtype
    a = empty(shape, dtype, order)
    multiarray.copyto(a, fill_value, casting='unsafe')
    return a
Example 40
def full_like(a, fill_value, dtype=None, order='K', subok=True):
    """
    Return a full array with the same shape and type as a given array.

    Parameters
    ----------
    a : array_like
        The shape and data-type of `a` define these same attributes of
        the returned array.
    fill_value : scalar
        Fill value.
    dtype : data-type, optional
        Overrides the data type of the result.
    order : {'C', 'F', 'A', or 'K'}, optional
        Overrides the memory layout of the result. 'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise. 'K' means match the layout of `a` as closely
        as possible.
    subok : bool, optional
        If True, then the newly created array will use the sub-class
        type of 'a', otherwise it will be a base-class array. Defaults
        to True.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the same shape and type as `a`.

    See Also
    --------
    zeros_like : Return an array of zeros with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    empty_like : Return an empty array with shape and type of input.
    zeros : Return a new array setting values to zero.
    ones : Return a new array setting values to one.
    empty : Return a new uninitialized array.
    full : Fill a new array.

    Examples
    --------
    >>> x = np.arange(6, dtype=np.int)
    >>> np.full_like(x, 1)
    array([1, 1, 1, 1, 1, 1])
    >>> np.full_like(x, 0.1)
    array([0, 0, 0, 0, 0, 0])
    >>> np.full_like(x, 0.1, dtype=np.double)
    array([ 0.1,  0.1,  0.1,  0.1,  0.1,  0.1])
    >>> np.full_like(x, np.nan, dtype=np.double)
    array([ nan,  nan,  nan,  nan,  nan,  nan])

    >>> y = np.arange(6, dtype=np.double)
    >>> np.full_like(y, 0.1)
    array([ 0.1,  0.1,  0.1,  0.1,  0.1,  0.1])

    """
    res = empty_like(a, dtype=dtype, order=order, subok=subok)
    multiarray.copyto(res, fill_value, casting='unsafe')
    return res
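The two reference implementations above suggest a simple mental model: for a base-class, C-contiguous array, np.full_like(a, v) behaves like np.full(a.shape, v, dtype=a.dtype). A small illustrative check, written for this page rather than taken from any of the projects here:

import numpy as np

a = np.arange(6, dtype=np.int64).reshape(2, 3)
b = np.full_like(a, 9)                  # shape (2, 3), dtype int64, filled with 9
c = np.full(a.shape, 9, dtype=a.dtype)  # equivalent construction via np.full
assert np.array_equal(b, c) and b.dtype == c.dtype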
Example 41
def back_transform(self, scores):
    "transform normal scores back to original data"
    values = np.full_like(scores, np.nan)

    lo_value = self.transform_table['value'][0]
    up_value = self.transform_table['value'][-1]
    lo_score = self.transform_table['score'][0]
    up_score = self.transform_table['score'][-1]

    # scores in normal range
    normal_mask = np.logical_and(scores <= up_score, scores >= lo_score)
    normal_scores = scores[normal_mask]
    values[normal_mask] = self.back_func(normal_scores)

    # scores in lower tail: 1=linear, 2=power
    lower_mask = scores < lo_score
    lower_scores = scores[lower_mask]
    temp = list()
    for sc in lower_scores:
        backtr = lo_value
        cdflo = gcum(lo_score)
        cdfbt = gcum(sc)
        if self.ltail == 1:  # linear
            backtr = powint(0, cdflo, self.zmin, lo_value, cdfbt, 1)
            temp.append(backtr)
        elif self.ltail == 2:  # power
            cpow = 1.0 / self.ltpar
            backtr = powint(0, cdflo, self.zmin, lo_value, cdfbt, cpow)
            temp.append(backtr)
    values[lower_mask] = temp

    # scores in upper tail: 1=linear, 2=power, 4=hyperbolic
    upper_mask = scores > up_score
    upper_scores = scores[upper_mask]
    temp = list()
    for sc in upper_scores:
        backtr = up_value
        cdfhi = gcum(up_score)
        cdfbt = gcum(sc)  # cdf value of the score to be back-transformed
        if self.utail == 1:  # linear
            backtr = powint(cdfhi, 1.0, up_value, self.zmax, cdfbt, 1)
            temp.append(backtr)
        elif self.utail == 2:  # power
            cpow = 1.0 / self.utpar
            backtr = powint(cdfhi, 1.0, up_value, self.zmax, cdfbt, cpow)
            temp.append(backtr)
        elif self.utail == 4:  # hyperbolic
            l = (up_value**self.utpar) * (1 - gcum(up_score))
            backtr = (l / (1 - gcum(sc)))**(1 / self.utpar)
            temp.append(backtr)
    values[upper_mask] = temp

    return values
Example 42
def _evaluate_rollout(self, state, limit):
    winner = 0
    player = None
    for i in range(limit):
        legal_states, p, legal_moves = Game.possible_moves(state)
        if player is None:
            player = p
        if len(legal_states) == 0:
            break
        probs = self._rollout(state, legal_moves)
        mask = np.full_like(probs, -0.01)
        mask[:, legal_moves] = probs[:, legal_moves]
        probs = mask
        best_move = np.argmax(probs, 1)[0]
        idx = np.where(legal_moves == best_move)[0]
        assert idx.size == 1
        idx = idx[0]
        st1 = legal_states[idx]
        over, winner, last_loc = st1.is_over(state)
        if over:
            break
        state = st1
    else:
        # If no break from the loop, issue a warning.
        print("WARNING: rollout reached move limit")

    if winner == 0:
        return 0
    else:
        return 1 if winner == player else -1
Example 43
def testMultipliesGradient(self):
    embedding_language = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('language', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    embedding_wire = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('wire', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    params = {
        'feature_columns': [embedding_language, embedding_wire],
        'head': head_lib._multi_class_head(2),
        'hidden_units': [1],
        # Set lr mult to 0. to keep embeddings constant.
        'embedding_lr_multipliers': {
            embedding_language: 0.0
        },
    }
    features = {
        'language': sparse_tensor.SparseTensor(
            values=['en', 'fr', 'zh'],
            indices=[[0, 0], [1, 0], [2, 0]],
            dense_shape=[3, 1]),
        'wire': sparse_tensor.SparseTensor(
            values=['omar', 'stringer', 'marlo'],
            indices=[[0, 0], [1, 0], [2, 0]],
            dense_shape=[3, 1]),
    }
    labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
    model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
                                  params)
    with monitored_session.MonitoredSession() as sess:
        language_var = dnn_linear_combined._get_embedding_variable(
            embedding_language, 'dnn', 'dnn/input_from_feature_columns')
        wire_var = dnn_linear_combined._get_embedding_variable(
            embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
        for _ in range(2):
            _, language_value, wire_value = sess.run(
                [model_ops.train_op, language_var, wire_var])
            initial_value = np.full_like(language_value, 0.1)
            self.assertTrue(np.all(np.isclose(language_value, initial_value)))
            self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
Example 44
def testMultipliesGradient(self):
    embedding_language = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('language', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    embedding_wire = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('wire', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    params = {
        'dnn_feature_columns': [embedding_language, embedding_wire],
        'head': head_lib._multi_class_head(2),
        'dnn_hidden_units': [1],
        # Set lr mult to 0. to keep embeddings constant.
        'embedding_lr_multipliers': {
            embedding_language: 0.0
        },
        'dnn_optimizer': 'Adagrad',
    }
    features = {
        'language': sparse_tensor.SparseTensor(
            values=['en', 'fr', 'zh'],
            indices=[[0, 0], [1, 0], [2, 0]],
            dense_shape=[3, 1]),
        'wire': sparse_tensor.SparseTensor(
            values=['omar', 'stringer', 'marlo'],
            indices=[[0, 0], [1, 0], [2, 0]],
            dense_shape=[3, 1]),
    }
    labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
    model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
        features, labels, model_fn.ModeKeys.TRAIN, params)
    with monitored_session.MonitoredSession() as sess:
        language_var = dnn_linear_combined._get_embedding_variable(
            embedding_language, 'dnn', 'dnn/input_from_feature_columns')
        wire_var = dnn_linear_combined._get_embedding_variable(
            embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
        for _ in range(2):
            _, language_value, wire_value = sess.run(
                [model_ops.train_op, language_var, wire_var])
            initial_value = np.full_like(language_value, 0.1)
            self.assertTrue(np.all(np.isclose(language_value, initial_value)))
            self.assertFalse(np.all(np.isclose(wire_value, initial_value)))