The following are code examples showing how to use numpy.allclose(). They are extracted from open source Python projects.
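Every example on this page exercises numpy.allclose(a, b), which returns True when two arrays are element-wise equal within a relative tolerance rtol (default 1e-05) and an absolute tolerance atol (default 1e-08). As a quick orientation before the project examples, here is a minimal illustrative sketch of that behaviour; it is not taken from any of the projects below.

import numpy as np

# allclose(a, b) is True when |a - b| <= atol + rtol * |b| holds for every element.
a = np.array([1.0, 2.0, 3.0])
b = a + 1e-9
print(np.allclose(a, b))                      # True: difference lies within the default tolerances
print(np.allclose(a, b, rtol=0, atol=1e-12))  # False: tolerances tightened

# NaN entries compare unequal unless equal_nan=True is passed.
print(np.allclose([1.0, np.nan], [1.0, np.nan]))                  # False
print(np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True))  # True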
Example 1
def test_PlotCurveItem():
    p = pg.GraphicsWindow()
    p.ci.layout.setContentsMargins(4, 4, 4, 4)  # default margins vary by platform
    v = p.addViewBox()
    p.resize(200, 150)
    data = np.array([1,4,2,3,np.inf,5,7,6,-np.inf,8,10,9,np.nan,-1,-2,0])
    c = pg.PlotCurveItem(data)
    v.addItem(c)
    v.autoRange()

    # Check auto-range works. Some platform differences may be expected..
    checkRange = np.array([[-1.1457564053237301, 16.145756405323731],
                           [-3.076811473165955, 11.076811473165955]])
    assert np.allclose(v.viewRange(), checkRange)

    assertImageApproved(p, 'plotcurveitem/connectall', "Plot curve with all points connected.")

    c.setData(data, connect='pairs')
    assertImageApproved(p, 'plotcurveitem/connectpairs', "Plot curve with pairs connected.")

    c.setData(data, connect='finite')
    assertImageApproved(p, 'plotcurveitem/connectfinite', "Plot curve with finite points connected.")

    c.setData(data, connect=np.array([1,1,1,0,1,1,0,0,1,0,0,0,1,1,0,0]))
    assertImageApproved(p, 'plotcurveitem/connectarray', "Plot curve with connection array.")
Example 2
def test_FFT2(FFT2):
    N = FFT2.N
    if FFT2.rank == 0:
        A = random(N).astype(FFT2.float)
    else:
        A = zeros(N, dtype=FFT2.float)

    atol, rtol = (1e-10, 1e-8) if FFT2.float is float64 else (5e-7, 1e-4)
    FFT2.comm.Bcast(A, root=0)
    a = zeros(FFT2.real_shape(), dtype=FFT2.float)
    c = zeros(FFT2.complex_shape(), dtype=FFT2.complex)
    a[:] = A[FFT2.real_local_slice()]
    c = FFT2.fft2(a, c)
    B2 = zeros(FFT2.global_complex_shape(), dtype=FFT2.complex)
    B2 = rfft2(A, B2, axes=(0,1))
    assert allclose(c, B2[FFT2.complex_local_slice()], rtol, atol)
    a = FFT2.ifft2(c, a)
    assert allclose(a, A[FFT2.real_local_slice()], rtol, atol)
Example 3
def test_acoustic2d_create_matrices():
    fld = fds.Acoustic2D(t_delta=1, t_samples=1,
                         x_delta=1, x_samples=2,
                         y_delta=1, y_samples=2,
                         material=fds.AcousticMaterial(700, 0.01, bulk_viscosity=1))
    fld.create_matrices()
    assert np.allclose(fld.a_p_vx.toarray(), [[-4900, 4900, 0, 0], [0, -4900, 4900, 0],
                                              [0, 0, -4900, 4900], [0, 0, 0, -4900]])
    assert np.allclose(fld.a_p_vy.toarray(), [[-4900, 0, 4900, 0], [0, -4900, 0, 4900],
                                              [0, 0, -4900, 0], [0, 0, 0, -4900]])
    assert np.allclose(fld.a_vx_p.toarray(), [[100, 0, 0, 0], [-100, 100, 0, 0],
                                              [0, -100, 100, 0], [0, 0, -100, 100]])
    assert np.allclose(fld.a_vy_p.toarray(), [[100, 0, 0, 0], [0, 100, 0, 0],
                                              [-100, 0, 100, 0], [0, -100, 0, 100]])
    assert np.allclose(fld.a_vx_vx.toarray(), [[-400, 100, 100, 0], [100, -400, 100, 100],
                                               [100, 100, -400, 100], [0, 100, 100, -400]])
    assert np.allclose(fld.a_vy_vy.toarray(), [[-400, 100, 100, 0], [100, -400, 100, 100],
                                               [100, 100, -400, 100], [0, 100, 100, -400]])
Example 4
def test_acoustic3d_axi_create_matrices():
    fld = fds.Acoustic3DAxi(t_delta=1, t_samples=1,
                            x_delta=1, x_samples=2,
                            y_delta=1, y_samples=2,
                            material=fds.AcousticMaterial(1, 1, bulk_viscosity=1))
    fld.create_matrices()
    assert np.allclose(fld.a_p_vx.toarray(), [[-2, 2/3, 0, 0], [0, -2/3, 2, 0],
                                              [0, 0, -2, 2/3], [0, 0, 0, -2/3]])
    assert np.allclose(fld.a_p_vy.toarray(), [[-1, 0, 1, 0], [0, -1, 0, 1],
                                              [0, 0, -1, 0], [0, 0, 0, -1]])
    assert np.allclose(fld.a_vx_p.toarray(), [[1, 0, 0, 0], [-1, 1, 0, 0],
                                              [0, -1, 1, 0], [0, 0, -1, 1]])
    assert np.allclose(fld.a_vy_p.toarray(), [[1, 0, 0, 0], [0, 1, 0, 0],
                                              [-1, 0, 1, 0], [0, -1, 0, 1]])
    assert np.allclose(fld.a_vx_vx.toarray(), [[-4, 4/3, 1, 0], [0, -4, 2, 1],
                                               [1, 2/3, -4, 4/3], [0, 1, 0, -4]])
    assert np.allclose(fld.a_vy_vy.toarray(), [[-4, 4/3, 1, 0], [0, -4, 2, 1],
                                               [1, 2/3, -4, 4/3], [0, 1, 0, -4]])
Example 5
def test_latent_correlation(N, V, C, M):
    set_random_seed(make_seed(N, V, C, M))
    model = generate_fake_model(N, V, C, M)
    config = TINY_CONFIG.copy()
    config['model_num_clusters'] = M
    model['config'] = config
    server = TreeCatServer(model)
    correlation = server.latent_correlation()
    print(correlation)
    assert np.all(0 <= correlation)
    assert np.all(correlation <= 1)
    assert np.allclose(correlation, correlation.T)
    for v in range(V):
        assert correlation[v, :].argmax() == v
        assert correlation[:, v].argmax() == v
Example 6
def test_against_numpy_nanstd(self):
    source = [np.random.random((16, 12, 5)) for _ in range(10)]
    for arr in source:
        arr[randint(0, 15), randint(0, 11), randint(0, 4)] = np.nan
    stack = np.stack(source, axis = -1)

    for axis in (0, 1, 2, None):
        for ddof in range(4):
            with self.subTest('axis = {}, ddof = {}'.format(axis, ddof)):
                from_numpy = np.nanstd(stack, axis = axis, ddof = ddof)
                from_ivar = last(istd(source, axis = axis, ddof = ddof, ignore_nan = True))
                self.assertSequenceEqual(from_numpy.shape, from_ivar.shape)
                self.assertTrue(np.allclose(from_ivar, from_numpy))
Example 7
def test_against_numpy(self):
    """ Test iall against numpy.all """
    stream = [np.zeros((8, 16, 2)) for _ in range(11)]
    stream[3][3,0,1] = 1  # so that np.all(axis = None) evaluates to False
    stack = np.stack(stream, axis = -1)

    with self.subTest('axis = None'):
        from_numpy = np.all(stack, axis = None)
        from_stream = last(iall(stream, axis = None))
        self.assertEqual(from_numpy, from_stream)

    for axis in range(stack.ndim):
        with self.subTest('axis = {}'.format(axis)):
            from_numpy = np.all(stack, axis = axis)
            from_stream = last(iall(stream, axis = axis))
            self.assertTrue(np.allclose(from_numpy, from_stream))
Example 8
def testFromParamWithUInt16Array(self):

    class UInt16ArrayArg():

        def __init__(self, value):
            self._ret= (ctypes.c_uint16 * len(value))()
            for i in range(len(value)):
                self._ret[i]= value[i]

        def from_param(self):
            return self._ret

        def array(self):
            return np.array([x for x in self._ret])

    xsubi1= UInt16ArrayArg([1, 2, 4092])
    self.assertTrue(np.allclose(np.array([1, 2, 4092]), xsubi1.array()))
    xsubi2= UInt16ArrayArg([1, 2, 4092])
    self.libc.nrand48.argtypes= [UInt16ArrayArg]
    ret1= self.libc.nrand48(xsubi1)
    ret2= self.libc.nrand48(xsubi2)
    self.assertEqual(ret1, ret2)
    self.assertFalse(np.allclose(np.array([1, 2, 4092]), xsubi1.array()))
Example 9
def _enableServoControlMode(self):
    self._gcs.setServoControlMode("A B C", [False, False, False])
    self.assertTrue(
        np.allclose(
            np.array([False, False, False]),
            self._gcs.getServoControlMode("A B C")))
    self._gcs.setServoControlMode("A", True)
    self.assertTrue(
        np.allclose(
            np.array([True]),
            self._gcs.getServoControlMode("A")))
    self._gcs.setServoControlMode("A B C", [True, True, False])
    self.assertTrue(
        np.allclose(
            np.array([True, True, False]),
            self._gcs.getServoControlMode("A B C")))
Example 10
def testStartStopModulation(self):
    radiusInMilliRad= 12.4
    frequencyInHz= 100.
    centerInMilliRad= [-10, 15]
    self._tt.setTargetPosition(centerInMilliRad)
    self._tt.startModulation(radiusInMilliRad, frequencyInHz, centerInMilliRad)
    self.assertTrue(
        np.allclose(
            [1, 1, 0],
            self._ctrl.getWaveGeneratorStartStopMode()))
    waveform= self._ctrl.getWaveform(1)
    wants= self._tt._milliRadToGcsUnitsOneAxis(-10, self._tt.AXIS_A)
    got= np.mean(waveform)
    self.assertAlmostEqual(
        wants, got, msg="wants %g, got %g" % (wants, got))
    wants= self._tt._milliRadToGcsUnitsOneAxis(-10 + 12.4, self._tt.AXIS_A)
    got= np.max(waveform)
    self.assertAlmostEqual(
        wants, got, msg="wants %g, got %g" % (wants, got))
    self._tt.stopModulation()
    self.assertTrue(
        np.allclose(centerInMilliRad, self._tt.getTargetPosition()))
Example 11
def test_from_Thetas(self):
    Theta1 = np.array([[1, 0, .3],
                       [0, .5, 0],
                       [.3, 0, 1]])
    Theta2 = np.array([[1, .3, 0],
                       [.3, .5, 0],
                       [0, 0, 1]])
    Thetas = np.zeros((10, 3, 3))
    for i in range(5):
        Thetas[i] = Theta1
    for i in range(5, 10):
        Thetas[i] = Theta2

    DGM = DynamicGraphicalModel.from_Thetas(Thetas)
    self.assertEqual(len(DGM.graphs), 2)

    G1, G2 = DGM.graphs
    self.assertEqual(G1.n_edges, 1)
    self.assertEqual(G2.n_edges, 1)
    self.assertTrue(np.allclose(G1.Theta, Theta1))
    self.assertTrue(np.allclose(G2.Theta, Theta2))
Example 12
def test_restore_1(self):
    """Test restore from directory with one valid checkpoint."""

    # test model saving
    trainable_model = TrainableModel(dataset=None, log_dir=self.tmpdir, **_IO, optimizer=_OPTIMIZER)
    batch = {'input': [[1] * 10], 'target': [[0] * 10]}
    for _ in range(1000):
        trainable_model.run(batch, train=True)
    saved_var_value = trainable_model.var.eval(session=trainable_model.session)
    trainable_model.save('1')

    # test restoring
    restored_model = BaseModel(dataset=None, log_dir='', restore_from=self.tmpdir, **_IO, optimizer=_OPTIMIZER)

    var = restored_model.graph.get_tensor_by_name('var:0')
    var_value = var.eval(session=restored_model.session)

    self.assertTrue(np.allclose(saved_var_value, var_value))
Example 13
def test_restore_and_train(self):
    """Test model training after restoring."""

    # save a model that is not trained
    trainable_model = TrainableModel(dataset=None, log_dir=self.tmpdir, **_IO, optimizer=_OPTIMIZER)
    trainable_model.save('')

    # restored the model
    restored_model = BaseModel(dataset=None, log_dir='', restore_from=self.tmpdir, **_IO)

    # test whether it can be trained
    batch = {'input': [[1] * 10], 'target': [[0] * 10]}
    for _ in range(1000):
        restored_model.run(batch, train=True)

    after_value = restored_model.graph.get_tensor_by_name('var:0').eval(session=restored_model.session)
    self.assertTrue(np.allclose([0]*10, after_value))
Example 14
def test_two_models_created(self):
    """
    Test if one can create and train two ``BaseModels``.

    This is regression test for issue #83
    (One can not create and use more than one instance of ``BaseModel``).
    """
    model1 = TrainableModel(dataset=None, log_dir='', **_IO, optimizer=_OPTIMIZER)
    model2 = TrainableModel(dataset=None, log_dir='', **_IO, optimizer=_OPTIMIZER)
    batch = {'input': [[1]*10], 'target': [[0]*10]}

    # test if one can train one model while the other remains intact
    for _ in range(1000):
        model1.run(batch, train=True)
    trained_value = model1.var.eval(session=model1.session)
    self.assertTrue(np.allclose([0]*10, trained_value))
    default_value = model2.var.eval(session=model2.session)
    self.assertTrue(np.allclose([2]*10, default_value))

    # test if one can train the other model
    for _ in range(1000):
        model2.run(batch, train=True)
    trained_value2 = model2.var.eval(session=model2.session)
    self.assertTrue(np.allclose([0] * 10, trained_value2))
Example 15
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947])
    >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0)))
    True

    """
    q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
    nq = numpy.dot(q, q)
    if nq < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / nq)
    q = numpy.outer(q, q)
    return numpy.array((
        (1.0-q[1, 1]-q[2, 2],     q[0, 1]-q[2, 3],     q[0, 2]+q[1, 3], 0.0),
        (    q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2],     q[1, 2]-q[0, 3], 0.0),
        (    q[0, 2]-q[1, 3],     q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0),
        (                0.0,                 0.0,                 0.0, 1.0)
        ), dtype=numpy.float64)
Example 16
def test_optimalk(parallel_backend, n_jobs, n_clusters):
    """
    Test core functionality of OptimalK using all backends.
    """
    import numpy as np
    from sklearn.datasets.samples_generator import make_blobs
    from gap_statistic import OptimalK

    # Create optimalK instance
    optimalK = OptimalK(parallel_backend=parallel_backend, n_jobs=n_jobs)

    # Create data
    X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)

    suggested_clusters = optimalK(X, n_refs=3, cluster_array=np.arange(1, 10))

    assert np.allclose(suggested_clusters, n_clusters, 2), ('Correct clusters is {}, OptimalK suggested {}'
                                                            .format(n_clusters, suggested_clusters))
Example 17
def __init__(self, kp, kd, adaptation_rate = 0.0001, quantization = None):
    """
    :param kp_over_kd: The ratio of kp/kd.  0.01 might be a normal value.
    :param relative_scale: Try to maintain a scale of
    :param adaptation_rate:
    """
    self.k_alpha = kd/float(kp+kd)
    self.k_beta_init = 1/float(kp+kd)  # The scale
    self.k_beta = self.k_beta_init
    assert np.allclose(self.kp, kp)
    assert np.allclose(self.kd, kd)
    self.k_beta = create_shared_variable(self.k_beta_init)
    self.adaptation_rate = adaptation_rate
    self.quantization = quantization
Example 18
def reflection_matrix(point, normal):
    """Return matrix to mirror at plane defined by point and normal vector.

    >>> v0 = numpy.random.random(4) - 0.5
    >>> v0[3] = 1.
    >>> v1 = numpy.random.random(3) - 0.5
    >>> R = reflection_matrix(v0, v1)
    >>> numpy.allclose(2, numpy.trace(R))
    True
    >>> numpy.allclose(v0, numpy.dot(R, v0))
    True
    >>> v2 = v0.copy()
    >>> v2[:3] += v1
    >>> v3 = v0.copy()
    >>> v2[:3] -= v1
    >>> numpy.allclose(v2, numpy.dot(R, v3))
    True

    """
    normal = unit_vector(normal[:3])
    M = numpy.identity(4)
    M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
    M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
    return M
Example 19
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    n = numpy.dot(q, q)
    if n < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / n)
    q = numpy.outer(q, q)
    return numpy.array([
        [1.0-q[2, 2]-q[3, 3],     q[1, 2]-q[3, 0],     q[1, 3]+q[2, 0], 0.0],
        [    q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3],     q[2, 3]-q[1, 0], 0.0],
        [    q[1, 3]-q[2, 0],     q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
        [                0.0,                 0.0,                 0.0, 1.0]])
Example 20
def test_Dropout():
    from npdl.layers import Dropout

    input = np.random.rand(10, 20)
    pre_grad = np.random.rand(10, 20)

    layer = Dropout(0.5)
    layer.connect_to(PrevLayer((10, 20)))
    assert layer.forward(input).shape == input.shape
    assert np.allclose(layer.forward(input, False), input * 0.5)
    assert layer.backward(pre_grad).shape == input.shape

    layer = Dropout()
    layer.connect_to(PrevLayer((10, 20)))
    assert np.allclose(layer.forward(input), input)
    assert layer.backward(pre_grad).shape == input.shape
Example 21
def test_GWD(self):
    # Compute categorical crossentropy
    indices = self.mock_y > 0
    selected_log = -np.log(self.mock_x_softmax[indices])
    self.loss = 0  # np.sum(selected_log) / np.sum(self.mock_y)

    # Create keras model with this activation and compile it
    model = Sequential()
    activation_layer = Lambda(lambda x: x,
                              input_shape=self.data_shape[1:],
                              output_shape=self.data_shape[1:]
                              )
    model.add(activation_layer)
    model.compile('sgd', loss=gwd)

    # Predict data from the model
    loss = model.evaluate(self.mock_y, self.mock_y, batch_size=1, verbose=0)

    # Assertions
    print('Expected loss: {}'.format(self.loss))
    print('Actual loss: {}'.format(loss))
    self.assertTrue(np.allclose(loss, self.loss),
                    msg='Categorical cross-entropy loss 3D does not produce the expected results')
Example 22
def test_rescaleData():
    dtypes = map(np.dtype, ('ubyte', 'uint16', 'byte', 'int16', 'int', 'float'))
    for dtype1 in dtypes:
        for dtype2 in dtypes:
            data = (np.random.random(size=10) * 2**32 - 2**31).astype(dtype1)
            for scale, offset in [(10, 0), (10., 0.), (1, -50), (0.2, 0.5), (0.001, 0)]:
                if dtype2.kind in 'iu':
                    lim = np.iinfo(dtype2)
                    lim = lim.min, lim.max
                else:
                    lim = (-np.inf, np.inf)
                s1 = np.clip(float(scale) * (data-float(offset)), *lim).astype(dtype2)
                s2 = pg.rescaleData(data, scale, offset, dtype2)
                assert s1.dtype == s2.dtype
                if dtype2.kind in 'iu':
                    assert np.all(s1 == s2)
                else:
                    assert np.allclose(s1, s2)
Example 23
def vector_product(v0, v1, axis=0):
    """Return vector perpendicular to vectors.

    >>> v = vector_product([2, 0, 0], [0, 3, 0])
    >>> numpy.allclose(v, [0, 0, 6])
    True
    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
    >>> v1 = [[3], [0], [0]]
    >>> v = vector_product(v0, v1)
    >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
    True
    >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
    >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
    >>> v = vector_product(v0, v1, axis=1)
    >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
    True

    """
    return numpy.cross(v0, v1, axis=axis)
Example 24
def reflection_matrix(point, normal):
    """Return matrix to mirror at plane defined by point and normal vector.

    >>> v0 = numpy.random.random(4) - 0.5
    >>> v0[3] = 1.0
    >>> v1 = numpy.random.random(3) - 0.5
    >>> R = reflection_matrix(v0, v1)
    >>> numpy.allclose(2., numpy.trace(R))
    True
    >>> numpy.allclose(v0, numpy.dot(R, v0))
    True
    >>> v2 = v0.copy()
    >>> v2[:3] += v1
    >>> v3 = v0.copy()
    >>> v2[:3] -= v1
    >>> numpy.allclose(v2, numpy.dot(R, v3))
    True

    """
    normal = unit_vector(normal[:3])
    M = numpy.identity(4)
    M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
    M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
    return M
Example 25
def test_FFT(FFT):
    N = FFT.N
    if FFT.rank == 0:
        A = random(N).astype(FFT.float)
        if FFT.communication == 'AlltoallN':
            C = empty(FFT.global_complex_shape(), dtype=FFT.complex)
            C = rfftn(A, C, axes=(0,1,2))
            C[:, :, -1] = 0  # Remove Nyquist frequency
            A = irfftn(C, A, axes=(0,1,2))
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)
        B2 = rfftn(A, B2, axes=(0,1,2))
    else:
        A = zeros(N, dtype=FFT.float)
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)

    atol, rtol = (1e-10, 1e-8) if FFT.float is float64 else (5e-7, 1e-4)
    FFT.comm.Bcast(A, root=0)
    FFT.comm.Bcast(B2, root=0)

    a = zeros(FFT.real_shape(), dtype=FFT.float)
    c = zeros(FFT.complex_shape(), dtype=FFT.complex)
    a[:] = A[FFT.real_local_slice()]
    c = FFT.fftn(a, c)
    #print abs((c - B2[FFT.complex_local_slice()])/c.max()).max()
    assert all(abs((c - B2[FFT.complex_local_slice()])/c.max()) < rtol)
    #assert allclose(c, B2[FFT.complex_local_slice()], rtol, atol)
    a = FFT.ifftn(c, a)
    #print abs((a - A[FFT.real_local_slice()])/a.max()).max()
    assert all(abs((a - A[FFT.real_local_slice()])/a.max()) < rtol)
    #assert allclose(a, A[FFT.real_local_slice()], rtol, atol)
Example 26
def time_pure(self):
    q, r = np.linalg.qr(self.x)
    test = np.allclose(self.x, q.dot(r))
Example 27
def test_dimension():
    dim = fls.Dimension(3, 0.1)
    assert np.allclose(dim.vector, np.asarray([0, 0.1, 0.2]))
    assert dim.get_index(0.1) == 1
Example 28
def test_field_component_boundary_1():
    fc = fls.FieldComponent(100)
    fc.values = np.random.rand(100)
    fc.boundaries = [reg.Boundary(reg.LineRegion([5, 6, 7], [0, 0.2], 'test boundary'))]
    fc.boundaries[0].value = 23
    fc.apply_bounds(step=0)
    assert np.allclose(fc.values[[5, 6, 7]], [23, 23, 23])
Example 29
def test_field_component_boundary_2():
    fc = fls.FieldComponent(100)
    fc.values = np.ones(100)
    fc.boundaries = [reg.Boundary(reg.LineRegion([5, 6, 7], [0, 0.2], 'test boundary'))]
    fc.boundaries[0].value = [23, 42, 23]
    fc.boundaries[0].additive = True
    fc.apply_bounds(step=0)
    assert np.allclose(fc.values[[5, 6, 7]], [24, 43, 24])
Example 30
def test_field_component_output():
    fc = fls.FieldComponent(100)
    fc.outputs = [reg.Output(reg.LineRegion([0, 1, 2], [0, 0.2], 'test output'))]
    fc.write_outputs()
    fc.write_outputs()
    assert np.allclose(fc.outputs[0].signals, [[0, 0], [0, 0], [0, 0]])
    assert np.allclose(fc.outputs[0].mean_signal, np.zeros(2))
Example 31
def test_field1d_init():
    # create a field where the main material is 5
    fld = fls.Field1D(100, 0.1, 100, 0.1, int(5))
    # check if the "material parameter" 'real' for the complete field is 5
    assert np.allclose(fld.material_vector('real'), 5)
Example 32
def test_field1d_d_x2():
    fld = fls.Field1D(3, 1, 3, 1, 5)
    assert np.allclose(fld.d_x2().toarray(), [[-2, 1, 0], [1, -2, 1], [0, 1, -2]])
Example 33
def test_field2d_init():
    # create a field where the main material is 5
    fld = fls.Field2D(100, 0.1, 100, 0.1, 100, 0.1, int(5))
    # check if the "material parameter" 'real' for the complete field is 5
    assert np.allclose(fld.material_vector('real'), 5)
    assert np.size(fld.material_vector('real')) == 10000
Example 34
def test_field2d_d_x():
    fld = fls.Field2D(2, 1, 2, 1, 10, 1, int(5))
    assert np.allclose(fld.d_x().toarray(),
                       [[-1, 1, 0, 0], [0, -1, 1, 0], [0, 0, -1, 1], [0, 0, 0, -1]])
    assert np.allclose(fld.d_x(variant='backward').toarray(),
                       [[1, 0, 0, 0], [-1, 1, 0, 0], [0, -1, 1, 0], [0, 0, -1, 1]])
    assert np.allclose(fld.d_x(variant='central').toarray(),
                       [[0, 0.5, 0, 0], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5], [0, 0, -0.5, 0]])
Example 35
def test_field2d_d_x2():
    fld = fls.Field2D(2, 1, 2, 1, 10, 1, int(5))
    assert np.allclose(fld.d_x2().toarray(),
                       [[-2, 1, 0, 0], [1, -2, 1, 0], [0, 1, -2, 1], [0, 0, 1, -2]])
Example 36
def test_field2d_d_y():
    fld = fls.Field2D(2, 1, 2, 1, 10, 1, int(5))
    assert np.allclose(fld.d_y().toarray(),
                       [[-1, 0, 1, 0], [0, -1, 0, 1], [0, 0, -1, 0], [0, 0, 0, -1]])
    assert np.allclose(fld.d_y(variant='backward').toarray(),
                       [[1, 0, 0, 0], [0, 1, 0, 0], [-1, 0, 1, 0], [0, -1, 0, 1]])
    assert np.allclose(fld.d_y(variant='central').toarray(),
                       [[0, 0, 0.5, 0], [0, 0, 0, 0.5], [-0.5, 0, 0, 0], [0, -0.5, 0, 0]])
Example 37
def test_field1d_get_position():
    fld = fls.Field1D(4, 0.1, 1, 1, int(5))
    assert np.allclose(fld.get_position(fld.get_index(0.1)), 0.1)
Example 38
def test_field2d_get_position():
    fld = fls.Field2D(4, 0.1, 3, 0.1, 1, 1, int(5))
    assert np.allclose(fld.get_position(fld.get_index((0.2, 0.1))), (0.2, 0.1))
Example 39
def test_field1d_get_line_region():
    fld = fls.Field1D(4, 0.1, 1, 1, int(5))
    fld.material_regions.append(reg.MaterialRegion(fld.get_line_region((0.1, 0.2)), int(23)))
    assert np.allclose(fld.material_vector('real'), [5, 23, 23, 5])
Example 40
def test_field2d_get_line_region():
    fld = fls.Field2D(3, 1, 4, 0.5, 1, 1, int(5))
    region = fld.get_line_region((1, 0, 1, 1.5))
    assert np.allclose(region.indices, [1, 4, 7, 10])
    region = fld.get_line_region((0, 0, 2, 0))
    assert np.allclose(region.indices, [0, 1, 2])
    region = fld.get_line_region((0, 0, 2, 1.5))
    assert np.allclose(region.indices, [0, 4, 7, 11])
    region = fld.get_line_region((0, 1.5, 2, 0))
    assert np.allclose(region.indices, [9, 7, 4, 2])
Example 41
def test_field2d_get_rect_region():
    fld = fls.Field2D(3, 1, 4, 0.5, 1, 1, int(5))
    region = fld.get_rect_region((0, 0, 1, 1))
    assert np.allclose(region.indices, [0, 3, 6, 1, 4, 7])
    region = fld.get_rect_region((2, 1.5, -1, -1))
    assert np.allclose(region.indices, [4, 7, 10, 5, 8, 11])
Example 42
def test_output():
    out = reg.Output(reg.LineRegion([0, 1, 2], [0, 0.2], 'test output'))
    out.signals = [np.linspace(0, 1) for _ in range(len(out.region.indices))]
    assert np.allclose(out.mean_signal, np.linspace(0, 1))
Example 43
def test_server_marginals(N, V, C, M):
    model = generate_fake_model(N, V, C, M)
    config = TINY_CONFIG.copy()
    config['model_num_clusters'] = M
    model['config'] = config
    server = TreeCatServer(model)

    # Evaluate on random data.
    table = generate_dataset(N, V, C)['table']
    marginals = server.marginals(table.data)
    for v in range(V):
        beg, end = table.ragged_index[v:v + 2]
        totals = marginals[:, beg:end].sum(axis=1)
        assert np.allclose(totals, 1.0)
Example 44
def test_ensemble_latent_correlation(N, V, C, M):
    set_random_seed(make_seed(N, V, C, M))
    ensemble = generate_fake_ensemble(N, V, C, M)
    server = EnsembleServer(ensemble)
    correlation = server.latent_correlation()
    print(correlation)
    assert np.all(0 <= correlation)
    assert np.all(correlation <= 1)
    assert np.allclose(correlation, correlation.T)
    for v in range(V):
        assert correlation[v, :].argmax() == v
        assert correlation[:, v].argmax() == v
Example 45
def active_set_Lam(self, fixed, vary):
    grad = self.grad_wrt_Lam(fixed, vary)
    assert np.allclose(grad, grad.T, 1e-3)
    return np.where((np.abs(np.triu(grad)) > self.lamL) | (self.Lam != 0))
    # return np.where((np.abs(grad) > self.lamL) | (~np.isclose(self.Lam, 0)))
Example 46
def test_theta_0():
    rng.seed(0)

    n_samples = 100
    Y = rng.randn(n_samples, 5)
    X = rng.randn(n_samples, 5)

    sgcrf = SparseGaussianCRF(lamL=0.01, lamT=0.01)
    sgcrf.fit(X, Y)

    assert np.allclose(sgcrf.Lam, np.eye(5), .1, .2)
Example 47
def test_trivial(self):
    """ Test iaverage on stream of zeroes """
    stream = repeat(np.zeros( (64,64), dtype = np.float ), times = 5)
    for av in iaverage(stream):
        self.assertTrue(np.allclose(av, np.zeros_like(av)))
Example 48
def test_weighted_average(self):
    """ Test results of weighted average against numpy.average """
    stream = [np.random.random(size = (16,16)) for _ in range(5)]

    with self.subTest('float weights'):
        weights = [random() for _ in stream]
        from_iaverage = last(iaverage(stream, weights = weights))
        from_numpy = np.average(np.dstack(stream), axis = 2, weights = np.array(weights))
        self.assertTrue(np.allclose(from_iaverage, from_numpy))

    with self.subTest('array weights'):
        weights = [np.random.random(size = stream[0].shape) for _ in stream]
        from_iaverage = last(iaverage(stream, weights = weights))
        from_numpy = np.average(np.dstack(stream), axis = 2, weights = np.dstack(weights))
        self.assertTrue(np.allclose(from_iaverage, from_numpy))