20 code examples for the numpy.random.normal method
The following 20 code examples show how random.normal is used in real projects; by default they are ordered by popularity.
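Before the project examples, a minimal sketch of the function itself: numpy.random.normal(loc, scale, size) draws samples from a Gaussian distribution with mean loc and standard deviation scale. (The last two lines show NumPy's newer Generator API, which the NumPy docs recommend for new code.)

import numpy as np

# 10,000 samples from N(mean=0.0, std=1.0)
samples = np.random.normal(loc=0.0, scale=1.0, size=10_000)
print(samples.mean(), samples.std())  # close to 0.0 and 1.0

# Newer Generator API (NumPy >= 1.17)
rng = np.random.default_rng(seed=42)
samples = rng.normal(loc=0.0, scale=1.0, size=10_000)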
Example 1: random_rot
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def random_rot(dim):
    """Return a random rotation matrix, drawn from the Haar distribution
    (the only uniform distribution on SO(n)).
    The algorithm is described in the paper
    Stewart, G.W., 'The efficient generation of random orthogonal
    matrices with an application to condition estimators', SIAM Journal
    on Numerical Analysis, 17(3), pp. 403-409, 1980.
    For more information see
    http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization"""
    H = eye(dim)
    D = ones((dim,))
    for n in range(1, dim):
        x = normal(size=(dim-n+1,))
        D[n-1] = sign(x[0])
        x[0] -= D[n-1]*sqrt((x*x).sum())
        # Householder transformation
        Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum()
        mat = eye(dim)
        mat[n-1:, n-1:] = Hx
        H = dot(H, mat)
    # Fix the last sign such that the determinant is 1
    D[-1] = -D.prod()
    H = (D*H.T).T
    return H
Author: ktraunmueller | Project: Computable | Lines: 27 | Source file: test_decomp.py
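The docstring above describes Stewart's algorithm for drawing a rotation uniformly from SO(n). A minimal usage check, assuming random_rot's free names (eye, ones, normal, sign, sqrt, outer, dot) are imported from numpy as in the original test module:

import numpy as np

Q = random_rot(4)
print(np.allclose(Q @ Q.T, np.eye(4)))    # orthogonal: Q Q^T = I
print(np.isclose(np.linalg.det(Q), 1.0))  # determinant +1, so Q is a proper rotation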
Example 2: gen_wave
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def gen_wave():
    """ Generate a synthetic wave by adding up a few sine waves and some noise
    :return: the final wave
    """
    t = np.arange(0.0, 10.0, 0.01)
    wave1 = sin(2 * 2 * pi * t)
    noise = random.normal(0, 0.1, len(t))
    wave1 = wave1 + noise
    print("wave1", len(wave1))
    wave2 = sin(2 * pi * t)
    print("wave2", len(wave2))
    t_rider = arange(0.0, 0.5, 0.01)
    wave3 = sin(10 * pi * t_rider)
    print("wave3", len(wave3))
    insert = round(0.8 * len(t))
    wave1[insert:insert + 50] = wave1[insert:insert + 50] + wave3
    return wave1 + wave2
Author: aurotripathy | Project: lstm-anomaly-detect | Lines: 19 | Source file: lstm-synthetic-wave-anomaly-detect.py
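A quick usage sketch: plotting the result makes the injected "rider" (the short high-frequency burst added at 80% of the signal) visible. gen_wave's free names (sin, pi, arange, random) are assumed to come from the numpy/pylab imports in the original script.

import matplotlib.pyplot as plt

wave = gen_wave()
plt.plot(wave)
plt.title("synthetic wave with injected anomaly")
plt.show()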
Example 3: test_gaussian2d_fit
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def test_gaussian2d_fit():
    mx0 = 0.1
    my0 = 0.9
    sigx0 = 0.4
    sigy0 = 0.25
    Size = 500
    sx = R.normal(size=Size, loc=mx0, scale=sigx0)
    sy = R.normal(size=Size, loc=my0, scale=sigy0)
    mux, sigmax, muy, sigmay = gaussian2d_fit(sx, sy)
    plot(sx, sy, 'o', alpha=0.2, mew=0)
    X, Y = np.mgrid[sx.min()-1:sx.max()+1:200j, sy.min()-1:sy.max()+1:200j]

    def gauss2d(X, Y, mx, my, sigx, sigy):
        return np.exp(-((X-mx)**2)/(2*sigx**2))*np.exp(-((Y-my)**2)/(2*sigy**2))

    contour(X, Y, gauss2d(X, Y, mux, muy, sigmax, sigmay))
    plot(mx0, my0, 'ok', mew=0, ms=10)
    plot(mux, muy, 'x', mew=2, ms=10, color='green')
Author: tritemio | Project: FRETBursts | Lines: 25 | Source file: gaussian_fitting.py
Example 4: test_gaussian_fit
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def test_gaussian_fit():
    m0 = 0.1
    s0 = 0.4
    size = 500
    s = R.normal(size=size, loc=m0, scale=s0)
    #s = s[s<0.4]
    mu, sig = gaussian_fit(s)
    mu1, sig1 = S.norm.fit(s)
    mu2, sig2 = gaussian_fit_ml(s)
    print("ECDF ", mu, sig)
    print("ML ", mu1, sig1)
    print("ML (manual)", mu2, sig2)
    H = np.histogram(s, bins=20, density=True)
    h = H[0]
    bw = H[1][1] - H[1][0]
    #bins_c = H[1][:-1]+0.5*bw
    bar(H[1][:-1], H[0], bw, alpha=0.3)
    x = np.r_[s.min()-1:s.max()+1:200j]
    plot(x, normpdf(x, m0, s0), lw=2, color='grey')
    plot(x, normpdf(x, mu, sig), lw=2, color='r', alpha=0.5)
    plot(x, normpdf(x, mu1, sig1), lw=2, color='b', alpha=0.5)
Author: tritemio | Project: FRETBursts | Lines: 27 | Source file: gaussian_fitting.py
Example 5: step
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def step(self):
    #self.r *= BRANCH_DIMINISH
    self.r = self.r - self.tree.branch_diminish
    angle = normal()*self.tree.branch_angle_max
    #da = (1.-1./((self.g+1)**SEARCH_ANGLE_EXP))*angle
    #da = ((1./(ONE + INIT_BRANCH - self.r))**SEARCH_ANGLE_EXP)*angle
    #da = (1.-1./(ONE + INIT_BRANCH - self.r)**SEARCH_ANGLE_EXP)*angle
    scale = self.tree.one+self.tree.root_r-self.r
    da = (1.+scale/self.tree.root_r)**self.tree.branch_angle_exp
    self.a += da*angle
    dx = cos(self.a)*self.tree.stepsize
    dy = sin(self.a)*self.tree.stepsize
    self.x += dx
    self.y += dy
    self.i += 1
Author: inconvergent | Project: tree | Lines: 23 | Source file: tree.py
Example 6: _test_guassian_comparison
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def _test_guassian_comparison():
    '''
    Method to test the _compare_gaussians function
    '''
    size = 100
    dist1 = normal(loc=0, scale=1, size=size)
    dist2 = normal(loc=0.1, scale=0.9, size=size)
    assert ptest._compare_gaussians(dist1, dist2) == True, "The input distributions are similar."
    dist2 = normal(loc=5, scale=1, size=size)
    assert ptest._compare_gaussians(dist1, dist2) == False, "The input distributions are not similar."
    dist2 = normal(loc=5, scale=5, size=size)
    assert ptest._compare_gaussians(dist1, dist2) == False, "The input distributions are not similar."
Author: shagunsodhani | Project: pregel | Lines: 18 | Source file: test.py
Example 7: pw_linear
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def pw_linear(n_samples=200, n_features=1, n_bkps=3, noise_std=None):
    """
    Return piecewise linear signal and the associated changepoints.

    Args:
        n_samples (int, optional): signal length
        n_features (int, optional): number of covariates
        n_bkps (int, optional): number of change points
        noise_std (float, optional): noise std. If None, no noise is added

    Returns:
        tuple: signal of shape (n_samples, n_features+1), list of breakpoints
    """
    covar = normal(size=(n_samples, n_features))
    linear_coeff, bkps = pw_constant(n_samples=n_samples,
                                     n_bkps=n_bkps,
                                     n_features=n_features,
                                     noise_std=None)
    var = np.sum(linear_coeff * covar, axis=1)
    if noise_std is not None:
        var += normal(scale=noise_std, size=var.shape)
    signal = np.c_[var, covar]
    return signal, bkps
Author: deepcharles | Project: ruptures | Lines: 25 | Source file: pw_linear.py
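A usage sketch for the function above: the response is a piecewise linear combination of Gaussian covariates, so the returned signal stacks the response in column 0 and the covariates after it. The import path below assumes the ruptures.datasets module and may differ between ruptures versions.

from ruptures.datasets import pw_linear  # path assumed; check your ruptures version

signal, bkps = pw_linear(n_samples=200, n_features=2, n_bkps=3, noise_std=1.0)
print(signal.shape)  # (200, 3): response in column 0, covariates in the rest
print(bkps)          # change-point indices, the last one equal to n_samples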
Example 8: _create_regression_table
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def _create_regression_table(nrow, id_vars=None, seed=1234, true_label='target', pred_label='p_target'):
    nr.seed(seed)
    mean_value = nr.normal(loc=10, scale=2, size=(nrow, 1))
    error = nr.normal(loc=0, scale=1, size=(nrow, 1))
    true_value = mean_value + error
    regression_matrix = np.hstack((true_value, mean_value))
    regression_matrix = np.abs(regression_matrix)
    colnames = [true_label, pred_label]
    if id_vars is not None:
        if not isinstance(id_vars, list):
            id_vars = [id_vars]
        ncol = len(id_vars)
        id_matrix = _create_id_matrix(nrow, ncol)
        regression_matrix = np.hstack((regression_matrix, id_matrix))
        colnames = colnames + id_vars
    return pd.DataFrame(regression_matrix, columns=colnames)
Author: sassoftware | Project: python-dlpy | Lines: 21 | Source file: test_metrics.py
Example 9: set_noise_function
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def set_noise_function(self, proportional=0.0, absolute=0.0):
    '''
    Adds noise to the function, with the formula::

        c'(c,x) = c (1 + s_p p) + s_a a

    where the s_i are Gaussian random variables, p is the proportional noise factor,
    a is the absolute noise factor, and c is the cost before noise is added.
    The uncertainty is then::

        u = sqrt((c p)^2 + a^2)

    Keyword Args:
        proportional (Optional [float]): the proportional factor. Defaults to 0.
        absolute (Optional [float]): the absolute factor. Defaults to 0.
    '''
    self.noise_prop = proportional
    self.noise_abs = absolute
    self.noise_function = lambda p, c, u: (c*(1 + nr.normal()*self.noise_prop) + nr.normal()*self.noise_abs,
                                           np.sqrt((c*self.noise_prop)**2 + (self.noise_abs)**2))
Author: michaelhush | Project: M-LOOP | Lines: 24 | Source file: testing.py
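The docstring defines the noise model c' = c(1 + s_p p) + s_a a with reported uncertainty u = sqrt((c p)^2 + a^2). A standalone sketch of that formula, independent of the M-LOOP class (the helper name noisy_cost is hypothetical):

import numpy as np
from numpy import random as nr

def noisy_cost(c, proportional=0.0, absolute=0.0):
    """Return (noisy cost, uncertainty) for a clean cost c, per the formula above."""
    noisy = c*(1 + nr.normal()*proportional) + nr.normal()*absolute
    uncertainty = np.sqrt((c*proportional)**2 + absolute**2)
    return noisy, uncertainty

print(noisy_cost(2.0, proportional=0.05, absolute=0.1))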
Example 10: make_bd
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def make_bd(self):
    "Make a set of 'shaped' random #'s for particle brightness deltas (bd)"
    self.bd = concatenate((
        # These values will dim the particles
        random.normal(
            self.bd_mean - self.bd_mu, self.bd_sigma, 16).astype(int),
        # These values will brighten the particles
        random.normal(
            self.bd_mean + self.bd_mu, self.bd_sigma, 16).astype(int)),
        axis=0)
Author: ManiacalLabs | Project: BiblioPixelAnimations | Lines: 12 | Source file: init.py
Example 11: make_vel
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def make_vel(self):
    "Make a set of velocities to be randomly chosen for emitted particles"
    self.vel = random.normal(self.vel_mu, self.vel_sigma, 16)
    # Make sure nothing's slower than 1/8 pixel / step
    for i, vel in enumerate(self.vel):
        if abs(vel) < 0.125 / self._size:
            if vel < 0:
                self.vel[i] = -0.125 / self._size
            else:
                self.vel[i] = 0.125 / self._size
Author: ManiacalLabs | Project: BiblioPixelAnimations | Lines: 12 | Source file: init.py
Example 12: setup_method
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def setup_method(self, method):
    import matplotlib as mpl
    mpl.rcdefaults()

    self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1()
    self.mpl_ge_2_1_0 = plotting._compat._mpl_ge_2_1_0()
    self.mpl_ge_2_2_0 = plotting._compat._mpl_ge_2_2_0()
    self.mpl_ge_2_2_2 = plotting._compat._mpl_ge_2_2_2()
    self.mpl_ge_3_0_0 = plotting._compat._mpl_ge_3_0_0()

    self.bp_n_objects = 7
    self.polycollection_factor = 2
    self.default_figsize = (6.4, 4.8)
    self.default_tick_position = 'left'

    n = 100
    with tm.RNGContext(42):
        gender = np.random.choice(['Male', 'Female'], size=n)
        classroom = np.random.choice(['A', 'B', 'C'], size=n)
        self.hist_df = DataFrame({'gender': gender,
                                  'classroom': classroom,
                                  'height': random.normal(66, 4, size=n),
                                  'weight': random.normal(161, 32, size=n),
                                  'category': random.randint(4, size=n)})

    self.tdf = tm.makeTimeDataFrame()
    self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
                                "B": np.random.uniform(size=20),
                                "C": np.arange(20) + np.random.uniform(size=20)})
Author: Frank-qlu | Project: recruit | Lines: 34 | Source file: common.py
Example 13: tgauss_old
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def tgauss_old(mu=0, sigma=1, trunc=2):
    if sigma == 0:
        return mu
    err = normal(mu, sigma, size=50)
    err_ok = err[abs(err) < trunc*sigma]
    return err_ok[0]
Author: ocelot-collab | Project: ocelot | Lines: 8 | Source file: errors.py
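The helper above draws 50 samples and returns the first one inside the truncation window, which raises an IndexError in the unlikely case that every draw falls outside it. A more robust sketch using rejection sampling (the name tgauss_rejection is hypothetical, and it truncates around mu, which appears to be the intent):

from numpy.random import normal

def tgauss_rejection(mu=0.0, sigma=1.0, trunc=2.0):
    if sigma == 0:
        return mu
    while True:  # redraw until the sample falls within +/- trunc*sigma of mu
        x = normal(mu, sigma)
        if abs(x - mu) < trunc * sigma:
            return x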
Example 14: create_dataset
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def create_dataset(num):
    dataset = DataFrame(columns=['x','y'])
    for i in range(num):
        x = float(i)/float(num-1)
        y = np.sin(2*np.pi*x) + normal(scale=0.3)
        dataset = dataset.append(Series([x,y], index=['x','y']),
                                 ignore_index=True)
    return dataset

# Compute the root mean square error
Author: enakai00 | Project: ml4se | Lines: 12 | Source file: 02-square_error.py
Example 15: create_dataset
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def create_dataset(num):
    dataset = DataFrame(columns=['x','y'])
    for i in range(num):
        x = float(i)/float(num-1)
        y = np.sin(2.0*np.pi*x) + normal(scale=0.3)
        dataset = dataset.append(Series([x,y], index=['x','y']),
                                 ignore_index=True)
    return dataset

# Compute the estimated curve based on the posterior distribution, along with the posterior mean and variance
Author: enakai00 | Project: ml4se | Lines: 12 | Source file: 08-bayes_regression.py
Example 16: create_dataset
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def create_dataset(num):
    dataset = DataFrame(columns=['x','y'])
    for i in range(num):
        x = float(i)/float(num-1)
        y = np.sin(2*np.pi*x) + normal(scale=0.3)
        dataset = dataset.append(Series([x,y], index=['x','y']),
                                 ignore_index=True)
    return dataset

# Compute the maximum log likelihood
Author: enakai00 | Project: ml4se | Lines: 12 | Source file: 03-maximum_likelihood.py
Example 17: get_data
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def get_data(self, x_stride=1, y_stride=1):
    mult = array(1, dtype=self.dtype)
    if self.dtype in [complex64, complex128]:
        mult = array(1+1j, dtype=self.dtype)
    from numpy.random import normal
    alpha = array(1., dtype=self.dtype) * mult
    beta = array(1., dtype=self.dtype) * mult
    a = normal(0., 1., (3, 3)).astype(self.dtype) * mult
    x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult
    y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult
    return alpha, beta, a, x, y
Author: ktraunmueller | Project: Computable | Lines: 13 | Source file: test_fblas.py
Example 18: get_data
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def get_data(self, x_stride=1, y_stride=1):
    mult = array(1, dtype=self.dtype)
    if self.dtype in [complex64, complex128]:
        mult = array(1+1j, dtype=self.dtype)
    from numpy.random import normal, seed
    seed(1234)
    alpha = array(1., dtype=self.dtype) * mult
    beta = array(1., dtype=self.dtype) * mult
    a = normal(0., 1., (3, 3)).astype(self.dtype) * mult
    x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult
    y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult
    return alpha, beta, a, x, y
Author: ktraunmueller | Project: Computable | Lines: 14 | Source file: test_fblas.py
Example 19: make_buzzy_based_simulated_labeler
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def make_buzzy_based_simulated_labeler(treat_strength, con_strength, noise_level, setting="simple", seed=0):
    # hardcode probability of theorem given buzzy / not_buzzy
    theorem_given_buzzy_probs = np.array([0.27, 0.07], dtype=np.float32)

    np.random.seed(seed)
    all_noise = np.array(random.normal(0, 1, 12000), dtype=np.float32)
    all_threshholds = np.array(random.uniform(0, 1, 12000), dtype=np.float32)

    def labeler(data):
        buzzy = data['buzzy_title']
        index = data['index']
        treatment = data['theorem_referenced']
        treatment = tf.cast(treatment, tf.float32)
        confounding = 3.0*(tf.gather(theorem_given_buzzy_probs, buzzy) - 0.25)
        noise = tf.gather(all_noise, index)

        y, y0, y1 = outcome_sim(treat_strength, con_strength, noise_level, treatment, confounding, noise, setting=setting)
        simulated_prob = tf.nn.sigmoid(y)
        y0 = tf.nn.sigmoid(y0)
        y1 = tf.nn.sigmoid(y1)
        threshold = tf.gather(all_threshholds, index)
        simulated_outcome = tf.cast(tf.greater(simulated_prob, threshold), tf.int32)

        return {**data, 'outcome': simulated_outcome, 'y0': y0, 'y1': y1}

    return labeler
Author: blei-lab | Project: causal-text-embeddings | Lines: 29 | Source file: dataset.py
Example 20: make_propensity_based_simulated_labeler
# Required import: from numpy import random [as alias]
# Or: from numpy.random import normal [as alias]
def make_propensity_based_simulated_labeler(treat_strength, con_strength, noise_level,
                                            base_propensity_scores, example_indices, exogeneous_con=0.,
                                            setting="simple", seed=42):
    np.random.seed(seed)
    all_noise = random.normal(0, 1, base_propensity_scores.shape[0]).astype(np.float32)
    all_threshholds = np.array(random.uniform(0, 1, base_propensity_scores.shape[0]), dtype=np.float32)

    extra_confounding = random.normal(0, 1, base_propensity_scores.shape[0]).astype(np.float32)
    all_propensity_scores = expit((1.-exogeneous_con)*logit(base_propensity_scores) + exogeneous_con * extra_confounding).astype(np.float32)
    all_treatments = random.binomial(1, all_propensity_scores).astype(np.int32)

    # indices in dataset refer to locations in entire corpus,
    # but propensity scores will typically only include a subset of the examples
    reindex_hack = np.zeros(12000, dtype=np.int32)
    reindex_hack[example_indices] = np.arange(example_indices.shape[0], dtype=np.int32)

    def labeler(data):
        index = data['index']
        index_hack = tf.gather(reindex_hack, index)
        treatment = tf.gather(all_treatments, index_hack)
        confounding = 3.0 * (tf.gather(all_propensity_scores, index_hack) - 0.25)
        noise = tf.gather(all_noise, index_hack)

        y, y0, y1 = outcome_sim(treat_strength, con_strength, noise_level, tf.cast(treatment, tf.float32), confounding, noise, setting=setting)
        simulated_prob = tf.nn.sigmoid(y)
        y0 = tf.nn.sigmoid(y0)
        y1 = tf.nn.sigmoid(y1)
        threshold = tf.gather(all_threshholds, index)
        simulated_outcome = tf.cast(tf.greater(simulated_prob, threshold), tf.int32)

        return {**data, 'outcome': simulated_outcome, 'y0': y0, 'y1': y1, 'treatment': treatment}

    return labeler
Author: blei-lab | Project: causal-text-embeddings | Lines: 36 | Source file: dataset.py