10 examples of 'linear_model.LinearRegression()' in Python

Every line of these 'linear_model.LinearRegression()' code snippets is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

All examples are scanned by Snyk Code

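Note that none of the snippets below actually call scikit-learn's linear_model.LinearRegression directly. For reference, here is a minimal, self-contained sketch of that API; the synthetic data and seed are illustrative only:

import numpy as np
from sklearn import linear_model

# Illustrative synthetic data: y = 3x + 2 plus a little noise
rng = np.random.default_rng(0)
X = rng.uniform(0, 10, size=(100, 1))
y = 3 * X[:, 0] + 2 + rng.normal(0, 0.5, size=100)

# Fit ordinary least squares with scikit-learn
model = linear_model.LinearRegression()
model.fit(X, y)

print(model.coef_, model.intercept_)  # approximately [3.] and 2
print(model.predict([[5.0]]))         # approximately [17.]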
def Linear_regression():
    # get train data
    data = np.loadtxt('data.csv', delimiter=',')

    # define hyperparameters for the model y = mx + b
    # learning_rate controls the size of each gradient update
    # num_iter is the number of gradient descent iterations
    learning_rate = 0.001
    initial_b = 0.0
    initial_m = 0.0
    num_iter = 1000

    # print initial b, m, and error before training
    print('initial variables:\n initial_b = {0}\n initial_m = {1}\n error at start = {2}\n'
          .format(initial_b, initial_m, compute_error(initial_b, initial_m, data)))

    # optimize b and m
    [b, m] = optimizer(data, initial_b, initial_m, learning_rate, num_iter)

    # print final b, m, and error after training
    print('final formula parameters:\n b = {0}\n m = {1}\n error at end = {2}\n'
          .format(b, m, compute_error(b, m, data)))

    # plot result
    plot_data(data, b, m)
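This snippet relies on compute_error, optimizer, and plot_data, which are defined elsewhere in the original project. A minimal sketch of what the first two typically look like for y = mx + b (hypothetical implementations, not the original author's code):

import numpy as np

def compute_error(b, m, data):
    # Hypothetical helper: mean squared error of y = mx + b over all rows
    x, y = data[:, 0], data[:, 1]
    return np.mean((y - (m * x + b)) ** 2)

def optimizer(data, b, m, learning_rate, num_iter):
    # Hypothetical helper: plain batch gradient descent on the MSE
    x, y = data[:, 0], data[:, 1]
    n = float(len(data))
    for _ in range(num_iter):
        residual = y - (m * x + b)
        b -= learning_rate * (-2.0 / n) * np.sum(residual)
        m -= learning_rate * (-2.0 / n) * np.sum(residual * x)
    return [b, m]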
def test_predict_2(self):
    X = np.array([[3.5]])
    m, n = X.shape
    intercept = np.ones((m, 1), dtype=np.int64)
    X = np.append(intercept, X, axis=1)
    theta = np.zeros((n + 1, 1), dtype=np.int64)

    assert_allclose([[0]],
                    predict(X, theta),
                    rtol=0, atol=0.001)
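The predict function under test is defined elsewhere in the project. With an all-zero theta the prediction for any input is 0, which is exactly what the assertion checks; a plausible implementation for a linear model whose bias column is already appended (hypothetical) is:

import numpy as np

def predict(X, theta):
    # Hypothetical: linear hypothesis, one prediction per row of X
    return X @ theta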
def linear_regression(x, y):
    """
    NOTE: Perform linear regression
    Input
    - x: 1d timeseries (time)
    - y: time varying 2d field (time, lat, lon)
    Output
    - slope: 2d array, spatial map, linear regression slope on each grid
    - intercept: 2d array, spatial map, linear regression intercept on each grid
    """
    # get original global dimensions
    lat = y.getLatitude()
    lon = y.getLongitude()
    # Convert 3d (time, lat, lon) to 2d (time, lat*lon) so polyfit can be applied
    im = y.shape[2]
    jm = y.shape[1]
    y_2d = y.reshape(y.shape[0], jm * im)
    # Linear regression
    slope_1d, intercept_1d = np.polyfit(x, y_2d, 1)
    # Convert the numpy arrays back to cdms2 variables
    slope = MV2.array(slope_1d.reshape(jm, im))
    intercept = MV2.array(intercept_1d.reshape(jm, im))
    # Set lat/lon coordinates
    slope.setAxis(0, lat)
    slope.setAxis(1, lon)
    slope.mask = y.mask
    intercept.setAxis(0, lat)
    intercept.setAxis(1, lon)
    intercept.mask = y.mask
    # return result
    return slope, intercept
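The reshape trick works because np.polyfit accepts a 2-D y and fits one polynomial per column, returning coefficients of shape (deg + 1, n_columns). A quick pure-NumPy illustration of that behavior:

import numpy as np

x = np.arange(10, dtype=float)
y_2d = np.stack([2 * x + 1, -0.5 * x + 3], axis=1)  # two series as columns

slopes, intercepts = np.polyfit(x, y_2d, 1)
print(slopes)      # approximately [ 2.  -0.5]
print(intercepts)  # approximately [ 1.   3. ]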
def test(self):
    for t in self.test_cases:
        h = self.__hypothesis(t[0:-1])
        print("H = %f, ANS = %d" % (h, t[self.__MAX_FEATURE_CNT]))
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
  """Creates linear regression TensorFlow subgraph.

  Args:
    x: tensor or placeholder for input features.
    y: tensor or placeholder for labels.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows. If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
  """
  with vs.variable_scope('linear_regression'):
    scope_name = vs.get_variable_scope().name
    summary.histogram('%s.x' % scope_name, x)
    summary.histogram('%s.y' % scope_name, y)
    dtype = x.dtype.base_dtype
    y_shape = y.get_shape()
    if len(y_shape) == 1:
      output_shape = 1
    else:
      output_shape = y_shape[1]
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape], dtype=dtype)
      bias = vs.get_variable('bias', [output_shape], dtype=dtype)
    else:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
      bias = vs.get_variable(
          'bias', [output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
    summary.histogram('%s.weights' % scope_name, weights)
    summary.histogram('%s.bias' % scope_name, bias)
    return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
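This function comes from TensorFlow's old tf.contrib.learn internals (vs, summary, init_ops, and losses_ops are aliased internal modules). In current TensorFlow the same model is usually expressed with Keras; a rough present-day equivalent might look like this (a sketch, not the library's code):

import numpy as np
import tensorflow as tf

# A single dense layer computes x @ weights + bias, trained with MSE
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer='sgd', loss='mse')

# Illustrative fit on synthetic data
x = np.random.rand(100, 3).astype('float32')
y = x @ np.array([[1.0], [2.0], [3.0]], dtype='float32')
model.fit(x, y, epochs=5, verbose=0)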
def Logistic_Regression(X, Y, alpha, theta, num_iters):
    m = len(Y)
    for x in range(num_iters):
        theta = Gradient_Descent(X, Y, theta, m, alpha)
        # report progress every 100 iterations
        if x % 100 == 0:
            print('theta ', theta)
            print('cost is ', Cost_Function(X, Y, theta, m))
    Declare_Winner(theta)
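Gradient_Descent, Cost_Function, and Declare_Winner are defined elsewhere in the original script. A minimal sketch of the first two for logistic regression (hypothetical implementations, assuming X, Y, and theta are NumPy arrays):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def Cost_Function(X, Y, theta, m):
    # Hypothetical: cross-entropy cost averaged over m training examples
    h = sigmoid(np.dot(X, theta))
    return -np.mean(Y * np.log(h) + (1 - Y) * np.log(1 - h))

def Gradient_Descent(X, Y, theta, m, alpha):
    # Hypothetical: one batch gradient step on the cross-entropy cost
    h = sigmoid(np.dot(X, theta))
    gradient = np.dot(X.T, h - Y) / m
    return theta - alpha * gradient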
def logistic_regression(opytimizer):
    # Instantiating the model
    model = torch.nn.Sequential()

    # Some model parameters
    n_features = 64
    n_classes = 10

    # Adding linear layer
    model.add_module("linear", torch.nn.Linear(
        n_features, n_classes, bias=False))

    # Input variables
    batch_size = 100
    epochs = 100

    # Gathering parameters from Opytimizer
    # Pay close attention to their order when declaring due to their bounds
    learning_rate = opytimizer[0][0]
    momentum = opytimizer[1][0]

    # Declaring the loss function
    loss = torch.nn.CrossEntropyLoss(reduction='mean')

    # Declaring the optimization algorithm
    opt = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)

    # Performing training loop
    for _ in range(epochs):
        # Initial cost as 0.0
        cost = 0.0

        # Calculating the number of batches
        num_batches = len(X_train) // batch_size

        # For every batch
        for k in range(num_batches):
            # Declaring start and end indices for each batch
            start, end = k * batch_size, (k + 1) * batch_size

            # Cost is the loss accumulated from fitting the model
            cost += fit(model, loss, opt,
                        X_train[start:end], Y_train[start:end])

    # Predicting samples from the evaluation set
    preds = predict(model, X_val)

    # Calculating accuracy
    acc = np.mean(preds == Y_val)

    return 1 - acc
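Here fit and predict, along with X_train, Y_train, X_val, and Y_val, come from the surrounding script. A plausible sketch of the two helpers under those assumptions (hypothetical, shown only for context):

import torch

def fit(model, loss, opt, x, y):
    # Hypothetical: one optimization step over a batch; returns the batch loss
    model.train()
    opt.zero_grad()
    output = loss(model(torch.as_tensor(x, dtype=torch.float)),
                  torch.as_tensor(y, dtype=torch.long))
    output.backward()
    opt.step()
    return output.item()

def predict(model, x):
    # Hypothetical: class with the highest score for each sample
    model.eval()
    with torch.no_grad():
        logits = model(torch.as_tensor(x, dtype=torch.float))
    return logits.argmax(dim=1).numpy()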
def linear_regression(feat1, feat2):
    return random.gauss(2 * feat1 + feat2 + 5, 3)
def run_logistic_regression(df):
    # Logistic regression
    X = df['pageviews_cumsum']
    X = sm.add_constant(X)
    y = df['is_conversion']
    logit = sm.Logit(y, X)
    logistic_regression_results = logit.fit()
    print(logistic_regression_results.summary())
    return logistic_regression_results
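The returned results object can then score new observations with its predict method; for example (a usage sketch, assuming the same df):

results = run_logistic_regression(df)
X = sm.add_constant(df['pageviews_cumsum'])
probabilities = results.predict(X)  # P(is_conversion = 1) per row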
def calc_linear_regression(coeff, x):
    # accumulate each feature's contribution, then add the intercept
    result = 0
    for i in range(1, len(coeff)):
        result += x[i - 1] * coeff[i]

    result += coeff[0]
    return result
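The loop above computes coeff[0] plus the dot product of coeff[1:] with x; an equivalent one-liner with NumPy (a sketch, not part of the original) is:

import numpy as np

def calc_linear_regression_np(coeff, x):
    # coeff[0] is the intercept, coeff[1:] are the feature weights
    return coeff[0] + np.dot(coeff[1:], x)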
