Every line of these 'python multiple linear regression' code snippets is scanned for vulnerabilities by our powerful machine learning engine, which combs millions of open source libraries to help ensure your Python code is secure.
def Linear_regression():
    """Fit y = m*x + b to data.csv by gradient descent, report errors, and plot.

    Reads training data from 'data.csv', optimizes slope m and intercept b,
    prints the error before and after optimization, and plots the result.
    """
    # Load training data: comma-separated rows of (x, y) pairs.
    data = np.loadtxt('data.csv', delimiter=',')

    # Hyperparameters: gradient step size, initial line y = m*x + b,
    # and the number of gradient-descent iterations.
    learning_rate = 0.001
    initial_b = 0.0
    initial_m = 0.0
    num_iter = 1000

    # Report the starting parameters and error before training.
    # NOTE: converted from a Python 2 print statement to print() so the
    # script runs under Python 3; the message text is unchanged.
    print('initial variables:\n initial_b = {0}\n intial_m = {1}\n error of begin = {2} \n'
          .format(initial_b, initial_m, compute_error(initial_b, initial_m, data)))

    # Optimize b and m by gradient descent.
    [b, m] = optimizer(data, initial_b, initial_m, learning_rate, num_iter)

    # Report the fitted parameters and final error.
    print('final formula parmaters:\n b = {1}\n m={2}\n error of end = {3} \n'
          .format(num_iter, b, m, compute_error(b, m, data)))

    # Visualize the fitted line against the data.
    plot_data(data, b, m)
def linear_regression(x, y):
    """Per-grid linear regression of a (time, lat, lon) field against time.

    Input
    - x: 1d timeseries (time)
    - y: time varying 2d field (time, lat, lon)
    Output
    - slope: 2d array, spatial map, linear regression slope on each grid
    - intercept: 2d array, spatial map, linear regression intercept on each grid
    """
    # Remember the spatial coordinate axes of the input field.
    lat_axis = y.getLatitude()
    lon_axis = y.getLongitude()
    nlat, nlon = y.shape[1], y.shape[2]
    # Flatten the two spatial dimensions so np.polyfit can fit every
    # grid point in a single vectorized call.
    flattened = y.reshape(y.shape[0], nlat * nlon)
    # A degree-1 fit yields one (slope, intercept) pair per column.
    coeffs = np.polyfit(x, flattened, 1)
    # Restore the spatial shape and wrap the results as cdms2/MV2 variables.
    slope = MV2.array(coeffs[0].reshape(nlat, nlon))
    intercept = MV2.array(coeffs[1].reshape(nlat, nlon))
    # Attach the original lat/lon axes and propagate the input mask.
    for field in (slope, intercept):
        field.setAxis(0, lat_axis)
        field.setAxis(1, lon_axis)
        field.mask = y.mask
    return slope, intercept
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
    """Build a linear-regression TensorFlow subgraph.

    Args:
      x: tensor or placeholder for input features.
      y: tensor or placeholder for labels.
      init_mean: mean for random-normal initialization of weights and bias;
        when None, the variables are created without an explicit initializer
        (the scope's uniform-unit-scaling default applies). Set init_mean and
        init_stddev to 0.0 each for a zero initialization in convex use cases.
      init_stddev: the standard deviation for random-normal initialization.

    Returns:
      Predictions and loss tensors.

    Side effects:
      Creates the variables linear_regression.weights and
      linear_regression.bias, and records histogram summaries of the
      inputs, weights, and bias under the scope name.
    """
    with vs.variable_scope('linear_regression'):
        scope = vs.get_variable_scope().name
        summary.histogram('%s.x' % scope, x)
        summary.histogram('%s.y' % scope, y)
        dtype = x.dtype.base_dtype
        y_shape = y.get_shape()
        # A rank-1 label tensor means a single output unit.
        output_shape = 1 if len(y_shape) == 1 else y_shape[1]
        if init_mean is None:
            # No explicit initializer: defer to the variable scope's default.
            weights = vs.get_variable(
                'weights', [x.get_shape()[1], output_shape], dtype=dtype)
            bias = vs.get_variable('bias', [output_shape], dtype=dtype)
        else:
            initializer = init_ops.random_normal_initializer(
                init_mean, init_stddev, dtype=dtype)
            weights = vs.get_variable(
                'weights', [x.get_shape()[1], output_shape],
                initializer=initializer, dtype=dtype)
            bias = vs.get_variable(
                'bias', [output_shape], initializer=initializer, dtype=dtype)
        summary.histogram('%s.weights' % scope, weights)
        summary.histogram('%s.bias' % scope, bias)
        return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
def test_predict_2(self):
    """Zero parameters must predict (approximately) zero for any input."""
    # One sample with a single feature value.
    features = np.array([[3.5]])
    rows, cols = features.shape
    # Prepend the bias column of ones to form the design matrix.
    bias_col = np.ones((rows, 1), dtype=np.int64)
    design = np.append(bias_col, features, axis=1)
    # All-zero parameter vector (bias term plus one weight per feature).
    theta = np.zeros((cols + 1, 1), dtype=np.int64)

    assert_allclose([[0]],
                    predict(design, theta),
                    rtol=0, atol=0.001)
def test(self):
    """Print the hypothesis value next to the expected answer for every
    stored test case.

    Each case in self.test_cases holds the feature values in all but the
    last position; the expected answer sits at index self.__MAX_FEATURE_CNT.
    """
    for case in self.test_cases:
        # Evaluate the learned hypothesis on the feature portion only.
        h = self.__hypothesis(case[0:-1])
        # NOTE: converted from a Python 2 print statement to print() so
        # this runs under Python 3; the output format is unchanged.
        print("H = %lf, ANS = %d" % (h, case[self.__MAX_FEATURE_CNT]))