10 examples of 'load dataset in python' in Python

Every line of the 'load dataset in python' code snippets below is scanned for vulnerabilities by our machine-learning engine, which combs millions of open-source libraries to help ensure your Python code is secure.

All examples are scanned by Snyk Code

By copying the Snyk Code Snippets you agree to
this disclaimer
def load(*, path='./dataset3'):
    """Build a Dataset from every .json file found directly under *path*.

    Each JSON file is decoded with the module's ``object_hook`` and every
    decoded item is wrapped in an ``Info`` before being collected.
    """
    records = []
    for name in os.listdir(path):
        # skip anything that is not a .json file
        if os.path.splitext(name)[1] != '.json':
            continue
        with open(f'{path}/{name}', 'r') as handle:
            records.extend(Info(entry)
                           for entry in json.load(handle, object_hook=object_hook))
    return Dataset(records)
Important

Use secure code every time

Secure your code as it's written. Use Snyk Code to scan source code in minutes – no build needed – and fix issues immediately. Enable Snyk Code

def load_dataset(data_file=('%s/%s' % (DATA_DIR, DATA_FILE))):
    """Load the test split from *data_file* as float32 numpy arrays.

    Returns a ``(X_test, Y_test)`` tuple and prints both shapes.
    """
    raw = utils_backdoor.load_dataset(data_file, keys=['X_test', 'Y_test'])

    x_test = np.array(raw['X_test'], dtype='float32')
    y_test = np.array(raw['Y_test'], dtype='float32')

    print('X_test shape %s' % str(x_test.shape))
    print('Y_test shape %s' % str(y_test.shape))

    return x_test, y_test
def load_dataset():
    """Load the Dataset located at the module-level ``data_path``.

    Returns:
        Dataset: the loaded dataset, guaranteed to contain at least one sample.

    Raises:
        ValueError: if the dataset has no samples. An explicit raise is used
            instead of ``assert`` because asserts are stripped under
            ``python -O`` and the emptiness check would silently vanish.
    """
    global data_path
    dataset = Dataset(data_path, verbose=True)
    if len(dataset.samples) == 0:
        raise ValueError('Dataset at %r contains no samples' % (data_path,))
    return dataset
def load_pandas():
    """Return the co2 dataset with ``data.data`` replaced by a weekly DataFrame.

    Builds a weekly (Saturday-anchored) DatetimeIndex starting at the first
    date in the raw data and wraps the 'co2' column in a pandas DataFrame.
    """
    data = load()
    # ``pd.DatetimeIndex(start=..., periods=..., freq=...)`` was deprecated in
    # pandas 0.24 and removed in 1.0; ``pd.date_range`` is the supported
    # constructor for a fixed-frequency index. The start value is a bytes
    # object (e.g. b'19580329') which pandas parses once decoded, so the old
    # ``format='%Y%m%d'`` hint is unnecessary.
    index = pd.date_range(start=data.data['date'][0].decode('utf-8'),
                          periods=len(data.data), freq='W-SAT')
    dataset = pd.DataFrame(data.data['co2'], index=index, columns=['co2'])
    # NOTE: this is how I got the missing values in co2.csv
    # new_index = pd.DatetimeIndex(start='1958-3-29', end=index[-1],
    #                              freq='W-SAT')
    # data.data = dataset.reindex(new_index)
    data.data = dataset
    return data
def load_dataset(data_arg, wvpath, embedding_size):
    """Create the data manager and its word-vector embedding matrix.

    Returns a ``(data_manager, embedding_matrix)`` tuple.
    """
    word_vectors = wordvec_class(wvpath)
    data_manager = data_class(**data_arg)
    matrix = word_vectors.load_matrix(embedding_size, data_manager.vocab_list)
    return data_manager, matrix
def load(self, *args, **kwargs):
    """Delegate loading to ``self._load`` and report success in green."""
    self._load(*args, **kwargs)
    message = " [*] Dataset '%s' loaded." % self.name
    click.secho(message, fg="green")
def get_dataset():
    """Return the annotated dataset, caching it as a pickle on first build.

    On a cache hit the pickle is read back directly; otherwise the dataset is
    assembled from ``read_json_formatted()`` and written to the cache.
    """
    cache_path = 'dataset/annoted_dataset.pkl'
    if os.path.isfile(cache_path):
        return pd.read_pickle(cache_path)

    dataset = []
    for row in read_json_formatted():
        # row['target'] holds the aspect terms, row['polarity'] their labels;
        # pair them up position-by-position after lowercasing.
        aspects = [term.lower() for term in row['target']]
        labels = [label.lower() for label in row['polarity']]
        dataset.append({
            'sentence': row['sentence'],
            'meta': dict(zip(aspects, labels)),
        })
    pd.to_pickle(dataset, cache_path)
    return dataset
def load_data(self, param, dates):
    """Load the AQS dataframe for the given parameter over *dates*.

    Args:
        param: one of the supported AQS parameter codes (e.g. 'PM2.5', 'CO').
        dates: date range forwarded unchanged to the per-parameter loader.

    Returns:
        The dataframe produced by the matching ``load_aqs_*_data`` method.

    Raises:
        ValueError: if *param* is not a recognised code. (The original
        if/elif chain left ``df`` unbound for unknown params, producing a
        confusing UnboundLocalError instead of a clear message.)
    """
    # Name-based dispatch keeps the mapping in one place and avoids a long
    # if/elif chain; getattr binds the method only for the selected code.
    loaders = {
        'PM2.5': 'load_aqs_pm25_data',
        'PM10': 'load_aqs_pm10_data',
        'SPEC': 'load_aqs_spec_data',
        'CO': 'load_aqs_co_data',
        'OZONE': 'load_aqs_ozone_data',
        'SO2': 'load_aqs_so2_data',
        'VOC': 'load_aqs_voc_data',
        'NONOXNOY': 'load_aqs_nonoxnoy_data',
        'WIND': 'load_aqs_wind_data',
        'TEMP': 'load_aqs_temp_data',
        'RHDP': 'load_aqs_rhdp_data',
    }
    try:
        loader_name = loaders[param]
    except KeyError:
        raise ValueError('Unknown AQS parameter: %r' % (param,)) from None
    return getattr(self, loader_name)(dates)
def load(self):
    """Open the file configured at ``self.path`` via the instance's hook."""
    target = self.path
    self.open_file(target)
def load_data(dataset=""):
    """Load pickled train/test splits (and an optional model) for *dataset*.

    Looks under ``data/<dataset>/`` for ``train.pkl.gz``, ``test.pkl.gz`` and
    an optional ``model.pkl.gz``, each a gzip-compressed pickle.

    Returns:
        tuple: ``(train, test, model)`` where *model* is None when no
        ``model.pkl.gz`` is present.

    Raises:
        FileNotFoundError: if the dataset directory does not exist. A raise
            is used instead of ``assert``, which is stripped under
            ``python -O``.
    """
    # stdlib replacement for the Python-2-only cPickle module
    import pickle

    base_dir = os.path.join("data", dataset)
    if not os.path.exists(base_dir):
        raise FileNotFoundError("Could not find data directory: " + base_dir)

    def _load_gz(filename):
        # One gzip + unpickle round trip, shared by all three files.
        with gzip.open(os.path.join(base_dir, filename), "rb") as f:
            return pickle.load(f)

    model_path = os.path.join(base_dir, "model.pkl.gz")
    model = _load_gz("model.pkl.gz") if os.path.exists(model_path) else None
    train = _load_gz("train.pkl.gz")
    test = _load_gz("test.pkl.gz")
    return train, test, model

Related snippets