Every line of the 'how to read csv file in python without pandas' code snippets below is scanned for vulnerabilities by our powerful machine learning engine, which combs millions of open source libraries to help keep your Python code secure.
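A quick note before the snippets: most of them actually lean on pandas, but the question in the title — reading a CSV file without pandas — is covered by the standard library's csv module alone. A minimal sketch (the file name data.csv is a placeholder):

import csv

# Read a CSV file with a header row using only the standard library.
# newline='' lets the csv module manage line endings itself.
with open('data.csv', newline='', encoding='utf-8') as f:
    reader = csv.DictReader(f)  # maps each row to a {column: value} dict
    rows = list(reader)

for row in rows[:5]:
    print(row)

csv.DictReader keys each row by the header names; plain csv.reader yields lists instead, which suits files without a header line.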
import pandas as pd

def readcsv(filename, header=True):
    # header=None tells pandas the file has no header row to consume
    return pd.read_csv(filename, header=None) if not header else pd.read_csv(filename)
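The header argument is the only moving part above; a self-contained illustration of what it changes (the inline CSV text is made up):

import io
import pandas as pd

csv_text = 'a,b\n1,2\n3,4\n'

# Default: the first line becomes the column names.
print(pd.read_csv(io.StringIO(csv_text)).columns.tolist())               # ['a', 'b']

# header=None: every line is data; columns are auto-named 0, 1, ...
print(pd.read_csv(io.StringIO(csv_text), header=None).columns.tolist())  # [0, 1]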
def _csv_to_pandas_df(filepath,
                      separator=DEFAULT_SEPARATOR,
                      quote_char=DEFAULT_QUOTE_CHARACTER,
                      escape_char=DEFAULT_ESCAPSE_CHAR,
                      contain_headers=True,
                      lines_to_skip=0,
                      date_columns=None,
                      rowIdAndVersionInIndex=True):
    test_import_pandas()
    import pandas as pd

    # DATEs are stored in csv as unix timestamp in milliseconds
    def datetime_millisecond_parser(milliseconds):
        return pd.to_datetime(milliseconds, unit='ms', utc=True)

    if not date_columns:
        date_columns = []

    line_terminator = str(os.linesep)

    df = pd.read_csv(filepath,
                     sep=separator,
                     lineterminator=line_terminator if len(line_terminator) == 1 else None,
                     quotechar=quote_char,
                     escapechar=escape_char,
                     header=0 if contain_headers else None,
                     skiprows=lines_to_skip,
                     parse_dates=date_columns,
                     date_parser=datetime_millisecond_parser)
    if rowIdAndVersionInIndex and "ROW_ID" in df.columns and "ROW_VERSION" in df.columns:
        # combine row-ids (in index) and row-versions (in column 0) to
        # make new row labels consisting of the row id and version
        # separated by a dash.
        zip_args = [df["ROW_ID"], df["ROW_VERSION"]]
        if "ROW_ETAG" in df.columns:
            zip_args.append(df['ROW_ETAG'])

        df.index = row_labels_from_id_and_version(zip(*zip_args))
        del df["ROW_ID"]
        del df["ROW_VERSION"]
        if "ROW_ETAG" in df.columns:
            del df['ROW_ETAG']

    return df
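The distinctive part of that snippet is the date handling: DATE columns arrive as Unix timestamps in milliseconds, so each one goes through pd.to_datetime with unit='ms'. A standalone sketch of the conversion (the sample values are invented):

import pandas as pd

# Unix timestamps in milliseconds, as they would appear in the CSV
millis = pd.Series([1426809600000, 1426896000000])

# unit='ms' reads the integers as milliseconds since the epoch;
# utc=True makes the result timezone-aware
dates = pd.to_datetime(millis, unit='ms', utc=True)
print(dates)
# 0   2015-03-20 00:00:00+00:00
# 1   2015-03-21 00:00:00+00:00

Note that the date_parser argument used above was deprecated in pandas 2.x; on current pandas the same effect comes from converting the columns with pd.to_datetime after the read.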
def test_read_csv():
    io = FileIO()
    filename = os.path.join(os.path.dirname(
        os.path.abspath(__file__)),
        'stock_N225.csv')
    df = io.read_from_csv("N225", filename)

    # .loc replaces the long-deprecated (and since removed) .ix indexer
    result = round(df.loc['2015-03-20', 'Adj Close'], 2)
    expected = 19560.22
    eq_(expected, result)
def _pandas_read_csv(filepath, **kwargs):
    """
    Wrapper function around the Pandas read_csv function.

    :param filepath: The file to read.
    :type filepath: str, StringIO
    :param kwargs: Extra keyword arguments to be applied.
    :return: A pandas DataFrame.
    :rtype: pandas.DataFrame
    """
    try:
        return pd.read_csv(filepath, **kwargs)
    except FileNotFoundError:
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filepath)
    except Exception:
        # re-raise anything else unchanged, keeping the original traceback
        raise
def _dataframe_from_csv(reader, delimiter, with_header, skipspace):
    """Returns csv data as a pandas DataFrame object"""
    sep = delimiter
    header = 0
    if not with_header:
        header = None

    return pd.read_csv(
        reader,
        header=header,
        sep=sep,
        skipinitialspace=skipspace,
        encoding='utf-8-sig'
    )
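The encoding='utf-8-sig' argument is worth calling out: it strips the UTF-8 byte-order mark that Excel often writes at the start of exported CSV files; without it, the BOM ends up glued to the first header name ('\ufeffcol1'). The same trick works without pandas (the file name is a placeholder):

import csv

# utf-8-sig transparently drops a leading BOM if one is present,
# so the first fieldname comes back clean
with open('excel_export.csv', newline='', encoding='utf-8-sig') as f:
    reader = csv.DictReader(f, skipinitialspace=True)
    print(reader.fieldnames)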
import re
from os import path

def get_data_from_file(csv_content, files):
    # Get description or fix from the file reference parsed in JsonToCsv class
    data = ''
    number_from_file = re.search(r'\d+', csv_content)  # raw string avoids an invalid-escape warning
    if not number_from_file:
        return data
    else:
        file_number = number_from_file.group()

    if file_number in files['filenames']:
        filename = files['filenames'][file_number]
    else:
        return data

    with open(path.join(files['path'], filename)) as file_object:
        data = file_object.read()

    return data
def pandas_read_csv(self, usecols=None, **kwargs):
    """ Use pandas.read_csv with the right keyword arguments

    In particular we know what dtypes should be, which columns are dates,
    etc...
    """
    dtypes, dates = dshape_to_pandas(self.schema)

    if usecols:
        if builtins.all(isinstance(c, int) for c in usecols):
            usecols = get(usecols, self.columns)
        dates = [name for name in dates if name in usecols]

    header = kwargs.pop('header', self.header)
    # honour a header passed in through kwargs instead of
    # unconditionally re-deriving it from self.header
    header = 0 if header else None

    result = pd.read_csv(self.path,
                         names=kwargs.pop('names', self.columns),
                         usecols=usecols,
                         compression={'gz': 'gzip',
                                      'bz2': 'bz2'}.get(ext(self.path)),
                         dtype=kwargs.pop('dtype', dtypes),
                         parse_dates=kwargs.pop('parse_dates', dates),
                         encoding=kwargs.pop('encoding', self.encoding),
                         header=header,
                         **merge(kwargs, clean_dialect(self.dialect)))

    return result
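For context, a minimal sketch of the pattern this method wraps — handing pd.read_csv explicit dtypes, date columns, and a compression hint (the file name and column names are invented for illustration):

import pandas as pd

# Explicit dtypes skip pandas' type inference; parse_dates converts the
# named columns to datetime64 during the read; compression='gzip' would
# normally be inferred from a .gz extension anyway
df = pd.read_csv(
    'trades.csv.gz',
    dtype={'symbol': 'object', 'qty': 'int64', 'price': 'float64'},
    parse_dates=['timestamp'],
    compression='gzip',
)
print(df.dtypes)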
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
    """csv.py doesn't do Unicode; encode temporarily as UTF-8.

    (Python 2 recipe: the csv module there only handles byte strings.)
    """
    csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
                            dialect=dialect, **kwargs)
    for row in csv_reader:
        # decode UTF-8 back to Unicode, cell by cell:
        yield [unicode(cell, 'utf-8') for cell in row]
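That is the classic Python 2 workaround from the old csv module docs. In Python 3 the csv module reads str natively, so the encode/decode round-trip disappears entirely (file name assumed):

import csv

# Python 3: open in text mode with an explicit encoding and newline='',
# and csv.reader yields Unicode str cells directly
with open('data.csv', newline='', encoding='utf-8') as f:
    for row in csv.reader(f, dialect=csv.excel):
        print(row)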
import pandas as pd

def readFile():
    # Format of the csv file: a vector of 13 features plus a label giving the
    # condition of the sample, hc/pc: healthy case / Parkinson's case
    names = ['Feature1', 'Feature2', 'Feature3', 'Feature4', 'Feature5',
             'Feature6', 'Feature7', 'Feature8', 'Feature9', 'Feature10',
             'Feature11', 'Feature12', 'Feature13', 'Label']

    # path to the samples; they come from healthy subjects and from subjects
    # suffering from Parkinson's disease
    path = 'mfcc_multiclass.txt'
    # read the file in csv format
    data = pd.read_csv(path, names=names)

    # returns a DataFrame of shape (2103, 14): rows are the samples and
    # columns are the features named above
    return data
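Since the page's topic is reading CSV without pandas, the same 13-features-plus-label file can be loaded with the standard library alone. A sketch under the assumption (implied by the pd.read_csv call above) that the file is plain comma-separated with no header row:

import csv

def read_file_stdlib(path='mfcc_multiclass.txt'):
    """Read the 13-feature + label samples without pandas."""
    samples = []
    with open(path, newline='') as f:
        for row in csv.reader(f):
            features = [float(value) for value in row[:13]]  # 13 numeric features
            label = row[13]                                  # hc/pc label
            samples.append((features, label))
    return samples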