10 examples of 'python download csv from url' in Python

Every line of these 'python download csv from url' code snippets is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

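Before the individual snippets, here is a minimal baseline sketch of the pattern most of them share: fetch the CSV over HTTP with requests and parse the rows with the standard csv module. The function name and URL are placeholders, not taken from any snippet below.

import csv
import io
import requests

def fetch_csv_rows(csv_url):
    # Download the CSV and return its rows as lists of strings
    response = requests.get(csv_url, timeout=30)
    response.raise_for_status()
    return list(csv.reader(io.StringIO(response.text)))

# Usage with a placeholder URL
rows = fetch_csv_rows("https://example.com/data.csv")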
from urllib import request

def download_stock_data(csv_url):
    # Fetch the CSV and decode the response body to text
    response = request.urlopen(csv_url)
    csv_str = response.read().decode("utf-8")
    lines = csv_str.split("\n")
    # Write the lines back out to a local file
    dest_path = r'goog.csv'
    with open(dest_path, "w") as fx:
        for line in lines:
            fx.write(line + "\n")
def download_csv():
    # Hit the GTFS CSV download endpoint via the post_and_check helper
    post_and_check('/gtfs/download/')
import requests

def _download(url, path):
    """Download *url* to *path*."""
    r = requests.get(url.strip(), stream=True)
    with open(str(path), 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            # filter out keep-alive new chunks
            if chunk:
                f.write(chunk)
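A typical call, with placeholder arguments: _download('https://example.com/data.csv', 'data.csv'). Because the response is streamed in 1 KB chunks, the whole file never has to be held in memory at once.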
import requests

def download(url, file_name):
    res = requests.get(url, stream=True)
    if res.status_code == 200:
        with open(file_name, 'wb') as file:
            for chunk in res.iter_content(chunk_size=1024):
                file.write(chunk)
from django.http import HttpResponse

def download_csv(request):
    """Return a CSV file.

    This view responds with the entire content of the CSV file in a single piece.
    """
    csv_file = ''.join(big_csv(100))
    response = HttpResponse(csv_file, content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=big.csv'
    response['Content-Length'] = len(csv_file)

    return response
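The docstring's point about returning the file "in a single piece" marks a design choice: for very large exports, Django can stream the rows as they are generated instead of joining them first. A hedged sketch of that variant, reusing the big_csv helper assumed by the snippet above (Content-Length is omitted because the final size is not known up front):

from django.http import StreamingHttpResponse

def download_csv_streaming(request):
    # Stream the CSV chunks as they are produced instead of buffering the whole file
    response = StreamingHttpResponse(big_csv(100), content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=big.csv'
    return response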
import requests

def download_2(url):
    try:
        response = requests.get(url)
    except Exception:
        print('Failed Download!')
    else:
        if response.status_code == 200:
            with open('file_name.jpg', 'wb') as f:
                # Reuse the body already fetched instead of requesting the URL again
                f.write(response.content)
            print("Successfully Downloaded")
        else:
            print('Failed Download!')
import wget

def download(url):
    wget.download(url)
import csv
import gzip
import io
from urllib.request import urlopen

@classmethod
def download(cls, ref_book):
    out = []
    # Fetch data into an in-memory buffer
    url = ref_book.download_url
    url = url.replace(
        "http://update.nocproject.org/db/",
        "https://cdn.nocproject.org/refbook/"
    )
    f = urlopen(url)
    data = io.BytesIO(f.read())
    f.close()
    # Wrap GzipFile for gzipped content
    if ref_book.download_url.endswith(".gz"):
        data = gzip.GzipFile(fileobj=data)
    # Iterate through CSV, decoding each row as UTF-8 text
    reader = csv.reader(io.TextIOWrapper(data, encoding="utf-8", errors="ignore"))
    header = {}
    for row in reader:
        if not row:
            continue
        if not header:
            # Read field names from the first line
            for i, h in enumerate(row):
                header[i] = h
            continue
        r = {}
        for i, v in enumerate(row):
            r[header[i]] = v
        out.append(r)
    return out
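The header-to-dict mapping this method builds by hand is essentially what the standard library's csv.DictReader provides; a minimal sketch of the same idea, assuming the payload has already been decoded to text:

import csv
import io

def rows_as_dicts(csv_text):
    # DictReader takes field names from the first row and yields one dict per data row
    return list(csv.DictReader(io.StringIO(csv_text)))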
import requests
from os.path import isfile, join

def download_file(url, download_path):
    local_filename = url.split('/')[-1]
    if not isfile(join(download_path, local_filename)):
        # NOTE the stream=True parameter
        r = requests.get(url, stream=True)
        with open(join(download_path, local_filename), 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
    print(local_filename)
import requests

def download(self, url):
    '''
    Download the page at the given url.
    :param url:
    :return:
    '''
    headers = {
        'User-Agent': useragent.getUserAgent()
    }
    s = requests.session()
    r = s.request(method='get', url=url, headers=headers)
    if r.status_code == 200:
        print('Fetching URL: %s' % url)
        print('User-Agent:', r.request.headers.get('user-agent'))
        return r.content
    return None
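This last method returns the raw response body as bytes, so a caller that wants CSV rows still has to decode and parse it. A small sketch of that follow-up step, with the UTF-8 encoding as an assumption:

import csv
import io

def parse_downloaded_csv(content, encoding='utf-8'):
    # Decode the downloaded bytes and parse them into a list of rows
    return list(csv.reader(io.StringIO(content.decode(encoding))))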
