10 examples of 'python requests download file' in Python

Every line of 'python requests download file' code snippets is scanned for vulnerabilities by our powerful machine learning engine that combs millions of open source libraries, ensuring your Python code is secure.

All examples are scanned by Snyk Code

By copying the Snyk Code Snippets you agree to this disclaimer.
def _download_file(download_url, filename):
    """Download *download_url* to *filename*, following 302 redirects by hand.

    Redirects are handled manually (``allow_redirects=False``) so the
    ``Accept: application/octet-stream`` header from the first hop is not
    re-sent to the redirect target — presumably a signed storage URL that
    rejects extra headers (TODO confirm against the API being called).

    :param download_url: URL of the file to fetch.
    :param filename: local path to write the response body to.
    :returns: *filename*, for caller convenience.
    :raises RuntimeError: if more than 10 consecutive 302s are seen.
    """
    response = request(
        method='GET',
        url=download_url,
        allow_redirects=False,
        headers={'Accept': 'application/octet-stream'},
        stream=True)
    # Cap the manual redirect chain: the original unbounded while-loop
    # would spin forever against a server that always answers 302.
    for _ in range(10):
        if response.status_code != 302:
            break
        response = request(
            'GET', response.headers['Location'], allow_redirects=False,
            stream=True
        )
    else:
        raise RuntimeError('Too many redirects for %s' % download_url)
    with open(filename, 'w+b') as f:
        for chunk in response.iter_content(chunk_size=REQ_BUFFER_SIZE):
            f.write(chunk)

    return filename
def download_file(url, download_path):
    """Fetch *url* into directory *download_path*, skipping existing files.

    The local file name is the last path segment of the URL. Prints the
    local file name when done.

    :param url: remote file URL.
    :param download_path: directory to save the file into.
    """
    local_filename = url.split('/')[-1]
    if not isfile(join(download_path, local_filename)):
        # NOTE the stream=True parameter
        r = requests.get(url, stream=True)
        with open(join(download_path, local_filename), 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
            # The per-chunk f.flush() was dropped: it defeated OS write
            # buffering; the 'with' block flushes and closes on exit.
    print(local_filename)
def download(url, file_name):
    """Save the body of *url* to *file_name*; do nothing unless HTTP 200."""
    res = requests.get(url, stream=True)
    if res.status_code != 200:
        return
    with open(file_name, 'wb') as fh:
        for piece in res.iter_content(chunk_size=1024):
            fh.write(piece)
def download(url, file_path, timeout=30):
    """Stream *url* into *file_path*.

    :param url: remote resource to fetch.
    :param file_path: destination path, opened in binary write mode.
    :param timeout: connect/read timeout in seconds passed to requests.
    """
    response = requests.get(url, stream=True, timeout=timeout)

    with open(file_path, "wb") as handle:
        # iter_content() without a chunk_size yields ONE BYTE per
        # iteration — a documented requests default that made this
        # loop pathologically slow. Use a sane block size.
        for data in response.iter_content(chunk_size=64 * 1024):
            handle.write(data)
def fetch_file(file, bucket):
    # Download S3 object *file* (an object key) from *bucket* using the
    # module-level boto3 ``s3_client``.
    # NOTE(review): ``save_to`` is not defined in this snippet — presumably
    # a module-level destination path; confirm at the call site. Also,
    # the parameter name ``file`` shadows a builtin.
    s3_client.download_file(bucket, file, save_to)
def download_file(url):
    '''Downloads and extracts the url content.

    :kwarg url: Contains the download link to raw data.

    '''
    response = requests.get(url, stream=True)
    with open('subtitle.zip', 'wb') as out_file:
        fsrc = response.raw
        size = response.headers.get("content-length")
        # Guard: servers may omit Content-Length, in which case the old
        # code crashed on int(None) while formatting the progress line.
        total_kb = str(int(size) // 1024) if size else "unknown"
        length = 16 * 1024
        while True:
            buf = fsrc.read(length)
            if not buf:
                break
            out_file.write(buf)
            sys.stdout.write("Downloaded " + str(os.path.getsize('subtitle.zip') / 1024) + "kb of " + total_kb + " kb\r")
            sys.stdout.flush()
    print("\nDownload complete\nExtracting")
    del response
    try:
        # Use the archive as a context manager instead of calling the
        # unbound ZipFile.extractall on a second, leaked ZipFile handle.
        with zipfile.ZipFile('subtitle.zip') as archive:
            archive.extractall()
        os.remove('subtitle.zip')
    except (zipfile.BadZipfile, OSError):
        # Narrowed from a bare ``except:`` that swallowed every error,
        # including KeyboardInterrupt.
        print("* The subtitle file is not good to extract.")
        return 'no'
    return prompt()
def download_file(url, date_created, verbose=False):
    """Download *url* to a local file named ``date_created + <basename>``.

    :param url: remote file URL (anything ``str()`` accepts).
    :param date_created: string prefix for the local file name.
    :param verbose: when True, print start/end progress messages.
    """
    filename = date_created + str(url).split('/')[-1]

    if verbose:
        print(MSG_START.format(filename))

    dl_time = datetime.now()

    # Fetch BEFORE opening the destination: the old order opened the
    # file first, so a failed request left an empty file behind.
    response = get(str(url))

    with open(filename, "wb") as file:
        file.write(response.content)

    delta = (datetime.now() - dl_time).total_seconds()

    if verbose:
        print(MSG_END.format(filename, str(delta)))
    # Trailing dead re-assignment of dl_time removed.
def download_file(url, path):
    """Download *url* to *path*, streaming in fixed-size chunks.

    :param url: any URL accepted by ``urllib.request.urlopen``.
    :param path: destination file path, written in binary mode.
    """
    logging.info("Downloading {url} to {path}...".format(url=url, path=path))

    with open(path, "wb") as f:
        with urllib.request.urlopen(url) as u:
            # Stream in 64 KiB blocks instead of u.read(), which
            # buffered the entire response body in memory.
            while True:
                block = u.read(64 * 1024)
                if not block:
                    break
                f.write(block)
def download_file(file_name=None):
    # Trigger a browser-side download of *file_name* from a Google Colab
    # notebook. Works only inside the Colab runtime, which is what
    # provides the ``google.colab`` package — hence the deferred import.
    from google.colab import files
    files.download(file_name)
def _download(url, path):
    """Download *url* to *path*."""
    resp = requests.get(url.strip(), stream=True)
    with open(str(path), 'wb') as out:
        for block in resp.iter_content(chunk_size=1024):
            # keep-alive heartbeats arrive as empty chunks; skip them
            if not block:
                continue
            out.write(block)

Related snippets