10 examples of 'python download file from url' in Python

Every line of these 'python download file from url' code snippets is scanned for vulnerabilities by our powerful machine learning engine, which combs millions of open source libraries, ensuring your Python code is secure.

All examples are scanned by Snyk Code

By copying the Snyk Code Snippets you agree to
this disclaimer
def download(url, file_name):
    """Stream the resource at *url* to *file_name* on disk.

    The file is written only when the server answers HTTP 200; any
    other status is silently ignored (best-effort download).
    """
    # Use the response as a context manager so the streamed connection
    # is always closed — the original leaked it on non-200 responses.
    with requests.get(url, stream=True) as res:
        if res.status_code == 200:
            with open(file_name, 'wb') as file:
                for chunk in res.iter_content(chunk_size=1024):
                    file.write(chunk)
Important

Use secure code every time

Secure your code as it's written. Use Snyk Code to scan source code in minutes – no build needed – and fix issues immediately. Enable Snyk Code

def download_file(url, download_path):
    """Download *url* into *download_path*, skipping files that already exist.

    The local file name is taken from the last path segment of the URL
    and is printed when the function returns.
    """
    local_filename = url.split('/')[-1]
    target = join(download_path, local_filename)
    if not isfile(target):
        # NOTE the stream=True parameter: the body is fetched in chunks
        # instead of being loaded into memory; the context manager
        # closes the connection (the original never closed it).
        with requests.get(url, stream=True) as r:
            with open(target, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
    # print() call syntax — the original used the Python-2-only print
    # statement, which is a SyntaxError on Python 3.
    print(local_filename)
def _download_file(download_url, filename):
    """Download *download_url* to *filename*, following 302 redirects manually.

    :returns: *filename* on success.
    :raises RuntimeError: if the redirect chain exceeds 30 hops — the
        original looped forever on a redirect cycle.
    """
    response = request(
        method='GET',
        url=download_url,
        allow_redirects=False,
        headers={'Accept': 'application/octet-stream'},
        stream=True)
    redirects = 0
    while response.status_code == 302:
        redirects += 1
        if redirects > 30:
            raise RuntimeError('too many redirects for %s' % download_url)
        location = response.headers['Location']
        # Close the intermediate response so its connection is released
        # before opening the next one (the original leaked each hop).
        response.close()
        response = request(
            'GET', location, allow_redirects=False,
            stream=True
        )
    with open(filename, 'w+b') as f:
        for chunk in response.iter_content(chunk_size=REQ_BUFFER_SIZE):
            f.write(chunk)

    return filename
def download(url):
    """Fetch *url* into the current directory, naming the output file
    after the final path segment of the URL."""
    target_name = os.path.basename(url)
    fetch = get_best_downloader()
    fetch(url, target_name)
def download(url):
    # Delegates entirely to the third-party ``wget`` helper, which
    # derives the output file name from the URL and saves it in the
    # current working directory.
    wget.download(url)
def download_2(url):
    """Download *url* to the hard-coded file ``file_name.jpg``.

    Prints a success/failure message instead of raising; network errors
    and non-200 responses are reported but swallowed.
    """
    try:
        response = requests.get(url)
    except Exception:
        print('Failed Download!')
    else:
        if response.status_code == 200:
            with open('file_name.jpg', 'wb') as f:
                # Reuse the body already fetched above — the original
                # called requests.get(url) a second time here, fetching
                # the whole resource twice.
                f.write(response.content)
            print("Succesfully Downloaded")
        else:
            print('Failed Download!')
def download_file(url):
    ''' Downloads and extracts the url content.

    :kwarg url: Contains the download link to raw data.

    Returns 'no' when the archive cannot be extracted, otherwise the
    result of prompt().
    '''
    response = requests.get(url, stream=True)
    with open('subtitle.zip', 'wb') as out_file:
        fsrc = response.raw
        size = response.headers.get("content-length")
        length = 16 * 1024
        while True:
            buf = fsrc.read(length)
            if not buf:
                break
            out_file.write(buf)
            # // keeps the integer-division output the original produced
            # under Python 2's / on ints.
            sys.stdout.write("Downloaded " + str(os.path.getsize('subtitle.zip') // 1024) + "kb of " + str(int(size) // 1024) + " kb\r")
            sys.stdout.flush()
    # print() call syntax — the original used Python-2-only print statements.
    print("\nDownload complete\nExtracting")
    del response
    try:
        # Context manager closes the archive handle; the original's
        # zipfile.ZipFile.extractall(zipfile.ZipFile(...)) leaked it.
        with zipfile.ZipFile('subtitle.zip') as archive:
            archive.extractall()
        os.remove('subtitle.zip')
    except Exception:
        # Narrowed from a bare except:, which also swallowed
        # KeyboardInterrupt/SystemExit; behavior is otherwise unchanged.
        print("* The subtitle file is not good to extract.")
        return 'no'
    return prompt()
def _download(url, directory, filename):
    """Save the resource at *url* as ``directory/filename``.

    HTTP errors are written to stderr rather than propagated to the
    caller.
    """
    destination = os.path.join(directory, filename)
    try:
        response = urllib.request.urlopen(url)
        with response, open(destination, "wb") as out_file:
            shutil.copyfileobj(response, out_file)
    except urllib.error.HTTPError as exception:
        sys.stderr.write(str(exception))
def download(url, file_path, timeout=30):
    """Stream the resource at *url* into *file_path*.

    :param timeout: timeout in seconds passed through to requests.
    """
    # iter_content() with no chunk_size yields ONE BYTE per iteration,
    # making the download pathologically slow; 1024 matches the
    # chunking used by the other downloaders in this file.  The context
    # manager also closes the streamed connection, which the original
    # leaked.
    with requests.get(url, stream=True, timeout=timeout) as response:
        with open(file_path, "wb") as handle:
            for data in response.iter_content(chunk_size=1024):
                handle.write(data)
def download_file(url, date_created, verbose=False):
    """Download *url*, prefixing the saved file name with *date_created*.

    :param verbose: when True, print MSG_START/MSG_END progress
        messages, the latter including elapsed seconds for the fetch.
    """
    filename = date_created + str(url).split('/')[-1]

    if verbose:
        print(MSG_START.format(filename))

    dl_time = datetime.now()

    # Fetch BEFORE opening the output file so a failed request does not
    # leave an empty file behind (the original opened the file first).
    response = get(str(url))

    with open(filename, "wb") as file:
        file.write(response.content)

    delta = (datetime.now() - dl_time).total_seconds()

    if verbose:
        print(MSG_END.format(filename, str(delta)))
    # Dropped the original's trailing `dl_time = datetime.now()` —
    # dead code, the value was never read again.

Related snippets