Every line of these 'python download file' code snippets is scanned for vulnerabilities by our machine-learning engine, which combs millions of open-source libraries to help ensure your Python code is secure.
def _download_file(download_url, filename):
    """Download ``download_url`` to ``filename``, following 302 redirects by hand.

    Redirects are followed manually (``allow_redirects=False``) so the
    ``Accept: application/octet-stream`` header is not forwarded to the
    redirect target.

    :param download_url: URL to fetch.
    :param filename: local path to write the response body to.
    :returns: ``filename``, for caller convenience.
    :raises RuntimeError: if more than 10 consecutive redirects are seen.
    """
    response = request(
        method='GET',
        url=download_url,
        allow_redirects=False,
        headers={'Accept': 'application/octet-stream'},
        stream=True)
    # Bound the manual redirect chain: the original `while` looped forever
    # on a redirect cycle (302 -> 302 -> ...).
    redirects_left = 10
    while response.status_code == 302:
        if redirects_left == 0:
            raise RuntimeError('too many redirects for %s' % download_url)
        redirects_left -= 1
        response = request(
            'GET', response.headers['Location'], allow_redirects=False,
            stream=True
        )
    with open(filename, 'w+b') as f:
        for chunk in response.iter_content(chunk_size=REQ_BUFFER_SIZE):
            f.write(chunk)

    return filename
def download(url, file_name):
    """Stream ``url`` to ``file_name`` when the server answers HTTP 200.

    NOTE(review): non-200 responses are silently ignored — this matches the
    original best-effort behavior; callers get no error signal.

    :param url: URL to fetch.
    :param file_name: local path to write the body to.
    """
    # Close the streamed connection in every case — the original never
    # closed it, leaking the connection (always, but most visibly when
    # the status check failed and the body was never consumed).
    with requests.get(url, stream=True) as res:
        if res.status_code == 200:
            with open(file_name, 'wb') as file:
                for chunk in res.iter_content(chunk_size=1024):
                    file.write(chunk)
def download_file(url, download_path):
    """Download ``url`` into ``download_path`` unless it is already there.

    The local file name is the last path segment of the URL. Prints the
    local file name when done (original behavior).

    :param url: URL to fetch.
    :param download_path: directory to place the file in.
    """
    local_filename = url.split('/')[-1]
    target = join(download_path, local_filename)
    if not isfile(target):
        # NOTE the stream=True parameter: the body is fetched in chunks.
        r = requests.get(url, stream=True)
        with open(target, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
            # Dropped the original per-chunk f.flush(): closing the file
            # flushes once, and flushing 1 KiB at a time is pure overhead.
        r.close()  # the original leaked the streamed connection
    # Parenthesized print runs on both Python 2 and 3; the original
    # `print local_filename` was Python-2 only.
    print(local_filename)
def download(url, file_path, timeout=30):
    """Download ``url`` to ``file_path``.

    :param url: URL to fetch.
    :param file_path: local path to write the body to.
    :param timeout: connect/read timeout in seconds passed to requests.
    :raises requests.HTTPError: on a non-2xx response (the original wrote
        error pages — e.g. a 404 HTML body — straight to disk).
    """
    with requests.get(url, stream=True, timeout=timeout) as response:
        response.raise_for_status()

        with open(file_path, "wb") as handle:
            # The original called iter_content() with the default
            # chunk_size=1, writing the file one byte per iteration.
            for data in response.iter_content(chunk_size=64 * 1024):
                handle.write(data)
def fetch_file(file, bucket, local_path=None):
    """Download object ``file`` from S3 ``bucket`` via the module-level client.

    :param file: S3 object key. (Parameter name kept for backward
        compatibility even though it shadows the ``file`` builtin.)
    :param bucket: S3 bucket name.
    :param local_path: destination path; when omitted, falls back to the
        module-level ``save_to`` — the original depended on that global
        implicitly, which made the destination impossible to choose per call.
    """
    s3_client.download_file(
        bucket, file, save_to if local_path is None else local_path)
def download_file(url):
    """Download the url content to ``subtitle.zip``, extract it, delete the zip.

    :kwarg url: Contains the download link to raw data.
    :returns: ``prompt()`` on success, ``'no'`` if extraction fails.
    """
    response = requests.get(url, stream=True)
    size = response.headers.get("content-length")
    length = 16 * 1024
    with open('subtitle.zip', 'wb') as out_file:
        fsrc = response.raw
        while True:
            buf = fsrc.read(length)
            if not buf:
                break
            out_file.write(buf)
            # Guard: the content-length header may be absent, in which
            # case the original crashed on int(None) mid-download.
            if size is not None:
                sys.stdout.write("Downloaded " + str(os.path.getsize('subtitle.zip') / 1024) + "kb of " + str(int(size) / 1024) + " kb\r")
                sys.stdout.flush()
    print("\nDownload complete\nExtracting")
    del response
    try:
        # Open the archive normally and close it; the original
        # `ZipFile.extractall(ZipFile('subtitle.zip'))` form left the
        # handle open, so os.remove could fail on platforms that lock
        # open files.
        with zipfile.ZipFile('subtitle.zip') as archive:
            archive.extractall()
        os.remove('subtitle.zip')
    except (zipfile.BadZipfile, OSError):
        # Narrowed from a bare `except:` that swallowed everything,
        # including KeyboardInterrupt.
        print("* The subtitle file is not good to extract.")
        return 'no'
    return prompt()
def download_file(url, date_created, verbose=False):
    """Download ``url`` to a local file named ``date_created`` + URL basename.

    :param url: URL to fetch (anything ``str()``-able).
    :param date_created: string prefix for the local file name.
    :param verbose: when True, print the MSG_START/MSG_END progress
        messages with the file name and elapsed seconds.
    """
    filename = date_created + str(url).split('/')[-1]

    if verbose:
        print(MSG_START.format(filename))

    dl_time = datetime.now()

    # Fetch BEFORE opening the file: the original opened the file first,
    # so a failed request still left an empty file on disk.
    response = get(str(url))

    with open(filename, "wb") as file:
        file.write(response.content)

    delta = (datetime.now() - dl_time).total_seconds()

    if verbose:
        print(MSG_END.format(filename, str(delta)))
    # Dropped the original's trailing dead `dl_time = datetime.now()`.
def download_file(file_name=None):
    """Trigger a browser download of ``file_name`` from a Google Colab runtime.

    :param file_name: path of the file to download. The ``None`` default is
        kept for interface compatibility, but is rejected explicitly: the
        original passed ``None`` straight to ``files.download`` and failed
        with an opaque error inside colab.
    :raises ValueError: if ``file_name`` is not provided.
    """
    if file_name is None:
        raise ValueError("file_name is required")
    # Imported lazily: google.colab only exists inside a Colab runtime.
    from google.colab import files
    files.download(file_name)
def download_file(url, path):
    """Download ``url`` and write the response body to ``path``.

    Streams the body to disk in chunks via ``shutil.copyfileobj`` instead
    of buffering the entire response in memory as the original
    ``f.write(u.read())`` did.

    :param url: URL accepted by ``urllib.request.urlopen``.
    :param path: local path to write to.
    """
    import shutil

    logging.info("Downloading {url} to {path}...".format(url=url, path=path))

    with open(path, "wb") as f:
        with urllib.request.urlopen(url) as u:
            shutil.copyfileobj(u, f)
def download_2(url):
    """Download ``url`` to ``file_name.jpg``, printing success or failure.

    Fixes the original's double fetch: it checked ``response.status_code``
    and then downloaded the URL a SECOND time via
    ``requests.get(url).content`` to obtain the body.

    :param url: URL to fetch.
    """
    try:
        response = requests.get(url)
    except Exception:
        # Broad by design for a user-facing best-effort helper; any
        # request failure is reported the same way.
        print('Failed Download!')
    else:
        if response.status_code == 200:
            with open('file_name.jpg', 'wb') as f:
                # Reuse the body already fetched above.
                f.write(response.content)
            print("Succesfully Downloaded")
        else:
            print('Failed Download!')