Every line of the 'python download zip file from url' code snippets below is scanned for vulnerabilities by our powerful machine learning engine, which combs millions of open source libraries to help ensure your Python code is secure.
import os
import ssl
from shutil import copyfileobj
from urllib.error import HTTPError
from urllib.request import urlopen


def _download_zip(url, output_name):
    """Download a file from a requested url and store locally.

    Parameters
    ----------
    url : str
        The url from where the file will be downloaded.

    output_name : str
        The name of the file in the local directory.

    Returns
    -------
    None.

    """
    # SERVER_AUTH is the correct purpose for a client verifying a server
    # certificate; CLIENT_AUTH builds a server-side context and would
    # silently skip certificate validation here.
    ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
    filename = output_name + ".zip"
    try:
        data_url = urlopen(url, context=ctx)
    except HTTPError as e:
        if e.code == 404:
            e.msg = "Dataset '%s' not found on mldata.org." % output_name
        raise

    # Store the zip file, removing the partial file on failure.
    try:
        with open(filename, 'w+b') as zip_file:
            copyfileobj(data_url, zip_file)
    except Exception:
        os.remove(filename)
        raise
    data_url.close()
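A minimal usage sketch for the function above; the URL and dataset name are placeholders, and the call writes "iris.zip" to the current directory:

# Hypothetical call: fetches the archive and saves it as "iris.zip".
_download_zip("https://example.com/datasets/iris", "iris")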
import os
import sys
import zipfile

import requests


def download_file(url):
    ''' Downloads and extracts the url content.

    :kwarg url: Contains the download link to raw data.

    '''
    response = requests.get(url, stream=True)
    with open('subtitle.zip', 'wb') as out_file:
        fsrc = response.raw
        size = response.headers.get("content-length")
        length = 16 * 1024
        while True:
            buf = fsrc.read(length)
            if not buf:
                break
            out_file.write(buf)
            sys.stdout.write("Downloaded "
                             + str(os.path.getsize('subtitle.zip') / 1024)
                             + " KB of " + str(int(size) / 1024) + " KB\r")
            sys.stdout.flush()
    print("\nDownload complete\nExtracting")
    del response
    try:
        # Open the archive as a context manager and extract it in place.
        with zipfile.ZipFile('subtitle.zip') as archive:
            archive.extractall()
        os.remove('subtitle.zip')
    except zipfile.BadZipFile:
        print("* The subtitle file is not good to extract.")
        return 'no'
    return prompt()  # prompt() is defined elsewhere in the source project
import requests


def download(url, file_name):
    res = requests.get(url, stream=True)
    if res.status_code == 200:
        with open(file_name, 'wb') as file:
            for chunk in res.iter_content(chunk_size=1024):
                file.write(chunk)
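If the goal is to extract the archive rather than keep it on disk, a small variation avoids the intermediate file entirely. This is a minimal sketch, not part of the snippet above; the helper name and target directory are hypothetical, and it assumes the archive is small enough to hold in memory:

import io
import zipfile

import requests


def download_and_extract(url, target_dir):  # hypothetical helper
    res = requests.get(url)
    res.raise_for_status()
    # zipfile.ZipFile accepts any file-like object, so the downloaded
    # bytes can be wrapped in BytesIO and unpacked without touching disk.
    with zipfile.ZipFile(io.BytesIO(res.content)) as archive:
        archive.extractall(target_dir)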
import wget


def download(url):
    wget.download(url)
from os.path import isfile, join

import requests


def download_file(url, download_path):
    local_filename = url.split('/')[-1]
    if not isfile(join(download_path, local_filename)):
        # NOTE the stream=True parameter
        r = requests.get(url, stream=True)
        with open(join(download_path, local_filename), 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
    print(local_filename)
import requests


def download_2(url):
    try:
        response = requests.get(url)
    except Exception:
        print('Failed Download!')
    else:
        if response.status_code == 200:
            with open('file_name.jpg', 'wb') as f:
                # Reuse the response already fetched instead of
                # downloading the file a second time.
                f.write(response.content)
            print('Successfully Downloaded')
        else:
            print('Failed Download!')
import urllib.request
import zipfile


def grab(src, dest, name):
    download = True
    if not dest.exists():
        print('Downloading %s' % name)
    elif not zipfile.is_zipfile(dest):
        print('Downloading %s (corrupt file)' % name)
    else:
        download = False
    if download:
        urllib.request.urlretrieve(str(src), str(dest))
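A hedged usage sketch for the caching pattern above: `dest` must support `.exists()`, so a `pathlib.Path` works; the URL and paths are placeholders.

from pathlib import Path

# Hypothetical call: downloads only when the cached copy is missing
# or fails the zipfile.is_zipfile integrity check.
grab("https://example.com/tiles.zip", Path("cache/tiles.zip"), "tiles")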
import requests


def _download(url, path):
    """Download *url* to *path*."""
    r = requests.get(url.strip(), stream=True)
    with open(str(path), 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            # filter out keep-alive new chunks
            if chunk:
                f.write(chunk)
import os


def download(url):
    # get_best_downloader() is provided elsewhere in the source project.
    downloader = get_best_downloader()
    downloader(url, os.path.basename(url))
import os
import shutil
import sys
import urllib.error
import urllib.request


def _download(url, directory, filename):
    try:
        with urllib.request.urlopen(url) as response, open(
            os.path.join(directory, filename), "wb"
        ) as out_file:
            shutil.copyfileobj(response, out_file)
    except urllib.error.HTTPError as exception:
        sys.stderr.write(str(exception))
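Once the archive is on disk, the standard library can unpack it without using `zipfile` directly. A minimal follow-up sketch; the URL and file names are placeholders:

import shutil

# Hypothetical usage: fetch the archive with _download (above), then
# let shutil infer the format from the ".zip" suffix and unpack it.
_download("https://example.com/data.zip", ".", "data.zip")
shutil.unpack_archive("data.zip", "data")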