10 examples of 'script to download files from website' in Python

Every line of these 'script to download files from website' code snippets is scanned for vulnerabilities by our powerful machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

import os
import sys
import zipfile

import requests


def download_file(url):
    """Downloads and extracts the url content.

    :param url: download link to the raw data.
    """
    response = requests.get(url, stream=True)
    with open('subtitle.zip', 'wb') as out_file:
        fsrc = response.raw
        size = response.headers.get("content-length")
        length = 16 * 1024
        while True:
            buf = fsrc.read(length)
            if not buf:
                break
            out_file.write(buf)
            sys.stdout.write("Downloaded " + str(os.path.getsize('subtitle.zip') / 1024) +
                             " kb of " + str(int(size) / 1024) + " kb\r")
            sys.stdout.flush()
    print("\nDownload complete\nExtracting")
    del response
    try:
        zipfile.ZipFile('subtitle.zip').extractall()
        os.remove('subtitle.zip')
    except zipfile.BadZipFile:
        print("* The subtitle file is not good to extract.")
        return 'no'
    return prompt()  # prompt() is defined elsewhere in the original script

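For context, a minimal usage sketch of the snippet above. The URL is a placeholder and the stub prompt() only stands in for the one the original script defines elsewhere; neither is part of the original code.

def prompt():
    # illustrative stand-in for the original script's prompt()
    return 'done'

# hypothetical direct link to a zip archive of subtitles
download_file('https://example.com/subtitles/sample.zip')
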
import requests


def download_2(url):
    try:
        response = requests.get(url)
    except Exception:
        print('Failed Download!')
    else:
        if response.status_code == 200:
            with open('file_name.jpg', 'wb') as f:
                # reuse the response already fetched instead of requesting the URL a second time
                f.write(response.content)
            print("Successfully Downloaded")
        else:
            print('Failed Download!')

import os
import shutil
import sys
import urllib.error
import urllib.request


def _download(url, directory, filename):
    try:
        with urllib.request.urlopen(url) as response, open(
            os.path.join(directory, filename), "wb"
        ) as out_file:
            shutil.copyfileobj(response, out_file)
    except urllib.error.HTTPError as exception:
        sys.stderr.write(str(exception))

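A possible call, with a placeholder URL and output location that are not from the original project:

# downloads the resource into the current directory under a chosen name
_download('https://example.com/files/archive.tar.gz', '.', 'archive.tar.gz')
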
import requests


def download(url, file_name):
    res = requests.get(url, stream=True)
    if res.status_code == 200:
        with open(file_name, 'wb') as file:
            for chunk in res.iter_content(chunk_size=1024):
                file.write(chunk)

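Because stream=True plus iter_content writes the response in 1 KB chunks, the whole file never has to fit in memory at once. A hedged usage example with placeholder arguments:

# hypothetical URL and local file name
download('https://example.com/files/report.pdf', 'report.pdf')
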
import wget

def download(url):
    wget.download(url)

import os

import requests


def download():
    """
    Creates a download/ folder.
    """
    result = get_filename()  # get_filename() and qa_path come from the surrounding project
    for item in result:
        r = requests.get('http://down.tdx.com.cn:8001/fin/{}'.format(item))

        file = '{}{}{}{}{}'.format(qa_path, os.sep, 'downloads', os.sep, item)
        with open(file, "wb") as code:
            code.write(r.content)

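get_filename() and qa_path are not shown in the snippet. Purely as an assumption so it can be exercised standalone, they might be stubbed like this; the file name and base path are placeholders, and the downloads/ directory must already exist:

import os

qa_path = os.getcwd()  # assumed base path; the original project configures its own

def get_filename():
    # assumed stand-in; the original returns the list of files to fetch
    return ['example.zip']

os.makedirs(os.path.join(qa_path, 'downloads'), exist_ok=True)
download()
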
from os.path import isfile, join

import requests


def download_file(url, download_path):
    local_filename = url.split('/')[-1]
    if not isfile(join(download_path, local_filename)):
        # NOTE the stream=True parameter
        r = requests.get(url, stream=True)
        with open(join(download_path, local_filename), 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
    print(local_filename)

import os


def download(data_dir):
    if not os.path.isdir(data_dir):
        os.system('mkdir ' + data_dir)
    os.chdir(data_dir)
    if (not os.path.exists('train-images-idx3-ubyte')) or \
       (not os.path.exists('train-labels-idx1-ubyte')) or \
       (not os.path.exists('t10k-images-idx3-ubyte')) or \
       (not os.path.exists('t10k-labels-idx1-ubyte')):
        os.system('wget http://data.mxnet.io/mxnet/data/mnist.zip')
        os.system('unzip mnist.zip; rm mnist.zip')
    os.chdir('..')

import os

def downloadFile(source):
    os.chdir(datadir)  # datadir is defined elsewhere in the original module
    return os.system('wget -t 3 ' + source)

from datetime import datetime

from requests import get


def download_file(url, date_created, verbose=False):
    # MSG_START and MSG_END are format strings defined elsewhere in the original module
    filename = date_created + str(url).split('/')[-1]

    if verbose:
        print(MSG_START.format(filename))

    with open(filename, "wb") as file:
        dl_time = datetime.now()

        response = get(str(url))
        file.write(response.content)

        delta = (datetime.now() - dl_time).total_seconds()

        if verbose:
            print(MSG_END.format(filename, str(delta)))

        dl_time = datetime.now()

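MSG_START and MSG_END are module-level constants in the original project. A hedged sketch with placeholder values, plus an illustrative call:

# assumed placeholder format strings; the originals are defined elsewhere
MSG_START = "Downloading {} ..."
MSG_END = "Finished {} in {} s"

# hypothetical arguments: a URL and a date prefix for the saved file name
download_file('https://example.com/data/report.csv', '2024-01-01_', verbose=True)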
