10 examples of 'python download pdf from url' in Python

Every line of these 'python download pdf from url' code snippets is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

import wget

def download(url):
    # wget.download saves the file into the current working directory
    wget.download(url)

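A minimal usage sketch for the snippet above, assuming the third-party wget package is installed (pip install wget); the URL is illustrative:

download('https://example.com/files/report.pdf')  # saves report.pdf next to the script
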
import requests

def download(url, file_name):
    # stream=True keeps memory use bounded by reading the body in chunks
    res = requests.get(url, stream=True)
    if res.status_code == 200:
        with open(file_name, 'wb') as file:
            for chunk in res.iter_content(chunk_size=1024):
                file.write(chunk)

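Streaming is the usual choice for large binaries such as PDFs; a usage sketch with an assumed URL and filename:

download('https://example.com/files/manual.pdf', 'manual.pdf')
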
import requests

def download_2(url):
    try:
        response = requests.get(url)
    except Exception:
        print('Failed Download!')
    else:
        if response.status_code == 200:
            with open('file_name.jpg', 'wb') as f:
                # reuse the response already in hand rather than fetching the URL a second time
                f.write(response.content)
            print("Successfully Downloaded")
        else:
            print('Failed Download!')

import os
import shutil
import sys
import urllib.error
import urllib.request

def _download(url, directory, filename):
    try:
        with urllib.request.urlopen(url) as response, open(
            os.path.join(directory, filename), "wb"
        ) as out_file:
            # copyfileobj streams the response to disk in chunks
            shutil.copyfileobj(response, out_file)
    except urllib.error.HTTPError as exception:
        sys.stderr.write(str(exception))

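This variant needs only the standard library; a usage sketch with an assumed URL and target directory:

_download('https://example.com/files/paper.pdf', '/tmp', 'paper.pdf')
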
import requests

def _download(url, path):
    """Download *url* to *path*."""
    r = requests.get(url.strip(), stream=True)
    with open(str(path), 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            # filter out keep-alive new chunks
            if chunk:
                f.write(chunk)

import urllib.request

def download_page(url):
    try:
        # a browser-like User-Agent helps avoid naive bot blocking
        headers = {}
        headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
        req = urllib.request.Request(url, headers=headers)
        resp = urllib.request.urlopen(req)
        # decode the bytes; str() on bytes would yield a "b'...'" literal
        resp_data = resp.read().decode('utf-8', errors='replace')
        return resp_data
    except Exception as e:
        print(str(e))

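The snippet above decodes the response as text, which suits HTML pages; for a binary file such as a PDF you would write the raw bytes instead. A minimal sketch under that assumption (download_binary is a hypothetical helper, not part of the original snippet):

import urllib.request

def download_binary(url, filename):
    # hypothetical helper: write the raw response bytes without decoding
    req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    with urllib.request.urlopen(req) as resp, open(filename, 'wb') as f:
        f.write(resp.read())
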
import ssl

import wx

try:  # Python 2
    import urllib2
    from urllib2 import HTTPError, URLError
except ImportError:  # Python 3
    import urllib.request as urllib2
    from urllib.error import HTTPError, URLError

def download_urllib(url, filename):
    """Try to download via urllib."""
    print("Trying to Download via urllib from:\n ", url)
    keep_going = True
    try:
        url_res = urllib2.urlopen(url)
    except (HTTPError, URLError, ssl.CertificateError) as err:
        print("Error: %s" % err)
        return False
    with open(filename, 'wb') as outfile:
        block_sz = 8192
        meta = url_res.info()
        # Python 2 headers expose getheaders(); Python 3 uses get_all()
        meta_func = meta.getheaders if hasattr(meta, 'getheaders') else meta.get_all
        meta_length = meta_func("Content-Length")
        file_size = None
        if meta_length:
            file_size = int(meta_length[0])
        message = "Downloading: {0}\nBytes: {1}\n".format(url, file_size)
        dstyle = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_AUTO_HIDE
        if file_size:
            # the dialog maximum must be an integer number of blocks
            progress = wx.ProgressDialog('Downloading', message,
                                         maximum=1 + file_size // block_sz, style=dstyle)
        else:
            progress = wx.ProgressDialog('Downloading', message, style=dstyle)

        file_size_dl = 0
        while keep_going:
            read_buffer = url_res.read(block_sz)
            if not read_buffer:
                progress.Update(file_size_dl // block_sz, message + "\nDONE!")
                wx.MilliSleep(200)
                break

            file_size_dl += len(read_buffer)
            outfile.write(read_buffer)

            status = "{0:16}".format(file_size_dl)
            if file_size:
                status += " [{0:6.2f}%]".format(file_size_dl * 100 / file_size)
            # Update returns (continue, skip); the user can abort via the dialog
            (keep_going, dummy_skip) = progress.Update(file_size_dl // block_sz,
                                                       message + status)
            wx.MilliSleep(80)  # give the GUI some update time
    progress.Destroy()

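Because the snippet creates a wx.ProgressDialog, it needs a running wx.App; a minimal harness, with the URL and filename as assumptions:

import wx

app = wx.App(False)
download_urllib('https://example.com/files/big.pdf', 'big.pdf')
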
import requests

def download(self, url):
    '''
    Download the page at the given url.
    :param url:
    :return:
    '''
    headers = {
        # useragent is a helper module from the snippet's source project
        'User-Agent': useragent.getUserAgent()
    }
    s = requests.session()
    r = s.request(method='get', url=url, headers=headers)
    if r.status_code == 200:
        print('Fetching URL: %s' % url)
        print('User-Agent:', r.request.headers.get('user-agent'))
        return r.content
    return None

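The method returns the raw bytes of the response, so saving a PDF is just a matter of writing them out; a sketch assuming downloader is an instance of the class this method belongs to:

data = downloader.download('https://example.com/files/invoice.pdf')
if data:
    with open('invoice.pdf', 'wb') as f:
        f.write(data)
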
import os
import sys
import zipfile

import requests

def download_file(url):
    ''' Downloads and extracts the url content.

    :kwarg url: Contains the download link to raw data.

    '''
    response = requests.get(url, stream=True)
    with open('subtitle.zip', 'wb') as out_file:
        fsrc = response.raw
        # assumes the server sends a Content-Length header
        size = response.headers.get("content-length")
        length = 16 * 1024
        while True:
            buf = fsrc.read(length)
            if not buf:
                break
            out_file.write(buf)
            sys.stdout.write("Downloaded " + str(os.path.getsize('subtitle.zip') / 1024) + "kb of " + str(int(size) / 1024) + " kb\r")
            sys.stdout.flush()
    print("\nDownload complete\nExtracting")
    del response
    try:
        zipfile.ZipFile('subtitle.zip').extractall()
        os.remove('subtitle.zip')
    except zipfile.BadZipFile:
        print("* The subtitle file is not good to extract.")
        return 'no'
    return prompt()  # prompt() is defined elsewhere in the snippet's source project

from os.path import isfile, join

import requests

def download_file(url, download_path):
    local_filename = url.split('/')[-1]
    if not isfile(join(download_path, local_filename)):
        # NOTE the stream=True parameter
        r = requests.get(url, stream=True)
        with open(join(download_path, local_filename), 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
    print(local_filename)
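
A usage sketch for the last snippet; the URL and directory are illustrative assumptions:

download_file('https://example.com/files/guide.pdf', '/tmp')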
