10 examples of 'nltk.download()' in Python

Every line of these 'nltk.download()' code snippets is scanned for vulnerabilities by our powerful machine learning engine, which combs millions of open source libraries to help ensure your Python code is secure.

All examples are scanned by Snyk Code

By copying the Snyk Code Snippets, you agree to this disclaimer.
@click.command('download')
def download():
    """Install the required NLTK data packages.

    The packages are fetched into the user's NLTK data directory; the
    first existing path in ``nltk.data.path`` wins.
    """
    paths = '\n'.join(nltk.data.path)
    click.echo("Installing nltk packages into your user directories in "
               "the following order of existence (first found):\n" + paths)

    required = [
        ("taggers", "averaged_perceptron_tagger"),
        ("corpora", "wordnet"),
        ("tokenizers", "punkt"),
    ]

    # Only fetch packages that check_packages_exist reports as missing.
    for missing_pkg in check_packages_exist(required):
        nltk.download(missing_pkg[1])
Important

Use secure code every time

Secure your code as it's written. Use Snyk Code to scan source code in minutes – no build needed – and fix issues immediately. Enable Snyk Code

def downloadNLTKConll2000Corpus():
    """Fetch NLTK's conll2000 chunking corpus via the download helper."""
    logging.getLogger('collective.classification').info(
        "Downloading NLTK's conll2000 corpus")
    download('conll2000')
def _download(self, data_sets=DATA_SETS):
    """
    Download the given `data_sets`

    Args:
        data_sets: a list of the datasets to download
    """
    # Every remote file shares the provider's set-file extension.
    suffix = SpeechCorpusProvider.SET_FILE_EXTENSION
    for _set_type, set_name in data_sets:
        self._download_if_not_exists(set_name + suffix)
def download(url):
    # Thin wrapper around wget.download: fetches `url` into the current
    # working directory using wget's default local file naming.
    wget.download(url)
def download():
    # NOTE(review): looks like a web2py controller action — `response`,
    # `request`, and `db` appear to be injected by the framework
    # environment; confirm against the surrounding application.
    # Delegates file serving entirely to response.download().
    return response.download(request, db)
def download(url):
    """Fetch `url` into the current directory, naming the local file
    after the last path component of the URL."""
    target = os.path.basename(url)
    get_best_downloader()(url, target)
def download(dataset, feature, dest):
    """Download `<dataset>/<feature>.pkl` into directory `dest`.

    Shows a simple console progress bar while streaming the file.

    Args:
        dataset: base URL of the dataset.
        feature: feature name; fetched as `<feature>.pkl`.
        dest: destination directory (created if missing).

    Returns:
        True on success, False when the resource is unavailable.
    """
    import os
    import sys
    import urllib.request
    from urllib.error import HTTPError, URLError

    # os.makedirs is portable; the original shelled out to `mkdir -p`,
    # which fails on platforms without a `mkdir` binary.
    os.makedirs(dest, exist_ok=True)

    url = dataset + '/' + feature + '.pkl'
    file_path = os.path.join(dest, feature + '.pkl')
    print(file_path)

    try:
        # Python 3 moved urlopen to urllib.request; also catch URLError,
        # since connection failures are not HTTPError subclasses.
        u = urllib.request.urlopen(url)
    except (HTTPError, URLError):
        print("The requested data is not available for {} dataset.".format(dataset))
        return False

    # Context managers guarantee both the response and the file close.
    with u, open(file_path, 'wb') as f:
        file_size = int(u.headers["Content-Length"])
        print("Downloading: {}, size: {}".format(' '.join([dataset, feature]), file_size))

        file_size_dl = 0
        block_sz = 8192
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            pct = file_size_dl * 100. / file_size
            sys.stdout.write('\r')
            # 20-char bar: one '=' per 5% downloaded.
            sys.stdout.write("[%-20s] [%3.2f%%]" % ('=' * int(pct / 5), pct))
            sys.stdout.flush()
        sys.stdout.write('\n')
    return True
def download():
    """Download data files into the `downloads/` folder under `qa_path`.

    Creates the target folder if it does not already exist.
    """
    out_dir = os.path.join(qa_path, 'downloads')
    # The original docstring promised a download folder would be created,
    # but the code never did — open() below would fail on a fresh setup.
    os.makedirs(out_dir, exist_ok=True)

    for item in get_filename():
        r = requests.get('http://down.tdx.com.cn:8001/fin/{}'.format(item))
        # os.path.join replaces the ad-hoc '{}{}{}{}{}'.format(...os.sep...)
        # path assembly; NOTE(review): assumes qa_path has no trailing sep.
        with open(os.path.join(out_dir, item), "wb") as code:
            code.write(r.content)
def download():
    """Fetch the source database from `url`, transcode it from cp1252
    to UTF-8, and write it to `fp`, creating parent directories.

    `url`, `fp`, and `logger` are module-level names. NOTE(review):
    assumes the remote content is cp1252-encoded — confirm with the
    data provider.
    """
    logger.info('Retrieving source database: %s ...' % url)
    # Context manager ensures the HTTP response is closed.
    with urllib.request.urlopen(url) as f:
        output = f.read().decode('cp1252')

    # exist_ok avoids the race between the exists() check and mkdir.
    os.makedirs(os.path.dirname(fp), exist_ok=True)

    # Built-in open(encoding=...) supersedes the deprecated codecs.open.
    with open(fp, "w", encoding="utf-8") as temp:
        temp.write(output)

    logger.info('Source database downloaded to: %s' % fp)
def _download(url: str, save_path: str, filename: str):
    """Writes data from url to file.

    Skips the download when `save_path/filename` already exists.

    Args:
        url: remote location to fetch.
        save_path: directory to write into (created if missing).
        filename: local file name under `save_path`.
    """
    import shutil

    target = os.path.join(save_path, filename)
    if os.path.exists(target):
        logger.info("File %s already downloaded" % target)
        return

    r = urllib.request.urlopen(url)
    logger.info("Downloading file at %s" % target)

    # exist_ok avoids the race between the exists() check and mkdir.
    os.makedirs(save_path, exist_ok=True)

    # shutil.copyfileobj streams in chunks, replacing the hand-rolled
    # read_iter generator; the context managers close both handles.
    with r, open(target, "wb") as f:
        shutil.copyfileobj(r, f)

Related snippets