Every line of these 'pytorch download' code snippets is scanned for vulnerabilities by our powerful machine-learning engine, which combs millions of open-source libraries to help ensure your Python code is secure.
def download(prefix, epoch):
    """Download pretrained parameter files for the given checkpoint.

    Parameters
    ----------
    prefix : str
        Checkpoint path prefix; any leading "pretrain_model/" is stripped
        to form the remote file name.
    epoch : int
        Epoch number, encoded as a zero-padded 4-digit suffix.

    Raises
    ------
    Exception
        Re-raises whatever ``urlretrieve`` raised, after removing any
        partially downloaded file and printing manual-download guidance.
    """
    dir_name = os.path.dirname(prefix)
    # dirname("") would make makedirs raise; only create a dir when one exists
    if dir_name and not os.path.exists(dir_name):
        os.makedirs(dir_name)
    base_name = prefix.replace("pretrain_model/", "") + "-%04d.params" % epoch
    save_name = "%s-%04d.params" % (prefix, epoch)
    base_url = os.environ.get("SIMPLEDET_BASE_URL", "https://1dv.alarge.space/")
    full_url = base_url + base_name

    try:
        print("Downloading %s from %s" % (save_name, full_url))
        # `report` is a progress-hook callback defined elsewhere in this module
        urllib.request.urlretrieve(full_url, save_name, report)
    except Exception:
        # fixed typo: "mannually" -> "manually"
        print("Fail to download %s. You can manually download it from %s and put it to %s" % (base_name, full_url, save_name))
        # the failure may have happened before any bytes were written
        if os.path.exists(save_name):
            os.remove(save_name)
        # bare raise preserves the original traceback
        raise
def download():
    """web2py controller action: stream a stored upload back to the client.

    Delegates entirely to web2py's built-in handler, which resolves the
    requested file against the database `db` and sets response headers.
    """
    payload = response.download(request, db)
    return payload
@click.command('download')
def download():
    '''
    Install required libraries.
    Note this library will install nltk dependencies into your
    user directory.
    '''
    # NOTE: the docstring above doubles as the click help text, so it is
    # kept verbatim.
    search_paths = '\n'.join(nltk.data.path)
    click.echo("Installing nltk packages into your user directories in "
               "the following order of existence (first found):\n"
               + search_paths)

    # (category, package) pairs that this tool depends on
    required = [("taggers", "averaged_perceptron_tagger"),
                ("corpora", "wordnet"),
                ("tokenizers", "punkt")]

    # fetch only the packages not already present locally
    for _category, package in check_packages_exist(required):
        nltk.download(package)
def download(self):
    """Fetch the dataset archive from ``self.url`` into ``self.raw_dir``,
    extract its contents there, then delete the archive file."""
    archive_path = download_url(self.url, self.raw_dir)
    extract_tar(archive_path, self.raw_dir, mode='r')
    # the unpacked files are what we keep; the tarball itself is disposable
    os.unlink(archive_path)
def download(data_dir):
    """Ensure the four MNIST ubyte files exist in *data_dir*.

    Downloads and unpacks mnist.zip from data.mxnet.io when any of the
    files is missing. The caller's working directory is restored on exit.

    Parameters
    ----------
    data_dir : str
        Directory (created if absent) that should hold the dataset files.
    """
    if not os.path.isdir(data_dir):
        # was os.system('mkdir ' + data_dir): breaks on spaces and is
        # shell-injection-prone; makedirs also handles nested paths
        os.makedirs(data_dir)
    required = ('train-images-idx3-ubyte',
                'train-labels-idx1-ubyte',
                't10k-images-idx3-ubyte',
                't10k-labels-idx1-ubyte')
    original_cwd = os.getcwd()
    os.chdir(data_dir)
    try:
        if not all(os.path.exists(name) for name in required):
            os.system('wget http://data.mxnet.io/mxnet/data/mnist.zip')
            os.system('unzip mnist.zip; rm mnist.zip')
    finally:
        # original os.chdir('..') only worked for a single-level relative
        # data_dir; restoring the saved cwd is correct for any path
        os.chdir(original_cwd)
def download(dataset, year=None, redownload=True, data_dir=None):
    """Download a water-quality dataset zip and extract it locally.

    Parameters
    ----------
    dataset : str
        Dataset identifier, validated/resolved to a file name by
        ``validate.dataset``.
    year : int, optional
        When given, fetch from the ``v<year>`` tag; otherwise ``master``.
    redownload : bool, default True
        Re-fetch the archive even if it is already present.
    data_dir : path-like, optional
        Destination directory; defaults to ``$WQ_DATA`` (or ``~/.wq-data``)
        plus the tag name.

    Returns
    -------
    pathlib.Path
        Path to the extracted ``<stem>.csv`` next to the archive.
    """
    fname = validate.dataset(dataset)
    tag = "master" if year is None else "v{:d}".format(year)

    src_url = (
        "https://github.com/Geosyntec/water-quality-datasets/blob/{tag:s}/data/{fname:s}?raw=true"
    ).format(tag=tag, fname=fname)

    if data_dir is None:
        base_dir = Path(os.environ.get("WQ_DATA", "~/.wq-data"))
        data_dir = base_dir.expanduser().absolute() / tag
    else:
        data_dir = Path(data_dir)
    data_dir.mkdir(exist_ok=True, parents=True)

    dst_path = data_dir / fname
    if redownload or not dst_path.exists():
        request.urlretrieve(src_url, dst_path)

    # the archive is re-extracted on every call, even without a download
    with ZipFile(dst_path, "r") as zip_ref:
        zip_ref.extractall(data_dir)

    return dst_path.parent / "{}.csv".format(dst_path.stem)
def download(name):
    """Build a ``file <url>`` download line for the named release file.

    *name* is a %-template that takes a ``version`` key; ``version`` and
    ``htmlroot`` are module-level globals defined elsewhere in this file.
    """
    resolved = name % dict(version=version)
    link_parts = dict(file=resolved, path=htmlroot)
    return "%(file)s <%(path)s/download.php?file=%(file)s>" % link_parts