10 examples of 's3_client upload_file' in Python

Every line of the 's3_client upload_file' code snippets below is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

All examples are scanned by Snyk Code

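For orientation, here is a minimal sketch of the basic call before the community examples; the bucket name, object key, and local path are placeholders, and ExtraArgs is optional:

import boto3

s3_client = boto3.client('s3')

# Upload /tmp/report.csv to s3://example-bucket/reports/report.csv.
# upload_file(Filename, Bucket, Key) uses the managed transfer machinery,
# which switches to multipart uploads automatically for large files.
s3_client.upload_file(
    '/tmp/report.csv',
    'example-bucket',
    'reports/report.csv',
    ExtraArgs={'ContentType': 'text/csv'},
)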

def upload_file(bucket, key, local_file, s3_client):
    """
    Uploads a given file to the s3 key in the bucket
    """
    s3_client.upload_file(local_file, bucket, key)

def upload(bucket, key, filename, session=None):
    """Upload file to S3 bucket."""
    s3_client = _get_client(session)
    LOGGER.info('Uploading %s to %s/%s', filename, bucket, key)
    s3_client.upload_file(filename, bucket, key)

def s3_upload(self, _file):
    # Upload the file's contents to a new key named after its path (boto 2 API)
    sml = self.boto.new_key(_file.filepath)
    sml.set_contents_from_string(_file.source_file.read())

import boto3

def upload(source_file, bucket_name, object_key):
    s3 = boto3.resource('s3')

    # Uploads the source file to the specified S3 bucket by using a
    # managed uploader. The uploader automatically splits large
    # files and uploads parts in parallel for faster uploads.
    try:
        s3.Bucket(bucket_name).upload_file(source_file, object_key)
    except Exception as e:
        print(e)
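
The managed uploader used above can also be tuned. A minimal sketch, assuming placeholder bucket, key, and file names, that passes a boto3.s3.transfer.TransferConfig to control when the uploader switches to multipart, how large each part is, and how many threads it uses:

import boto3
from boto3.s3.transfer import TransferConfig

# Files larger than multipart_threshold are split into multipart_chunksize
# parts and uploaded on up to max_concurrency threads.
config = TransferConfig(
    multipart_threshold=8 * 1024 * 1024,
    multipart_chunksize=8 * 1024 * 1024,
    max_concurrency=4,
    use_threads=True,
)

s3 = boto3.resource('s3')
s3.Bucket('example-bucket').upload_file('/tmp/large.bin', 'backups/large.bin',
                                        Config=config)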

def upload_to_amazon(bucket_name, file_path):

    # Use environment variables to authenticate to S3
    c = boto.connect_s3()
    b = c.get_bucket(bucket_name)

    file_name = os.path.basename(file_path)

    source_path = file_path
    source_size = os.stat(source_path).st_size

    # Create a multipart upload request
    mp = b.initiate_multipart_upload(file_name)

    # Use a chunk size of 50 MiB (feel free to change this)
    chunk_size = 52428800
    chunk_count = int(math.ceil(source_size / float(chunk_size)))

    # Send the file parts, using FileChunkIO to create a file-like object
    # that points to a certain byte range within the original file. We
    # set bytes to never exceed the original file size.
    for i in range(chunk_count):
        print('Uploading chunk %s of %s.' % (i + 1, chunk_count))
        offset = chunk_size * i
        bytes = min(chunk_size, source_size - offset)
        with FileChunkIO(source_path, 'r', offset=offset, bytes=bytes) as fp:
            mp.upload_part_from_file(fp, part_num=i + 1)

    # Finish the upload
    mp.complete_upload()

    b.set_acl('public-read', file_name)

    url = get_s3_url(bucket_name, file_name)
    return url

def _upload_one_part(self, filename, bucket, key,
                     upload_id, part_size, extra_args,
                     callback, part_number):
    open_chunk_reader = self._os.open_file_chunk_reader
    with open_chunk_reader(filename, part_size * (part_number - 1),
                           part_size, callback) as body:
        response = self._client.upload_part(
            Bucket=bucket, Key=key,
            UploadId=upload_id, PartNumber=part_number, Body=body,
            **extra_args)
        etag = response['ETag']
        return {'ETag': etag, 'PartNumber': part_number}
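
The method above is an internal helper that uploads a single part of an already-started multipart upload. For orientation, here is a hedged sketch of the surrounding flow using the public boto3 client API (create_multipart_upload, upload_part, complete_multipart_upload); the bucket, key, and file path are placeholders:

import boto3

s3_client = boto3.client('s3')
bucket, key, filename = 'example-bucket', 'backups/large.bin', '/tmp/large.bin'
part_size = 8 * 1024 * 1024  # every part except the last must be at least 5 MiB

# 1. Start the multipart upload and remember its UploadId.
upload_id = s3_client.create_multipart_upload(Bucket=bucket, Key=key)['UploadId']

parts = []
try:
    # 2. Upload the file one part at a time, collecting each part's ETag.
    with open(filename, 'rb') as f:
        part_number = 1
        while True:
            chunk = f.read(part_size)
            if not chunk:
                break
            response = s3_client.upload_part(
                Bucket=bucket, Key=key, UploadId=upload_id,
                PartNumber=part_number, Body=chunk)
            parts.append({'ETag': response['ETag'], 'PartNumber': part_number})
            part_number += 1

    # 3. Stitch the parts together into the final object.
    s3_client.complete_multipart_upload(
        Bucket=bucket, Key=key, UploadId=upload_id,
        MultipartUpload={'Parts': parts})
except Exception:
    # Abort so the incomplete parts do not keep accruing storage charges.
    s3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
    raise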

from urllib.parse import urlparse

import boto3

def upload_file(src_path, dst_url):
    """Upload a local file to S3.

    If the file already exists it is overwritten.

    :param src_path: Source local filesystem path
    :param dst_url: Destination S3 URL
    """
    parsed_url = urlparse(dst_url)
    dst_bucket = parsed_url.netloc
    dst_key = parsed_url.path[1:]

    client = boto3.client('s3')
    client.upload_file(src_path, dst_bucket, dst_key)
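
As a hypothetical usage (bucket and paths are placeholders), the helper above is called with an s3:// URL; urlparse splits it into the bucket (netloc) and the key (path without its leading slash):

# Uploads /tmp/report.csv to bucket 'example-bucket' under key 'reports/report.csv'.
upload_file('/tmp/report.csv', 's3://example-bucket/reports/report.csv')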

def _upload_singlepart(self,
                       namespace_name,
                       bucket_name,
                       object_name,
                       file_path,
                       **kwargs):
    # put_object expects 'opc_meta' not metadata
    if 'metadata' in kwargs:
        kwargs['opc_meta'] = kwargs['metadata']
        kwargs.pop('metadata')

    # remove unrecognized kwargs for put_object
    progress_callback = None
    if 'progress_callback' in kwargs:
        progress_callback = kwargs['progress_callback']
        kwargs.pop('progress_callback')

    with open(file_path, 'rb') as file_object:
        # progress_callback is not supported for files of zero bytes
        # FileReadCallbackStream will not be handled properly by requests in this case
        file_size = os.fstat(file_object.fileno()).st_size
        if file_size != 0 and progress_callback:
            wrapped_file = FileReadCallbackStream(
                file_object, lambda bytes_read: progress_callback(bytes_read))

            response = self.object_storage_client.put_object(namespace_name,
                                                             bucket_name,
                                                             object_name,
                                                             wrapped_file,
                                                             **kwargs)
        else:
            response = self.object_storage_client.put_object(namespace_name,
                                                             bucket_name,
                                                             object_name,
                                                             file_object,
                                                             **kwargs)
    return response

def _upload(bucket_name, key_name, data):
    # Cache the data so a later download on the same instance can be skipped
    download_as_string.key(bucket_name, key_name).set(data)
    # Upload the data to a new key in the bucket (boto 2 API)
    bucket = _get_bucket(bucket_name)
    key = bucket.new_key(key_name)
    key.set_contents_from_string(data)

def upload_part_from_file_given_pos(self, bucket, object, filename, offset,
                                    partsize, upload_id, part_number,
                                    headers=None, params=None):
    if not params:
        params = {}
    params['partNumber'] = part_number
    params['uploadId'] = upload_id
    content_type = ''
    return self.put_object_from_file_given_pos(bucket, object, filename, offset,
                                               partsize, content_type, headers,
                                               params)
