30 changes: 9 additions & 21 deletions google/cloud/storage/blob.py
@@ -992,7 +992,7 @@ def download_to_file(
timeout=_DEFAULT_TIMEOUT,
checksum="md5",
):
"""Download the contents of this blob into a file-like object.
"""DEPRECATED. Download the contents of this blob into a file-like object.

.. note::

@@ -1084,31 +1084,19 @@ def download_to_file(
"""
client = self._require_client(client)

download_url = self._get_download_url(
client,
client.download_blob_to_file(
self,
file_obj=file_obj,
start=start,
end=end,
raw_download=raw_download,
if_generation_match=if_generation_match,
if_generation_not_match=if_generation_not_match,
if_metageneration_match=if_metageneration_match,
if_metageneration_not_match=if_metageneration_not_match,
timeout=timeout,
checksum=checksum,
)
headers = _get_encryption_headers(self._encryption_key)
headers["accept-encoding"] = "gzip"

transport = self._get_transport(client)
try:
self._do_download(
transport,
file_obj,
download_url,
headers,
start,
end,
raw_download,
timeout=timeout,
checksum=checksum,
)
except resumable_media.InvalidResponse as exc:
_raise_from_invalid_response(exc)

def download_to_filename(
self,
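With this change, ``Blob.download_to_file`` is reduced to a deprecation shim that forwards every argument to ``Client.download_blob_to_file``. A minimal usage sketch of the preferred entry point; the bucket and object names are purely illustrative:

    from google.cloud import storage

    client = storage.Client()
    blob = client.bucket("my-bucket").blob("data.txt")  # hypothetical names

    with open("data.txt", "wb") as file_obj:
        # Preferred call after this PR: go through the client rather than
        # the deprecated Blob.download_to_file wrapper.
        client.download_blob_to_file(blob, file_obj)

The same method also accepts a ``gs://bucket/object`` URI string in place of a ``Blob`` instance.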
60 changes: 16 additions & 44 deletions google/cloud/storage/bucket.py
@@ -1229,7 +1229,7 @@ def list_blobs(
timeout=_DEFAULT_TIMEOUT,
retry=DEFAULT_RETRY,
):
"""Return an iterator used to find blobs in the bucket.
"""DEPRECATED. Return an iterator used to find blobs in the bucket.

.. note::
Direct use of this method is deprecated. Use ``Client.list_blobs`` instead.
@@ -1329,52 +1329,24 @@ def list_blobs(
>>> client = storage.Client()

>>> bucket = storage.Bucket("my-bucket-name", user_project='my-project')
>>> all_blobs = list(bucket.list_blobs())
>>> all_blobs = list(client.list_blobs(bucket))
"""
extra_params = {"projection": projection}

if prefix is not None:
extra_params["prefix"] = prefix

if delimiter is not None:
extra_params["delimiter"] = delimiter

if start_offset is not None:
extra_params["startOffset"] = start_offset

if end_offset is not None:
extra_params["endOffset"] = end_offset

if include_trailing_delimiter is not None:
extra_params["includeTrailingDelimiter"] = include_trailing_delimiter

if versions is not None:
extra_params["versions"] = versions

if fields is not None:
extra_params["fields"] = fields

if self.user_project is not None:
extra_params["userProject"] = self.user_project

client = self._require_client(client)
path = self.path + "/o"
api_request = functools.partial(
client._connection.api_request, timeout=timeout, retry=retry
)
iterator = page_iterator.HTTPIterator(
client=client,
api_request=api_request,
path=path,
item_to_value=_item_to_blob,
page_token=page_token,
return client.list_blobs(
self,
max_results=max_results,
extra_params=extra_params,
page_start=_blobs_page_start,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
start_offset=start_offset,
end_offset=end_offset,
include_trailing_delimiter=include_trailing_delimiter,
versions=versions,
projection=projection,
fields=fields,
timeout=timeout,
retry=retry,
)
iterator.bucket = self
iterator.prefixes = set()
return iterator

def list_notifications(
self, client=None, timeout=_DEFAULT_TIMEOUT, retry=DEFAULT_RETRY
@@ -3111,7 +3083,7 @@ def make_public(

for blob in blobs:
blob.acl.all().grant_read()
blob.acl.save(client=client, timeout=timeout, retry=retry)
blob.acl.save(client=client, timeout=timeout)

def make_private(
self,
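``Bucket.list_blobs`` is likewise reduced to a forwarding call into ``Client.list_blobs``. A sketch of the recommended listing pattern, assuming an illustrative bucket name:

    from google.cloud import storage

    client = storage.Client()

    # The bucket may be passed as a Bucket instance or as its name.
    # prefix/delimiter emulate a "directory" listing under logs/.
    for blob in client.list_blobs("my-bucket-name", prefix="logs/", delimiter="/"):
        print(blob.name)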
144 changes: 124 additions & 20 deletions google/cloud/storage/client.py
@@ -25,6 +25,8 @@

from google.auth.credentials import AnonymousCredentials

from google import resumable_media

from google.api_core import page_iterator
from google.cloud._helpers import _LocalStack, _NOW
from google.cloud.client import ClientWithProject
@@ -39,8 +41,12 @@
_sign_message,
)
from google.cloud.storage.batch import Batch
from google.cloud.storage.bucket import Bucket
from google.cloud.storage.blob import Blob
from google.cloud.storage.bucket import Bucket, _item_to_blob, _blobs_page_start
from google.cloud.storage.blob import (
Blob,
_get_encryption_headers,
_raise_from_invalid_response,
)
from google.cloud.storage.hmac_key import HMACKeyMetadata
from google.cloud.storage.acl import BucketACL
from google.cloud.storage.acl import DefaultObjectACL
@@ -602,7 +608,20 @@ def create_bucket(
bucket._set_properties(api_response)
return bucket

def download_blob_to_file(self, blob_or_uri, file_obj, start=None, end=None):
def download_blob_to_file(
self,
blob_or_uri,
file_obj,
start=None,
end=None,
raw_download=False,
if_generation_match=None,
if_generation_not_match=None,
if_metageneration_match=None,
if_metageneration_not_match=None,
timeout=_DEFAULT_TIMEOUT,
checksum="md5",
):
"""Download the contents of a blob object or blob URI into a file-like object.

Args:
@@ -617,6 +636,40 @@ def download_blob_to_file(self, blob_or_uri, file_obj, start=None, end=None):
(Optional) The first byte in a range to be downloaded.
end (int):
(Optional) The last byte in a range to be downloaded.
raw_download (bool):
(Optional) If true, download the object without any expansion.
if_generation_match (long):
(Optional) Make the operation conditional on whether
the blob's current generation matches the given value.
Setting to 0 makes the operation succeed only if there
are no live versions of the blob.
if_generation_not_match (long):
(Optional) Make the operation conditional on whether
the blob's current generation does not match the given
value. If no live blob exists, the precondition fails.
Setting to 0 makes the operation succeed only if there
is a live version of the blob.
if_metageneration_match (long):
(Optional) Make the operation conditional on whether the
blob's current metageneration matches the given value.
if_metageneration_not_match (long):
(Optional) Make the operation conditional on whether the
blob's current metageneration does not match the given value.
timeout ([Union[float, Tuple[float, float]]]):
(Optional) The number of seconds the transport should wait for the
server response. Depending on the retry strategy, a request may be
repeated several times using the same timeout each time.
Can also be passed as a tuple (connect_timeout, read_timeout).
See :meth:`requests.Session.request` documentation for details.
checksum (str):
(Optional) The type of checksum to compute to verify the integrity
of the object. The response headers must contain a checksum of the
requested type. If the headers lack an appropriate checksum (for
instance in the case of transcoded or ranged downloads where the
remote service does not know the correct checksum, including
downloads where chunk_size is set) an INFO-level log will be
emitted. Supported values are "md5", "crc32c" and None. The default
is "md5".

Examples:
Download a blob using a blob resource.
@@ -642,11 +695,33 @@


"""
if not isinstance(blob_or_uri, Blob):
blob_or_uri = Blob.from_string(blob_or_uri)
download_url = blob_or_uri._get_download_url(
self,
if_generation_match=if_generation_match,
if_generation_not_match=if_generation_not_match,
if_metageneration_match=if_metageneration_match,
if_metageneration_not_match=if_metageneration_not_match,
)
headers = _get_encryption_headers(blob_or_uri._encryption_key)
headers["accept-encoding"] = "gzip"

transport = self._http
try:
blob_or_uri.download_to_file(file_obj, client=self, start=start, end=end)
except AttributeError:
blob = Blob.from_string(blob_or_uri, self)
blob.download_to_file(file_obj, client=self, start=start, end=end)
blob_or_uri._do_download(
transport,
file_obj,
download_url,
headers,
start,
end,
raw_download,
timeout=timeout,
checksum=checksum,
)
except resumable_media.InvalidResponse as exc:
_raise_from_invalid_response(exc)

def list_blobs(
self,
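The expanded signature above lets ranged, conditional, and checksummed downloads go through the client directly. A hedged sketch of those options, with placeholder bucket and object names and the PR's new parameters assumed available:

    from google.cloud import storage

    client = storage.Client()
    blob = client.bucket("my-bucket").blob("big-file.bin")  # hypothetical names
    blob.reload()  # populate blob.generation for the precondition below

    with open("first-kib.bin", "wb") as file_obj:
        client.download_blob_to_file(
            blob,
            file_obj,
            start=0,
            end=1023,                             # inclusive: first 1 KiB
            if_generation_match=blob.generation,  # fail if the object was overwritten
            checksum=None,                        # ranged downloads carry no usable checksum
        )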
@@ -761,21 +836,50 @@ def list_blobs(
>>> all_blobs = list(client.list_blobs(bucket))
"""
bucket = self._bucket_arg_to_bucket(bucket_or_name)
return bucket.list_blobs(
max_results=max_results,
page_token=page_token,
prefix=prefix,
delimiter=delimiter,
start_offset=start_offset,
end_offset=end_offset,
include_trailing_delimiter=include_trailing_delimiter,
versions=versions,
projection=projection,
fields=fields,

extra_params = {"projection": projection}

if prefix is not None:
extra_params["prefix"] = prefix

if delimiter is not None:
extra_params["delimiter"] = delimiter

if start_offset is not None:
extra_params["startOffset"] = start_offset

if end_offset is not None:
extra_params["endOffset"] = end_offset

if include_trailing_delimiter is not None:
extra_params["includeTrailingDelimiter"] = include_trailing_delimiter

if versions is not None:
extra_params["versions"] = versions

if fields is not None:
extra_params["fields"] = fields

if bucket.user_project is not None:
extra_params["userProject"] = bucket.user_project

path = bucket.path + "/o"
api_request = functools.partial(
self._connection.api_request, timeout=timeout, retry=DEFAULT_RETRY
)
iterator = page_iterator.HTTPIterator(
client=self,
timeout=timeout,
retry=retry,
api_request=api_request,
path=path,
item_to_value=_item_to_blob,
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
page_start=_blobs_page_start,
)
iterator.bucket = bucket
iterator.prefixes = set()
return iterator

def list_buckets(
self,
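Because ``Client.list_blobs`` now builds the ``HTTPIterator`` itself (via ``_item_to_blob`` and ``_blobs_page_start``), the returned iterator still exposes the collected ``prefixes`` after iteration, as it did when ``Bucket.list_blobs`` constructed it. A small sketch, with the bucket name as a placeholder:

    from google.cloud import storage

    client = storage.Client()
    iterator = client.list_blobs("my-bucket-name", delimiter="/")

    blobs = list(iterator)    # consuming the pages populates iterator.prefixes
    print(iterator.prefixes)  # "subdirectory" prefixes gathered by _blobs_page_start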