Initial commit
This commit is contained in:
930
venv/lib/python3.8/site-packages/gridfs/__init__.py
Normal file
930
venv/lib/python3.8/site-packages/gridfs/__init__.py
Normal file
@@ -0,0 +1,930 @@
|
||||
# Copyright 2009-present MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""GridFS is a specification for storing large objects in Mongo.
|
||||
|
||||
The :mod:`gridfs` package is an implementation of GridFS on top of
|
||||
:mod:`pymongo`, exposing a file-like interface.
|
||||
|
||||
.. mongodoc:: gridfs
|
||||
"""
|
||||
|
||||
from bson.py3compat import abc
|
||||
from gridfs.errors import NoFile
|
||||
from gridfs.grid_file import (GridIn,
|
||||
GridOut,
|
||||
GridOutCursor,
|
||||
DEFAULT_CHUNK_SIZE,
|
||||
_clear_entity_type_registry)
|
||||
from pymongo import (ASCENDING,
|
||||
DESCENDING)
|
||||
from pymongo.common import UNAUTHORIZED_CODES, validate_string
|
||||
from pymongo.database import Database
|
||||
from pymongo.errors import ConfigurationError, OperationFailure
|
||||
|
||||
|
||||
class GridFS(object):
    """A file-storage API layered on a single MongoDB database.

    Implements the GridFS specification: file contents live in a
    ``<collection>.chunks`` collection and per-file metadata in
    ``<collection>.files``.
    """

    def __init__(self, database, collection="fs", disable_md5=False):
        """Create a new :class:`GridFS` on top of *database*.

        :Parameters:
          - `database`: the :class:`~pymongo.database.Database` to use
          - `collection` (optional): name of the root collection (default
            ``"fs"``)
          - `disable_md5` (optional): when ``True``, skip MD5 checksum
            computation for uploaded files

        Raises :class:`TypeError` if *database* is not a ``Database`` and
        :class:`~pymongo.errors.ConfigurationError` if its write concern is
        not acknowledged.

        .. mongodoc:: gridfs
        """
        if not isinstance(database, Database):
            raise TypeError("database must be an instance of Database")

        # Strip any custom type registry; GridFS works on raw documents.
        database = _clear_entity_type_registry(database)

        if not database.write_concern.acknowledged:
            raise ConfigurationError('database must use '
                                     'acknowledged write_concern')

        self.__database = database
        self.__collection = database[collection]
        self.__files = self.__collection.files
        self.__chunks = self.__collection.chunks
        self.__disable_md5 = disable_md5

    def new_file(self, **kwargs):
        """Return a writable :class:`~gridfs.grid_file.GridIn` for a new file.

        All keyword arguments are forwarded to ``GridIn``. A manually
        supplied ``"_id"`` must not already exist in GridFS, otherwise
        :class:`~gridfs.errors.FileExists` is raised.
        """
        # GridIn itself ensures the (files_id, n) chunks index when needed,
        # so nothing to do here beyond constructing it.
        return GridIn(
            self.__collection, disable_md5=self.__disable_md5, **kwargs)

    def put(self, data, **kwargs):
        """Store *data* in GridFS as a new file and return its ``"_id"``.

        *data* may be :class:`bytes` or a file-like object providing
        :meth:`read`. With an ``encoding`` keyword argument, a text string
        is also accepted and encoded before being written. Remaining
        keyword arguments are forwarded to ``GridIn``. A manually supplied
        ``"_id"`` must not already exist, otherwise
        :class:`~gridfs.errors.FileExists` is raised.
        """
        grid_file = GridIn(
            self.__collection, disable_md5=self.__disable_md5, **kwargs)
        try:
            grid_file.write(data)
        finally:
            # Close unconditionally so the files document is finalized
            # (or cleaned up) even when write() fails.
            grid_file.close()

        return grid_file._id

    def get(self, file_id, session=None):
        """Return a readable ``GridOut`` for the file with ``"_id"`` *file_id*.

        :Parameters:
          - `file_id`: ``"_id"`` of the file to get
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`

        Raises :class:`~gridfs.errors.NoFile` immediately when no such file
        exists, instead of deferring to first attribute access.
        """
        gout = GridOut(self.__collection, file_id, session=session)
        gout._ensure_file()
        return gout

    def get_version(self, filename=None, version=-1, session=None, **kwargs):
        """Return one version of a file matched by name and/or metadata.

        Versions count forward from the oldest upload for non-negative
        *version* (``0`` is the first upload) and backward from the newest
        for negative *version* (``-1``, the default, is the most recent).

        :Parameters:
          - `filename`: ``"filename"`` of the file to get, or `None`
          - `version` (optional): which version to fetch
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`
          - `**kwargs` (optional): match files by custom metadata fields

        Raises :class:`~gridfs.errors.NoFile` when no matching version
        exists.
        """
        query = kwargs
        if filename is not None:
            query["filename"] = filename

        cursor = self.__files.find(query, session=session)
        if version < 0:
            # e.g. version=-1 -> skip 0 newest-first; -2 -> skip 1; ...
            cursor.limit(-1).skip(abs(version) - 1).sort(
                "uploadDate", DESCENDING)
        else:
            cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING)
        try:
            doc = next(cursor)
        except StopIteration:
            raise NoFile("no version %d for filename %r" % (version, filename))
        return GridOut(
            self.__collection, file_document=doc, session=session)

    def get_last_version(self, filename=None, session=None, **kwargs):
        """Return the most recent version of a file.

        Shorthand for :meth:`get_version` with the default ``version=-1``.
        """
        return self.get_version(filename=filename, session=session, **kwargs)

    # TODO add optional safe mode for chunk removal?
    def delete(self, file_id, session=None):
        """Delete the files document and all chunks for ``"_id"`` *file_id*.

        Deleting a non-existent file is considered successful, since the
        end state is the same. Concurrent readers of the file may observe
        an invalid/corrupt file while the delete is in progress.

        :Parameters:
          - `file_id`: ``"_id"`` of the file to delete
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`
        """
        self.__files.delete_one({"_id": file_id}, session=session)
        self.__chunks.delete_many({"files_id": file_id}, session=session)

    def list(self, session=None):
        """Return the distinct filenames stored in this :class:`GridFS`.

        :Parameters:
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`
        """
        # distinct() reports documents that lack a filename as None when an
        # index is present; filter those out.
        filenames = self.__files.distinct("filename", session=session)
        return [name for name in filenames if name is not None]

    def find_one(self, filter=None, session=None, *args, **kwargs):
        """Return a single matching ``GridOut``, or ``None`` if none match.

        A non-mapping *filter* is treated as an ``"_id"`` value. All other
        arguments are forwarded to :meth:`find`; any `limit` argument is
        ignored. For example::

            file = fs.find_one({"filename": "lisa.txt"})
        """
        if filter is not None and not isinstance(filter, abc.Mapping):
            filter = {"_id": filter}

        for f in self.find(filter, *args, session=session, **kwargs):
            return f

        return None

    def find(self, *args, **kwargs):
        """Query GridFS for files; returns a ``GridOutCursor``.

        Follows a similar interface to
        :meth:`~pymongo.collection.Collection.find`, supporting ``filter``,
        ``skip``, ``limit``, ``no_cursor_timeout`` and ``sort``. A
        :class:`~pymongo.client_session.ClientSession` passed here is
        associated with every yielded :class:`~gridfs.grid_file.GridOut`.
        For example::

            for grid_out in fs.find({"filename": "lisa.txt"},
                                    no_cursor_timeout=True):
                data = grid_out.read()

        Raises :class:`TypeError` if any of the arguments are of improper
        type.
        """
        return GridOutCursor(self.__collection, *args, **kwargs)

    def exists(self, document_or_id=None, session=None, **kwargs):
        """Return ``True`` when a matching file document exists.

        The file can be identified by an ``_id`` value, a query document,
        or query fields given as keyword arguments; keyword arguments take
        precedence when both are supplied. These are equivalent::

            >>> fs.exists(file_id)
            >>> fs.exists({"_id": file_id})
            >>> fs.exists(_id=file_id)

        No indexes are created by this method.
        """
        query = kwargs if kwargs else document_or_id
        f = self.__files.find_one(query, ["_id"], session=session)
        return f is not None
|
||||
|
||||
|
||||
class GridFSBucket(object):
|
||||
"""An instance of GridFS on top of a single Database."""
|
||||
|
||||
def __init__(self, db, bucket_name="fs",
|
||||
chunk_size_bytes=DEFAULT_CHUNK_SIZE, write_concern=None,
|
||||
read_preference=None, disable_md5=False):
|
||||
"""Create a new instance of :class:`GridFSBucket`.
|
||||
|
||||
Raises :exc:`TypeError` if `database` is not an instance of
|
||||
:class:`~pymongo.database.Database`.
|
||||
|
||||
Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern`
|
||||
is not acknowledged.
|
||||
|
||||
:Parameters:
|
||||
- `database`: database to use.
|
||||
- `bucket_name` (optional): The name of the bucket. Defaults to 'fs'.
|
||||
- `chunk_size_bytes` (optional): The chunk size in bytes. Defaults
|
||||
to 255KB.
|
||||
- `write_concern` (optional): The
|
||||
:class:`~pymongo.write_concern.WriteConcern` to use. If ``None``
|
||||
(the default) db.write_concern is used.
|
||||
- `read_preference` (optional): The read preference to use. If
|
||||
``None`` (the default) db.read_preference is used.
|
||||
- `disable_md5` (optional): When True, MD5 checksums will not be
|
||||
computed for uploaded files. Useful in environments where MD5
|
||||
cannot be used for regulatory or other reasons. Defaults to False.
|
||||
|
||||
.. versionadded:: 3.1
|
||||
|
||||
.. mongodoc:: gridfs
|
||||
"""
|
||||
if not isinstance(db, Database):
|
||||
raise TypeError("database must be an instance of Database")
|
||||
|
||||
db = _clear_entity_type_registry(db)
|
||||
|
||||
wtc = write_concern if write_concern is not None else db.write_concern
|
||||
if not wtc.acknowledged:
|
||||
raise ConfigurationError('write concern must be acknowledged')
|
||||
|
||||
self._db = db
|
||||
self._bucket_name = bucket_name
|
||||
self._collection = db[bucket_name]
|
||||
self._disable_md5 = disable_md5
|
||||
|
||||
self._chunks = self._collection.chunks.with_options(
|
||||
write_concern=write_concern,
|
||||
read_preference=read_preference)
|
||||
|
||||
self._files = self._collection.files.with_options(
|
||||
write_concern=write_concern,
|
||||
read_preference=read_preference)
|
||||
|
||||
self._chunk_size_bytes = chunk_size_bytes
|
||||
|
||||
def open_upload_stream(self, filename, chunk_size_bytes=None,
|
||||
metadata=None, session=None):
|
||||
"""Opens a Stream that the application can write the contents of the
|
||||
file to.
|
||||
|
||||
The user must specify the filename, and can choose to add any
|
||||
additional information in the metadata field of the file document or
|
||||
modify the chunk size.
|
||||
For example::
|
||||
|
||||
my_db = MongoClient().test
|
||||
fs = GridFSBucket(my_db)
|
||||
grid_in = fs.open_upload_stream(
|
||||
"test_file", chunk_size_bytes=4,
|
||||
metadata={"contentType": "text/plain"})
|
||||
grid_in.write("data I want to store!")
|
||||
grid_in.close() # uploaded on close
|
||||
|
||||
Returns an instance of :class:`~gridfs.grid_file.GridIn`.
|
||||
|
||||
Raises :exc:`~gridfs.errors.NoFile` if no such version of
|
||||
that file exists.
|
||||
Raises :exc:`~ValueError` if `filename` is not a string.
|
||||
|
||||
:Parameters:
|
||||
- `filename`: The name of the file to upload.
|
||||
- `chunk_size_bytes` (options): The number of bytes per chunk of this
|
||||
file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
|
||||
- `metadata` (optional): User data for the 'metadata' field of the
|
||||
files collection document. If not provided the metadata field will
|
||||
be omitted from the files collection document.
|
||||
- `session` (optional): a
|
||||
:class:`~pymongo.client_session.ClientSession`
|
||||
|
||||
.. versionchanged:: 3.6
|
||||
Added ``session`` parameter.
|
||||
"""
|
||||
validate_string("filename", filename)
|
||||
|
||||
opts = {"filename": filename,
|
||||
"chunk_size": (chunk_size_bytes if chunk_size_bytes
|
||||
is not None else self._chunk_size_bytes)}
|
||||
if metadata is not None:
|
||||
opts["metadata"] = metadata
|
||||
|
||||
return GridIn(
|
||||
self._collection,
|
||||
session=session,
|
||||
disable_md5=self._disable_md5,
|
||||
**opts)
|
||||
|
||||
def open_upload_stream_with_id(
|
||||
self, file_id, filename, chunk_size_bytes=None, metadata=None,
|
||||
session=None):
|
||||
"""Opens a Stream that the application can write the contents of the
|
||||
file to.
|
||||
|
||||
The user must specify the file id and filename, and can choose to add
|
||||
any additional information in the metadata field of the file document
|
||||
or modify the chunk size.
|
||||
For example::
|
||||
|
||||
my_db = MongoClient().test
|
||||
fs = GridFSBucket(my_db)
|
||||
grid_in = fs.open_upload_stream_with_id(
|
||||
ObjectId(),
|
||||
"test_file",
|
||||
chunk_size_bytes=4,
|
||||
metadata={"contentType": "text/plain"})
|
||||
grid_in.write("data I want to store!")
|
||||
grid_in.close() # uploaded on close
|
||||
|
||||
Returns an instance of :class:`~gridfs.grid_file.GridIn`.
|
||||
|
||||
Raises :exc:`~gridfs.errors.NoFile` if no such version of
|
||||
that file exists.
|
||||
Raises :exc:`~ValueError` if `filename` is not a string.
|
||||
|
||||
:Parameters:
|
||||
- `file_id`: The id to use for this file. The id must not have
|
||||
already been used for another file.
|
||||
- `filename`: The name of the file to upload.
|
||||
- `chunk_size_bytes` (options): The number of bytes per chunk of this
|
||||
file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
|
||||
- `metadata` (optional): User data for the 'metadata' field of the
|
||||
files collection document. If not provided the metadata field will
|
||||
be omitted from the files collection document.
|
||||
- `session` (optional): a
|
||||
:class:`~pymongo.client_session.ClientSession`
|
||||
|
||||
.. versionchanged:: 3.6
|
||||
Added ``session`` parameter.
|
||||
"""
|
||||
validate_string("filename", filename)
|
||||
|
||||
opts = {"_id": file_id,
|
||||
"filename": filename,
|
||||
"chunk_size": (chunk_size_bytes if chunk_size_bytes
|
||||
is not None else self._chunk_size_bytes)}
|
||||
if metadata is not None:
|
||||
opts["metadata"] = metadata
|
||||
|
||||
return GridIn(
|
||||
self._collection,
|
||||
session=session,
|
||||
disable_md5=self._disable_md5,
|
||||
**opts)
|
||||
|
||||
def upload_from_stream(self, filename, source, chunk_size_bytes=None,
|
||||
metadata=None, session=None):
|
||||
"""Uploads a user file to a GridFS bucket.
|
||||
|
||||
Reads the contents of the user file from `source` and uploads
|
||||
it to the file `filename`. Source can be a string or file-like object.
|
||||
For example::
|
||||
|
||||
my_db = MongoClient().test
|
||||
fs = GridFSBucket(my_db)
|
||||
file_id = fs.upload_from_stream(
|
||||
"test_file",
|
||||
"data I want to store!",
|
||||
chunk_size_bytes=4,
|
||||
metadata={"contentType": "text/plain"})
|
||||
|
||||
Returns the _id of the uploaded file.
|
||||
|
||||
Raises :exc:`~gridfs.errors.NoFile` if no such version of
|
||||
that file exists.
|
||||
Raises :exc:`~ValueError` if `filename` is not a string.
|
||||
|
||||
:Parameters:
|
||||
- `filename`: The name of the file to upload.
|
||||
- `source`: The source stream of the content to be uploaded. Must be
|
||||
a file-like object that implements :meth:`read` or a string.
|
||||
- `chunk_size_bytes` (options): The number of bytes per chunk of this
|
||||
file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
|
||||
- `metadata` (optional): User data for the 'metadata' field of the
|
||||
files collection document. If not provided the metadata field will
|
||||
be omitted from the files collection document.
|
||||
- `session` (optional): a
|
||||
:class:`~pymongo.client_session.ClientSession`
|
||||
|
||||
.. versionchanged:: 3.6
|
||||
Added ``session`` parameter.
|
||||
"""
|
||||
with self.open_upload_stream(
|
||||
filename, chunk_size_bytes, metadata, session=session) as gin:
|
||||
gin.write(source)
|
||||
|
||||
return gin._id
|
||||
|
||||
def upload_from_stream_with_id(self, file_id, filename, source,
|
||||
chunk_size_bytes=None, metadata=None,
|
||||
session=None):
|
||||
"""Uploads a user file to a GridFS bucket with a custom file id.
|
||||
|
||||
Reads the contents of the user file from `source` and uploads
|
||||
it to the file `filename`. Source can be a string or file-like object.
|
||||
For example::
|
||||
|
||||
my_db = MongoClient().test
|
||||
fs = GridFSBucket(my_db)
|
||||
file_id = fs.upload_from_stream(
|
||||
ObjectId(),
|
||||
"test_file",
|
||||
"data I want to store!",
|
||||
chunk_size_bytes=4,
|
||||
metadata={"contentType": "text/plain"})
|
||||
|
||||
Raises :exc:`~gridfs.errors.NoFile` if no such version of
|
||||
that file exists.
|
||||
Raises :exc:`~ValueError` if `filename` is not a string.
|
||||
|
||||
:Parameters:
|
||||
- `file_id`: The id to use for this file. The id must not have
|
||||
already been used for another file.
|
||||
- `filename`: The name of the file to upload.
|
||||
- `source`: The source stream of the content to be uploaded. Must be
|
||||
a file-like object that implements :meth:`read` or a string.
|
||||
- `chunk_size_bytes` (options): The number of bytes per chunk of this
|
||||
file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
|
||||
- `metadata` (optional): User data for the 'metadata' field of the
|
||||
files collection document. If not provided the metadata field will
|
||||
be omitted from the files collection document.
|
||||
- `session` (optional): a
|
||||
:class:`~pymongo.client_session.ClientSession`
|
||||
|
||||
.. versionchanged:: 3.6
|
||||
Added ``session`` parameter.
|
||||
"""
|
||||
with self.open_upload_stream_with_id(
|
||||
file_id, filename, chunk_size_bytes, metadata,
|
||||
session=session) as gin:
|
||||
gin.write(source)
|
||||
|
||||
def open_download_stream(self, file_id, session=None):
|
||||
"""Opens a Stream from which the application can read the contents of
|
||||
the stored file specified by file_id.
|
||||
|
||||
For example::
|
||||
|
||||
my_db = MongoClient().test
|
||||
fs = GridFSBucket(my_db)
|
||||
# get _id of file to read.
|
||||
file_id = fs.upload_from_stream("test_file", "data I want to store!")
|
||||
grid_out = fs.open_download_stream(file_id)
|
||||
contents = grid_out.read()
|
||||
|
||||
Returns an instance of :class:`~gridfs.grid_file.GridOut`.
|
||||
|
||||
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
|
||||
|
||||
:Parameters:
|
||||
- `file_id`: The _id of the file to be downloaded.
|
||||
- `session` (optional): a
|
||||
:class:`~pymongo.client_session.ClientSession`
|
||||
|
||||
.. versionchanged:: 3.6
|
||||
Added ``session`` parameter.
|
||||
"""
|
||||
gout = GridOut(self._collection, file_id, session=session)
|
||||
|
||||
# Raise NoFile now, instead of on first attribute access.
|
||||
gout._ensure_file()
|
||||
return gout
|
||||
|
||||
def download_to_stream(self, file_id, destination, session=None):
|
||||
"""Downloads the contents of the stored file specified by file_id and
|
||||
writes the contents to `destination`.
|
||||
|
||||
For example::
|
||||
|
||||
my_db = MongoClient().test
|
||||
fs = GridFSBucket(my_db)
|
||||
# Get _id of file to read
|
||||
file_id = fs.upload_from_stream("test_file", "data I want to store!")
|
||||
# Get file to write to
|
||||
file = open('myfile','wb+')
|
||||
fs.download_to_stream(file_id, file)
|
||||
file.seek(0)
|
||||
contents = file.read()
|
||||
|
||||
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
|
||||
|
||||
:Parameters:
|
||||
- `file_id`: The _id of the file to be downloaded.
|
||||
- `destination`: a file-like object implementing :meth:`write`.
|
||||
- `session` (optional): a
|
||||
:class:`~pymongo.client_session.ClientSession`
|
||||
|
||||
.. versionchanged:: 3.6
|
||||
Added ``session`` parameter.
|
||||
"""
|
||||
with self.open_download_stream(file_id, session=session) as gout:
|
||||
for chunk in gout:
|
||||
destination.write(chunk)
|
||||
|
||||
def delete(self, file_id, session=None):
|
||||
"""Given an file_id, delete this stored file's files collection document
|
||||
and associated chunks from a GridFS bucket.
|
||||
|
||||
For example::
|
||||
|
||||
my_db = MongoClient().test
|
||||
fs = GridFSBucket(my_db)
|
||||
# Get _id of file to delete
|
||||
file_id = fs.upload_from_stream("test_file", "data I want to store!")
|
||||
fs.delete(file_id)
|
||||
|
||||
Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
|
||||
|
||||
:Parameters:
|
||||
- `file_id`: The _id of the file to be deleted.
|
||||
- `session` (optional): a
|
||||
:class:`~pymongo.client_session.ClientSession`
|
||||
|
||||
.. versionchanged:: 3.6
|
||||
Added ``session`` parameter.
|
||||
"""
|
||||
res = self._files.delete_one({"_id": file_id}, session=session)
|
||||
self._chunks.delete_many({"files_id": file_id}, session=session)
|
||||
if not res.deleted_count:
|
||||
raise NoFile(
|
||||
"no file could be deleted because none matched %s" % file_id)
|
||||
|
||||
def find(self, *args, **kwargs):
|
||||
"""Find and return the files collection documents that match ``filter``
|
||||
|
||||
Returns a cursor that iterates across files matching
|
||||
arbitrary queries on the files collection. Can be combined
|
||||
with other modifiers for additional control.
|
||||
|
||||
For example::
|
||||
|
||||
for grid_data in fs.find({"filename": "lisa.txt"},
|
||||
no_cursor_timeout=True):
|
||||
data = grid_data.read()
|
||||
|
||||
would iterate through all versions of "lisa.txt" stored in GridFS.
|
||||
Note that setting no_cursor_timeout to True may be important to
|
||||
prevent the cursor from timing out during long multi-file processing
|
||||
work.
|
||||
|
||||
As another example, the call::
|
||||
|
||||
most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
|
||||
|
||||
would return a cursor to the three most recently uploaded files
|
||||
in GridFS.
|
||||
|
||||
Follows a similar interface to
|
||||
:meth:`~pymongo.collection.Collection.find`
|
||||
in :class:`~pymongo.collection.Collection`.
|
||||
|
||||
If a :class:`~pymongo.client_session.ClientSession` is passed to
|
||||
:meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances
|
||||
are associated with that session.
|
||||
|
||||
:Parameters:
|
||||
- `filter`: Search query.
|
||||
- `batch_size` (optional): The number of documents to return per
|
||||
batch.
|
||||
- `limit` (optional): The maximum number of documents to return.
|
||||
- `no_cursor_timeout` (optional): The server normally times out idle
|
||||
cursors after an inactivity period (10 minutes) to prevent excess
|
||||
memory use. Set this option to True prevent that.
|
||||
- `skip` (optional): The number of documents to skip before
|
||||
returning.
|
||||
- `sort` (optional): The order by which to sort results. Defaults to
|
||||
None.
|
||||
"""
|
||||
return GridOutCursor(self._collection, *args, **kwargs)
|
||||
|
||||
def open_download_stream_by_name(self, filename, revision=-1, session=None):
    """Open a read stream for `filename` at the requested `revision`.

    Returns an instance of :class:`~gridfs.grid_file.GridOut`. For
    example::

        my_db = MongoClient().test
        fs = GridFSBucket(my_db)
        grid_out = fs.open_download_stream_by_name("test_file")
        contents = grid_out.read()

    Raises :exc:`~gridfs.errors.NoFile` if no such version of that file
    exists, and :exc:`ValueError` if `filename` is not a string.

    :Parameters:
      - `filename`: The name of the file to read from.
      - `revision` (optional): Which revision (documents with the same
        filename and different uploadDate) of the file to retrieve.
        Defaults to -1 (the most recent revision).
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`

    :Note: Revision numbers are defined as follows:

      - 0 = the original stored file
      - 1 = the first revision
      - 2 = the second revision
      - etc...
      - -2 = the second most recent revision
      - -1 = the most recent revision
    """
    validate_string("filename", filename)

    # Negative revisions count backwards from the newest upload, so sort
    # newest-first; non-negative revisions count forward from the oldest.
    if revision < 0:
        to_skip, direction = abs(revision) - 1, DESCENDING
    else:
        to_skip, direction = revision, ASCENDING

    cursor = self._files.find({"filename": filename}, session=session)
    cursor.limit(-1).skip(to_skip).sort("uploadDate", direction)

    for grid_file in cursor:
        return GridOut(
            self._collection, file_document=grid_file, session=session)
    raise NoFile(
        "no version %d for filename %r" % (revision, filename))
|
||||
|
||||
def download_to_stream_by_name(self, filename, destination, revision=-1,
                               session=None):
    """Copy the contents of `filename` (optional `revision`) into
    `destination`.

    For example::

        my_db = MongoClient().test
        fs = GridFSBucket(my_db)
        # Get file to write to
        file = open('myfile','wb')
        fs.download_to_stream_by_name("test_file", file)

    Raises :exc:`~gridfs.errors.NoFile` if no such version of that file
    exists, and :exc:`ValueError` if `filename` is not a string.

    :Parameters:
      - `filename`: The name of the file to read from.
      - `destination`: A file-like object that implements :meth:`write`.
      - `revision` (optional): Which revision (documents with the same
        filename and different uploadDate) of the file to retrieve.
        Defaults to -1 (the most recent revision).
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`
    """
    stream = self.open_download_stream_by_name(
        filename, revision, session=session)
    # The context manager guarantees the source cursor is closed even if
    # destination.write raises part-way through.
    with stream as source:
        for buf in source:
            destination.write(buf)
|
||||
|
||||
def rename(self, file_id, new_filename, session=None):
    """Renames the stored file with the specified file_id.

    For example::

        my_db = MongoClient().test
        fs = GridFSBucket(my_db)
        # Get _id of file to rename
        file_id = fs.upload_from_stream("test_file", "data I want to store!")
        fs.rename(file_id, "new_test_name")

    Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.

    :Parameters:
      - `file_id`: The _id of the file to be renamed.
      - `new_filename`: The new name of the file.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`
    """
    result = self._files.update_one({"_id": file_id},
                                    {"$set": {"filename": new_filename}},
                                    session=session)
    if not result.matched_count:
        # BUGFIX: the message previously used "%i" for file_id, which
        # raised TypeError for non-integer ids (ObjectId is the common
        # case), masking the intended NoFile exception. "%r" works for
        # any _id type.
        raise NoFile("no files could be renamed %r because none "
                     "matched file_id %r" % (new_filename, file_id))
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
33
venv/lib/python3.8/site-packages/gridfs/errors.py
Normal file
33
venv/lib/python3.8/site-packages/gridfs/errors.py
Normal file
@@ -0,0 +1,33 @@
|
||||
# Copyright 2009-2015 MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Exceptions raised by the :mod:`gridfs` package"""
|
||||
|
||||
from pymongo.errors import PyMongoError
|
||||
|
||||
|
||||
class GridFSError(PyMongoError):
    """Root of the GridFS exception hierarchy."""


class CorruptGridFile(GridFSError):
    """A stored file's chunks are inconsistent with its metadata."""


class NoFile(GridFSError):
    """The requested file (or file version) does not exist."""


class FileExists(GridFSError):
    """A file with the given ``_id`` already exists."""
|
||||
842
venv/lib/python3.8/site-packages/gridfs/grid_file.py
Normal file
842
venv/lib/python3.8/site-packages/gridfs/grid_file.py
Normal file
@@ -0,0 +1,842 @@
|
||||
# Copyright 2009-present MongoDB, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Tools for representing files stored in GridFS."""
|
||||
import datetime
|
||||
import hashlib
|
||||
import io
|
||||
import math
|
||||
import os
|
||||
|
||||
from bson.int64 import Int64
|
||||
from bson.son import SON
|
||||
from bson.binary import Binary
|
||||
from bson.objectid import ObjectId
|
||||
from bson.py3compat import text_type, StringIO
|
||||
from gridfs.errors import CorruptGridFile, FileExists, NoFile
|
||||
from pymongo import ASCENDING
|
||||
from pymongo.collection import Collection
|
||||
from pymongo.cursor import Cursor
|
||||
from pymongo.errors import (ConfigurationError,
|
||||
CursorNotFound,
|
||||
DuplicateKeyError,
|
||||
OperationFailure)
|
||||
from pymongo.read_preferences import ReadPreference
|
||||
|
||||
try:
|
||||
_SEEK_SET = os.SEEK_SET
|
||||
_SEEK_CUR = os.SEEK_CUR
|
||||
_SEEK_END = os.SEEK_END
|
||||
# before 2.5
|
||||
except AttributeError:
|
||||
_SEEK_SET = 0
|
||||
_SEEK_CUR = 1
|
||||
_SEEK_END = 2
|
||||
|
||||
EMPTY = b""
|
||||
NEWLN = b"\n"
|
||||
|
||||
"""Default chunk size, in bytes."""
|
||||
# Slightly under a power of 2, to work well with server's record allocations.
|
||||
DEFAULT_CHUNK_SIZE = 255 * 1024
|
||||
|
||||
_C_INDEX = SON([("files_id", ASCENDING), ("n", ASCENDING)])
|
||||
_F_INDEX = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)])
|
||||
|
||||
|
||||
def _grid_in_property(field_name, docstring, read_only=False,
                      closed_only=False):
    """Build a property backed by ``field_name`` in the GridIn file document.

    ``read_only`` fields never get a setter; ``closed_only`` fields
    additionally refuse reads until the file has been closed.
    """
    def _get(self):
        if closed_only and not self._closed:
            raise AttributeError("can only get %r on a closed file" %
                                 field_name)
        # Protect against PHP-237
        default = 0 if field_name == 'length' else None
        return self._file.get(field_name, default)

    def _set(self, value):
        # After close() the document lives server-side, so write through.
        if self._closed:
            self._coll.files.update_one({"_id": self._file["_id"]},
                                        {"$set": {field_name: value}})
        self._file[field_name] = value

    if read_only:
        docstring += "\n\nThis attribute is read-only."
    elif closed_only:
        docstring = "%s\n\n%s" % (docstring, "This attribute is read-only and "
                                  "can only be read after :meth:`close` "
                                  "has been called.")

    if read_only or closed_only:
        return property(_get, doc=docstring)
    return property(_get, _set, doc=docstring)
|
||||
|
||||
|
||||
def _grid_out_property(field_name, docstring):
    """Build a read-only property that lazily loads the file document
    before reading ``field_name`` from it."""
    def _get(self):
        self._ensure_file()

        # Protect against PHP-237
        if field_name == 'length':
            return self._file.get(field_name, 0)
        return self._file.get(field_name, None)

    return property(_get, doc=docstring + "\n\nThis attribute is read-only.")
|
||||
|
||||
|
||||
def _clear_entity_type_registry(entity, **kwargs):
    """Return a copy of a database/collection object with its custom
    type registry cleared (GridFS stores raw BSON, never custom types)."""
    opts = entity.codec_options.with_options(type_registry=None)
    return entity.with_options(codec_options=opts, **kwargs)
|
||||
|
||||
|
||||
class GridIn(object):
|
||||
"""Class to write data to GridFS.
|
||||
"""
|
||||
def __init__(
        self, root_collection, session=None, disable_md5=False, **kwargs):
    """Write a file to GridFS

    Application developers should generally not need to instantiate this
    class directly - instead see the methods provided by
    :class:`~gridfs.GridFS`.

    Raises :class:`TypeError` if `root_collection` is not an
    instance of :class:`~pymongo.collection.Collection`.

    Any file-level option from the `GridFS Spec
    <http://dochub.mongodb.org/core/gridfsspec>`_ may be passed as a
    keyword argument (``"_id"``, ``"filename"``,
    ``"contentType"``/``"content_type"``, ``"chunkSize"``/``"chunk_size"``,
    ``"encoding"``); any other keyword argument is stored as an extra
    field on the file document.

    :Parameters:
      - `root_collection`: root collection to write to
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession` to use for all
        commands
      - `disable_md5` (optional): When True, an MD5 checksum will not be
        computed for the uploaded file. Useful in environments where
        MD5 cannot be used for regulatory or other reasons. Defaults to
        False.
      - `**kwargs` (optional): file level options (see above)
    """
    if not isinstance(root_collection, Collection):
        raise TypeError("root_collection must be an "
                        "instance of Collection")

    # GridIn must observe write errors to guarantee chunk integrity, so
    # an unacknowledged write concern is rejected up front.
    if not root_collection.write_concern.acknowledged:
        raise ConfigurationError('root_collection must use '
                                 'acknowledged write_concern')

    # Handle alternative naming
    if "content_type" in kwargs:
        kwargs["contentType"] = kwargs.pop("content_type")
    if "chunk_size" in kwargs:
        kwargs["chunkSize"] = kwargs.pop("chunk_size")

    # Always read our own writes back from the primary.
    coll = _clear_entity_type_registry(
        root_collection, read_preference=ReadPreference.PRIMARY)

    if not disable_md5:
        # Running checksum, finalized to a hex string in __flush().
        # NOTE(review): md5 is unusable in FIPS environments; callers opt
        # out via disable_md5.
        kwargs["md5"] = hashlib.md5()
    # Defaults
    kwargs["_id"] = kwargs.get("_id", ObjectId())
    kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE)
    # __setattr__ is overridden to write through to the file document, so
    # all internal state must be installed with object.__setattr__.
    object.__setattr__(self, "_session", session)
    object.__setattr__(self, "_coll", coll)
    object.__setattr__(self, "_chunks", coll.chunks)
    object.__setattr__(self, "_file", kwargs)
    object.__setattr__(self, "_buffer", StringIO())
    object.__setattr__(self, "_position", 0)
    object.__setattr__(self, "_chunk_number", 0)
    object.__setattr__(self, "_closed", False)
    object.__setattr__(self, "_ensured_index", False)
|
||||
|
||||
def __create_index(self, collection, index_key, unique):
    """Create `index_key` on `collection` unless it already exists.

    Only attempted when the collection is empty, per the GridFS spec's
    lazy index-creation guidance.
    """
    doc = collection.find_one(projection={"_id": 1}, session=self._session)
    if doc is None:
        try:
            index_keys = [index_spec['key'] for index_spec in
                          collection.list_indexes(session=self._session)]
        except OperationFailure:
            # list_indexes can fail (e.g. insufficient privileges); fall
            # through and attempt the create unconditionally.
            index_keys = []
        if index_key not in index_keys:
            collection.create_index(
                index_key.items(), unique=unique, session=self._session)
|
||||
|
||||
def __ensure_indexes(self):
|
||||
if not object.__getattribute__(self, "_ensured_index"):
|
||||
self.__create_index(self._coll.files, _F_INDEX, False)
|
||||
self.__create_index(self._coll.chunks, _C_INDEX, True)
|
||||
object.__setattr__(self, "_ensured_index", True)
|
||||
|
||||
def abort(self):
    """Discard this file: delete any chunks and metadata already
    uploaded, then mark the file closed so further writes fail."""
    file_id = self._file['_id']
    self._coll.chunks.delete_many({"files_id": file_id},
                                  session=self._session)
    self._coll.files.delete_one({"_id": file_id}, session=self._session)
    # Bypass the write-through __setattr__ override.
    object.__setattr__(self, "_closed", True)
|
||||
|
||||
@property
|
||||
def closed(self):
|
||||
"""Is this file closed?
|
||||
"""
|
||||
return self._closed
|
||||
|
||||
_id = _grid_in_property("_id", "The ``'_id'`` value for this file.",
|
||||
read_only=True)
|
||||
filename = _grid_in_property("filename", "Name of this file.")
|
||||
name = _grid_in_property("filename", "Alias for `filename`.")
|
||||
content_type = _grid_in_property("contentType", "Mime-type for this file.")
|
||||
length = _grid_in_property("length", "Length (in bytes) of this file.",
|
||||
closed_only=True)
|
||||
chunk_size = _grid_in_property("chunkSize", "Chunk size for this file.",
|
||||
read_only=True)
|
||||
upload_date = _grid_in_property("uploadDate",
|
||||
"Date that this file was uploaded.",
|
||||
closed_only=True)
|
||||
md5 = _grid_in_property("md5", "MD5 of the contents of this file "
|
||||
"if an md5 sum was created.",
|
||||
closed_only=True)
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name in self._file:
|
||||
return self._file[name]
|
||||
raise AttributeError("GridIn object has no attribute '%s'" % name)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
# For properties of this instance like _buffer, or descriptors set on
|
||||
# the class like filename, use regular __setattr__
|
||||
if name in self.__dict__ or name in self.__class__.__dict__:
|
||||
object.__setattr__(self, name, value)
|
||||
else:
|
||||
# All other attributes are part of the document in db.fs.files.
|
||||
# Store them to be sent to server on close() or if closed, send
|
||||
# them now.
|
||||
self._file[name] = value
|
||||
if self._closed:
|
||||
self._coll.files.update_one({"_id": self._file["_id"]},
|
||||
{"$set": {name: value}})
|
||||
|
||||
def __flush_data(self, data):
    """Flush `data` to a chunk.
    """
    # Indexes must exist before the first chunk lands so the unique
    # (files_id, n) constraint below can catch duplicate files.
    self.__ensure_indexes()
    if 'md5' in self._file:
        # Keep the running checksum in step with every byte written,
        # including a final empty flush (a no-op for md5).
        self._file['md5'].update(data)

    if not data:
        return
    assert(len(data) <= self.chunk_size)

    chunk = {"files_id": self._file["_id"],
             "n": self._chunk_number,
             "data": Binary(data)}

    try:
        self._chunks.insert_one(chunk, session=self._session)
    except DuplicateKeyError:
        # The unique (files_id, n) index fired: a file with this _id has
        # already stored this chunk number.
        self._raise_file_exists(self._file['_id'])
    self._chunk_number += 1
    self._position += len(data)
|
||||
|
||||
def __flush_buffer(self):
|
||||
"""Flush the buffer contents out to a chunk.
|
||||
"""
|
||||
self.__flush_data(self._buffer.getvalue())
|
||||
self._buffer.close()
|
||||
self._buffer = StringIO()
|
||||
|
||||
def __flush(self):
    """Flush the file to the database.
    """
    try:
        # Write out any buffered partial chunk first.
        self.__flush_buffer()

        if "md5" in self._file:
            # Finalize the running hash object into its hex string form
            # for storage in the files document.
            self._file["md5"] = self._file["md5"].hexdigest()
        # The GridFS spec says length SHOULD be an Int64.
        self._file["length"] = Int64(self._position)
        self._file["uploadDate"] = datetime.datetime.utcnow()

        return self._coll.files.insert_one(
            self._file, session=self._session)
    except DuplicateKeyError:
        # A files document with this _id already exists.
        self._raise_file_exists(self._id)
|
||||
|
||||
def _raise_file_exists(self, file_id):
|
||||
"""Raise a FileExists exception for the given file_id."""
|
||||
raise FileExists("file with _id %r already exists" % file_id)
|
||||
|
||||
def close(self):
|
||||
"""Flush the file and close it.
|
||||
|
||||
A closed file cannot be written any more. Calling
|
||||
:meth:`close` more than once is allowed.
|
||||
"""
|
||||
if not self._closed:
|
||||
self.__flush()
|
||||
object.__setattr__(self, "_closed", True)
|
||||
|
||||
def read(self, size=-1):
|
||||
raise io.UnsupportedOperation('read')
|
||||
|
||||
def readable(self):
|
||||
return False
|
||||
|
||||
def seekable(self):
|
||||
return False
|
||||
|
||||
def write(self, data):
    """Write data to the file. There is no return value.

    `data` can be either a string of bytes or a file-like object
    (implementing :meth:`read`). If the file has an :attr:`encoding`
    attribute, `data` can also be a :class:`unicode` (:class:`str` in
    python 3) instance, which will be encoded as :attr:`encoding`
    before being written.

    Due to buffering, the data may not actually be written to the
    database until the :meth:`close` method is called. Raises
    :class:`ValueError` if this file is already closed. Raises
    :class:`TypeError` if `data` is not an instance of
    :class:`str` (:class:`bytes` in python 3), a file-like object, or
    an instance of :class:`unicode` (:class:`str` in python 3).
    Unicode data is only allowed if the file has an :attr:`encoding`
    attribute.

    :Parameters:
      - `data`: string of bytes or file-like object to be written
        to the file
    """
    if self._closed:
        raise ValueError("cannot write to a closed file")

    try:
        # file-like
        read = data.read
    except AttributeError:
        # string
        if not isinstance(data, (text_type, bytes)):
            raise TypeError("can only write strings or file-like objects")
        if isinstance(data, text_type):
            try:
                data = data.encode(self.encoding)
            except AttributeError:
                # No self.encoding attribute set on this file.
                raise TypeError("must specify an encoding for file in "
                                "order to write %s" % (text_type.__name__,))
        # Expose the bytes through the same read() interface so both
        # input kinds share one code path below.
        read = StringIO(data).read

    if self._buffer.tell() > 0:
        # Make sure to flush only when _buffer is complete
        space = self.chunk_size - self._buffer.tell()
        if space:
            try:
                to_write = read(space)
            except:
                # The source raised mid-write; the file's contents are
                # now unknowable, so delete everything uploaded so far.
                self.abort()
                raise
            self._buffer.write(to_write)
            if len(to_write) < space:
                return  # EOF or incomplete
        self.__flush_buffer()
    # Stream full chunks straight to the server; only the trailing
    # partial chunk stays in the buffer for the next write()/close().
    to_write = read(self.chunk_size)
    while to_write and len(to_write) == self.chunk_size:
        self.__flush_data(to_write)
        to_write = read(self.chunk_size)
    self._buffer.write(to_write)
|
||||
|
||||
def writelines(self, sequence):
    """Write every item of `sequence` to the file, in order.

    No separators are added between items.
    """
    for item in sequence:
        self.write(item)
|
||||
|
||||
def writeable(self):
|
||||
return True
|
||||
|
||||
def __enter__(self):
|
||||
"""Support for the context manager protocol.
|
||||
"""
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
"""Support for the context manager protocol.
|
||||
|
||||
Close the file and allow exceptions to propagate.
|
||||
"""
|
||||
self.close()
|
||||
|
||||
# propagate exceptions
|
||||
return False
|
||||
|
||||
|
||||
class GridOut(object):
|
||||
"""Class to read data out of GridFS.
|
||||
"""
|
||||
def __init__(self, root_collection, file_id=None, file_document=None,
             session=None):
    """Read a file from GridFS

    Application developers should generally not need to instantiate this
    class directly - instead see the methods provided by
    :class:`~gridfs.GridFS`.

    Either `file_id` or `file_document` must be specified;
    `file_document` will be given priority if present. Raises
    :class:`TypeError` if `root_collection` is not an instance of
    :class:`~pymongo.collection.Collection`.

    :Parameters:
      - `root_collection`: root collection to read from
      - `file_id` (optional): value of ``"_id"`` for the file to read
      - `file_document` (optional): file document from
        `root_collection.files`
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession` to use for all
        commands

    .. note:: The file metadata is not fetched here; it is loaded
       lazily by :meth:`_ensure_file` when first needed.
    """
    if not isinstance(root_collection, Collection):
        raise TypeError("root_collection must be an "
                        "instance of Collection")

    root_collection = _clear_entity_type_registry(root_collection)

    self.__chunks = root_collection.chunks
    self.__files = root_collection.files
    self.__file_id = file_id
    self.__buffer = EMPTY        # bytes left over from the last chunk read
    self.__chunk_iter = None     # lazily-created single cursor over chunks
    self.__position = 0          # current logical read offset in the file
    self._file = file_document
    self._session = session
|
||||
|
||||
_id = _grid_out_property("_id", "The ``'_id'`` value for this file.")
|
||||
filename = _grid_out_property("filename", "Name of this file.")
|
||||
name = _grid_out_property("filename", "Alias for `filename`.")
|
||||
content_type = _grid_out_property("contentType", "Mime-type for this file.")
|
||||
length = _grid_out_property("length", "Length (in bytes) of this file.")
|
||||
chunk_size = _grid_out_property("chunkSize", "Chunk size for this file.")
|
||||
upload_date = _grid_out_property("uploadDate",
|
||||
"Date that this file was first uploaded.")
|
||||
aliases = _grid_out_property("aliases", "List of aliases for this file.")
|
||||
metadata = _grid_out_property("metadata", "Metadata attached to this file.")
|
||||
md5 = _grid_out_property("md5", "MD5 of the contents of this file "
|
||||
"if an md5 sum was created.")
|
||||
|
||||
def _ensure_file(self):
|
||||
if not self._file:
|
||||
self._file = self.__files.find_one({"_id": self.__file_id},
|
||||
session=self._session)
|
||||
if not self._file:
|
||||
raise NoFile("no file in gridfs collection %r with _id %r" %
|
||||
(self.__files, self.__file_id))
|
||||
|
||||
def __getattr__(self, name):
|
||||
self._ensure_file()
|
||||
if name in self._file:
|
||||
return self._file[name]
|
||||
raise AttributeError("GridOut object has no attribute '%s'" % name)
|
||||
|
||||
def readable(self):
|
||||
return True
|
||||
|
||||
def readchunk(self):
    """Reads a chunk at a time. If the current position is within a
    chunk the remainder of the chunk is returned.
    """
    received = len(self.__buffer)
    chunk_data = EMPTY
    chunk_size = int(self.chunk_size)

    if received > 0:
        # Serve leftovers buffered by a previous read first.
        chunk_data = self.__buffer
    elif self.__position < int(self.length):
        # Which chunk holds the current position.
        chunk_number = int((received + self.__position) / chunk_size)
        if self.__chunk_iter is None:
            # Open one cursor for the whole file, starting at the chunk
            # we need.
            self.__chunk_iter = _GridOutChunkIterator(
                self, self.__chunks, self._session, chunk_number)

        chunk = self.__chunk_iter.next()
        # Skip any bytes of this chunk that precede the current position.
        chunk_data = chunk["data"][self.__position % chunk_size:]

        if not chunk_data:
            raise CorruptGridFile("truncated chunk")

    self.__position += len(chunk_data)
    self.__buffer = EMPTY
    return chunk_data
|
||||
|
||||
def read(self, size=-1):
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read
    """
    self._ensure_file()

    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    if size == 0:
        return EMPTY

    received = 0
    data = StringIO()
    while received < size:
        # readchunk() may return more than we need; overshoot is
        # handled below.
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)

    # Detect extra chunks after reading the entire file.
    if size == remainder and self.__chunk_iter:
        try:
            # next() raises CorruptGridFile on a non-empty extra chunk.
            self.__chunk_iter.next()
        except StopIteration:
            pass

    # Rewind the logical position past any over-read bytes.
    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
|
||||
|
||||
def readline(self, size=-1):
    """Read one line or up to `size` bytes from the file.

    :Parameters:
      - `size` (optional): the maximum number of bytes to read
    """
    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder

    if size == 0:
        return EMPTY

    received = 0
    data = StringIO()
    while received < size:
        chunk_data = self.readchunk()
        # Stop at the first newline found within the size limit; shrink
        # `size` so the tail of this chunk is re-buffered below.
        pos = chunk_data.find(NEWLN, 0, size)
        if pos != -1:
            size = received + pos + 1

        received += len(chunk_data)
        data.write(chunk_data)
        if pos != -1:
            break

    # Rewind the logical position past any over-read bytes.
    self.__position -= received - size

    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
|
||||
|
||||
def tell(self):
|
||||
"""Return the current position of this file.
|
||||
"""
|
||||
return self.__position
|
||||
|
||||
def seek(self, pos, whence=_SEEK_SET):
    """Set the current position of this file.

    Raises :class:`IOError` (errno 22) for an invalid `whence` or a
    resulting negative position.

    :Parameters:
      - `pos`: the position (or offset if using relative
        positioning) to seek to
      - `whence` (optional): where to seek
        from. :attr:`os.SEEK_SET` (``0``) for absolute file
        positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
        to the current position, :attr:`os.SEEK_END` (``2``) to
        seek relative to the file's end.
    """
    if whence == _SEEK_SET:
        new_pos = pos
    elif whence == _SEEK_CUR:
        new_pos = self.__position + pos
    elif whence == _SEEK_END:
        new_pos = int(self.length) + pos
    else:
        raise IOError(22, "Invalid value for `whence`")

    if new_pos < 0:
        raise IOError(22, "Invalid value for `pos` - must be positive")

    # Optimization, continue using the same buffer and chunk iterator.
    if new_pos == self.__position:
        return

    self.__position = new_pos
    self.__buffer = EMPTY
    if self.__chunk_iter:
        # The open cursor points at the old position; discard it so the
        # next read reopens at the right chunk.
        self.__chunk_iter.close()
        self.__chunk_iter = None
|
||||
|
||||
def seekable(self):
|
||||
return True
|
||||
|
||||
def __iter__(self):
|
||||
"""Return an iterator over all of this file's data.
|
||||
|
||||
The iterator will return chunk-sized instances of
|
||||
:class:`str` (:class:`bytes` in python 3). This can be
|
||||
useful when serving files using a webserver that handles
|
||||
such an iterator efficiently.
|
||||
|
||||
.. note::
|
||||
This is different from :py:class:`io.IOBase` which iterates over
|
||||
*lines* in the file. Use :meth:`GridOut.readline` to read line by
|
||||
line instead of chunk by chunk.
|
||||
|
||||
.. versionchanged:: 3.8
|
||||
The iterator now raises :class:`CorruptGridFile` when encountering
|
||||
any truncated, missing, or extra chunk in a file. The previous
|
||||
behavior was to only raise :class:`CorruptGridFile` on a missing
|
||||
chunk.
|
||||
"""
|
||||
return GridOutIterator(self, self.__chunks, self._session)
|
||||
|
||||
def close(self):
|
||||
"""Make GridOut more generically file-like."""
|
||||
if self.__chunk_iter:
|
||||
self.__chunk_iter.close()
|
||||
self.__chunk_iter = None
|
||||
|
||||
def write(self, value):
|
||||
raise io.UnsupportedOperation('write')
|
||||
|
||||
def __enter__(self):
|
||||
"""Makes it possible to use :class:`GridOut` files
|
||||
with the context manager protocol.
|
||||
"""
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
"""Makes it possible to use :class:`GridOut` files
|
||||
with the context manager protocol.
|
||||
"""
|
||||
self.close()
|
||||
return False
|
||||
|
||||
|
||||
class _GridOutChunkIterator(object):
|
||||
"""Iterates over a file's chunks using a single cursor.
|
||||
|
||||
Raises CorruptGridFile when encountering any truncated, missing, or extra
|
||||
chunk in a file.
|
||||
"""
|
||||
def __init__(self, grid_out, chunks, session, next_chunk):
    # Snapshot the file metadata up front; GridOut attributes may
    # lazily query the server on access.
    self._id = grid_out._id
    self._chunk_size = int(grid_out.chunk_size)
    self._length = int(grid_out.length)
    self._chunks = chunks
    self._session = session
    self._next_chunk = next_chunk
    # Total chunks the file must have given its length and chunk size.
    self._num_chunks = math.ceil(float(self._length) / self._chunk_size)
    self._cursor = None  # created lazily by _create_cursor()
|
||||
|
||||
def expected_chunk_length(self, chunk_n):
    """Return how many bytes chunk `chunk_n` must contain: a full
    chunk for every chunk but the last, the remainder for the last."""
    last = self._num_chunks - 1
    if chunk_n < last:
        return self._chunk_size
    return self._length - self._chunk_size * last
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def _create_cursor(self):
    """Open a cursor over this file's chunks in chunk-number order,
    starting at ``self._next_chunk``."""
    # NOTE(review): `filter` shadows the builtin; kept as-is here since
    # this is a comments-only pass.
    filter = {"files_id": self._id}
    if self._next_chunk > 0:
        filter["n"] = {"$gte": self._next_chunk}
    self._cursor = self._chunks.find(filter, sort=[("n", 1)],
                                     session=self._session)
|
||||
|
||||
def _next_with_retry(self):
|
||||
"""Return the next chunk and retry once on CursorNotFound.
|
||||
|
||||
We retry on CursorNotFound to maintain backwards compatibility in
|
||||
cases where two calls to read occur more than 10 minutes apart (the
|
||||
server's default cursor timeout).
|
||||
"""
|
||||
if self._cursor is None:
|
||||
self._create_cursor()
|
||||
|
||||
try:
|
||||
return self._cursor.next()
|
||||
except CursorNotFound:
|
||||
self._cursor.close()
|
||||
self._create_cursor()
|
||||
return self._cursor.next()
|
||||
|
||||
def next(self):
|
||||
try:
|
||||
chunk = self._next_with_retry()
|
||||
except StopIteration:
|
||||
if self._next_chunk >= self._num_chunks:
|
||||
raise
|
||||
raise CorruptGridFile("no chunk #%d" % self._next_chunk)
|
||||
|
||||
if chunk["n"] != self._next_chunk:
|
||||
self.close()
|
||||
raise CorruptGridFile(
|
||||
"Missing chunk: expected chunk #%d but found "
|
||||
"chunk with n=%d" % (self._next_chunk, chunk["n"]))
|
||||
|
||||
if chunk["n"] >= self._num_chunks:
|
||||
# According to spec, ignore extra chunks if they are empty.
|
||||
if len(chunk["data"]):
|
||||
self.close()
|
||||
raise CorruptGridFile(
|
||||
"Extra chunk found: expected %d chunks but found "
|
||||
"chunk with n=%d" % (self._num_chunks, chunk["n"]))
|
||||
|
||||
expected_length = self.expected_chunk_length(chunk["n"])
|
||||
if len(chunk["data"]) != expected_length:
|
||||
self.close()
|
||||
raise CorruptGridFile(
|
||||
"truncated chunk #%d: expected chunk length to be %d but "
|
||||
"found chunk with length %d" % (
|
||||
chunk["n"], expected_length, len(chunk["data"])))
|
||||
|
||||
self._next_chunk += 1
|
||||
return chunk
|
||||
|
||||
__next__ = next
|
||||
|
||||
def close(self):
|
||||
if self._cursor:
|
||||
self._cursor.close()
|
||||
self._cursor = None
|
||||
|
||||
|
||||
class GridOutIterator(object):
    """Iterate over a GridOut file, yielding one ``bytes`` object per chunk."""

    def __init__(self, grid_out, chunks, session):
        # Delegate chunk fetching and validation to _GridOutChunkIterator,
        # starting from the first chunk of the file.
        self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0)

    def __iter__(self):
        return self

    def next(self):
        """Return the next chunk's payload as bytes."""
        return bytes(self.__chunk_iter.next()["data"])

    __next__ = next
|
||||
|
||||
|
||||
class GridOutCursor(Cursor):
    """A cursor / iterator over the GridFS files collection whose results
    are returned as :class:`~gridfs.grid_file.GridOut` objects rather than
    raw file documents.
    """

    def __init__(self, collection, filter=None, skip=0, limit=0,
                 no_cursor_timeout=False, sort=None, batch_size=0,
                 session=None):
        """Create a new cursor, similar to the normal
        :class:`~pymongo.cursor.Cursor`.

        Should not be called directly by application developers - see
        the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead.

        .. versionadded 2.7

        .. mongodoc:: cursors
        """
        root = _clear_entity_type_registry(collection)
        # Keep the base "fs" collection around so next() can construct
        # GridOut objects against it later.
        self.__root_collection = root
        super(GridOutCursor, self).__init__(
            root.files, filter, skip=skip, limit=limit,
            no_cursor_timeout=no_cursor_timeout, sort=sort,
            batch_size=batch_size, session=session)

    def next(self):
        """Advance the cursor and wrap the next file document in a GridOut."""
        # Work around "super is not iterable" issue in Python 3.x
        doc = super(GridOutCursor, self).next()
        return GridOut(self.__root_collection, file_document=doc,
                       session=self.session)

    __next__ = next

    def add_option(self, *args, **kwargs):
        """Unsupported on GridOutCursor; always raises NotImplementedError."""
        raise NotImplementedError("Method does not exist for GridOutCursor")

    def remove_option(self, *args, **kwargs):
        """Unsupported on GridOutCursor; always raises NotImplementedError."""
        raise NotImplementedError("Method does not exist for GridOutCursor")

    def _clone_base(self, session):
        """Creates an empty GridOutCursor for information to be copied into.
        """
        return GridOutCursor(self.__root_collection, session=session)
|
||||
Reference in New Issue
Block a user