Fixed database typo and removed unnecessary class identifier.
parent 00ad49a143
commit 45fb349a7d
5098 changed files with 952558 additions and 85 deletions
112
venv/Lib/site-packages/imageio/plugins/__init__.py
Normal file
@@ -0,0 +1,112 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

# flake8: noqa

"""

Imageio is plugin-based. Every supported format is provided with a
plugin. You can write your own plugins to make imageio support
additional formats. And we would be interested in adding such code to the
imageio codebase!


What is a plugin
----------------

In imageio, a plugin provides one or more :class:`.Format` objects, and
corresponding :class:`.Reader` and :class:`.Writer` classes.
Each Format object represents an implementation to read/write a
particular file format. Its Reader and Writer classes do the actual
reading/saving.

The reader and writer objects have a ``request`` attribute that can be
used to obtain information about the read or write :class:`.Request`, such as
user-provided keyword arguments, as well as get access to the raw image
data.


Registering
-----------

Strictly speaking a format can be used stand-alone. However, to allow
imageio to automatically select it for a specific file, the format must
be registered using ``imageio.formats.add_format()``.

Note that a plugin is not required to be part of the imageio package; as
long as a format is registered, imageio can use it. This makes imageio very
easy to extend.


What methods to implement
-------------------------

Imageio is designed such that plugins only need to implement a few
private methods. The public API is implemented by the base classes.
In effect, the public methods can be given a decent docstring which
does not have to be repeated in the plugins.

For the Format class, the following needs to be implemented/specified:

* The format needs a short name, a description, and a list of file
  extensions that are common for the file-format in question.
  These are set when instantiating the Format object.
* Use a docstring to provide more detailed information about the
  format/plugin, such as parameters for reading and saving that the user
  can supply via keyword arguments.
* Implement ``_can_read(request)``, returning a bool.
  See also the :class:`.Request` class.
* Implement ``_can_write(request)``, ditto.

For the Format.Reader class:

* Implement ``_open(**kwargs)`` to initialize the reader. Deal with the
  user-provided keyword arguments here.
* Implement ``_close()`` to clean up.
* Implement ``_get_length()`` to provide a suitable length based on what
  the user expects. Can be ``inf`` for streaming data.
* Implement ``_get_data(index)`` to return an array and a meta-data dict.
* Implement ``_get_meta_data(index)`` to return a meta-data dict. If index
  is None, it should return the 'global' meta-data.

For the Format.Writer class:

* Implement ``_open(**kwargs)`` to initialize the writer. Deal with the
  user-provided keyword arguments here.
* Implement ``_close()`` to clean up.
* Implement ``_append_data(im, meta)`` to add data (and meta-data).
* Implement ``_set_meta_data(meta)`` to set the global meta-data.

"""
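
# A condensed sketch of such a plugin (for illustration only; the bundled
# ``example`` plugin imported below is the canonical reference, and the
# names used here are hypothetical):
#
#     import numpy as np
#     from ..core import Format
#
#     class DummyFormat(Format):
#         """ Reads a single 64x64 black image; cannot write. """
#
#         def _can_read(self, request):
#             return request.filename.lower().endswith(".dummy")
#
#         def _can_write(self, request):
#             return False
#
#         class Reader(Format.Reader):
#             def _open(self, **kwargs):
#                 pass  # handle user-provided kwargs here
#
#             def _close(self):
#                 pass
#
#             def _get_length(self):
#                 return 1  # this format holds exactly one image
#
#             def _get_data(self, index):
#                 return np.zeros((64, 64), np.uint8), {}  # array, meta dict
#
#             def _get_meta_data(self, index):
#                 return {}  # also serves index=None (the 'global' meta-data)
#
#     # An instance would then be registered with something like:
#     # formats.add_format(DummyFormat("dummy", "An example format", ".dummy"))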

# First import plugins that we want to take precedence over freeimage
from . import tifffile
from . import pillow
from . import grab

from . import freeimage
from . import freeimagemulti

from . import ffmpeg

from . import bsdf
from . import dicom
from . import npz
from . import swf
from . import feisem  # special kind of tiff, uses _tiffile

from . import fits  # depends on astropy
from . import simpleitk  # depends on itk or SimpleITK
from . import gdal  # depends on gdal

from . import lytro
from . import spe

from . import example

# Sort
import os
from .. import formats

formats.sort(*os.getenv("IMAGEIO_FORMAT_ORDER", "").split(","))
del os, formats
25 binary files not shown.
940
venv/Lib/site-packages/imageio/plugins/_bsdf.py
Normal file
@@ -0,0 +1,940 @@
#!/usr/bin/env python
# This file is distributed under the terms of the 2-clause BSD License.
# Copyright (c) 2017-2018, Almar Klein

"""
Python implementation of the Binary Structured Data Format (BSDF).

BSDF is a binary format for serializing structured (scientific) data.
See http://bsdf.io for more information.

This is the reference implementation, which is relatively
sophisticated, providing e.g. lazy loading of blobs and streamed
reading/writing. A simpler Python implementation is available as
``bsdf_lite.py``.

This module has no dependencies and works on Python 2.7 and 3.4+.

Note: on Legacy Python (Python 2.7), non-Unicode strings are encoded as bytes.
"""

# todo: in 2020, remove six stuff, __future__ and _isidentifier
# todo: in 2020, remove 'utf-8' args to encode/decode; it's faster

from __future__ import absolute_import, division, print_function

import bz2
import hashlib
import logging
import os
import re
import struct
import sys
import types
import zlib
from io import BytesIO

logger = logging.getLogger(__name__)

# Notes on versioning: the major and minor numbers correspond to the
# BSDF format version. The major number is increased when backward
# incompatible changes are introduced. An implementation must raise an
# exception when the file being read has a higher major version. The
# minor number is increased when new backward compatible features are
# introduced. An implementation must display a warning when the file
# being read has a higher minor version. The patch version is increased
# for subsequent releases of the implementation.
VERSION = 2, 1, 2
__version__ = ".".join(str(i) for i in VERSION)


# %% The encoder and decoder implementation

# From six.py
PY3 = sys.version_info[0] >= 3
if PY3:
    text_type = str
    string_types = str
    unicode_types = str
    integer_types = int
    classtypes = type
else:  # pragma: no cover
    logging.basicConfig()  # avoid "no handlers found" error
    text_type = unicode  # noqa
    string_types = basestring  # noqa
    unicode_types = unicode  # noqa
    integer_types = (int, long)  # noqa
    classtypes = type, types.ClassType

# Shorthands
spack = struct.pack
strunpack = struct.unpack


def lencode(x):
    """ Encode an unsigned integer into a variable sized blob of bytes.
    """
    # We could support 16 bit and 32 bit as well, but the gain is low, since
    # 9 bytes for collections with over 250 elements is marginal anyway.
    if x <= 250:
        return spack("<B", x)
    # elif x < 65536:
    #     return spack('<BH', 251, x)
    # elif x < 4294967296:
    #     return spack('<BI', 252, x)
    else:
        return spack("<BQ", 253, x)


# Include len decoder for completeness; we've inlined it for performance.
def lendecode(f):
    """ Decode an unsigned integer from a file.
    """
    n = strunpack("<B", f.read(1))[0]
    if n == 253:
        n = strunpack("<Q", f.read(8))[0]  # noqa
    return n
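
# Illustrative note: small lengths take one byte, larger ones a 253-marker
# plus a little-endian uint64, e.g.
#
#     lencode(5)    -> b"\x05"
#     lencode(300)  -> b"\xfd" + struct.pack("<Q", 300)   # 0xfd == 253
#
# The values 254 and 255 are used by the format to mark closed and unclosed
# streams (see ListStream below), so they never denote a plain length here.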


def encode_type_id(b, ext_id):
    """ Encode the type identifier, with or without extension id.
    """
    if ext_id is not None:
        bb = ext_id.encode("UTF-8")
        return b.upper() + lencode(len(bb)) + bb  # noqa
    else:
        return b  # noqa


def _isidentifier(s):  # pragma: no cover
    """ Stand-in for str.isidentifier() on Legacy Python, but slower.
    """
    # http://stackoverflow.com/questions/2544972/
    return (
        isinstance(s, string_types)
        and re.match(r"^\w+$", s, re.UNICODE)
        and re.match(r"^[0-9]", s) is None
    )


class BsdfSerializer(object):
    """ Instances of this class represent a BSDF encoder/decoder.

    It acts as a placeholder for a set of extensions and encoding/decoding
    options. Use this to predefine extensions and options for high
    performance encoding/decoding. For general use, see the functions
    `save()`, `encode()`, `load()`, and `decode()`.

    This implementation of BSDF supports streaming lists (keep adding
    to a list after writing the main file), lazy loading of blobs, and
    in-place editing of blobs (for streams opened with a+).

    Options for encoding:

    * compression (int or str): ``0`` or "no" for no compression (default),
      ``1`` or "zlib" for Zlib compression (same as zip files and PNG), and
      ``2`` or "bz2" for Bz2 compression (more compact but slower writing).
      Note that some BSDF implementations (e.g. JavaScript) may not support
      compression.
    * use_checksum (bool): whether to include a checksum with binary blobs.
    * float64 (bool): Whether to write floats as 64 bit (default) or 32 bit.

    Options for decoding:

    * load_streaming (bool): if True, and the final object in the structure was
      a stream, will make it available as a stream in the decoded object.
    * lazy_blob (bool): if True, bytes are represented as Blob objects that can
      be used to lazily access the data, and also overwrite the data if the
      file is open in a+ mode.
    """

    def __init__(self, extensions=None, **options):
        self._extensions = {}  # name -> extension
        self._extensions_by_cls = {}  # cls -> (name, extension.encode)
        if extensions is None:
            extensions = standard_extensions
        for extension in extensions:
            self.add_extension(extension)
        self._parse_options(**options)

    def _parse_options(
        self,
        compression=0,
        use_checksum=False,
        float64=True,
        load_streaming=False,
        lazy_blob=False,
    ):

        # Validate compression
        if isinstance(compression, string_types):
            m = {"no": 0, "zlib": 1, "bz2": 2}
            compression = m.get(compression.lower(), compression)
        if compression not in (0, 1, 2):
            raise TypeError('Compression must be 0, 1, 2, "no", "zlib", or "bz2"')
        self._compression = compression

        # Other encoding args
        self._use_checksum = bool(use_checksum)
        self._float64 = bool(float64)

        # Decoding args
        self._load_streaming = bool(load_streaming)
        self._lazy_blob = bool(lazy_blob)

    def add_extension(self, extension_class):
        """ Add an extension to this serializer instance, which must be
        a subclass of Extension. Can be used as a decorator.
        """
        # Check class
        if not (
            isinstance(extension_class, type) and issubclass(extension_class, Extension)
        ):
            raise TypeError("add_extension() expects an Extension class.")
        extension = extension_class()

        # Get name
        name = extension.name
        if not isinstance(name, str):
            raise TypeError("Extension name must be str.")
        if len(name) == 0 or len(name) > 250:
            raise NameError(
                "Extension names must be nonempty and shorter than 251 chars."
            )
        if name in self._extensions:
            logger.warning(
                'BSDF warning: overwriting extension "%s", '
                "consider removing first" % name
            )

        # Get classes
        cls = extension.cls
        if not cls:
            clss = []
        elif isinstance(cls, (tuple, list)):
            clss = cls
        else:
            clss = [cls]
        for cls in clss:
            if not isinstance(cls, classtypes):
                raise TypeError("Extension classes must be types.")

        # Store
        for cls in clss:
            self._extensions_by_cls[cls] = name, extension.encode
        self._extensions[name] = extension
        return extension_class

    def remove_extension(self, name):
        """ Remove an extension by its unique name.
        """
        if not isinstance(name, str):
            raise TypeError("Extension name must be str.")
        if name in self._extensions:
            self._extensions.pop(name)
        for cls in list(self._extensions_by_cls.keys()):
            if self._extensions_by_cls[cls][0] == name:
                self._extensions_by_cls.pop(cls)

    def _encode(self, f, value, streams, ext_id):
        """ Main encoder function.
        """
        x = encode_type_id

        if value is None:
            f.write(x(b"v", ext_id))  # V for void
        elif value is True:
            f.write(x(b"y", ext_id))  # Y for yes
        elif value is False:
            f.write(x(b"n", ext_id))  # N for no
        elif isinstance(value, integer_types):
            if -32768 <= value <= 32767:
                f.write(x(b"h", ext_id) + spack("<h", value))  # H for short int
            else:
                f.write(x(b"i", ext_id) + spack("<q", value))  # I for int
        elif isinstance(value, float):
            if self._float64:
                f.write(x(b"d", ext_id) + spack("<d", value))  # D for double
            else:
                f.write(x(b"f", ext_id) + spack("<f", value))  # f for float
        elif isinstance(value, unicode_types):
            bb = value.encode("UTF-8")
            f.write(x(b"s", ext_id) + lencode(len(bb)))  # S for str
            f.write(bb)
        elif isinstance(value, (list, tuple)):
            f.write(x(b"l", ext_id) + lencode(len(value)))  # L for list
            for v in value:
                self._encode(f, v, streams, None)
        elif isinstance(value, dict):
            f.write(x(b"m", ext_id) + lencode(len(value)))  # M for mapping
            for key, v in value.items():
                if PY3:
                    assert key.isidentifier()  # faster
                else:  # pragma: no cover
                    assert _isidentifier(key)
                # yield ' ' * indent + key
                name_b = key.encode("UTF-8")
                f.write(lencode(len(name_b)))
                f.write(name_b)
                self._encode(f, v, streams, None)
        elif isinstance(value, bytes):
            f.write(x(b"b", ext_id))  # B for blob
            blob = Blob(
                value, compression=self._compression, use_checksum=self._use_checksum
            )
            blob._to_file(f)  # noqa
        elif isinstance(value, Blob):
            f.write(x(b"b", ext_id))  # B for blob
            value._to_file(f)  # noqa
        elif isinstance(value, BaseStream):
            # Initialize the stream
            if value.mode != "w":
                raise ValueError("Cannot serialize a read-mode stream.")
            elif isinstance(value, ListStream):
                f.write(x(b"l", ext_id) + spack("<BQ", 255, 0))  # L for list
            else:
                raise TypeError("Only ListStream is supported")
            # Mark this as *the* stream, and activate the stream.
            # The save() function verifies this is the last written object.
            if len(streams) > 0:
                raise ValueError("Can only have one stream per file.")
            streams.append(value)
            value._activate(f, self._encode, self._decode)  # noqa
        else:
            if ext_id is not None:
                raise ValueError(
                    "Extension %s wrongfully encodes object to another "
                    "extension object (though it may encode to a list/dict "
                    "that contains other extension objects)." % ext_id
                )
            # Try if the value is of a type we know
            ex = self._extensions_by_cls.get(value.__class__, None)
            # Maybe it's a subclass of a type we know
            if ex is None:
                for name, c in self._extensions.items():
                    if c.match(self, value):
                        ex = name, c.encode
                        break
                else:
                    ex = None
            # Success or fail
            if ex is not None:
                ext_id2, extension_encode = ex
                self._encode(f, extension_encode(self, value), streams, ext_id2)
            else:
                t = (
                    "Class %r is not a valid base BSDF type, nor is it "
                    "handled by an extension."
                )
                raise TypeError(t % value.__class__.__name__)

    def _decode(self, f):
        """ Main decoder function.
        """

        # Get value
        char = f.read(1)
        c = char.lower()

        # Conversion (uppercase value identifiers signify converted values)
        if not char:
            raise EOFError()
        elif char != c:
            n = strunpack("<B", f.read(1))[0]
            # if n == 253: n = strunpack('<Q', f.read(8))[0]  # noqa - no need
            ext_id = f.read(n).decode("UTF-8")
        else:
            ext_id = None

        if c == b"v":
            value = None
        elif c == b"y":
            value = True
        elif c == b"n":
            value = False
        elif c == b"h":
            value = strunpack("<h", f.read(2))[0]
        elif c == b"i":
            value = strunpack("<q", f.read(8))[0]
        elif c == b"f":
            value = strunpack("<f", f.read(4))[0]
        elif c == b"d":
            value = strunpack("<d", f.read(8))[0]
        elif c == b"s":
            n_s = strunpack("<B", f.read(1))[0]
            if n_s == 253:
                n_s = strunpack("<Q", f.read(8))[0]  # noqa
            value = f.read(n_s).decode("UTF-8")
        elif c == b"l":
            n = strunpack("<B", f.read(1))[0]
            if n >= 254:
                # Streaming
                closed = n == 254
                n = strunpack("<Q", f.read(8))[0]
                if self._load_streaming:
                    value = ListStream(n if closed else "r")
                    value._activate(f, self._encode, self._decode)  # noqa
                elif closed:
                    value = [self._decode(f) for i in range(n)]
                else:
                    value = []
                    try:
                        while True:
                            value.append(self._decode(f))
                    except EOFError:
                        pass
            else:
                # Normal
                if n == 253:
                    n = strunpack("<Q", f.read(8))[0]  # noqa
                value = [self._decode(f) for i in range(n)]
        elif c == b"m":
            value = dict()
            n = strunpack("<B", f.read(1))[0]
            if n == 253:
                n = strunpack("<Q", f.read(8))[0]  # noqa
            for i in range(n):
                n_name = strunpack("<B", f.read(1))[0]
                if n_name == 253:
                    n_name = strunpack("<Q", f.read(8))[0]  # noqa
                assert n_name > 0
                name = f.read(n_name).decode("UTF-8")
                value[name] = self._decode(f)
        elif c == b"b":
            if self._lazy_blob:
                value = Blob((f, True))
            else:
                blob = Blob((f, False))
                value = blob.get_bytes()
        else:
            raise RuntimeError("Parse error %r" % char)

        # Convert value if we have an extension for it
        if ext_id is not None:
            extension = self._extensions.get(ext_id, None)
            if extension is not None:
                value = extension.decode(self, value)
            else:
                logger.warning("BSDF warning: no extension found for %r" % ext_id)

        return value

    def encode(self, ob):
        """ Save the given object to bytes.
        """
        f = BytesIO()
        self.save(f, ob)
        return f.getvalue()

    def save(self, f, ob):
        """ Write the given object to the given file object.
        """
        f.write(b"BSDF")
        f.write(struct.pack("<B", VERSION[0]))
        f.write(struct.pack("<B", VERSION[1]))

        # Prepare streaming, this list will have 0 or 1 item at the end
        streams = []

        self._encode(f, ob, streams, None)

        # Verify that stream object was at the end, and add initial elements
        if len(streams) > 0:
            stream = streams[0]
            if stream._start_pos != f.tell():
                raise ValueError(
                    "The stream object must be the last object to be encoded."
                )

    def decode(self, bb):
        """ Load the data structure that is BSDF-encoded in the given bytes.
        """
        f = BytesIO(bb)
        return self.load(f)

    def load(self, f):
        """ Load a BSDF-encoded object from the given file object.
        """
        # Check magic string
        f4 = f.read(4)
        if f4 != b"BSDF":
            raise RuntimeError("This does not look like a BSDF file: %r" % f4)
        # Check version
        major_version = strunpack("<B", f.read(1))[0]
        minor_version = strunpack("<B", f.read(1))[0]
        file_version = "%i.%i" % (major_version, minor_version)
        if major_version != VERSION[0]:  # major version should be 2
            t = (
                "Reading file with different major version (%s) "
                "from the implementation (%s)."
            )
            raise RuntimeError(t % (__version__, file_version))
        if minor_version > VERSION[1]:  # minor should be < ours
            t = (
                "BSDF warning: reading file with higher minor version (%s) "
                "than the implementation (%s)."
            )
            logger.warning(t % (__version__, file_version))

        return self._decode(f)


# %% Streaming and blob-files


class BaseStream(object):
    """ Base class for streams.
    """

    def __init__(self, mode="w"):
        self._i = 0
        self._count = -1
        if isinstance(mode, int):
            self._count = mode
            mode = "r"
        elif mode == "w":
            self._count = 0
        assert mode in ("r", "w")
        self._mode = mode
        self._f = None
        self._start_pos = 0

    def _activate(self, file, encode_func, decode_func):
        if self._f is not None:  # Associated with another write
            raise IOError("Stream object cannot be activated twice?")
        self._f = file
        self._start_pos = self._f.tell()
        self._encode = encode_func
        self._decode = decode_func

    @property
    def mode(self):
        """ The mode of this stream: 'r' or 'w'.
        """
        return self._mode


class ListStream(BaseStream):
    """ A streamable list object used for writing or reading.
    In read mode, it can also be iterated over.
    """

    @property
    def count(self):
        """ The number of elements in the stream (can be -1 for unclosed
        streams in read-mode).
        """
        return self._count

    @property
    def index(self):
        """ The current index of the element to read/write.
        """
        return self._i

    def append(self, item):
        """ Append an item to the streaming list. The object is immediately
        serialized and written to the underlying file.
        """
        # if self._mode != 'w':
        #     raise IOError('This ListStream is not in write mode.')
        if self._count != self._i:
            raise IOError("Can only append items to the end of the stream.")
        if self._f is None:
            raise IOError("List stream is not associated with a file yet.")
        if self._f.closed:
            raise IOError("Cannot stream to a closed file.")
        self._encode(self._f, item, [self], None)
        self._i += 1
        self._count += 1

    def close(self, unstream=False):
        """ Close the stream, marking the number of written elements. New
        elements may still be appended, but they won't be read during decoding.
        If ``unstream`` is True, the stream is turned into a regular list
        (not streaming).
        """
        # if self._mode != 'w':
        #     raise IOError('This ListStream is not in write mode.')
        if self._count != self._i:
            raise IOError("Can only close when at the end of the stream.")
        if self._f is None:
            raise IOError("ListStream is not associated with a file yet.")
        if self._f.closed:
            raise IOError("Cannot close a stream on a closed file.")
        i = self._f.tell()
        self._f.seek(self._start_pos - 8 - 1)
        self._f.write(spack("<B", 253 if unstream else 254))
        self._f.write(spack("<Q", self._count))
        self._f.seek(i)

    def next(self):
        """ Read and return the next element in the streaming list.
        Raises StopIteration if the stream is exhausted.
        """
        if self._mode != "r":
            raise IOError("This ListStream is not in read mode.")
        if self._f is None:
            raise IOError("ListStream is not associated with a file yet.")
        if getattr(self._f, "closed", None):  # not present on 2.7 http req :/
            raise IOError("Cannot read a stream from a closed file.")
        if self._count >= 0:
            if self._i >= self._count:
                raise StopIteration()
            self._i += 1
            return self._decode(self._f)
        else:
            # This raises EOFError at some point.
            try:
                res = self._decode(self._f)
                self._i += 1
                return res
            except EOFError:
                self._count = self._i
                raise StopIteration()

    def __iter__(self):
        if self._mode != "r":
            raise IOError("Cannot iterate: ListStream is not in read mode.")
        return self

    def __next__(self):
        return self.next()
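
# Illustrative usage sketch (hypothetical file name): a write-mode stream
# must be the last object handed to save(), after which items can keep
# being appended to the still-open file:
#
#     ls = ListStream()                  # mode 'w' by default
#     with open("data.bsdf", "wb") as f:
#         save(f, ls)                    # activates the stream
#         ls.append("first")             # serialized and written immediately
#         ls.append("second")
#         ls.close()                     # record the element count
#
# Loading with load(..., load_streaming=True) then yields a read-mode
# ListStream that can be iterated over.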


class Blob(object):
    """ Object to represent a blob of bytes. When used to write a BSDF file,
    it's a wrapper for bytes plus properties such as what compression to apply.
    When used to read a BSDF file, it can be used to read the data lazily, and
    also modify the data if reading in 'r+' mode and the blob isn't compressed.
    """

    # For now, this does not allow re-sizing blobs (within the allocated size)
    # but this can be added later.

    def __init__(self, bb, compression=0, extra_size=0, use_checksum=False):
        if isinstance(bb, bytes):
            self._f = None
            self.compressed = self._from_bytes(bb, compression)
            self.compression = compression
            self.allocated_size = self.used_size + extra_size
            self.use_checksum = use_checksum
        elif isinstance(bb, tuple) and len(bb) == 2 and hasattr(bb[0], "read"):
            self._f, allow_seek = bb
            self.compressed = None
            self._from_file(self._f, allow_seek)
            self._modified = False
        else:
            raise TypeError("Wrong argument to create Blob.")

    def _from_bytes(self, value, compression):
        """ When used to wrap bytes in a blob.
        """
        if compression == 0:
            compressed = value
        elif compression == 1:
            compressed = zlib.compress(value, 9)
        elif compression == 2:
            compressed = bz2.compress(value, 9)
        else:  # pragma: no cover
            assert False, "Unknown compression identifier"

        self.data_size = len(value)
        self.used_size = len(compressed)
        return compressed

    def _to_file(self, f):
        """ Private friend method called by encoder to write a blob to a file.
        """
        # Write sizes - write at least in a size that allows resizing
        if self.allocated_size <= 250 and self.compression == 0:
            f.write(spack("<B", self.allocated_size))
            f.write(spack("<B", self.used_size))
            f.write(lencode(self.data_size))
        else:
            f.write(spack("<BQ", 253, self.allocated_size))
            f.write(spack("<BQ", 253, self.used_size))
            f.write(spack("<BQ", 253, self.data_size))
        # Compression and checksum
        f.write(spack("B", self.compression))
        if self.use_checksum:
            f.write(b"\xff" + hashlib.md5(self.compressed).digest())
        else:
            f.write(b"\x00")
        # Byte alignment (only necessary for uncompressed data)
        if self.compression == 0:
            alignment = 8 - (f.tell() + 1) % 8  # +1 for the byte to write
            f.write(spack("<B", alignment))  # padding for byte alignment
            f.write(b"\x00" * alignment)
        else:
            f.write(spack("<B", 0))
        # The actual data and extra space
        f.write(self.compressed)
        f.write(b"\x00" * (self.allocated_size - self.used_size))

    def _from_file(self, f, allow_seek):
        """ Used when a blob is read by the decoder.
        """
        # Read blob header data (5 to 42 bytes)
        # Size
        allocated_size = strunpack("<B", f.read(1))[0]
        if allocated_size == 253:
            allocated_size = strunpack("<Q", f.read(8))[0]  # noqa
        used_size = strunpack("<B", f.read(1))[0]
        if used_size == 253:
            used_size = strunpack("<Q", f.read(8))[0]  # noqa
        data_size = strunpack("<B", f.read(1))[0]
        if data_size == 253:
            data_size = strunpack("<Q", f.read(8))[0]  # noqa
        # Compression and checksum
        compression = strunpack("<B", f.read(1))[0]
        has_checksum = strunpack("<B", f.read(1))[0]
        if has_checksum:
            checksum = f.read(16)
        # Skip alignment
        alignment = strunpack("<B", f.read(1))[0]
        f.read(alignment)
        # Get or skip data + extra space
        if allow_seek:
            self.start_pos = f.tell()
            self.end_pos = self.start_pos + used_size
            f.seek(self.start_pos + allocated_size)
        else:
            self.start_pos = None
            self.end_pos = None
            self.compressed = f.read(used_size)
            f.read(allocated_size - used_size)
        # Store info
        self.alignment = alignment
        self.compression = compression
        self.use_checksum = checksum if has_checksum else None
        self.used_size = used_size
        self.allocated_size = allocated_size
        self.data_size = data_size

    def seek(self, p):
        """ Seek to the given position (relative to the blob start).
        """
        if self._f is None:
            raise RuntimeError(
                "Cannot seek in a blob that is not created by the BSDF decoder."
            )
        if p < 0:
            p = self.allocated_size + p
        if p < 0 or p > self.allocated_size:
            raise IOError("Seek beyond blob boundaries.")
        self._f.seek(self.start_pos + p)

    def tell(self):
        """ Get the current file pointer position (relative to the blob start).
        """
        if self._f is None:
            raise RuntimeError(
                "Cannot tell in a blob that is not created by the BSDF decoder."
            )
        return self._f.tell() - self.start_pos

    def write(self, bb):
        """ Write bytes to the blob.
        """
        if self._f is None:
            raise RuntimeError(
                "Cannot write in a blob that is not created by the BSDF decoder."
            )
        if self.compression:
            raise IOError("Cannot arbitrarily write in compressed blob.")
        if self._f.tell() + len(bb) > self.end_pos:
            raise IOError("Write beyond blob boundaries.")
        self._modified = True
        return self._f.write(bb)

    def read(self, n):
        """ Read n bytes from the blob.
        """
        if self._f is None:
            raise RuntimeError(
                "Cannot read in a blob that is not created by the BSDF decoder."
            )
        if self.compression:
            raise IOError("Cannot arbitrarily read in compressed blob.")
        if self._f.tell() + n > self.end_pos:
            raise IOError("Read beyond blob boundaries.")
        return self._f.read(n)

    def get_bytes(self):
        """ Get the contents of the blob as bytes.
        """
        if self.compressed is not None:
            compressed = self.compressed
        else:
            i = self._f.tell()
            self.seek(0)
            compressed = self._f.read(self.used_size)
            self._f.seek(i)
        if self.compression == 0:
            value = compressed
        elif self.compression == 1:
            value = zlib.decompress(compressed)
        elif self.compression == 2:
            value = bz2.decompress(compressed)
        else:  # pragma: no cover
            raise RuntimeError("Invalid compression %i" % self.compression)
        return value

    def update_checksum(self):
        """ Reset the blob's checksum if present. Call this after modifying
        the data.
        """
        # or ... should the presence of a checksum mean that data is protected?
        if self.use_checksum and self._modified:
            self.seek(0)
            compressed = self._f.read(self.used_size)
            self._f.seek(self.start_pos - self.alignment - 1 - 16)
            self._f.write(hashlib.md5(compressed).digest())


# %% High-level functions


def encode(ob, extensions=None, **options):
    """ Save (BSDF-encode) the given object to bytes.
    See `BsdfSerializer` for details on extensions and options.
    """
    s = BsdfSerializer(extensions, **options)
    return s.encode(ob)


def save(f, ob, extensions=None, **options):
    """ Save (BSDF-encode) the given object to the given filename or
    file object. See `BsdfSerializer` for details on extensions and options.
    """
    s = BsdfSerializer(extensions, **options)
    if isinstance(f, string_types):
        with open(f, "wb") as fp:
            return s.save(fp, ob)
    else:
        return s.save(f, ob)


def decode(bb, extensions=None, **options):
    """ Load a (BSDF-encoded) structure from bytes.
    See `BsdfSerializer` for details on extensions and options.
    """
    s = BsdfSerializer(extensions, **options)
    return s.decode(bb)


def load(f, extensions=None, **options):
    """ Load a (BSDF-encoded) structure from the given filename or file object.
    See `BsdfSerializer` for details on extensions and options.
    """
    s = BsdfSerializer(extensions, **options)
    if isinstance(f, string_types):
        if f.startswith(("~/", "~\\")):  # pragma: no cover
            f = os.path.expanduser(f)
        with open(f, "rb") as fp:
            return s.load(fp)
    else:
        return s.load(f)


# Aliases for json compat
loads = decode
dumps = encode
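
# Illustrative round-trip with the high-level functions (values chosen
# arbitrarily):
#
#     bb = encode({"name": "spam", "values": [1, 2.5, None]})
#     assert bb[:4] == b"BSDF"           # magic string written by save()
#     assert decode(bb) == {"name": "spam", "values": [1, 2.5, None]}
#
# Options are forwarded to BsdfSerializer, e.g. encode(ob, compression="zlib")
# to zlib-compress binary blobs. Note that dict keys must be identifiers
# (enforced in _encode), and that tuples decode as plain lists.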


# %% Standard extensions

# Defining extensions as a dict would be more compact and feel lighter, but
# that would only allow lambdas, which is too limiting, e.g. for ndarray
# extension.


class Extension(object):
    """ Base class to implement BSDF extensions for special data types.

    Extension classes are provided to the BSDF serializer, which
    instantiates the class. That way, the extension can be somewhat dynamic:
    e.g. the NDArrayExtension exposes the ndarray class only when numpy
    is imported.

    An extension instance must have two attributes. These can be attributes of
    the class, or of the instance set in ``__init__()``:

    * name (str): the name by which encoded values will be identified.
    * cls (type): the type (or list of types) to match values with.
      This is optional, but it makes the encoder select extensions faster.

    Further, it needs 3 methods:

    * `match(serializer, value) -> bool`: return whether the extension can
      convert the given value. The default is ``isinstance(value, self.cls)``.
    * `encode(serializer, value) -> encoded_value`: the function to encode a
      value to more basic data types.
    * `decode(serializer, encoded_value) -> value`: the function to decode an
      encoded value back to its intended representation.

    """

    name = ""
    cls = ()

    def __repr__(self):
        return "<BSDF extension %r at 0x%s>" % (self.name, hex(id(self)))

    def match(self, s, v):
        return isinstance(v, self.cls)

    def encode(self, s, v):
        raise NotImplementedError()

    def decode(self, s, v):
        raise NotImplementedError()


class ComplexExtension(Extension):

    name = "c"
    cls = complex

    def encode(self, s, v):
        return (v.real, v.imag)

    def decode(self, s, v):
        return complex(v[0], v[1])


class NDArrayExtension(Extension):

    name = "ndarray"

    def __init__(self):
        if "numpy" in sys.modules:
            import numpy as np

            self.cls = np.ndarray

    def match(self, s, v):  # pragma: no cover - e.g. work for nd arrays in JS
        return hasattr(v, "shape") and hasattr(v, "dtype") and hasattr(v, "tobytes")

    def encode(self, s, v):
        return dict(shape=v.shape, dtype=text_type(v.dtype), data=v.tobytes())

    def decode(self, s, v):
        try:
            import numpy as np
        except ImportError:  # pragma: no cover
            return v
        a = np.frombuffer(v["data"], dtype=v["dtype"])
        a.shape = v["shape"]
        return a


standard_extensions = [ComplexExtension, NDArrayExtension]
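
# An illustrative custom extension (hypothetical, not shipped with BSDF):
# encode Python sets as lists under the name "set", and pass it in via
# e.g. BsdfSerializer(standard_extensions + [SetExtension]):
#
#     class SetExtension(Extension):
#         name = "set"
#         cls = set
#
#         def encode(self, s, v):
#             return list(v)
#
#         def decode(self, s, v):
#             return set(v)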


if __name__ == "__main__":
    # Invoke CLI
    import bsdf_cli

    bsdf_cli.main()
926
venv/Lib/site-packages/imageio/plugins/_dicom.py
Normal file
@@ -0,0 +1,926 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" Plugin for reading DICOM files.
"""

# todo: Use pydicom:
# * Note: is not py3k ready yet
# * Allow reading the full meta info
#   I think we can more or less replace the SimpleDicomReader with a
#   pydicom.Dataset. For series, only need to read the full info from one
#   file: speed still high
# * Perhaps allow writing?

import sys
import os
import struct
import logging

import numpy as np


logger = logging.getLogger(__name__)

# Determine endianness of system
sys_is_little_endian = sys.byteorder == "little"

# Define a dictionary that contains the tags that we would like to know
MINIDICT = {
    (0x7FE0, 0x0010): ("PixelData", "OB"),
    # Date and time
    (0x0008, 0x0020): ("StudyDate", "DA"),
    (0x0008, 0x0021): ("SeriesDate", "DA"),
    (0x0008, 0x0022): ("AcquisitionDate", "DA"),
    (0x0008, 0x0023): ("ContentDate", "DA"),
    (0x0008, 0x0030): ("StudyTime", "TM"),
    (0x0008, 0x0031): ("SeriesTime", "TM"),
    (0x0008, 0x0032): ("AcquisitionTime", "TM"),
    (0x0008, 0x0033): ("ContentTime", "TM"),
    # With what, where, by whom?
    (0x0008, 0x0060): ("Modality", "CS"),
    (0x0008, 0x0070): ("Manufacturer", "LO"),
    (0x0008, 0x0080): ("InstitutionName", "LO"),
    # Descriptions
    (0x0008, 0x1030): ("StudyDescription", "LO"),
    (0x0008, 0x103E): ("SeriesDescription", "LO"),
    # UID's
    (0x0008, 0x0016): ("SOPClassUID", "UI"),
    (0x0008, 0x0018): ("SOPInstanceUID", "UI"),
    (0x0020, 0x000D): ("StudyInstanceUID", "UI"),
    (0x0020, 0x000E): ("SeriesInstanceUID", "UI"),
    (0x0008, 0x0117): ("ContextUID", "UI"),
    # Numbers
    (0x0020, 0x0011): ("SeriesNumber", "IS"),
    (0x0020, 0x0012): ("AcquisitionNumber", "IS"),
    (0x0020, 0x0013): ("InstanceNumber", "IS"),
    (0x0020, 0x0014): ("IsotopeNumber", "IS"),
    (0x0020, 0x0015): ("PhaseNumber", "IS"),
    (0x0020, 0x0016): ("IntervalNumber", "IS"),
    (0x0020, 0x0017): ("TimeSlotNumber", "IS"),
    (0x0020, 0x0018): ("AngleNumber", "IS"),
    (0x0020, 0x0019): ("ItemNumber", "IS"),
    (0x0020, 0x0020): ("PatientOrientation", "CS"),
    (0x0020, 0x0030): ("ImagePosition", "CS"),
    (0x0020, 0x0032): ("ImagePositionPatient", "CS"),
    (0x0020, 0x0035): ("ImageOrientation", "CS"),
    (0x0020, 0x0037): ("ImageOrientationPatient", "CS"),
    # Patient information
    (0x0010, 0x0010): ("PatientName", "PN"),
    (0x0010, 0x0020): ("PatientID", "LO"),
    (0x0010, 0x0030): ("PatientBirthDate", "DA"),
    (0x0010, 0x0040): ("PatientSex", "CS"),
    (0x0010, 0x1010): ("PatientAge", "AS"),
    (0x0010, 0x1020): ("PatientSize", "DS"),
    (0x0010, 0x1030): ("PatientWeight", "DS"),
    # Image specific (required to construct numpy array)
    (0x0028, 0x0002): ("SamplesPerPixel", "US"),
    (0x0028, 0x0008): ("NumberOfFrames", "IS"),
    (0x0028, 0x0100): ("BitsAllocated", "US"),
    (0x0028, 0x0101): ("BitsStored", "US"),
    (0x0028, 0x0102): ("HighBit", "US"),
    (0x0028, 0x0103): ("PixelRepresentation", "US"),
    (0x0028, 0x0010): ("Rows", "US"),
    (0x0028, 0x0011): ("Columns", "US"),
    (0x0028, 0x1052): ("RescaleIntercept", "DS"),
    (0x0028, 0x1053): ("RescaleSlope", "DS"),
    # Image specific (for the user)
    (0x0028, 0x0030): ("PixelSpacing", "DS"),
    (0x0018, 0x0088): ("SliceSpacing", "DS"),
}

# Define some special tags:
# See PS 3.5-2008 section 7.5 (p.40)
ItemTag = (0xFFFE, 0xE000)  # start of Sequence Item
ItemDelimiterTag = (0xFFFE, 0xE00D)  # end of Sequence Item
SequenceDelimiterTag = (0xFFFE, 0xE0DD)  # end of Sequence of undefined length

# Define set of groups that we're interested in (so we can quickly skip others)
GROUPS = set([key[0] for key in MINIDICT.keys()])
VRS = set([val[1] for val in MINIDICT.values()])
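
# Illustrative note: in explicit VR little endian, a data element such as
# Rows (0x0028, 0x0010) with value 512 is laid out on disk as
#
#     28 00  10 00  55 53  02 00  00 02
#     group  elem   "US"   length value (uint16 LE)
#
# _readDataElement below parses exactly this layout, using a 4-byte length
# (after 2 reserved bytes) for OB/OW/SQ/UN, and no VR bytes at all for
# implicit VR transfer syntaxes.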
|
||||
|
||||
class NotADicomFile(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class CompressedDicom(RuntimeError):
|
||||
pass
|
||||
|
||||
|
||||
class SimpleDicomReader(object):
|
||||
"""
|
||||
This class provides reading of pixel data from DICOM files. It is
|
||||
focussed on getting the pixel data, not the meta info.
|
||||
|
||||
To use, first create an instance of this class (giving it
|
||||
a file object or filename). Next use the info attribute to
|
||||
get a dict of the meta data. The loading of pixel data is
|
||||
deferred until get_numpy_array() is called.
|
||||
|
||||
Comparison with Pydicom
|
||||
-----------------------
|
||||
|
||||
This code focusses on getting the pixel data out, which allows some
|
||||
shortcuts, resulting in the code being much smaller.
|
||||
|
||||
Since the processing of data elements is much cheaper (it skips a lot
|
||||
of tags), this code is about 3x faster than pydicom (except for the
|
||||
deflated DICOM files).
|
||||
|
||||
This class does borrow some code (and ideas) from the pydicom
|
||||
project, and (to the best of our knowledge) has the same limitations
|
||||
as pydicom with regard to the type of files that it can handle.
|
||||
|
||||
Limitations
|
||||
-----------
|
||||
|
||||
For more advanced DICOM processing, please check out pydicom.
|
||||
|
||||
* Only a predefined subset of data elements (meta information) is read.
|
||||
* This is a reader; it can not write DICOM files.
|
||||
* (just like pydicom) it can handle none of the compressed DICOM
|
||||
formats except for "Deflated Explicit VR Little Endian"
|
||||
(1.2.840.10008.1.2.1.99).
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, file):
|
||||
# Open file if filename given
|
||||
if isinstance(file, str):
|
||||
self._filename = file
|
||||
self._file = open(file, "rb")
|
||||
else:
|
||||
self._filename = "<unknown file>"
|
||||
self._file = file
|
||||
# Init variable to store position and size of pixel data
|
||||
self._pixel_data_loc = None
|
||||
# The meta header is always explicit and little endian
|
||||
self.is_implicit_VR = False
|
||||
self.is_little_endian = True
|
||||
self._unpackPrefix = "<"
|
||||
# Dict to store data elements of interest in
|
||||
self._info = {}
|
||||
# VR Conversion
|
||||
self._converters = {
|
||||
# Numbers
|
||||
"US": lambda x: self._unpack("H", x),
|
||||
"UL": lambda x: self._unpack("L", x),
|
||||
# Numbers encoded as strings
|
||||
"DS": lambda x: self._splitValues(x, float, "\\"),
|
||||
"IS": lambda x: self._splitValues(x, int, "\\"),
|
||||
# strings
|
||||
"AS": lambda x: x.decode("ascii", "ignore").strip("\x00"),
|
||||
"DA": lambda x: x.decode("ascii", "ignore").strip("\x00"),
|
||||
"TM": lambda x: x.decode("ascii", "ignore").strip("\x00"),
|
||||
"UI": lambda x: x.decode("ascii", "ignore").strip("\x00"),
|
||||
"LO": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
|
||||
"CS": lambda x: self._splitValues(x, float, "\\"),
|
||||
"PN": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(),
|
||||
}
|
||||
|
||||
# Initiate reading
|
||||
self._read()
|
||||
|
||||
@property
|
||||
def info(self):
|
||||
return self._info
|
||||
|
||||
def _splitValues(self, x, type, splitter):
|
||||
s = x.decode("ascii").strip("\x00")
|
||||
try:
|
||||
if splitter in s:
|
||||
return tuple([type(v) for v in s.split(splitter) if v.strip()])
|
||||
else:
|
||||
return type(s)
|
||||
except ValueError:
|
||||
return s
|
||||
|
||||
def _unpack(self, fmt, value):
|
||||
return struct.unpack(self._unpackPrefix + fmt, value)[0]
|
||||
|
||||
# Really only so we need minimal changes to _pixel_data_numpy
|
||||
def __iter__(self):
|
||||
return iter(self._info.keys())
|
||||
|
||||
def __getattr__(self, key):
|
||||
info = object.__getattribute__(self, "_info")
|
||||
if key in info:
|
||||
return info[key]
|
||||
return object.__getattribute__(self, key) # pragma: no cover
|
||||
|
||||
def _read(self):
|
||||
f = self._file
|
||||
# Check prefix after peamble
|
||||
f.seek(128)
|
||||
if f.read(4) != b"DICM":
|
||||
raise NotADicomFile("Not a valid DICOM file.")
|
||||
# Read
|
||||
self._read_header()
|
||||
self._read_data_elements()
|
||||
self._get_shape_and_sampling()
|
||||
# Close if done, reopen if necessary to read pixel data
|
||||
if os.path.isfile(self._filename):
|
||||
self._file.close()
|
||||
self._file = None
|
||||
|
||||
def _readDataElement(self):
|
||||
f = self._file
|
||||
# Get group and element
|
||||
group = self._unpack("H", f.read(2))
|
||||
element = self._unpack("H", f.read(2))
|
||||
# Get value length
|
||||
if self.is_implicit_VR:
|
||||
vl = self._unpack("I", f.read(4))
|
||||
else:
|
||||
vr = f.read(2)
|
||||
if vr in (b"OB", b"OW", b"SQ", b"UN"):
|
||||
reserved = f.read(2) # noqa
|
||||
vl = self._unpack("I", f.read(4))
|
||||
else:
|
||||
vl = self._unpack("H", f.read(2))
|
||||
# Get value
|
||||
if group == 0x7FE0 and element == 0x0010:
|
||||
here = f.tell()
|
||||
self._pixel_data_loc = here, vl
|
||||
f.seek(here + vl)
|
||||
return group, element, b"Deferred loading of pixel data"
|
||||
else:
|
||||
if vl == 0xFFFFFFFF:
|
||||
value = self._read_undefined_length_value()
|
||||
else:
|
||||
value = f.read(vl)
|
||||
return group, element, value
|
||||
|
||||
def _read_undefined_length_value(self, read_size=128):
|
||||
""" Copied (in compacted form) from PyDicom
|
||||
Copyright Darcy Mason.
|
||||
"""
|
||||
fp = self._file
|
||||
# data_start = fp.tell()
|
||||
search_rewind = 3
|
||||
bytes_to_find = struct.pack(
|
||||
self._unpackPrefix + "HH", SequenceDelimiterTag[0], SequenceDelimiterTag[1]
|
||||
)
|
||||
|
||||
found = False
|
||||
value_chunks = []
|
||||
while not found:
|
||||
chunk_start = fp.tell()
|
||||
bytes_read = fp.read(read_size)
|
||||
if len(bytes_read) < read_size:
|
||||
# try again,
|
||||
# if still don't get required amount, this is last block
|
||||
new_bytes = fp.read(read_size - len(bytes_read))
|
||||
bytes_read += new_bytes
|
||||
if len(bytes_read) < read_size:
|
||||
raise EOFError(
|
||||
"End of file reached before sequence " "delimiter found."
|
||||
)
|
||||
index = bytes_read.find(bytes_to_find)
|
||||
if index != -1:
|
||||
found = True
|
||||
value_chunks.append(bytes_read[:index])
|
||||
fp.seek(chunk_start + index + 4) # rewind to end of delimiter
|
||||
length = fp.read(4)
|
||||
if length != b"\0\0\0\0":
|
||||
logger.warning(
|
||||
"Expected 4 zero bytes after undefined length " "delimiter"
|
||||
)
|
||||
else:
|
||||
fp.seek(fp.tell() - search_rewind) # rewind a bit
|
||||
# accumulate the bytes read (not including the rewind)
|
||||
value_chunks.append(bytes_read[:-search_rewind])
|
||||
|
||||
# if get here then have found the byte string
|
||||
return b"".join(value_chunks)
|
||||
|
||||
def _read_header(self):
|
||||
f = self._file
|
||||
TransferSyntaxUID = None
|
||||
|
||||
# Read all elements, store transferSyntax when we encounter it
|
||||
try:
|
||||
while True:
|
||||
fp_save = f.tell()
|
||||
# Get element
|
||||
group, element, value = self._readDataElement()
|
||||
if group == 0x02:
|
||||
if group == 0x02 and element == 0x10:
|
||||
TransferSyntaxUID = value.decode("ascii").strip("\x00")
|
||||
else:
|
||||
# No more group 2: rewind and break
|
||||
# (don't trust group length)
|
||||
f.seek(fp_save)
|
||||
break
|
||||
except (EOFError, struct.error): # pragma: no cover
|
||||
raise RuntimeError("End of file reached while still in header.")
|
||||
|
||||
# Handle transfer syntax
|
||||
self._info["TransferSyntaxUID"] = TransferSyntaxUID
|
||||
#
|
||||
if TransferSyntaxUID is None:
|
||||
# Assume ExplicitVRLittleEndian
|
||||
is_implicit_VR, is_little_endian = False, True
|
||||
elif TransferSyntaxUID == "1.2.840.10008.1.2.1":
|
||||
# ExplicitVRLittleEndian
|
||||
is_implicit_VR, is_little_endian = False, True
|
||||
elif TransferSyntaxUID == "1.2.840.10008.1.2.2":
|
||||
# ExplicitVRBigEndian
|
||||
is_implicit_VR, is_little_endian = False, False
|
||||
elif TransferSyntaxUID == "1.2.840.10008.1.2":
|
||||
# implicit VR little endian
|
||||
is_implicit_VR, is_little_endian = True, True
|
||||
elif TransferSyntaxUID == "1.2.840.10008.1.2.1.99":
|
||||
# DeflatedExplicitVRLittleEndian:
|
||||
is_implicit_VR, is_little_endian = False, True
|
||||
self._inflate()
|
||||
else:
|
||||
# http://www.dicomlibrary.com/dicom/transfer-syntax/
|
||||
t, extra_info = TransferSyntaxUID, ""
|
||||
if "1.2.840.10008.1.2.4.50" <= t < "1.2.840.10008.1.2.4.99":
|
||||
extra_info = " (JPEG)"
|
||||
if "1.2.840.10008.1.2.4.90" <= t < "1.2.840.10008.1.2.4.99":
|
||||
extra_info = " (JPEG 2000)"
|
||||
if t == "1.2.840.10008.1.2.5":
|
||||
extra_info = " (RLE)"
|
||||
if t == "1.2.840.10008.1.2.6.1":
|
||||
extra_info = " (RFC 2557)"
|
||||
raise CompressedDicom(
|
||||
"The dicom reader can only read files with "
|
||||
"uncompressed image data - not %r%s. You "
|
||||
"can try using dcmtk or gdcm to convert the "
|
||||
"image." % (t, extra_info)
|
||||
)
|
||||
|
||||
# From hereon, use implicit/explicit big/little endian
|
||||
self.is_implicit_VR = is_implicit_VR
|
||||
self.is_little_endian = is_little_endian
|
||||
self._unpackPrefix = "><"[is_little_endian]
|
||||
|
||||
def _read_data_elements(self):
|
||||
info = self._info
|
||||
try:
|
||||
while True:
|
||||
# Get element
|
||||
group, element, value = self._readDataElement()
|
||||
# Is it a group we are interested in?
|
||||
if group in GROUPS:
|
||||
key = (group, element)
|
||||
name, vr = MINIDICT.get(key, (None, None))
|
||||
# Is it an element we are interested in?
|
||||
if name:
|
||||
# Store value
|
||||
converter = self._converters.get(vr, lambda x: x)
|
||||
info[name] = converter(value)
|
||||
except (EOFError, struct.error):
|
||||
pass # end of file ...
|
||||
|
||||
def get_numpy_array(self):
|
||||
""" Get numpy arra for this DICOM file, with the correct shape,
|
||||
and pixel values scaled appropriately.
|
||||
"""
|
||||
# Is there pixel data at all?
|
||||
if "PixelData" not in self:
|
||||
raise TypeError("No pixel data found in this dataset.")
|
||||
|
||||
# Load it now if it was not already loaded
|
||||
if self._pixel_data_loc and len(self.PixelData) < 100:
|
||||
# Reopen file?
|
||||
close_file = False
|
||||
if self._file is None:
|
||||
close_file = True
|
||||
self._file = open(self._filename, "rb")
|
||||
# Read data
|
||||
self._file.seek(self._pixel_data_loc[0])
|
||||
if self._pixel_data_loc[1] == 0xFFFFFFFF:
|
||||
value = self._read_undefined_length_value()
|
||||
else:
|
||||
value = self._file.read(self._pixel_data_loc[1])
|
||||
# Close file
|
||||
if close_file:
|
||||
self._file.close()
|
||||
self._file = None
|
||||
# Overwrite
|
||||
self._info["PixelData"] = value
|
||||
|
||||
# Get data
|
||||
data = self._pixel_data_numpy()
|
||||
data = self._apply_slope_and_offset(data)
|
||||
|
||||
# Remove data again to preserve memory
|
||||
# Note that the data for the original file is loaded twice ...
|
||||
self._info["PixelData"] = (
|
||||
b"Data converted to numpy array, " + b"raw data removed to preserve memory"
|
||||
)
|
||||
return data
|
||||
|
||||
def _get_shape_and_sampling(self):
|
||||
""" Get shape and sampling without actuall using the pixel data.
|
||||
In this way, the user can get an idea what's inside without having
|
||||
to load it.
|
||||
"""
|
||||
# Get shape (in the same way that pydicom does)
|
||||
if "NumberOfFrames" in self and self.NumberOfFrames > 1:
|
||||
if self.SamplesPerPixel > 1:
|
||||
shape = (
|
||||
self.SamplesPerPixel,
|
||||
self.NumberOfFrames,
|
||||
self.Rows,
|
||||
self.Columns,
|
||||
)
|
||||
else:
|
||||
shape = self.NumberOfFrames, self.Rows, self.Columns
|
||||
elif "SamplesPerPixel" in self:
|
||||
if self.SamplesPerPixel > 1:
|
||||
if self.BitsAllocated == 8:
|
||||
shape = self.SamplesPerPixel, self.Rows, self.Columns
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
"DICOM plugin only handles "
|
||||
"SamplesPerPixel > 1 if Bits "
|
||||
"Allocated = 8"
|
||||
)
|
||||
else:
|
||||
shape = self.Rows, self.Columns
|
||||
else:
|
||||
raise RuntimeError(
|
||||
"DICOM file has no SamplesPerPixel " "(perhaps this is a report?)"
|
||||
)
|
||||
|
||||
# Try getting sampling between pixels
|
||||
if "PixelSpacing" in self:
|
||||
sampling = float(self.PixelSpacing[0]), float(self.PixelSpacing[1])
|
||||
else:
|
||||
sampling = 1.0, 1.0
|
||||
if "SliceSpacing" in self:
|
||||
sampling = (abs(self.SliceSpacing),) + sampling
|
||||
|
||||
# Ensure that sampling has as many elements as shape
|
||||
sampling = (1.0,) * (len(shape) - len(sampling)) + sampling[-len(shape) :]
|
||||
|
||||
# Set shape and sampling
|
||||
self._info["shape"] = shape
|
||||
self._info["sampling"] = sampling
|
||||
|
||||
def _pixel_data_numpy(self):
|
||||
"""Return a NumPy array of the pixel data.
|
||||
"""
|
||||
# Taken from pydicom
|
||||
# Copyright (c) 2008-2012 Darcy Mason
|
||||
|
||||
if "PixelData" not in self:
|
||||
raise TypeError("No pixel data found in this dataset.")
|
||||
|
||||
# determine the type used for the array
|
||||
need_byteswap = self.is_little_endian != sys_is_little_endian
|
||||
|
||||
# Make NumPy format code, e.g. "uint16", "int32" etc
|
||||
# from two pieces of info:
|
||||
# self.PixelRepresentation -- 0 for unsigned, 1 for signed;
|
||||
# self.BitsAllocated -- 8, 16, or 32
|
||||
format_str = "%sint%d" % (
|
||||
("u", "")[self.PixelRepresentation],
|
||||
self.BitsAllocated,
|
||||
)
|
||||
try:
|
||||
numpy_format = np.dtype(format_str)
|
||||
except TypeError: # pragma: no cover
|
||||
raise TypeError(
|
||||
"Data type not understood by NumPy: format='%s', "
|
||||
" PixelRepresentation=%d, BitsAllocated=%d"
|
||||
% (numpy_format, self.PixelRepresentation, self.BitsAllocated)
|
||||
)
|
||||
|
||||
# Have correct Numpy format, so create the NumPy array
|
||||
arr = np.frombuffer(self.PixelData, numpy_format).copy()
|
||||
|
||||
# XXX byte swap - may later handle this in read_file!!?
|
||||
if need_byteswap:
|
||||
arr.byteswap(True) # True means swap in-place, don't make new copy
|
||||
|
||||
# Note the following reshape operations return a new *view* onto arr,
|
||||
# but don't copy the data
|
||||
arr = arr.reshape(*self._info["shape"])
|
||||
return arr
|
||||
|
||||
    def _apply_slope_and_offset(self, data):
        """
        If RescaleSlope and RescaleIntercept are present in the data,
        apply them. The data type of the data is changed if necessary.
        """
        # Obtain slope and offset
        slope, offset = 1, 0
        needFloats, needApplySlopeOffset = False, False
        if "RescaleSlope" in self:
            needApplySlopeOffset = True
            slope = self.RescaleSlope
        if "RescaleIntercept" in self:
            needApplySlopeOffset = True
            offset = self.RescaleIntercept
        if int(slope) != slope or int(offset) != offset:
            needFloats = True
        if not needFloats:
            slope, offset = int(slope), int(offset)

        # Apply slope and offset
        if needApplySlopeOffset:
            # Maybe we need to change the datatype?
            if data.dtype in [np.float32, np.float64]:
                pass
            elif needFloats:
                data = data.astype(np.float32)
            else:
                # Determine required range
                minReq, maxReq = data.min(), data.max()
                minReq = min([minReq, minReq * slope + offset, maxReq * slope + offset])
                maxReq = max([maxReq, minReq * slope + offset, maxReq * slope + offset])

                # Determine required datatype from that
                dtype = None
                if minReq < 0:
                    # Signed integer type
                    maxReq = max([-minReq, maxReq])
                    if maxReq < 2 ** 7:
                        dtype = np.int8
                    elif maxReq < 2 ** 15:
                        dtype = np.int16
                    elif maxReq < 2 ** 31:
                        dtype = np.int32
                    else:
                        dtype = np.float32
                else:
                    # Unsigned integer type
                    if maxReq < 2 ** 8:
                        dtype = np.uint8
                    elif maxReq < 2 ** 16:
                        dtype = np.uint16
                    elif maxReq < 2 ** 32:
                        dtype = np.uint32
                    else:
                        dtype = np.float32
                # Change datatype
                if dtype != data.dtype:
                    data = data.astype(dtype)

            # Apply slope and offset
            data *= slope
            data += offset

        # Done
        return data

    def _inflate(self):
        # Taken from pydicom
        # Copyright (c) 2008-2012 Darcy Mason
        import zlib
        from io import BytesIO

        # See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset
        # following the file metadata was prepared the normal way,
        # then "deflate" compression applied.
        # All that is needed here is to decompress and then
        # use as normal in a file-like object
        zipped = self._file.read()
        # -MAX_WBITS part is from comp.lang.python answer:
        # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799
        unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS)
        self._file = BytesIO(unzipped)  # a file-like object

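
# A minimal, hypothetical sketch (not part of the plugin) of the rescale
# logic in _apply_slope_and_offset above: DICOM stores raw values plus
# RescaleSlope/RescaleIntercept, and the reader widens the dtype only as
# far as the rescaled value range requires.
def _demo_rescale():
    raw = np.array([0, 1000, 2000], np.uint16)
    slope, offset = 1, -1024  # typical CT rescale to Hounsfield units
    data = raw.astype(np.int16)  # rescaled range [-1024, 976] fits in int16
    data = data * slope + offset
    assert data.min() == -1024 and data.max() == 976
    return data

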
class DicomSeries(object):
    """ DicomSeries
    This class represents a series of DICOM files (SimpleDicomReader
    objects) that belong together. If these are multiple files, they
    represent the slices of a volume (like for CT or MRI).
    """

    def __init__(self, suid, progressIndicator):
        # Init dataset list and the callback
        self._entries = []

        # Init props
        self._suid = suid
        self._info = {}
        self._progressIndicator = progressIndicator

    def __len__(self):
        return len(self._entries)

    def __iter__(self):
        return iter(self._entries)

    def __getitem__(self, index):
        return self._entries[index]

    @property
    def suid(self):
        return self._suid

    @property
    def shape(self):
        """ The shape of the data (nz, ny, nx). """
        return self._info["shape"]

    @property
    def sampling(self):
        """ The sampling (voxel distances) of the data (dz, dy, dx). """
        return self._info["sampling"]

    @property
    def info(self):
        """ A dictionary containing the information as present in the
        first DICOM file of this series. None if there are no entries. """
        return self._info

    @property
    def description(self):
        """ A description of the DICOM series. Used fields are
        PatientName, shape of the data, SeriesDescription, and
        ImageComments.
        """
        info = self.info

        # If no info available, return simple description
        if not info:  # pragma: no cover
            return "DicomSeries containing %i images" % len(self)

        fields = []
        # Give patient name
        if "PatientName" in info:
            fields.append("" + info["PatientName"])
        # Also add dimensions
        if self.shape:
            tmp = [str(d) for d in self.shape]
            fields.append("x".join(tmp))
        # Try adding more fields
        if "SeriesDescription" in info:
            fields.append("'" + info["SeriesDescription"] + "'")
        if "ImageComments" in info:
            fields.append("'" + info["ImageComments"] + "'")

        # Combine
        return " ".join(fields)

    def __repr__(self):
        adr = hex(id(self)).upper()
        return "<DicomSeries with %i images at %s>" % (len(self), adr)

    def get_numpy_array(self):
        """ Get (load) the data that this DicomSeries represents, and return
        it as a numpy array. If this series contains multiple images, the
        resulting array is 3D, otherwise it's 2D.
        """

        # It's easy if no file or if just a single file
        if len(self) == 0:
            raise ValueError("Series does not contain any files.")
        elif len(self) == 1:
            return self[0].get_numpy_array()

        # Check info
        if self.info is None:
            raise RuntimeError("Cannot return volume if series not finished.")

        # Init data (using what the dicom package produces as a reference)
        slice = self[0].get_numpy_array()
        vol = np.zeros(self.shape, dtype=slice.dtype)
        vol[0] = slice

        # Fill volume
        self._progressIndicator.start("loading data", "", len(self))
        for z in range(1, len(self)):
            vol[z] = self[z].get_numpy_array()
            self._progressIndicator.set_progress(z + 1)
        self._progressIndicator.finish()

        # Done
        import gc

        gc.collect()
        return vol

    def _append(self, dcm):
        self._entries.append(dcm)

    def _sort(self):
        self._entries.sort(key=lambda k: k.InstanceNumber)

    def _finish(self):
        """
        Evaluate the series of DICOM files. Together they should make up
        a volumetric dataset. This means the files should meet certain
        conditions. Also some additional information has to be calculated,
        such as the distance between the slices. This method sets the
        attributes for "shape", "sampling" and "info".

        This method checks:
          * that there are no missing files
          * that the dimensions of all images match
          * that the pixel spacing of all images match
        """

        # The datasets list should be sorted by instance number
        L = self._entries
        if len(L) == 0:
            return
        elif len(L) == 1:
            self._info = L[0].info
            return

        # Get previous
        ds1 = L[0]
        # Init measures to calculate average of
        distance_sum = 0.0
        # Init measures to check (these are in 2D)
        dimensions = ds1.Rows, ds1.Columns
        # sampling = float(ds1.PixelSpacing[0]), float(ds1.PixelSpacing[1])
        sampling = ds1.info["sampling"][:2]  # row, column

        for index in range(len(L)):
            # The first round ds1 and ds2 will be the same, for the
            # distance calculation this does not matter
            # Get current
            ds2 = L[index]
            # Get positions
            pos1 = float(ds1.ImagePositionPatient[2])
            pos2 = float(ds2.ImagePositionPatient[2])
            # Update distance_sum to calculate distance later
            distance_sum += abs(pos1 - pos2)
            # Test measures
            dimensions2 = ds2.Rows, ds2.Columns
            # sampling2 = float(ds2.PixelSpacing[0]), float(ds2.PixelSpacing[1])
            sampling2 = ds2.info["sampling"][:2]  # row, column
            if dimensions != dimensions2:
                # We cannot produce a volume if the dimensions do not match
                raise ValueError("Dimensions of slices do not match.")
            if sampling != sampling2:
                # We can still produce a volume, but we should notify the user
                self._progressIndicator.write("Warn: sampling does not match.")
            # Store previous
            ds1 = ds2

        # Finish calculating average distance
        # (Note that there are len(L)-1 distances)
        distance_mean = distance_sum / (len(L) - 1)

        # Set info dict
        self._info = L[0].info.copy()

        # Store information that is specific for the series
        self._info["shape"] = (len(L),) + ds2.info["shape"]
        self._info["sampling"] = (distance_mean,) + ds2.info["sampling"]


def list_files(files, path):
    """List all files in the directory, recursively. """
    for item in os.listdir(path):
        item = os.path.join(path, item)
        if os.path.isdir(item):
            list_files(files, item)
        elif os.path.isfile(item):
            files.append(item)

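
# For illustration, a hypothetical equivalent of list_files() using only the
# standard library; both produce a flat list of all file paths under path:
#
#     files = [os.path.join(root, fn)
#              for root, _dirs, fns in os.walk(path)
#              for fn in fns]

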
def process_directory(request, progressIndicator, readPixelData=False):
    """
    Reads DICOM files and returns a list of DicomSeries objects, which
    contain information about the data, and can be used to load the
    image or volume data.

    If readPixelData is True, the pixel data of all series is read. By
    default the loading of pixel data is deferred until it is requested
    using the DicomSeries.get_numpy_array() method. In general, both
    methods should be equally fast.
    """
    # Get directory to examine
    if os.path.isdir(request.filename):
        path = request.filename
    elif os.path.isfile(request.filename):
        path = os.path.dirname(request.filename)
    else:  # pragma: no cover - tested earlier
        raise ValueError(
            "Dicom plugin needs a valid filename to examine " "the directory"
        )

    # Check files
    files = []
    list_files(files, path)  # Find files recursively

    # Gather file data and put in DicomSeries
    series = {}
    count = 0
    progressIndicator.start("examining files", "files", len(files))
    for filename in files:
        # Show progress (note that we always start with a 0.0)
        count += 1
        progressIndicator.set_progress(count)
        # Skip DICOMDIR files
        if filename.count("DICOMDIR"):  # pragma: no cover
            continue
        # Try loading dicom ...
        try:
            dcm = SimpleDicomReader(filename)
        except NotADicomFile:
            continue  # skip non-dicom file
        except Exception as why:  # pragma: no cover
            progressIndicator.write(str(why))
            continue
        # Get SUID and register the file with an existing or new series object
        try:
            suid = dcm.SeriesInstanceUID
        except AttributeError:  # pragma: no cover
            continue  # some other kind of dicom file
        if suid not in series:
            series[suid] = DicomSeries(suid, progressIndicator)
        series[suid]._append(dcm)

    # Finish progress
    # progressIndicator.finish('Found %i series.' % len(series))

    # Make a list and sort, so that the order is deterministic
    series = list(series.values())
    series.sort(key=lambda x: x.suid)

    # Split series if necessary
    for serie in reversed([serie for serie in series]):
        splitSerieIfRequired(serie, series, progressIndicator)

    # Finish all series
    # progressIndicator.start('analyse series', '', len(series))
    series_ = []
    for i in range(len(series)):
        try:
            series[i]._finish()
            series_.append(series[i])
        except Exception as err:  # pragma: no cover
            progressIndicator.write(str(err))
            pass  # Skip series (probably report-like file without pixels)
        # progressIndicator.set_progress(i+1)
    progressIndicator.finish("Found %i correct series." % len(series_))

    # Done
    return series_

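
# A hypothetical usage sketch of the pieces above (the request and progress
# indicator come from imageio.core; the names here are illustration only):
#
#     series = process_directory(request, progressIndicator)
#     vol = series[0].get_numpy_array()  # 3D array for the first series

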
def splitSerieIfRequired(serie, series, progressIndicator):
    """
    Split the serie in multiple series if this is required. The choice
    is based on examining the image position relative to the previous
    image. If it differs too much, it is assumed that there is a new
    dataset. This can happen for example in unsplit gated CT data.
    """

    # Sort the original list and get local name
    serie._sort()
    L = serie._entries
    # Init previous slice
    ds1 = L[0]
    # Check whether we can do this
    if "ImagePositionPatient" not in ds1:
        return
    # Initialize a list of new lists
    L2 = [[ds1]]
    # Init slice distance estimate
    distance = 0

    for index in range(1, len(L)):
        # Get current slice
        ds2 = L[index]
        # Get positions
        pos1 = float(ds1.ImagePositionPatient[2])
        pos2 = float(ds2.ImagePositionPatient[2])
        # Get distances
        newDist = abs(pos1 - pos2)
        # deltaDist = abs(firstPos-pos2)
        # If the distance deviates more than 2x from what we've seen,
        # we can agree it's a new dataset.
        if distance and newDist > 2.1 * distance:
            L2.append([])
            distance = 0
        else:
            # Test missing file
            if distance and newDist > 1.5 * distance:
                progressIndicator.write(
                    "Warning: missing file after %r" % ds1._filename
                )
            distance = newDist
        # Add to last list
        L2[-1].append(ds2)
        # Store previous
        ds1 = ds2

    # Split if we should
    if len(L2) > 1:
        # At what position are we now?
        i = series.index(serie)
        # Create new series
        series2insert = []
        for L in L2:
            newSerie = DicomSeries(serie.suid, progressIndicator)
            newSerie._entries = L
            series2insert.append(newSerie)
        # Insert series and remove self
        for newSerie in reversed(series2insert):
            series.insert(i, newSerie)
        series.remove(serie)
1332
venv/Lib/site-packages/imageio/plugins/_freeimage.py
Normal file
File diff suppressed because it is too large
902
venv/Lib/site-packages/imageio/plugins/_swf.py
Normal file
@ -0,0 +1,902 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.
# This code was taken from visvis/vvmovy/images2swf.py

# styletest: ignore E261

"""
Provides a function (write_swf) to store a series of numpy arrays in an
SWF movie, that can be played on a wide range of OS's.

In desperation of wanting to share animated images, and then lacking a good
writer for animated gif or .avi, I decided to look into SWF. This format
is very well documented.

This is a pure python module to create an SWF file that shows a series
of images. The images are stored using the DEFLATE algorithm (same as
PNG and ZIP and which is included in the standard Python distribution).
As this compression algorithm is much more effective than that used in
GIF images, we obtain better quality (24 bit colors + alpha channel)
while still producing smaller files (a test showed ~75%). Although
SWF also allows for JPEG compression, doing so would probably require
a third party library for the JPEG encoding/decoding; we could
perhaps do this via Pillow or freeimage.

sources and tools:

- SWF on wikipedia
- Adobe's "SWF File Format Specification" version 10
  (http://www.adobe.com/devnet/swf/pdf/swf_file_format_spec_v10.pdf)
- swftools (swfdump in specific) for debugging
- iwisoft swf2avi can be used to convert swf to avi/mpg/flv with really
  good quality, while file size is reduced with factors 20-100.
  A good program in my opinion. The free version has the limitation
  of a watermark in the upper left corner.

"""

import os
import zlib
import time  # noqa
import logging

import numpy as np


logger = logging.getLogger(__name__)

# todo: use Pillow to support reading JPEG images from SWF?


## Base functions and classes

class BitArray:
    """ Dynamic array of bits that automatically resizes
    with factors of two.
    Append bits using .append() or +=
    You can reverse bits using .reverse()
    """

    def __init__(self, initvalue=None):
        self.data = np.zeros((16,), dtype=np.uint8)
        self._len = 0
        if initvalue is not None:
            self.append(initvalue)

    def __len__(self):
        return self._len  # self.data.shape[0]

    def __repr__(self):
        return self.data[: self._len].tobytes().decode("ascii")

    def _checkSize(self):
        # check length... grow if necessary
        arraylen = self.data.shape[0]
        if self._len >= arraylen:
            tmp = np.zeros((arraylen * 2,), dtype=np.uint8)
            tmp[: self._len] = self.data[: self._len]
            self.data = tmp

    def __add__(self, value):
        self.append(value)
        return self

    def append(self, bits):

        # check input
        if isinstance(bits, BitArray):
            bits = str(bits)
        if isinstance(bits, int):  # pragma: no cover - we dont use it
            bits = str(bits)
        if not isinstance(bits, str):  # pragma: no cover
            raise ValueError("Append bits as strings or integers!")

        # add bits
        for bit in bits:
            self.data[self._len] = ord(bit)
            self._len += 1
            self._checkSize()

    def reverse(self):
        """ In-place reverse. """
        tmp = self.data[: self._len].copy()
        self.data[: self._len] = tmp[::-1]

    def tobytes(self):
        """ Convert to bytes. If necessary,
        zeros are padded to the end (right side).
        """
        bits = str(self)

        # determine number of bytes
        nbytes = 0
        while nbytes * 8 < len(bits):
            nbytes += 1
        # pad
        bits = bits.ljust(nbytes * 8, "0")

        # go from bits to bytes
        bb = bytes()
        for i in range(nbytes):
            tmp = int(bits[i * 8 : (i + 1) * 8], 2)
            bb += int2uint8(tmp)

        # done
        return bb

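
# A hypothetical sketch (not part of the writer) of how BitArray is used:
def _demo_bitarray():
    b = BitArray("101")
    b += "1"  # append more bits
    assert str(b) == "1011"
    # tobytes() pads with zeros on the right: '10110000' -> 0xB0
    assert b.tobytes() == b"\xb0"

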
def int2uint32(i):
    return int(i).to_bytes(4, "little")


def int2uint16(i):
    return int(i).to_bytes(2, "little")


def int2uint8(i):
    return int(i).to_bytes(1, "little")


def int2bits(i, n=None):
    """ convert int to a string of bits (0's and 1's in a string),
    pad to n elements. Convert back using int(ss, 2). """
    ii = i

    # make bits
    bb = BitArray()
    while ii > 0:
        bb += str(ii % 2)
        ii = ii >> 1
    bb.reverse()

    # justify
    if n is not None:
        if len(bb) > n:  # pragma: no cover
            raise ValueError("int2bits fail: len larger than padlength.")
        bb = str(bb).rjust(n, "0")

    # done
    return BitArray(bb)

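
# A hypothetical round-trip sketch for int2bits (illustration only):
def _demo_int2bits():
    bb = int2bits(300, 10)  # 300 = 0b100101100, left-padded to 10 bits
    assert str(bb) == "0100101100"
    assert int(str(bb), 2) == 300

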
def bits2int(bb, n=8):
    # Init
    value = ""

    # Get value in bits
    for i in range(len(bb)):
        b = bb[i : i + 1]
        tmp = bin(ord(b))[2:]
        # value += tmp.rjust(8,'0')
        value = tmp.rjust(8, "0") + value

    # Make decimal
    return int(value[:n], 2)

def get_type_and_len(bb):
    """ bb should be 6 bytes at least
    Return (type, length, length_of_full_tag)
    """
    # Init
    value = ""

    # Get first 16 bits
    for i in range(2):
        b = bb[i : i + 1]
        tmp = bin(ord(b))[2:]
        # value += tmp.rjust(8,'0')
        value = tmp.rjust(8, "0") + value

    # Get type and length
    type = int(value[:10], 2)
    L = int(value[10:], 2)
    L2 = L + 2

    # Long tag header?
    if L == 63:  # '111111'
        value = ""
        for i in range(2, 6):
            b = bb[i : i + 1]  # becomes a single-byte bytes()
            tmp = bin(ord(b))[2:]
            # value += tmp.rjust(8,'0')
            value = tmp.rjust(8, "0") + value
        L = int(value, 2)
        L2 = L + 6

    # Done
    return type, L, L2

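
# A hypothetical sketch of parsing a short tag header (illustration only):
# a ShowFrame tag (type 1) with zero content length packs type and length
# into one little-endian uint16: (1 << 6) | 0.
def _demo_get_type_and_len():
    head = int2uint16((1 << 6) | 0) + b"\x00" * 4  # pad to 6 bytes
    assert get_type_and_len(head) == (1, 0, 2)

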
def signedint2bits(i, n=None):
    """ convert signed int to a string of bits (0's and 1's in a string),
    pad to n elements. Negative numbers are stored in 2's complement bit
    patterns, thus positive numbers always start with a 0.
    """

    # negative number?
    ii = i
    if i < 0:
        # A negative number, -n, is represented as the bitwise opposite of
        ii = abs(ii) - 1  # the positive-zero number n-1.

    # make bits
    bb = BitArray()
    while ii > 0:
        bb += str(ii % 2)
        ii = ii >> 1
    bb.reverse()

    # justify
    bb = "0" + str(bb)  # always need the sign bit in front
    if n is not None:
        if len(bb) > n:  # pragma: no cover
            raise ValueError("signedint2bits fail: len larger than padlength.")
        bb = bb.rjust(n, "0")

    # was it negative? (then opposite bits)
    if i < 0:
        bb = bb.replace("0", "x").replace("1", "0").replace("x", "1")

    # done
    return BitArray(bb)

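
# A hypothetical sketch of the 2's complement encoding above (illustration):
def _demo_signedint2bits():
    assert str(signedint2bits(5, 4)) == "0101"
    assert str(signedint2bits(-1, 4)) == "1111"
    assert str(signedint2bits(-3, 4)) == "1101"

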
def twits2bits(arr):
    """ Given a few (signed) numbers, store them
    as compactly as possible in the way specified by the SWF format.
    The numbers are multiplied by 20, assuming they
    are twits.
    Can be used to make the RECT record.
    """

    # first determine length using non justified bit strings
    maxlen = 1
    for i in arr:
        tmp = len(signedint2bits(i * 20))
        if tmp > maxlen:
            maxlen = tmp

    # build array
    bits = int2bits(maxlen, 5)
    for i in arr:
        bits += signedint2bits(i * 20, maxlen)

    return bits

def floats2bits(arr):
    """ Given a few (signed) numbers, convert them to bits,
    stored as FB (float bit values). We always use 16.16.
    Negative numbers are not (yet) possible, because I don't
    know how they're implemented (ambiguity).
    """
    bits = int2bits(31, 5)  # 32 does not fit in 5 bits!
    for i in arr:
        if i < 0:  # pragma: no cover
            raise ValueError("Did not implement negative floats!")
        i1 = int(i)
        i2 = i - i1
        bits += int2bits(i1, 15)
        bits += int2bits(int(i2 * 2 ** 16), 16)  # coerce to int for int2bits
    return bits

## Base Tag


class Tag:
    def __init__(self):
        self.bytes = bytes()
        self.tagtype = -1

    def process_tag(self):
        """ Implement this to create the tag. """
        raise NotImplementedError()

    def get_tag(self):
        """ Calls process_tag and attaches the header. """
        self.process_tag()

        # tag to binary
        bits = int2bits(self.tagtype, 10)

        # complete header uint16 thing
        bits += "1" * 6  # = 63 = 0x3f

        # make uint16
        bb = int2uint16(int(str(bits), 2))

        # now add 32bit length descriptor
        bb += int2uint32(len(self.bytes))

        # done, attach and return
        bb += self.bytes
        return bb

    def make_rect_record(self, xmin, xmax, ymin, ymax):
        """ Simply uses makeCompactArray to produce
        a RECT Record. """
        return twits2bits([xmin, xmax, ymin, ymax])

    def make_matrix_record(self, scale_xy=None, rot_xy=None, trans_xy=None):

        # empty matrix?
        if scale_xy is None and rot_xy is None and trans_xy is None:
            return "0" * 8

        # init
        bits = BitArray()

        # scale
        if scale_xy:
            bits += "1"
            bits += floats2bits([scale_xy[0], scale_xy[1]])
        else:
            bits += "0"

        # rotation
        if rot_xy:
            bits += "1"
            bits += floats2bits([rot_xy[0], rot_xy[1]])
        else:
            bits += "0"

        # translation (no flag here)
        if trans_xy:
            bits += twits2bits([trans_xy[0], trans_xy[1]])
        else:
            bits += twits2bits([0, 0])

        # done
        return bits

## Control tags


class ControlTag(Tag):
    def __init__(self):
        Tag.__init__(self)


class FileAttributesTag(ControlTag):
    def __init__(self):
        ControlTag.__init__(self)
        self.tagtype = 69

    def process_tag(self):
        self.bytes = "\x00".encode("ascii") * (1 + 3)


class ShowFrameTag(ControlTag):
    def __init__(self):
        ControlTag.__init__(self)
        self.tagtype = 1

    def process_tag(self):
        self.bytes = bytes()


class SetBackgroundTag(ControlTag):
    """ Set the color in 0-255, or 0-1 (if floats given). """

    def __init__(self, *rgb):
        self.tagtype = 9
        if len(rgb) == 1:
            rgb = rgb[0]
        self.rgb = rgb

    def process_tag(self):
        bb = bytes()
        for i in range(3):
            clr = self.rgb[i]
            if isinstance(clr, float):  # pragma: no cover - not used
                clr = clr * 255
            bb += int2uint8(clr)
        self.bytes = bb


class DoActionTag(Tag):
    def __init__(self, action="stop"):
        Tag.__init__(self)
        self.tagtype = 12
        self.actions = [action]

    def append(self, action):  # pragma: no cover - not used
        self.actions.append(action)

    def process_tag(self):
        bb = bytes()

        for action in self.actions:
            action = action.lower()
            if action == "stop":
                bb += "\x07".encode("ascii")
            elif action == "play":  # pragma: no cover - not used
                bb += "\x06".encode("ascii")
            else:  # pragma: no cover
                logger.warning("unknown action: %s" % action)

        bb += int2uint8(0)
        self.bytes = bb

## Definition tags


class DefinitionTag(Tag):
    counter = 0  # to automatically give each definition an id

    def __init__(self):
        Tag.__init__(self)
        DefinitionTag.counter += 1
        self.id = DefinitionTag.counter  # id in dictionary


class BitmapTag(DefinitionTag):
    def __init__(self, im):
        DefinitionTag.__init__(self)
        self.tagtype = 36  # DefineBitsLossless2

        # convert image (note that format is ARGB)
        # even a grayscale image is stored in ARGB; nevertheless,
        # the fabulous deflate compression will make sure that not much
        # more data is required for storing (25% or so, and less than 10%
        # when storing RGB as ARGB).

        if len(im.shape) == 3:
            if im.shape[2] in [3, 4]:
                tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255
                for i in range(3):
                    tmp[:, :, i + 1] = im[:, :, i]
                if im.shape[2] == 4:
                    tmp[:, :, 0] = im[:, :, 3]  # swap channel where alpha is
            else:  # pragma: no cover
                raise ValueError("Invalid shape to be an image.")

        elif len(im.shape) == 2:
            tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255
            for i in range(3):
                tmp[:, :, i + 1] = im[:, :]
        else:  # pragma: no cover
            raise ValueError("Invalid shape to be an image.")

        # we changed the image to uint8 4 channels.
        # now compress!
        self._data = zlib.compress(tmp.tobytes(), zlib.DEFLATED)
        self.imshape = im.shape

    def process_tag(self):

        # build tag
        bb = bytes()
        bb += int2uint16(self.id)  # CharacterID
        bb += int2uint8(5)  # BitmapFormat
        bb += int2uint16(self.imshape[1])  # BitmapWidth
        bb += int2uint16(self.imshape[0])  # BitmapHeight
        bb += self._data  # ZlibBitmapData

        self.bytes = bb

class PlaceObjectTag(ControlTag):
    def __init__(self, depth, idToPlace=None, xy=(0, 0), move=False):
        ControlTag.__init__(self)
        self.tagtype = 26
        self.depth = depth
        self.idToPlace = idToPlace
        self.xy = xy
        self.move = move

    def process_tag(self):
        # retrieve stuff
        depth = self.depth
        xy = self.xy
        id = self.idToPlace

        # build PlaceObject2
        bb = bytes()
        if self.move:
            bb += "\x07".encode("ascii")
        else:
            # (8 bit flags): 4:matrix, 2:character, 1:move
            bb += "\x06".encode("ascii")
        bb += int2uint16(depth)  # Depth
        bb += int2uint16(id)  # character id
        bb += self.make_matrix_record(trans_xy=xy).tobytes()  # MATRIX record
        self.bytes = bb

class ShapeTag(DefinitionTag):
    def __init__(self, bitmapId, xy, wh):
        DefinitionTag.__init__(self)
        self.tagtype = 2
        self.bitmapId = bitmapId
        self.xy = xy
        self.wh = wh

    def process_tag(self):
        """ Returns a defineshape tag with a bitmap fill. """

        bb = bytes()
        bb += int2uint16(self.id)
        xy, wh = self.xy, self.wh
        tmp = self.make_rect_record(xy[0], wh[0], xy[1], wh[1])  # ShapeBounds
        bb += tmp.tobytes()

        # make SHAPEWITHSTYLE structure

        # first entry: FILLSTYLEARRAY with in it a single fill style
        bb += int2uint8(1)  # FillStyleCount
        bb += "\x41".encode("ascii")  # FillStyleType (0x41 or 0x43 unsmoothed)
        bb += int2uint16(self.bitmapId)  # BitmapId
        # bb += '\x00'  # BitmapMatrix (empty matrix with leftover bits filled)
        bb += self.make_matrix_record(scale_xy=(20, 20)).tobytes()

        # # first entry: FILLSTYLEARRAY with in it a single fill style
        # bb += int2uint8(1)  # FillStyleCount
        # bb += '\x00'  # solid fill
        # bb += '\x00\x00\xff'  # color

        # second entry: LINESTYLEARRAY with a single line style
        bb += int2uint8(0)  # LineStyleCount
        # bb += int2uint16(0*20)  # Width
        # bb += '\x00\xff\x00'  # Color

        # third and fourth entry: NumFillBits and NumLineBits (4 bits each)
        # I give each of them four bits, so 16 styles possible.
        bb += "\x44".encode("ascii")

        self.bytes = bb

        # last entries: SHAPERECORDs ... (individual shape records not aligned)
        # STYLECHANGERECORD
        bits = BitArray()
        bits += self.make_style_change_record(0, 1, moveTo=(self.wh[0], self.wh[1]))
        # STRAIGHTEDGERECORD 4x
        bits += self.make_straight_edge_record(-self.wh[0], 0)
        bits += self.make_straight_edge_record(0, -self.wh[1])
        bits += self.make_straight_edge_record(self.wh[0], 0)
        bits += self.make_straight_edge_record(0, self.wh[1])

        # ENDSHAPERECORD
        bits += self.make_end_shape_record()

        self.bytes += bits.tobytes()

        # done
        # self.bytes = bb

    def make_style_change_record(self, lineStyle=None, fillStyle=None, moveTo=None):

        # first 6 flags
        # Note that we use FillStyle1. If we don't, Flash (at least 8) does
        # not recognize the frames properly when importing to library.

        bits = BitArray()
        bits += "0"  # TypeFlag (not an edge record)
        bits += "0"  # StateNewStyles (only for DefineShape2 and Defineshape3)
        if lineStyle:
            bits += "1"  # StateLineStyle
        else:
            bits += "0"
        if fillStyle:
            bits += "1"  # StateFillStyle1
        else:
            bits += "0"
        bits += "0"  # StateFillStyle0
        if moveTo:
            bits += "1"  # StateMoveTo
        else:
            bits += "0"

        # give information
        # todo: nbits for fillStyle and lineStyle is hard coded.

        if moveTo:
            bits += twits2bits([moveTo[0], moveTo[1]])
        if fillStyle:
            bits += int2bits(fillStyle, 4)
        if lineStyle:
            bits += int2bits(lineStyle, 4)

        return bits

    def make_straight_edge_record(self, *dxdy):
        if len(dxdy) == 1:
            dxdy = dxdy[0]

        # determine required number of bits
        xbits = signedint2bits(dxdy[0] * 20)
        ybits = signedint2bits(dxdy[1] * 20)
        nbits = max([len(xbits), len(ybits)])

        bits = BitArray()
        bits += "11"  # TypeFlag and StraightFlag
        bits += int2bits(nbits - 2, 4)
        bits += "1"  # GeneralLineFlag
        bits += signedint2bits(dxdy[0] * 20, nbits)
        bits += signedint2bits(dxdy[1] * 20, nbits)

        # note: I do not make use of vertical/horizontal only lines...

        return bits

    def make_end_shape_record(self):
        bits = BitArray()
        bits += "0"  # TypeFlag: no edge
        bits += "0" * 5  # EndOfShape
        return bits

def read_pixels(bb, i, tagType, L1):
    """ With the file pointer seeked past the RecordHeader, reads the
    pixel data.
    """

    # Get info
    charId = bb[i : i + 2]  # noqa
    i += 2
    format = ord(bb[i : i + 1])
    i += 1
    width = bits2int(bb[i : i + 2], 16)
    i += 2
    height = bits2int(bb[i : i + 2], 16)
    i += 2

    # If we can, get pixeldata and make numpy array
    a = None  # so the caller can test "if im is not None"
    if format != 5:
        logger.warning("Can only read 24bit or 32bit RGB(A) lossless images.")
    else:
        # Read byte data
        offset = 2 + 1 + 2 + 2  # all the info bits
        bb2 = bb[i : i + (L1 - offset)]

        # Decompress and make numpy array
        data = zlib.decompress(bb2)
        a = np.frombuffer(data, dtype=np.uint8)

        # Set shape
        if tagType == 20:
            # DefineBitsLossless - RGB data
            try:
                a.shape = height, width, 3
            except Exception:
                # Byte align stuff might cause troubles
                logger.warning("Cannot read image due to byte alignment")
        if tagType == 36:
            # DefineBitsLossless2 - ARGB data
            a.shape = height, width, 4
            # Swap alpha channel to make RGBA
            b = a
            a = np.zeros_like(a)
            a[:, :, 0] = b[:, :, 1]
            a[:, :, 1] = b[:, :, 2]
            a[:, :, 2] = b[:, :, 3]
            a[:, :, 3] = b[:, :, 0]

    return a

## Last few functions


# These are the original public functions; we don't use them, but we
# keep them so that in principle this module can be used stand-alone.


def checkImages(images):  # pragma: no cover
    """ checkImages(images)
    Check numpy images and correct intensity range etc.
    The same for all movie formats.
    """
    # Init results
    images2 = []

    for im in images:
        if isinstance(im, np.ndarray):
            # Check and convert dtype
            if im.dtype == np.uint8:
                images2.append(im)  # Ok
            elif im.dtype in [np.float32, np.float64]:
                theMax = im.max()
                if 128 < theMax < 300:
                    pass  # assume 0:255
                else:
                    im = im.copy()
                    im[im < 0] = 0
                    im[im > 1] = 1
                    im *= 255
                images2.append(im.astype(np.uint8))
            else:
                im = im.astype(np.uint8)
                images2.append(im)
            # Check size
            if im.ndim == 2:
                pass  # ok
            elif im.ndim == 3:
                if im.shape[2] not in [3, 4]:
                    raise ValueError("This array can not represent an image.")
            else:
                raise ValueError("This array can not represent an image.")
        else:
            raise ValueError("Invalid image type: " + str(type(im)))

    # Done
    return images2

def build_file(
    fp, taglist, nframes=1, framesize=(500, 500), fps=10, version=8
):  # pragma: no cover
    """ Give the given file (as bytes) a header. """

    # compose header
    bb = bytes()
    bb += "F".encode("ascii")  # uncompressed
    bb += "WS".encode("ascii")  # signature bytes
    bb += int2uint8(version)  # version
    bb += "0000".encode("ascii")  # FileLength (leave open for now)
    bb += Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes()
    bb += int2uint8(0) + int2uint8(fps)  # FrameRate
    bb += int2uint16(nframes)
    fp.write(bb)

    # produce all tags
    for tag in taglist:
        fp.write(tag.get_tag())

    # finish with end tag
    fp.write("\x00\x00".encode("ascii"))

    # set size
    sze = fp.tell()
    fp.seek(4)
    fp.write(int2uint32(sze))

def write_swf(filename, images, duration=0.1, repeat=True):  # pragma: no cover
    """Write an swf-file from the specified images. If repeat is False,
    the movie is finished with a stop action. Duration may also
    be a list with durations for each frame (note that the duration
    for each frame is always an integer amount of the minimum duration.)

    Images should be a list consisting of numpy arrays with values between
    0 and 255 for integer types, and between 0 and 1 for float types.

    """

    # Check images
    images2 = checkImages(images)

    # Init
    taglist = [FileAttributesTag(), SetBackgroundTag(0, 0, 0)]

    # Check duration
    if hasattr(duration, "__len__"):
        if len(duration) == len(images2):
            duration = [d for d in duration]
        else:
            raise ValueError("len(duration) doesn't match amount of images.")
    else:
        duration = [duration for im in images2]

    # Build delays list
    minDuration = float(min(duration))
    delays = [round(d / minDuration) for d in duration]
    delays = [max(1, int(d)) for d in delays]

    # Get FPS
    fps = 1.0 / minDuration

    # Produce series of tags for each image
    # t0 = time.time()
    nframes = 0
    for im in images2:
        bm = BitmapTag(im)
        wh = (im.shape[1], im.shape[0])
        sh = ShapeTag(bm.id, (0, 0), wh)
        po = PlaceObjectTag(1, sh.id, move=nframes > 0)
        taglist.extend([bm, sh, po])
        for i in range(delays[nframes]):
            taglist.append(ShowFrameTag())
        nframes += 1

    if not repeat:
        taglist.append(DoActionTag("stop"))

    # Build file
    # t1 = time.time()
    fp = open(filename, "wb")
    try:
        build_file(fp, taglist, nframes=nframes, framesize=wh, fps=fps)
    except Exception:
        raise
    finally:
        fp.close()
    # t2 = time.time()

    # logger.warning("Writing SWF took %1.2f and %1.2f seconds" % (t1-t0, t2-t1) )

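
# A minimal, hypothetical usage sketch for write_swf (the file name and
# frame contents are made up for illustration):
#
#     import numpy as np
#     frames = [np.random.randint(0, 256, (64, 64, 3)).astype(np.uint8)
#               for _ in range(10)]
#     write_swf("movie.swf", frames, duration=0.1, repeat=True)

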
def read_swf(filename):  # pragma: no cover
    """Read all images from an SWF (shockwave flash) file. Returns a list
    of numpy arrays.

    Limitation: only reads the PNG encoded images (not the JPG encoded ones).
    """

    # Check whether it exists
    if not os.path.isfile(filename):
        raise IOError("File not found: " + str(filename))

    # Init images
    images = []

    # Open file and read all
    fp = open(filename, "rb")
    bb = fp.read()

    try:
        # Check opening tag
        tmp = bb[0:3].decode("ascii", "ignore")
        if tmp.upper() == "FWS":
            pass  # ok
        elif tmp.upper() == "CWS":
            # Decompress movie
            bb = bb[:8] + zlib.decompress(bb[8:])
        else:
            raise IOError("Not a valid SWF file: " + str(filename))

        # Set filepointer at first tag (skipping the framesize RECT
        # and two uint16's)
        i = 8
        nbits = bits2int(bb[i : i + 1], 5)  # skip FrameSize
        nbits = 5 + nbits * 4
        Lrect = nbits / 8.0
        if Lrect % 1:
            Lrect += 1
        Lrect = int(Lrect)
        i += Lrect + 4

        # Iterate over the tags
        counter = 0
        while True:
            counter += 1

            # Get tag header
            head = bb[i : i + 6]
            if not head:
                break  # Done (we missed end tag)

            # Determine type and length
            T, L1, L2 = get_type_and_len(head)
            if not L2:
                logger.warning("Invalid tag length, could not proceed")
                break
            # logger.warning(T, L2)

            # Read image if we can
            if T in [20, 36]:
                im = read_pixels(bb, i + 6, T, L1)
                if im is not None:
                    images.append(im)
            elif T in [6, 21, 35, 90]:
                logger.warning("Ignoring JPEG image: cannot read JPEG.")
            else:
                pass  # Not an image tag

            # Detect end tag
            if T == 0:
                break

            # Next tag!
            i += L2

    finally:
        fp.close()

    # Done
    return images


# Backward compatibility; same public names as when this was images2swf.
writeSwf = write_swf
readSwf = read_swf
10182
venv/Lib/site-packages/imageio/plugins/_tifffile.py
Normal file
File diff suppressed because it is too large
301
venv/Lib/site-packages/imageio/plugins/bsdf.py
Normal file
@ -0,0 +1,301 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" BSDF plugin.
"""

import numpy as np

from .. import formats
from ..core import Format


def get_bsdf_serializer(options):
    from . import _bsdf as bsdf

    class NDArrayExtension(bsdf.Extension):
        """ Copy of BSDF's NDArrayExtension but deal with lazy blobs.
        """

        name = "ndarray"
        cls = np.ndarray

        def encode(self, s, v):
            return dict(shape=v.shape, dtype=str(v.dtype), data=v.tobytes())

        def decode(self, s, v):
            return v  # return as dict, because of lazy blobs, decode in Image

    class ImageExtension(bsdf.Extension):
        """ We implement two extensions that trigger on the Image classes.
        """

        def encode(self, s, v):
            return dict(array=v.array, meta=v.meta)

        def decode(self, s, v):
            return Image(v["array"], v["meta"])

    class Image2DExtension(ImageExtension):

        name = "image2d"
        cls = Image2D

    class Image3DExtension(ImageExtension):

        name = "image3d"
        cls = Image3D

    exts = [NDArrayExtension, Image2DExtension, Image3DExtension]
    serializer = bsdf.BsdfSerializer(exts, **options)

    return bsdf, serializer


class Image:
    """ Class in which we wrap the array and meta data. By using an extension
    we can make BSDF trigger on these classes and thus encode the images
    as actual images.
    """

    def __init__(self, array, meta):
        self.array = array
        self.meta = meta

    def get_array(self):
        if not isinstance(self.array, np.ndarray):
            v = self.array
            blob = v["data"]
            if not isinstance(blob, bytes):  # then it's a lazy bsdf.Blob
                blob = blob.get_bytes()
            self.array = np.frombuffer(blob, dtype=v["dtype"])
            self.array.shape = v["shape"]
        return self.array

    def get_meta(self):
        return self.meta


class Image2D(Image):
    pass


class Image3D(Image):
    pass

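
# A hypothetical sketch of the deferred decoding above (illustration only):
# the ndarray extension hands back the raw dict, and get_array() rebuilds
# the numpy array only when it is first requested.
#
#     im = Image({"shape": (2, 2), "dtype": "uint8",
#                 "data": b"\x00\x01\x02\x03"}, {})
#     im.get_array()  # -> 2x2 uint8 array, decoded on demand

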
class BsdfFormat(Format):
    """ The BSDF format enables reading and writing of image data in the
    BSDF serialization format. This format allows storage of images, volumes,
    and series thereof. Data can be of any numeric data type, and can
    optionally be compressed. Each image/volume can have associated
    meta data, which can consist of any data type supported by BSDF.

    By default, image data is lazily loaded; the actual image data is
    not read until it is requested. This allows storing multiple images
    in a single file and still have fast access to individual images.
    Alternatively, a series of images can be read in streaming mode, decoding
    images as the data arrives (e.g. from http).

    BSDF is a simple generic binary format. It is easy to extend and there
    are standard extension definitions for 2D and 3D image data.
    Read more at http://bsdf.io.

    Parameters for reading
    ----------------------
    random_access : bool
        Whether individual images in the file can be read in random order.
        Defaults to True for normal files, and to False when reading from HTTP.
        If False, the file is read in "streaming mode", reading images as
        the data arrives, but without support for "rewinding".
        Note that when setting this to True while reading from HTTP, the
        whole file is read upon opening it (since lazy loading is not
        possible over HTTP).

    Parameters for saving
    ---------------------
    compression : {0, 1, 2}
        Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib
        compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2
        compression (more compact but slower). Default 1 (zlib).
        Note that some BSDF implementations may not support compression
        (e.g. JavaScript).

    """

    def _can_read(self, request):
        if request.mode[1] in (self.modes + "?"):
            # if request.extension in self.extensions:
            #     return True
            if request.firstbytes.startswith(b"BSDF"):
                return True

    def _can_write(self, request):
        if request.mode[1] in (self.modes + "?"):
            if request.extension in self.extensions:
                return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, random_access=None):
            # Validate - we need a BSDF file consisting of a list of images
            # The list is typically a stream, but does not have to be.
            assert self.request.firstbytes[:4] == b"BSDF", "Not a BSDF file"
            # self.request.firstbytes[5:6] == major and minor version
            if not (
                self.request.firstbytes[6:15] == b"M\x07image2D"
                or self.request.firstbytes[6:15] == b"M\x07image3D"
                or self.request.firstbytes[6:7] == b"l"
            ):
                pass  # Actually, follow a more duck-type approach ...
                # raise RuntimeError('BSDF file does not look like an '
                #                    'image container.')
            # Set options. If we think that seeking is allowed, we lazily load
            # blobs, and set streaming to False (i.e. the whole file is read,
            # but we skip over binary blobs), so that we subsequently allow
            # random access to the images.
            # If seeking is not allowed (e.g. with a http request), we cannot
            # lazily load blobs, but we can still load streaming from the web.
            options = {}
            if self.request.filename.startswith(("http://", "https://")):
                ra = False if random_access is None else bool(random_access)
                options["lazy_blob"] = False  # Because we cannot seek now
                options["load_streaming"] = not ra  # Load as a stream?
            else:
                ra = True if random_access is None else bool(random_access)
                options["lazy_blob"] = ra  # Don't read data until needed
                options["load_streaming"] = not ra

            file = self.request.get_file()
            bsdf, self._serializer = get_bsdf_serializer(options)
            self._stream = self._serializer.load(file)
            # Another validation
            if (
                isinstance(self._stream, dict)
                and "meta" in self._stream
                and "array" in self._stream
            ):
                self._stream = Image(self._stream["array"], self._stream["meta"])
            if not isinstance(self._stream, (Image, list, bsdf.ListStream)):
                raise RuntimeError(
                    "BSDF file does not seem to have an " "image container."
                )

        def _close(self):
            pass

        def _get_length(self):
            if isinstance(self._stream, Image):
                return 1
            elif isinstance(self._stream, list):
                return len(self._stream)
            elif self._stream.count < 0:
                return np.inf
            return self._stream.count

        def _get_data(self, index):
            # Validate
            if index < 0 or index >= self.get_length():
                raise IndexError(
                    "Image index %i not in [0, %i]." % (index, self.get_length())
                )
            # Get Image object
            if isinstance(self._stream, Image):
                image_ob = self._stream  # singleton
            elif isinstance(self._stream, list):
                # Easy when we have random access
                image_ob = self._stream[index]
            else:
                # For streaming, we need to skip over frames
                if index < self._stream.index:
                    raise IndexError(
                        "BSDF file is being read in streaming "
                        "mode, thus does not allow rewinding."
                    )
                while index > self._stream.index:
                    self._stream.next()
                image_ob = self._stream.next()  # Can raise StopIteration
            # Is this an image?
            if (
                isinstance(image_ob, dict)
                and "meta" in image_ob
                and "array" in image_ob
            ):
                image_ob = Image(image_ob["array"], image_ob["meta"])
            if isinstance(image_ob, Image):
                # Return as array (if we have lazy blobs, they are read now)
                return image_ob.get_array(), image_ob.get_meta()
            else:
                r = repr(image_ob)
                r = r if len(r) < 200 else r[:197] + "..."
                raise RuntimeError("BSDF file contains non-image " + r)

        def _get_meta_data(self, index):  # pragma: no cover
            return {}  # This format does not support global meta data

    # -- writer

    class Writer(Format.Writer):
        def _open(self, compression=1):
            options = {"compression": compression}
            bsdf, self._serializer = get_bsdf_serializer(options)
            if self.request.mode[1] in "iv":
                self._stream = None  # Singleton image
                self._written = False
            else:
                # Series (stream) of images
                file = self.request.get_file()
                self._stream = bsdf.ListStream()
                self._serializer.save(file, self._stream)

        def _close(self):
            # We close the stream here, which will mark the number of written
            # elements. If we would not close it, the file would be fine, it's
            # just that upon reading it would not be known how many items are
            # in there.
            if self._stream is not None:
                self._stream.close(False)  # False says "keep this a stream"

        def _append_data(self, im, meta):
            # Determine dimension
            ndim = None
            if self.request.mode[1] in "iI":
                ndim = 2
            elif self.request.mode[1] in "vV":
                ndim = 3
            else:
                ndim = 3  # Make an educated guess
                if im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4):
                    ndim = 2
            # Validate shape
            assert ndim in (2, 3)
            if ndim == 2:
                assert im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4)
            else:
                assert im.ndim == 3 or (im.ndim == 4 and im.shape[-1] <= 4)
            # Wrap data and meta data in our special class that will trigger
            # the BSDF image2D or image3D extension.
            if ndim == 2:
                ob = Image2D(im, meta)
            else:
                ob = Image3D(im, meta)
            # Write directly or to stream
            if self._stream is None:
                assert not self._written, "Cannot write singleton image twice"
                self._written = True
                file = self.request.get_file()
                self._serializer.save(file, ob)
            else:
                self._stream.append(ob)

        def set_meta_data(self, meta):  # pragma: no cover
            raise RuntimeError("The BSDF format only supports " "per-image meta data.")


format = BsdfFormat(
    "bsdf",  # short name
    "Format based on the Binary Structured Data Format",
    ".bsdf",
    "iIvV",
)
formats.add_format(format)
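
# A hypothetical usage sketch via the public imageio API (file name made up):
#
#     import imageio
#     import numpy as np
#     vol = np.zeros((8, 64, 64), np.uint16)
#     imageio.volwrite("data.bsdf", vol)   # write one volume
#     vol2 = imageio.volread("data.bsdf")  # read it back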
327
venv/Lib/site-packages/imageio/plugins/dicom.py
Normal file
@ -0,0 +1,327 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" Plugin for reading DICOM files.
"""

# todo: Use pydicom:
# * Note: is not py3k ready yet
# * Allow reading the full meta info
# I think we can more or less replace the SimpleDicomReader with a
# pydicom.Dataset. For series, we only need to read the full info from one
# file: speed still high
# * Perhaps allow writing?

import os
import sys
import logging
import subprocess

from .. import formats
from ..core import Format, BaseProgressIndicator, StdoutProgressIndicator
from ..core import read_n_bytes

_dicom = None  # lazily loaded in load_lib()

logger = logging.getLogger(__name__)


def load_lib():
    global _dicom
    from . import _dicom

    return _dicom


# Determine endianness of system
sys_is_little_endian = sys.byteorder == "little"

def get_dcmdjpeg_exe():
    fname = "dcmdjpeg" + ".exe" * sys.platform.startswith("win")
    for dir in (
        "c:\\dcmtk",
        "c:\\Program Files",
        "c:\\Program Files\\dcmtk",
        "c:\\Program Files (x86)\\dcmtk",
    ):
        filename = os.path.join(dir, fname)
        if os.path.isfile(filename):
            return [filename]

    try:
        subprocess.check_call([fname, "--version"])
        return [fname]
    except Exception:
        return None


def get_gdcmconv_exe():
    fname = "gdcmconv" + ".exe" * sys.platform.startswith("win")
    # Maybe it's on the path
    try:
        subprocess.check_call([fname, "--version"])
        return [fname, "--raw"]
    except Exception:
        pass
    # Select directories where it could be
    candidates = []
    base_dirs = [r"c:\Program Files"]
    for base_dir in base_dirs:
        if os.path.isdir(base_dir):
            for dname in os.listdir(base_dir):
                if dname.lower().startswith("gdcm"):
                    suffix = dname[4:].strip()
                    candidates.append((suffix, os.path.join(base_dir, dname)))
    # Sort, so higher versions are tried earlier
    candidates.sort(reverse=True)
    # Select executable
    filename = None
    for _, dirname in candidates:
        exe1 = os.path.join(dirname, "gdcmconv.exe")
        exe2 = os.path.join(dirname, "bin", "gdcmconv.exe")
        if os.path.isfile(exe1):
            filename = exe1
            break
        if os.path.isfile(exe2):
            filename = exe2
            break
    else:
        return None
    return [filename, "--raw"]

class DicomFormat(Format):
    """ A format for reading DICOM images: a common format used to store
    medical image data, such as X-ray, CT and MRI.

    This format borrows some code (and ideas) from the pydicom project. However,
    only a predefined subset of tags are extracted from the file. This allows
    for great simplifications, allowing us to make a stand-alone reader, and
    also results in a much faster read time.

    By default, only uncompressed and deflated transfer syntaxes are supported.
    If gdcm or dcmtk is installed, these will be used to automatically convert
    the data. See https://github.com/malaterre/GDCM/releases for installing GDCM.

    This format provides functionality to group images of the same
    series together, thus extracting volumes (and multiple volumes).
    Using volread will attempt to yield a volume. If multiple volumes
    are present, the first one is given. Using mimread will simply yield
    all images in the given directory (not taking series into account).

    Parameters for reading
    ----------------------
    progress : {True, False, BaseProgressIndicator}
        Whether to show progress when reading from multiple files.
        Default True. By passing an object that inherits from
        BaseProgressIndicator, the way in which progress is reported
        can be customized.

    """

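    # A hypothetical usage sketch (the directory name is made up): volread
    # groups the files of one series into a volume, mimread yields all
    # images found in the directory.
    #
    #     import imageio
    #     vol = imageio.volread("path/to/dicom_dir", "DICOM")
    #     ims = imageio.mimread("path/to/dicom_dir", "DICOM")
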
    def _can_read(self, request):
        # If user URI was a directory, we check whether it has a DICOM file
        if os.path.isdir(request.filename):
            files = os.listdir(request.filename)
            for fname in sorted(files):  # Sorting makes it consistent
                filename = os.path.join(request.filename, fname)
                if os.path.isfile(filename) and "DICOMDIR" not in fname:
                    with open(filename, "rb") as f:
                        first_bytes = read_n_bytes(f, 140)
                    return first_bytes[128:132] == b"DICM"
            else:
                return False
        # Check
        return request.firstbytes[128:132] == b"DICM"

    def _can_write(self, request):
        # We cannot save yet. May be possible if we use pydicom as
        # a backend.
        return False

    # --

    class Reader(Format.Reader):

        _compressed_warning_dirs = set()

        def _open(self, progress=True):
            if not _dicom:
                load_lib()
            if os.path.isdir(self.request.filename):
                # A dir can be given if the user used the format explicitly
                self._info = {}
                self._data = None
            else:
                # Read the given dataset now ...
                try:
                    dcm = _dicom.SimpleDicomReader(self.request.get_file())
                except _dicom.CompressedDicom as err:
                    # We cannot do this on our own. Perhaps with some help ...
                    cmd = get_gdcmconv_exe()
                    if not cmd and "JPEG" in str(err):
                        cmd = get_dcmdjpeg_exe()
                    if not cmd:
                        msg = err.args[0].replace("using", "installing")
                        msg = msg.replace("convert", "auto-convert")
                        err.args = (msg,)
                        raise
                    else:
                        fname1 = self.request.get_local_filename()
                        fname2 = fname1 + ".raw"
                        try:
                            subprocess.check_call(cmd + [fname1, fname2])
                        except Exception:
                            raise err
                        d = os.path.dirname(fname1)
                        if d not in self._compressed_warning_dirs:
                            self._compressed_warning_dirs.add(d)
                            logger.warning(
                                "DICOM file contained compressed data. "
                                + "Autoconverting with "
                                + cmd[0]
                                + " (this warning is shown once for each directory)"
                            )
                        dcm = _dicom.SimpleDicomReader(fname2)

                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            # Initialize series, list of DicomSeries objects
            self._series = None  # only created if needed

            # Set progress indicator
            if isinstance(progress, BaseProgressIndicator):
                self._progressIndicator = progress
            elif progress is True:
                p = StdoutProgressIndicator("Reading DICOM")
                self._progressIndicator = p
            elif progress in (None, False):
                self._progressIndicator = BaseProgressIndicator("Dummy")
            else:
                raise ValueError("Invalid value for progress.")

        def _close(self):
            # Clean up
            self._info = None
            self._data = None
            self._series = None

        @property
        def series(self):
            if self._series is None:
                pi = self._progressIndicator
                self._series = _dicom.process_directory(self.request, pi)
            return self._series

        def _get_length(self):
            if self._data is None:
                dcm = self.series[0][0]
                self._info = dcm._info
                self._data = dcm.get_numpy_array()

            nslices = self._data.shape[0] if (self._data.ndim == 3) else 1

            if self.request.mode[1] == "i":
                # User expects one, but let's be honest about this file
                return nslices
            elif self.request.mode[1] == "I":
                # User expects multiple, if this file has multiple slices, ok.
                # Otherwise we have to check the series.
                if nslices > 1:
                    return nslices
                else:
                    return sum([len(serie) for serie in self.series])
            elif self.request.mode[1] == "v":
|
||||
# User expects a volume, if this file has one, ok.
|
||||
# Otherwise we have to check the series
|
||||
if nslices > 1:
|
||||
return 1
|
||||
else:
|
||||
return len(self.series) # We assume one volume per series
|
||||
elif self.request.mode[1] == "V":
|
||||
# User expects multiple volumes. We have to check the series
|
||||
return len(self.series) # We assume one volume per series
|
||||
else:
|
||||
raise RuntimeError("DICOM plugin should know what to expect.")
|
||||
|
||||
def _get_data(self, index):
|
||||
if self._data is None:
|
||||
dcm = self.series[0][0]
|
||||
self._info = dcm._info
|
||||
self._data = dcm.get_numpy_array()
|
||||
|
||||
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
|
||||
|
||||
if self.request.mode[1] == "i":
|
||||
# Allow index >1 only if this file contains >1
|
||||
if nslices > 1:
|
||||
return self._data[index], self._info
|
||||
elif index == 0:
|
||||
return self._data, self._info
|
||||
else:
|
||||
raise IndexError("Dicom file contains only one slice.")
|
||||
elif self.request.mode[1] == "I":
|
||||
# Return slice from volume, or return item from series
|
||||
if index == 0 and nslices > 1:
|
||||
return self._data[index], self._info
|
||||
else:
|
||||
L = []
|
||||
for serie in self.series:
|
||||
L.extend([dcm_ for dcm_ in serie])
|
||||
return L[index].get_numpy_array(), L[index].info
|
||||
elif self.request.mode[1] in "vV":
|
||||
# Return volume or series
|
||||
if index == 0 and nslices > 1:
|
||||
return self._data, self._info
|
||||
else:
|
||||
return (
|
||||
self.series[index].get_numpy_array(),
|
||||
self.series[index].info,
|
||||
)
|
||||
else: # pragma: no cover
|
||||
raise ValueError("DICOM plugin should know what to expect.")
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
if self._data is None:
|
||||
dcm = self.series[0][0]
|
||||
self._info = dcm._info
|
||||
self._data = dcm.get_numpy_array()
|
||||
|
||||
nslices = self._data.shape[0] if (self._data.ndim == 3) else 1
|
||||
|
||||
# Default is the meta data of the given file, or the "first" file.
|
||||
if index is None:
|
||||
return self._info
|
||||
|
||||
if self.request.mode[1] == "i":
|
||||
return self._info
|
||||
elif self.request.mode[1] == "I":
|
||||
# Return slice from volume, or return item from series
|
||||
if index == 0 and nslices > 1:
|
||||
return self._info
|
||||
else:
|
||||
L = []
|
||||
for serie in self.series:
|
||||
L.extend([dcm_ for dcm_ in serie])
|
||||
return L[index].info
|
||||
elif self.request.mode[1] in "vV":
|
||||
# Return volume or series
|
||||
if index == 0 and nslices > 1:
|
||||
return self._info
|
||||
else:
|
||||
return self.series[index].info
|
||||
else: # pragma: no cover
|
||||
raise ValueError("DICOM plugin should know what to expect.")
|
||||
|
||||
|
||||
# Add this format
|
||||
formats.add_format(
|
||||
DicomFormat(
|
||||
"DICOM",
|
||||
"Digital Imaging and Communications in Medicine",
|
||||
".dcm .ct .mri",
|
||||
"iIvV",
|
||||
)
|
||||
) # Often DICOM files have weird or no extensions
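# Hedged usage sketch (comment only, not part of the original module): per the
# docstring above, volread yields the first volume/series and mimread yields
# all images regardless of series; "dicom_dir" is a hypothetical directory
# of .dcm slices.
#
#   import imageio
#   vol = imageio.volread("dicom_dir", "DICOM")   # first volume
#   ims = imageio.mimread("dicom_dir", "DICOM")   # all images, series ignored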
148
venv/Lib/site-packages/imageio/plugins/example.py
Normal file

@ -0,0 +1,148 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" Example plugin. You can use this as a template for your own plugin.
"""

import numpy as np

from .. import formats
from ..core import Format


class DummyFormat(Format):
    """ The dummy format is an example format that does nothing.
    It will never indicate that it can read or write a file. When
    explicitly asked to read, it will simply read the bytes. When
    explicitly asked to write, it will raise an error.

    This documentation is shown when the user does ``help('thisformat')``.

    Parameters for reading
    ----------------------
    Specify arguments in numpy doc style here.

    Parameters for saving
    ---------------------
    Specify arguments in numpy doc style here.

    """

    def _can_read(self, request):
        # This method is called when the format manager is searching
        # for a format to read a certain image. Return True if this format
        # can do it.
        #
        # The format manager is aware of the extensions and the modes
        # that each format can handle. It will first ask all formats
        # that *seem* to be able to read it whether they can. If none
        # can, it will ask the remaining formats if they can: the
        # extension might be missing, and this allows formats to provide
        # functionality for certain extensions, while giving preference
        # to other plugins.
        #
        # If a format says it can, it should live up to it. The format
        # would ideally check the request.firstbytes and look for a
        # header of some kind.
        #
        # The request object has:
        # request.filename: a representation of the source (only for reporting)
        # request.firstbytes: the first 256 bytes of the file.
        # request.mode[0]: read or write mode
        # request.mode[1]: what kind of data the user expects: one of 'iIvV?'

        if request.mode[1] in (self.modes + "?"):
            if request.extension in self.extensions:
                return True

    def _can_write(self, request):
        # This method is called when the format manager is searching
        # for a format to write a certain image. It will first ask all
        # formats that *seem* to be able to write it whether they can.
        # If none can, it will ask the remaining formats if they can.
        #
        # Return True if the format can do it.

        # In most cases, this code suffices:
        if request.mode[1] in (self.modes + "?"):
            if request.extension in self.extensions:
                return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, some_option=False, length=1):
            # Specify kwargs here. Optionally, the user-specified kwargs
            # can also be accessed via the request.kwargs object.
            #
            # The request object provides two ways to get access to the
            # data. Use just one:
            # - Use request.get_file() for a file object (preferred)
            # - Use request.get_local_filename() for a file on the system
            self._fp = self.request.get_file()
            self._length = length  # passed as an arg in this case for testing
            self._data = None

        def _close(self):
            # Close the reader.
            # Note that the request object will close self._fp
            pass

        def _get_length(self):
            # Return the number of images. Can be np.inf
            return self._length

        def _get_data(self, index):
            # Return the data and meta data for the given index
            if index >= self._length:
                raise IndexError("Image index %i > %i" % (index, self._length))
            # Read all bytes
            if self._data is None:
                self._data = self._fp.read()
            # Put in a numpy array
            im = np.frombuffer(self._data, "uint8")
            im.shape = len(im), 1
            # Return array and dummy meta data
            return im, {}

        def _get_meta_data(self, index):
            # Get the meta data for the given index. If index is None, it
            # should return the global meta data.
            return {}  # This format does not support meta data

    # -- writer

    class Writer(Format.Writer):
        def _open(self, flags=0):
            # Specify kwargs here. Optionally, the user-specified kwargs
            # can also be accessed via the request.kwargs object.
            #
            # The request object provides two ways to write the data.
            # Use just one:
            # - Use request.get_file() for a file object (preferred)
            # - Use request.get_local_filename() for a file on the system
            self._fp = self.request.get_file()

        def _close(self):
            # Close the writer.
            # Note that the request object will close self._fp
            pass

        def _append_data(self, im, meta):
            # Process the given data and meta data.
            raise RuntimeError("The dummy format cannot write image data.")

        def set_meta_data(self, meta):
            # Process the given meta data (global for all images)
            # It is not mandatory to support this.
            raise RuntimeError("The dummy format cannot write meta data.")


# Register. You register an *instance* of a Format class. Here specify:
format = DummyFormat(
    "dummy",  # short name
    "An example format that does nothing.",  # one line descr.
    ".foobar .nonexistentext",  # list of extensions
    "iI",  # modes, characters in iIvV
)
formats.add_format(format)
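# Hedged usage sketch (comment only, not part of the original module):
# forcing this plugin reads the raw bytes as an Nx1 uint8 array;
# "file.foobar" is a hypothetical file.
#
#   import imageio
#   im = imageio.imread("file.foobar", format="dummy", length=1)
#   print(im.shape)  # (nbytes, 1)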
91
venv/Lib/site-packages/imageio/plugins/feisem.py
Normal file

@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

from .tifffile import TiffFormat

from .. import formats


class FEISEMFormat(TiffFormat):
    """Provide read support for TIFFs produced by an FEI SEM microscope.

    This format is based on TIFF, and supports the same parameters.

    FEI microscopes append metadata as ASCII text at the end of the file,
    which this reader correctly extracts.

    Parameters for get_data
    -----------------------
    discard_watermark : bool
        If True (default), discard the bottom rows of the image, which
        contain no image data, only a watermark with metadata.
    watermark_height : int
        The height in pixels of the FEI watermark. The default is 70.
    """

    def _can_write(self, request):
        return False  # FEI-SEM only supports reading

    class Reader(TiffFormat.Reader):
        def _get_data(self, index=0, discard_watermark=True, watermark_height=70):
            """Get image and metadata from given index.

            FEI images usually (always?) contain a watermark at the
            bottom of the image, 70 pixels high. We discard this by
            default as it does not contain any information not present
            in the metadata.
            """
            im, meta = super(FEISEMFormat.Reader, self)._get_data(index)
            if discard_watermark:
                im = im[:-watermark_height]
            return im, meta

        def _get_meta_data(self, index=None):
            """Read the metadata from an FEI SEM TIFF.

            This metadata is included as ASCII text at the end of the file.

            The index, if provided, is ignored.

            Returns
            -------
            metadata : dict
                Dictionary of metadata.
            """
            md = {"root": {}}
            current_tag = "root"
            reading_metadata = False
            filename = self.request.get_local_filename()
            with open(filename, encoding="utf8", errors="ignore") as fin:
                for line in fin:
                    if not reading_metadata:
                        if not line.startswith("Date="):
                            continue
                        else:
                            reading_metadata = True
                    line = line.rstrip()
                    if line.startswith("["):
                        current_tag = line.lstrip("[").rstrip("]")
                        md[current_tag] = {}
                    else:
                        if "=" in line:  # ignore empty and irrelevant lines
                            key, val = line.split("=", maxsplit=1)
                            for tag_type in (int, float):
                                try:
                                    val = tag_type(val)
                                except ValueError:
                                    continue
                                else:
                                    break
                            md[current_tag][key] = val
            if not md["root"] and len(md) == 1:
                raise ValueError("Input file %s contains no FEI metadata." % filename)
            self._meta.update(md)
            return md


# Register plugin
format = FEISEMFormat(
    "fei", "FEI-SEM TIFF format", extensions=[".tif", ".tiff"], modes="iv"
)
formats.add_format(format)
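# Hedged usage sketch (comment only, not part of the original module):
# "fei_image.tif" is a hypothetical file produced by an FEI SEM.
#
#   import imageio
#   im = imageio.imread("fei_image.tif", format="fei")  # watermark cropped
#   reader = imageio.get_reader("fei_image.tif", format="fei")
#   meta = reader.get_meta_data()  # dict of [section] -> {key: value}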
710
venv/Lib/site-packages/imageio/plugins/ffmpeg.py
Normal file

@ -0,0 +1,710 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" Plugin that uses ffmpeg to read and write series of images to
a wide range of video formats.

Code inspired/based on code from moviepy: https://github.com/Zulko/moviepy/
by Zulko

"""

import sys
import time
import logging
import threading
import subprocess as sp

import numpy as np

from .. import formats
from ..core import Format, image_as_uint

logger = logging.getLogger(__name__)

# Get camera format
if sys.platform.startswith("win"):
    CAM_FORMAT = "dshow"  # dshow or vfwcap
elif sys.platform.startswith("linux"):
    CAM_FORMAT = "video4linux2"
elif sys.platform.startswith("darwin"):
    CAM_FORMAT = "avfoundation"
else:  # pragma: no cover
    CAM_FORMAT = "unknown-cam-format"


def download(directory=None, force_download=False):  # pragma: no cover
    raise RuntimeError(
        "imageio.ffmpeg.download() has been deprecated. "
        "Use 'pip install imageio-ffmpeg' instead."
    )


# For backwards compatibility - we don't use this ourselves
def get_exe():  # pragma: no cover
    """ Wrapper for imageio_ffmpeg.get_ffmpeg_exe()
    """
    import imageio_ffmpeg

    return imageio_ffmpeg.get_ffmpeg_exe()


_ffmpeg_api = None


def _get_ffmpeg_api():
    global _ffmpeg_api
    if _ffmpeg_api is None:
        try:
            import imageio_ffmpeg
        except ImportError:
            raise ImportError(
                "To use the imageio ffmpeg plugin you need to "
                "'pip install imageio-ffmpeg'"
            )
        _ffmpeg_api = imageio_ffmpeg
    return _ffmpeg_api


class FfmpegFormat(Format):
    """ The ffmpeg format provides reading and writing for a wide range
    of movie formats such as .avi, .mpeg, .mp4, etc. It can also read
    streams from webcams and USB cameras.

    To read from camera streams, supply "<video0>" as the filename,
    where the "0" can be replaced with any index of cameras known to
    the system.

    To use this plugin, the ``imageio-ffmpeg`` library should be installed
    (e.g. via pip). For most platforms this includes the ffmpeg executable.
    One can use the ``IMAGEIO_FFMPEG_EXE`` environment variable to force
    using a specific ffmpeg executable.

    When reading from a video, the number of available frames is hard/expensive
    to calculate, which is why it's set to inf by default, indicating
    "stream mode". To get the number of frames before having read them all,
    you can use the ``reader.count_frames()`` method (the reader will then use
    ``imageio_ffmpeg.count_frames_and_secs()`` to get the exact number of
    frames; note that this operation can take a few seconds on large files).
    Alternatively, the number of frames can be estimated from the fps and
    duration in the meta data (though these values themselves are not always
    present/reliable).

    Parameters for reading
    ----------------------
    fps : scalar
        The number of frames per second to read the data at. Default None (i.e.
        read at the file's own fps). One can use this for files with a
        variable fps, or in cases where imageio is unable to correctly detect
        the fps.
    loop : bool
        If True, the video will rewind as soon as a frame is requested
        beyond the last frame. Otherwise, IndexError is raised. Default False.
        Setting this to True will internally call ``count_frames()``,
        and set the reader's length to that value instead of inf.
    size : str | tuple
        The frame size (i.e. resolution) to read the images, e.g.
        (100, 100) or "640x480". For camera streams, this allows setting
        the capture resolution. For normal video data, ffmpeg will
        rescale the data.
    dtype : str | type
        The dtype for the output arrays. Determines the bit-depth that
        is requested from ffmpeg. Supported dtypes: uint8, uint16.
        Default: uint8.
    pixelformat : str
        The pixel format for the camera to use (e.g. "yuyv422" or
        "gray"). The camera needs to support the format in order for
        this to take effect. Note that the images produced by this
        reader are always RGB.
    input_params : list
        List additional arguments to ffmpeg for input file options.
        (Can also be provided as ``ffmpeg_params`` for backwards compatibility)
        Example ffmpeg arguments to use aggressive error handling:
        ['-err_detect', 'aggressive']
    output_params : list
        List additional arguments to ffmpeg for output file options (i.e. the
        stream being read by imageio).
    print_info : bool
        Print information about the video file as reported by ffmpeg.

    Parameters for saving
    ---------------------
    fps : scalar
        The number of frames per second. Default 10.
    codec : str
        The video codec to use. Default 'libx264', which represents the
        widely available mpeg4. Except when saving .wmv files, then the
        default is 'msmpeg4', which is more commonly supported on Windows.
    quality : float | None
        Video output quality. Default is 5. Uses variable bit rate. Highest
        quality is 10, lowest is 0. Set to None to prevent variable bitrate
        flags to FFMPEG so you can manually specify them using output_params
        instead. Specifying a fixed bitrate using 'bitrate' disables this
        parameter.
    bitrate : int | None
        Set a constant bitrate for the video encoding. Default is None, causing
        the 'quality' parameter to be used instead. Better quality videos with
        smaller file sizes will result from using the 'quality' variable
        bitrate parameter rather than specifying a fixed bitrate with this
        parameter.
    pixelformat: str
        The output video pixel format. Default is 'yuv420p', which is most
        widely supported by video players.
    input_params : list
        List additional arguments to ffmpeg for input file options (i.e. the
        stream that imageio provides).
    output_params : list
        List additional arguments to ffmpeg for output file options.
        (Can also be provided as ``ffmpeg_params`` for backwards compatibility)
        Example ffmpeg arguments to use only intra frames and set aspect ratio:
        ['-intra', '-aspect', '16:9']
    ffmpeg_log_level: str
        Sets ffmpeg output log level. Default is "warning".
        Values can be "quiet", "panic", "fatal", "error", "warning", "info",
        "verbose", or "debug". Also prints the FFMPEG command being used by
        imageio if "info", "verbose", or "debug".
    macro_block_size: int
        Size constraint for video. Width and height must be divisible by this
        number. If not divisible by this number, imageio will tell ffmpeg to
        scale the image up to the next closest size
        divisible by this number. Most codecs are compatible with a macroblock
        size of 16 (default); some can go smaller (4, 8). To disable this
        automatic feature set it to None or 1, however be warned many players
        can't decode videos that are odd in size and some codecs will produce
        poor results or fail. See https://en.wikipedia.org/wiki/Macroblock.
    """
    def _can_read(self, request):
        if request.mode[1] not in "I?":
            return False

        # Read from video stream?
        # Note that we could write the _video flag here, but a user might
        # select this format explicitly (and this code is not run)
        if request.filename in ["<video%i>" % i for i in range(10)]:
            return True

        # Read from file that we know?
        if request.extension in self.extensions:
            return True

    def _can_write(self, request):
        if request.mode[1] in (self.modes + "?"):
            if request.extension in self.extensions:
                return True

    # --

    class Reader(Format.Reader):

        _frame_catcher = None
        _read_gen = None

        def _get_cam_inputname(self, index):
            if sys.platform.startswith("linux"):
                return "/dev/" + self.request._video[1:-1]

            elif sys.platform.startswith("win"):
                # Ask ffmpeg for list of dshow device names
                ffmpeg_api = _get_ffmpeg_api()
                cmd = [
                    ffmpeg_api.get_ffmpeg_exe(),
                    "-list_devices",
                    "true",
                    "-f",
                    CAM_FORMAT,
                    "-i",
                    "dummy",
                ]
                # Set `shell=True` in sp.Popen to prevent popup of a command line
                # window in frozen applications. Note: this would be a security
                # vulnerability if user-input goes into the cmd.
                proc = sp.Popen(
                    cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, shell=True
                )
                proc.stdout.readline()
                proc.terminate()
                infos = proc.stderr.read().decode("utf-8", errors="ignore")
                # Return device name at index
                try:
                    name = parse_device_names(infos)[index]
                except IndexError:
                    raise IndexError("No ffdshow camera at index %i." % index)
                return "video=%s" % name

            elif sys.platform.startswith("darwin"):
                # Appears that newer ffmpeg builds don't support -list-devices
                # on OS X. But you can directly open the camera by index.
                name = str(index)
                return name

            else:  # pragma: no cover
                return "??"

        def _open(
            self,
            loop=False,
            size=None,
            dtype=None,
            pixelformat=None,
            print_info=False,
            ffmpeg_params=None,
            input_params=None,
            output_params=None,
            fps=None,
        ):
            # Get generator functions
            self._ffmpeg_api = _get_ffmpeg_api()
            # Process input args
            self._arg_loop = bool(loop)
            if size is None:
                self._arg_size = None
            elif isinstance(size, tuple):
                self._arg_size = "%ix%i" % size
            elif isinstance(size, str) and "x" in size:
                self._arg_size = size
            else:
                raise ValueError('FFMPEG size must be a tuple or an "NxM" string')
            if pixelformat is None:
                pass
            elif not isinstance(pixelformat, str):
                raise ValueError("FFMPEG pixelformat must be str")
            if dtype is None:
                self._dtype = np.dtype("uint8")
            else:
                self._dtype = np.dtype(dtype)
                allowed_dtypes = ["uint8", "uint16"]
                if self._dtype.name not in allowed_dtypes:
                    raise ValueError(
                        "dtype must be one of: {}".format(", ".join(allowed_dtypes))
                    )
            self._arg_pixelformat = pixelformat
            self._arg_input_params = input_params or []
            self._arg_output_params = output_params or []
            self._arg_input_params += ffmpeg_params or []  # backward compat
            # Write "_video" arg - indicating webcam support
            self.request._video = None
            if self.request.filename in ["<video%i>" % i for i in range(10)]:
                self.request._video = self.request.filename
            # Specify input framerate?
            if self.request._video:
                if "-framerate" not in str(self._arg_input_params):
                    self._arg_input_params.extend(["-framerate", str(float(fps or 30))])
            # Get local filename
            if self.request._video:
                index = int(self.request._video[-2])
                self._filename = self._get_cam_inputname(index)
            else:
                self._filename = self.request.get_local_filename()
                # When passed to ffmpeg on command line, carets need to be escaped.
                self._filename = self._filename.replace("^", "^^")
            # Determine pixel format and depth
            self._depth = 3
            if self._dtype.name == "uint8":
                self._pix_fmt = "rgb24"
                self._bytes_per_channel = 1
            else:
                self._pix_fmt = "rgb48le"
                self._bytes_per_channel = 2
            # Initialize parameters
            self._pos = -1
            self._meta = {"plugin": "ffmpeg"}
            self._lastread = None

            # Calculating this from fps and duration is not accurate,
            # and calculating it exactly with ffmpeg_api.count_frames_and_secs
            # takes too long to do for each video. But we need it for looping.
            self._nframes = float("inf")
            if self._arg_loop and not self.request._video:
                self._nframes = self.count_frames()
            self._meta["nframes"] = self._nframes

            # Start ffmpeg subprocess and get meta information
            self._initialize()

            # For cameras, create thread that keeps reading the images
            if self.request._video:
                self._frame_catcher = FrameCatcher(self._read_gen)

            # For reference - but disabled, because it is inaccurate
            # if self._meta["nframes"] == float("inf"):
            #     if self._meta.get("fps", 0) > 0:
            #         if self._meta.get("duration", 0) > 0:
            #             n = round(self._meta["duration"] * self._meta["fps"])
            #             self._meta["nframes"] = int(n)

        def _close(self):
            # First close the frame catcher, because we cannot close the gen
            # if the frame catcher thread is using it
            if self._frame_catcher is not None:
                self._frame_catcher.stop_me()
                self._frame_catcher = None
            if self._read_gen is not None:
                self._read_gen.close()
                self._read_gen = None

        def count_frames(self):
            """ Count the number of frames. Note that this can take a few
            seconds for large files. Also note that it counts the number
            of frames in the original video and does not take a given fps
            into account.
            """
            # This would have been nice, but this does not work :(
            # oargs = []
            # if self.request.kwargs.get("fps", None):
            #     fps = float(self.request.kwargs["fps"])
            #     oargs += ["-r", "%.02f" % fps]
            cf = self._ffmpeg_api.count_frames_and_secs
            return cf(self._filename)[0]

        def _get_length(self):
            return self._nframes  # only not inf if loop is True

        def _get_data(self, index):
            """ Reads a frame at index. Note for coders: getting an
            arbitrary frame in the video with ffmpeg can be painfully
            slow if some decoding has to be done. This function tries
            to avoid fetching arbitrary frames whenever possible, by
            moving between adjacent frames. """
            # Modulo index (for looping)
            if self._arg_loop and self._nframes < float("inf"):
                index %= self._nframes

            if index == self._pos:
                return self._lastread, dict(new=False)
            elif index < 0:
                raise IndexError("Frame index must be >= 0")
            elif index >= self._nframes:
                raise IndexError("Reached end of video")
            else:
                if (index < self._pos) or (index > self._pos + 100):
                    self._initialize(index)
                else:
                    self._skip_frames(index - self._pos - 1)
                result, is_new = self._read_frame()
                self._pos = index
                return result, dict(new=is_new)

        def _get_meta_data(self, index):
            return self._meta

        def _initialize(self, index=0):

            # Close the current generator, and thereby terminate its subprocess
            if self._read_gen is not None:
                self._read_gen.close()

            iargs = []
            oargs = []

            # Create input args
            iargs += self._arg_input_params
            if self.request._video:
                iargs += ["-f", CAM_FORMAT]
                if self._arg_pixelformat:
                    iargs += ["-pix_fmt", self._arg_pixelformat]
                if self._arg_size:
                    iargs += ["-s", self._arg_size]
            elif index > 0:  # re-initialize / seek
                # Note: only works if we initialized earlier, and now have meta
                # Some info here: https://trac.ffmpeg.org/wiki/Seeking
                # There are two ways to seek, one before -i (input_params) and
                # after (output_params). The former is fast, because it uses
                # keyframes, the latter is slow but accurate. According to
                # the article above, the fast method should also be accurate
                # from ffmpeg version 2.1, however in version 4.1 our tests
                # start failing again. Not sure why, but we can solve this
                # by combining slow and fast. Seek the long stretch using
                # the fast method, and seek the last 10s the slow way.
                starttime = index / self._meta["fps"]
                seek_slow = min(10, starttime)
                seek_fast = starttime - seek_slow
                # We used to have this epsilon earlier, when we did not use
                # the slow seek. I don't think we need it anymore.
                # epsilon = -1 / self._meta["fps"] * 0.1
                iargs += ["-ss", "%.06f" % (seek_fast)]
                oargs += ["-ss", "%.06f" % (seek_slow)]

            # Output args, for writing to pipe
            if self._arg_size:
                oargs += ["-s", self._arg_size]
            if self.request.kwargs.get("fps", None):
                fps = float(self.request.kwargs["fps"])
                oargs += ["-r", "%.02f" % fps]
            oargs += self._arg_output_params

            # Get pixelformat and bytes per pixel
            pix_fmt = self._pix_fmt
            bpp = self._depth * self._bytes_per_channel

            # Create generator
            rf = self._ffmpeg_api.read_frames
            self._read_gen = rf(
                self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs
            )

            # Read meta data. This starts the generator (and ffmpeg subprocess)
            if self.request._video:
                # With cameras, catch error and turn into IndexError
                try:
                    meta = self._read_gen.__next__()
                except IOError as err:
                    err_text = str(err)
                    if "darwin" in sys.platform:
                        if "Unknown input format: 'avfoundation'" in err_text:
                            err_text += (
                                "Try installing FFMPEG using "
                                "Homebrew to get a version with "
                                "support for cameras."
                            )
                    raise IndexError(
                        "No (working) camera at {}.\n\n{}".format(
                            self.request._video, err_text
                        )
                    )
                else:
                    self._meta.update(meta)
            elif index == 0:
                self._meta.update(self._read_gen.__next__())
            else:
                self._read_gen.__next__()  # we already have meta data

        def _skip_frames(self, n=1):
            """ Reads and throws away n frames """
            for i in range(n):
                self._read_gen.__next__()
            self._pos += n

        def _read_frame(self):
            # Read and convert to numpy array
            w, h = self._meta["size"]
            framesize = w * h * self._depth * self._bytes_per_channel
            # t0 = time.time()

            # Read frame
            if self._frame_catcher:  # pragma: no cover - camera thing
                s, is_new = self._frame_catcher.get_frame()
            else:
                s = self._read_gen.__next__()
                is_new = True

            # Check
            if len(s) != framesize:
                raise RuntimeError(
                    "Frame is %i bytes, but expected %i." % (len(s), framesize)
                )

            result = np.frombuffer(s, dtype=self._dtype).copy()
            result = result.reshape((h, w, self._depth))
            # t1 = time.time()
            # print('etime', t1-t0)

            # Store and return
            self._lastread = result
            return result, is_new

    # --

    class Writer(Format.Writer):

        _write_gen = None

        def _open(
            self,
            fps=10,
            codec="libx264",
            bitrate=None,
            pixelformat="yuv420p",
            ffmpeg_params=None,
            input_params=None,
            output_params=None,
            ffmpeg_log_level="quiet",
            quality=5,
            macro_block_size=16,
        ):
            self._ffmpeg_api = _get_ffmpeg_api()
            self._filename = self.request.get_local_filename()
            self._pix_fmt = None
            self._depth = None
            self._size = None

        def _close(self):
            if self._write_gen is not None:
                self._write_gen.close()
                self._write_gen = None

        def _append_data(self, im, meta):

            # Get props of image
            h, w = im.shape[:2]
            size = w, h
            depth = 1 if im.ndim == 2 else im.shape[2]

            # Ensure that image is in uint8
            im = image_as_uint(im, bitdepth=8)
            # To be written efficiently, i.e. without creating an immutable
            # buffer when calling im.tostring(), the array must be contiguous.
            if not im.flags.c_contiguous:
                # checking the flag is a micro optimization.
                # the image will be a numpy subclass. See discussion
                # https://github.com/numpy/numpy/issues/11804
                im = np.ascontiguousarray(im)

            # Set size and initialize if not initialized yet
            if self._size is None:
                map = {1: "gray", 2: "gray8a", 3: "rgb24", 4: "rgba"}
                self._pix_fmt = map.get(depth, None)
                if self._pix_fmt is None:
                    raise ValueError("Image must have 1, 2, 3 or 4 channels")
                self._size = size
                self._depth = depth
                self._initialize()

            # Check size of image
            if size != self._size:
                raise ValueError("All images in a movie should have same size")
            if depth != self._depth:
                raise ValueError(
                    "All images in a movie should have same number of channels"
                )

            assert self._write_gen is not None  # Check status

            # Write. Yes, we can send the data in as a numpy array
            self._write_gen.send(im)

        def set_meta_data(self, meta):
            raise RuntimeError(
                "The ffmpeg format does not support setting meta data."
            )

        def _initialize(self):

            # Close existing generator
            if self._write_gen is not None:
                self._write_gen.close()

            # Get parameters
            # Use None to let imageio-ffmpeg (or ffmpeg) select good results
            fps = self.request.kwargs.get("fps", 10)
            codec = self.request.kwargs.get("codec", None)
            bitrate = self.request.kwargs.get("bitrate", None)
            quality = self.request.kwargs.get("quality", None)
            input_params = self.request.kwargs.get("input_params") or []
            output_params = self.request.kwargs.get("output_params") or []
            output_params += self.request.kwargs.get("ffmpeg_params") or []
            pixelformat = self.request.kwargs.get("pixelformat", None)
            macro_block_size = self.request.kwargs.get("macro_block_size", 16)
            ffmpeg_log_level = self.request.kwargs.get("ffmpeg_log_level", None)

            macro_block_size = macro_block_size or 1  # None -> 1

            # Create generator
            self._write_gen = self._ffmpeg_api.write_frames(
                self._filename,
                self._size,
                pix_fmt_in=self._pix_fmt,
                pix_fmt_out=pixelformat,
                fps=fps,
                quality=quality,
                bitrate=bitrate,
                codec=codec,
                macro_block_size=macro_block_size,
                ffmpeg_log_level=ffmpeg_log_level,
                input_params=input_params,
                output_params=output_params,
            )

            # Seed the generator (this is where the ffmpeg subprocess starts)
            self._write_gen.send(None)


class FrameCatcher(threading.Thread):
    """ Thread to keep reading the frame data from stdout. This is
    useful when streaming from a webcam. Otherwise, if the user code
    does not grab frames fast enough, the buffer will fill up, leading
    to lag, and ffmpeg can also stall (experienced on Linux). The
    get_frame() method always returns the last available image.
    """

    def __init__(self, gen):
        self._gen = gen
        self._frame = None
        self._frame_is_new = False
        self._lock = threading.RLock()
        threading.Thread.__init__(self)
        self.setDaemon(True)  # do not let this thread hold up Python shutdown
        self._should_stop = False
        self.start()

    def stop_me(self):
        self._should_stop = True
        while self.is_alive():
            time.sleep(0.001)

    def get_frame(self):
        while self._frame is None:  # pragma: no cover - an init thing
            time.sleep(0.001)
        with self._lock:
            is_new = self._frame_is_new
            self._frame_is_new = False  # reset
            return self._frame, is_new

    def run(self):
        # This runs in the worker thread
        try:
            while not self._should_stop:
                time.sleep(0)  # give control to other threads
                frame = self._gen.__next__()
                with self._lock:
                    self._frame = frame
                    self._frame_is_new = True
        except (StopIteration, EOFError):
            pass


def parse_device_names(ffmpeg_output):
    """ Parse the output of the ffmpeg -list_devices command """
    # Collect device names - get [friendly_name, alt_name] of each
    device_names = []
    in_video_devices = False
    for line in ffmpeg_output.splitlines():
        if line.startswith("[dshow"):
            logger.debug(line)
            line = line.split("]", 1)[1].strip()
            if in_video_devices and line.startswith('"'):
                friendly_name = line[1:-1]
                device_names.append([friendly_name, ""])
            elif in_video_devices and line.lower().startswith("alternative name"):
                alt_name = line.split(" name ", 1)[1].strip()[1:-1]
                if sys.platform.startswith("win"):
                    alt_name = alt_name.replace("&", "^&")  # Tested to work
                else:
                    alt_name = alt_name.replace("&", "\\&")  # Does this work?
                device_names[-1][-1] = alt_name
            elif "video devices" in line:
                in_video_devices = True
            elif "devices" in line:
                # set False for subsequent "devices" sections
                in_video_devices = False
    # Post-process, see #441
    # prefer friendly names, use alt name if two cams have same friendly name
    device_names2 = []
    for friendly_name, alt_name in device_names:
        if friendly_name not in device_names2:
            device_names2.append(friendly_name)
        elif alt_name:
            device_names2.append(alt_name)
        else:
            device_names2.append(friendly_name)  # duplicate, but not much we can do
    return device_names2
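
# Hedged illustration (comment only, not part of the original module): given
# "-list_devices" output containing lines such as
#
#   [dshow @ 0000...] DirectShow video devices
#   [dshow @ 0000...]  "Integrated Camera"
#   [dshow @ 0000...]     Alternative name "@device_pnp_..."
#   [dshow @ 0000...] DirectShow audio devices
#
# parse_device_names() would return ["Integrated Camera"]; the alternative
# name is only substituted when two cameras share the same friendly name.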


# Register. You register an *instance* of a Format class.
format = FfmpegFormat(
    "ffmpeg",
    "Many video formats and cameras (via ffmpeg)",
    ".mov .avi .mpg .mpeg .mp4 .mkv .wmv",
    "I",
)
formats.add_format(format)
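# Hedged usage sketch (comment only, not part of the original module):
# transcoding a file frame by frame; "input.mp4"/"output.mp4" are
# hypothetical paths.
#
#   import imageio
#   reader = imageio.get_reader("input.mp4")
#   fps = reader.get_meta_data()["fps"]
#   with imageio.get_writer("output.mp4", fps=fps) as writer:
#       for frame in reader:
#           writer.append_data(frame)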
127
venv/Lib/site-packages/imageio/plugins/fits.py
Normal file

@ -0,0 +1,127 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" Plugin for reading FITS files.
"""

from .. import formats
from ..core import Format

_fits = None  # lazily loaded


def load_lib():
    global _fits
    try:
        from astropy.io import fits as _fits
    except ImportError:
        raise ImportError(
            "The FITS format relies on the astropy package. "
            "Please refer to http://www.astropy.org/ "
            "for further instructions."
        )
    return _fits


class FitsFormat(Format):

    """ Flexible Image Transport System (FITS) is an open standard defining a
    digital file format useful for storage, transmission and processing of
    scientific and other images. FITS is the most commonly used digital
    file format in astronomy.

    This format requires the ``astropy`` package.

    Parameters for reading
    ----------------------
    cache : bool
        If the file name is a URL, `~astropy.utils.data.download_file` is used
        to open the file. This specifies whether or not to save the file
        locally in Astropy's download cache (default: `True`).
    uint : bool
        Interpret signed integer data where ``BZERO`` is the
        central value and ``BSCALE == 1`` as unsigned integer
        data. For example, ``int16`` data with ``BZERO = 32768``
        and ``BSCALE = 1`` would be treated as ``uint16`` data.

        Note, for backward compatibility, the kwarg **uint16** may
        be used instead. The kwarg was renamed when support was
        added for integers of any size.
    ignore_missing_end : bool
        Do not issue an exception when opening a file that is
        missing an ``END`` card in the last header.
    checksum : bool or str
        If `True`, verifies that both ``DATASUM`` and
        ``CHECKSUM`` card values (when present in the HDU header)
        match the header and data of all HDU's in the file. Updates to a
        file that already has a checksum will preserve and update the
        existing checksums unless this argument is given a value of
        'remove', in which case the CHECKSUM and DATASUM values are not
        checked, and are removed when saving changes to the file.
    disable_image_compression : bool, optional
        If `True`, treats compressed image HDU's like normal
        binary table HDU's.
    do_not_scale_image_data : bool
        If `True`, image data is not scaled using BSCALE/BZERO values
        when read.
    ignore_blank : bool
        If `True`, the BLANK keyword is ignored if present.
    scale_back : bool
        If `True`, when saving changes to a file that contained scaled
        image data, restore the data to the original type and reapply the
        original BSCALE/BZERO values. This could lead to loss of accuracy
        if scaling back to integer values after performing floating point
        operations on the data.
    """

    def _can_read(self, request):
        # We return True if ext matches, because this is the only plugin
        # that can. If astropy is not installed, a useful error follows.
        return request.extension in self.extensions

    def _can_write(self, request):
        # No write support
        return False

    # -- reader

    class Reader(Format.Reader):
        def _open(self, cache=False, **kwargs):
            if not _fits:
                load_lib()
            hdulist = _fits.open(self.request.get_file(), cache=cache, **kwargs)

            self._index = []
            allowed_hdu_types = (_fits.ImageHDU, _fits.PrimaryHDU, _fits.CompImageHDU)
            for n, hdu in enumerate(hdulist):
                if isinstance(hdu, allowed_hdu_types):
                    # Ignore (primary) header units with no data (use '.size'
                    # rather than '.data' to avoid actually loading the image):
                    if hdu.size > 0:
                        self._index.append(n)
            self._hdulist = hdulist

        def _close(self):
            self._hdulist.close()

        def _get_length(self):
            return len(self._index)

        def _get_data(self, index):
            # Get data
            if index < 0 or index >= len(self._index):
                raise IndexError("Index out of range while reading from fits")
            im = self._hdulist[self._index[index]].data
            # Return array and empty meta data
            return im, {}

        def _get_meta_data(self, index):
            # Get the meta data for the given index
            raise RuntimeError("The fits format does not support meta data.")


# Register
format = FitsFormat(
    "fits", "Flexible Image Transport System (FITS) format", "fits fit fts fz", "iIvV"
)
formats.add_format(format)
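# Hedged usage sketch (comment only, not part of the original module):
# "example.fits" is a hypothetical file.
#
#   import imageio
#   im = imageio.imread("example.fits")    # first HDU that contains data
#   ims = imageio.mimread("example.fits")  # all image HDUs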
513
venv/Lib/site-packages/imageio/plugins/freeimage.py
Normal file

@ -0,0 +1,513 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" Plugin that wraps the freeimage lib. The wrapper for Freeimage is
part of the core of imageio, but its functionality is exposed via
the plugin system (therefore this plugin is very thin).
"""

import numpy as np

from .. import formats
from ..core import Format, image_as_uint
from ._freeimage import fi, download, IO_FLAGS, FNAME_PER_PLATFORM  # noqa


# todo: support files with only meta data


class FreeimageFormat(Format):
    """ This is the default format used for FreeImage. Each Freeimage
    format has the 'flags' keyword argument. See the Freeimage
    documentation for more information.

    The freeimage plugin requires a `freeimage` binary. If this binary
    is not available on the system, it can be downloaded manually from
    <https://github.com/imageio/imageio-binaries> by either

    - the command line script ``imageio_download_bin freeimage``
    - the Python method ``imageio.plugins.freeimage.download()``

    Parameters for reading
    ----------------------
    flags : int
        A freeimage-specific option. In most cases we provide explicit
        parameters for influencing image reading.

    Parameters for saving
    ----------------------
    flags : int
        A freeimage-specific option. In most cases we provide explicit
        parameters for influencing image saving.
    """

    _modes = "i"

    @property
    def fif(self):
        return self._fif  # Set when format is created

    def _can_read(self, request):
        # Ask freeimage if it can read it, maybe ext missing
        if fi.has_lib():
            if not hasattr(request, "_fif"):
                try:
                    request._fif = fi.getFIF(request.filename, "r", request.firstbytes)
                except Exception:  # pragma: no cover
                    request._fif = -1
            if request._fif == self.fif:
                return True

    def _can_write(self, request):
        # Ask freeimage, because we are not aware of all formats
        if fi.has_lib():
            if not hasattr(request, "_fif"):
                try:
                    request._fif = fi.getFIF(request.filename, "w")
                except Exception:  # pragma: no cover
                    request._fif = -1
            if request._fif is self.fif:
                return True

    # --

    class Reader(Format.Reader):
        def _get_length(self):
            return 1

        def _open(self, flags=0):
            self._bm = fi.create_bitmap(self.request.filename, self.format.fif, flags)
            self._bm.load_from_filename(self.request.get_local_filename())

        def _close(self):
            self._bm.close()

        def _get_data(self, index):
            if index != 0:
                raise IndexError("This format only supports singleton images.")
            return self._bm.get_image_data(), self._bm.get_meta_data()

        def _get_meta_data(self, index):
            if not (index is None or index == 0):
                raise IndexError()
            return self._bm.get_meta_data()

    # --

    class Writer(Format.Writer):
        def _open(self, flags=0):
            self._flags = flags  # Store flags for later use
            self._bm = None
            self._is_set = False  # To prevent appending more than one image
            self._meta = {}

        def _close(self):
            # Set global meta data
            self._bm.set_meta_data(self._meta)
            # Write and close
            self._bm.save_to_filename(self.request.get_local_filename())
            self._bm.close()

        def _append_data(self, im, meta):
            # Check if set
            if not self._is_set:
                self._is_set = True
            else:
                raise RuntimeError(
                    "Singleton image; can only append image data once."
                )
            # Pop unit dimension for grayscale images
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            # Lazy instantiation of the bitmap, we need image data
            if self._bm is None:
                self._bm = fi.create_bitmap(
                    self.request.filename, self.format.fif, self._flags
                )
                self._bm.allocate(im)
            # Set data
            self._bm.set_image_data(im)
            # There is no distinction between global and per-image meta data
            # for singleton images
            self._meta = meta

        def _set_meta_data(self, meta):
            self._meta = meta
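

# Hedged usage sketch (comment only, not part of the original module): the
# specialized freeimage formats below are registered elsewhere in this file
# (registration not shown here), so the format name "PNG-FI" is an assumption
# and "photo.png" is a hypothetical file.
#
#   import imageio
#   im = imageio.imread("photo.png", format="PNG-FI", ignoregamma=True)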
|
||||
|
||||
|
||||
## Special plugins
|
||||
|
||||
# todo: there is also FIF_LOAD_NOPIXELS,
|
||||
# but perhaps that should be used with get_meta_data.
|
||||
|
||||
|
||||
class FreeimageBmpFormat(FreeimageFormat):
|
||||
""" A BMP format based on the Freeimage library.
|
||||
|
||||
This format supports grayscale, RGB and RGBA images.
|
||||
|
||||
The freeimage plugin requires a `freeimage` binary. If this binary
|
||||
not available on the system, it can be downloaded manually from
|
||||
<https://github.com/imageio/imageio-binaries> by either
|
||||
|
||||
- the command line script ``imageio_download_bin freeimage``
|
||||
- the Python method ``imageio.plugins.freeimage.download()``
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
compression : bool
|
||||
Whether to compress the bitmap using RLE when saving. Default False.
|
||||
It seems this does not always work, but who cares, you should use
|
||||
PNG anyway.
|
||||
|
||||
"""
|
||||
|
||||
class Writer(FreeimageFormat.Writer):
|
||||
def _open(self, flags=0, compression=False):
|
||||
# Build flags from kwargs
|
||||
flags = int(flags)
|
||||
if compression:
|
||||
flags |= IO_FLAGS.BMP_SAVE_RLE
|
||||
else:
|
||||
flags |= IO_FLAGS.BMP_DEFAULT
|
||||
# Act as usual, but with modified flags
|
||||
return FreeimageFormat.Writer._open(self, flags)
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
im = image_as_uint(im, bitdepth=8)
|
||||
return FreeimageFormat.Writer._append_data(self, im, meta)
|
||||
|
||||
|
||||
class FreeimagePngFormat(FreeimageFormat):
|
||||
""" A PNG format based on the Freeimage library.
|
||||
|
||||
This format supports grayscale, RGB and RGBA images.
|
||||
|
||||
The freeimage plugin requires a `freeimage` binary. If this binary
|
||||
not available on the system, it can be downloaded manually from
|
||||
<https://github.com/imageio/imageio-binaries> by either
|
||||
|
||||
- the command line script ``imageio_download_bin freeimage``
|
||||
- the Python method ``imageio.plugins.freeimage.download()``
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
ignoregamma : bool
|
||||
Avoid gamma correction. Default True.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
compression : {0, 1, 6, 9}
|
||||
The compression factor. Higher factors result in more
|
||||
compression at the cost of speed. Note that PNG compression is
|
||||
always lossless. Default 9.
|
||||
quantize : int
|
||||
If specified, turn the given RGB or RGBA image in a paletted image
|
||||
for more efficient storage. The value should be between 2 and 256.
|
||||
If the value of 0 the image is not quantized.
|
||||
interlaced : bool
|
||||
Save using Adam7 interlacing. Default False.
|
||||
"""
|
||||
|
||||
class Reader(FreeimageFormat.Reader):
|
||||
def _open(self, flags=0, ignoregamma=True):
|
||||
# Build flags from kwargs
|
||||
flags = int(flags)
|
||||
if ignoregamma:
|
||||
flags |= IO_FLAGS.PNG_IGNOREGAMMA
|
||||
# Enter as usual, with modified flags
|
||||
return FreeimageFormat.Reader._open(self, flags)
|
||||
|
||||
# --
|
||||
|
||||
class Writer(FreeimageFormat.Writer):
|
||||
def _open(self, flags=0, compression=9, quantize=0, interlaced=False):
|
||||
compression_map = {
|
||||
0: IO_FLAGS.PNG_Z_NO_COMPRESSION,
|
||||
1: IO_FLAGS.PNG_Z_BEST_SPEED,
|
||||
6: IO_FLAGS.PNG_Z_DEFAULT_COMPRESSION,
|
||||
9: IO_FLAGS.PNG_Z_BEST_COMPRESSION,
|
||||
}
|
||||
# Build flags from kwargs
|
||||
flags = int(flags)
|
||||
if interlaced:
|
||||
flags |= IO_FLAGS.PNG_INTERLACED
|
||||
try:
|
||||
flags |= compression_map[compression]
|
||||
except KeyError:
|
||||
raise ValueError("Png compression must be 0, 1, 6, or 9.")
|
||||
# Act as usual, but with modified flags
|
||||
return FreeimageFormat.Writer._open(self, flags)
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
if str(im.dtype) == "uint16":
|
||||
im = image_as_uint(im, bitdepth=16)
|
||||
else:
|
||||
im = image_as_uint(im, bitdepth=8)
|
||||
FreeimageFormat.Writer._append_data(self, im, meta)
|
||||
# Quantize?
|
||||
q = int(self.request.kwargs.get("quantize", False))
|
||||
if not q:
|
||||
pass
|
||||
elif not (im.ndim == 3 and im.shape[-1] == 3):
|
||||
raise ValueError("Can only quantize RGB images")
|
||||
elif q < 2 or q > 256:
|
||||
raise ValueError("PNG quantize param must be 2..256")
|
||||
else:
|
||||
bm = self._bm.quantize(0, q)
|
||||
self._bm.close()
|
||||
self._bm = bm
|
||||
|
||||
|
||||
class FreeimageJpegFormat(FreeimageFormat):
|
||||
""" A JPEG format based on the Freeimage library.
|
||||
|
||||
This format supports grayscale and RGB images.
|
||||
|
||||
The freeimage plugin requires a `freeimage` binary. If this binary
|
||||
not available on the system, it can be downloaded manually from
|
||||
<https://github.com/imageio/imageio-binaries> by either
|
||||
|
||||
- the command line script ``imageio_download_bin freeimage``
|
||||
- the Python method ``imageio.plugins.freeimage.download()``
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
exifrotate : bool
|
||||
Automatically rotate the image according to the exif flag.
|
||||
Default True. If 2 is given, do the rotation in Python instead
|
||||
of freeimage.
|
||||
quickread : bool
|
||||
Read the image more quickly, at the expense of quality.
|
||||
Default False.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
quality : scalar
|
||||
The compression factor of the saved image (1..100), higher
|
||||
numbers result in higher quality but larger file size. Default 75.
|
||||
progressive : bool
|
||||
Save as a progressive JPEG file (e.g. for images on the web).
|
||||
Default False.
|
||||
optimize : bool
|
||||
On saving, compute optimal Huffman coding tables (can reduce a
|
||||
few percent of file size). Default False.
|
||||
baseline : bool
|
||||
Save basic JPEG, without metadata or any markers. Default False.
|
||||
|
||||
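Example
-------
A minimal usage sketch; the file names are hypothetical and ``JPEG-FI`` is
the name this format is registered under::

    import imageio

    im = imageio.imread("in.jpg", format="JPEG-FI", exifrotate=True)
    imageio.imwrite("out.jpg", im, format="JPEG-FI", quality=90, optimize=True)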
"""
|
||||
|
||||
class Reader(FreeimageFormat.Reader):
|
||||
def _open(self, flags=0, exifrotate=True, quickread=False):
|
||||
# Build flags from kwargs
|
||||
flags = int(flags)
|
||||
if exifrotate and exifrotate != 2:
|
||||
flags |= IO_FLAGS.JPEG_EXIFROTATE
|
||||
if not quickread:
|
||||
flags |= IO_FLAGS.JPEG_ACCURATE
|
||||
# Enter as usual, with modified flags
|
||||
return FreeimageFormat.Reader._open(self, flags)
|
||||
|
||||
def _get_data(self, index):
|
||||
im, meta = FreeimageFormat.Reader._get_data(self, index)
|
||||
im = self._rotate(im, meta)
|
||||
return im, meta
|
||||
|
||||
def _rotate(self, im, meta):
|
||||
""" Use Orientation information from EXIF meta data to
|
||||
orient the image correctly. Freeimage is also supposed to
|
||||
support that, and I am pretty sure it once did, but now it
|
||||
does not, so let's just do it in Python.
|
||||
Edit: and now it works again, just leave in place as a fallback.
|
||||
"""
|
||||
if self.request.kwargs.get("exifrotate", None) == 2:
|
||||
try:
|
||||
ori = meta["EXIF_MAIN"]["Orientation"]
|
||||
except KeyError: # pragma: no cover
|
||||
pass # Orientation not available
|
||||
else: # pragma: no cover - we cannot touch all cases
|
||||
# www.impulseadventure.com/photo/exif-orientation.html
|
||||
if ori in [1, 2]:
|
||||
pass
|
||||
if ori in [3, 4]:
|
||||
im = np.rot90(im, 2)
|
||||
if ori in [5, 6]:
|
||||
im = np.rot90(im, 3)
|
||||
if ori in [7, 8]:
|
||||
im = np.rot90(im)
|
||||
if ori in [2, 4, 5, 7]: # Flipped cases (rare)
|
||||
im = np.fliplr(im)
|
||||
return im
|
||||
|
||||
# --
|
||||
|
||||
class Writer(FreeimageFormat.Writer):
|
||||
def _open(
|
||||
self, flags=0, quality=75, progressive=False, optimize=False, baseline=False
|
||||
):
|
||||
# Test quality
|
||||
quality = int(quality)
|
||||
if quality < 1 or quality > 100:
|
||||
raise ValueError("JPEG quality should be between 1 and 100.")
|
||||
# Build flags from kwargs
|
||||
flags = int(flags)
|
||||
flags |= quality
|
||||
if progressive:
|
||||
flags |= IO_FLAGS.JPEG_PROGRESSIVE
|
||||
if optimize:
|
||||
flags |= IO_FLAGS.JPEG_OPTIMIZE
|
||||
if baseline:
|
||||
flags |= IO_FLAGS.JPEG_BASELINE
|
||||
# Act as usual, but with modified flags
|
||||
return FreeimageFormat.Writer._open(self, flags)
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
if im.ndim == 3 and im.shape[-1] == 4:
|
||||
raise IOError("JPEG does not support alpha channel.")
|
||||
im = image_as_uint(im, bitdepth=8)
|
||||
return FreeimageFormat.Writer._append_data(self, im, meta)
|
||||
|
||||
|
||||
class FreeimagePnmFormat(FreeimageFormat):
|
||||
""" A PNM format based on the Freeimage library.
|
||||
|
||||
This format supports single bit (PBM), grayscale (PGM) and RGB (PPM)
|
||||
images, with either ASCII or binary coding.
|
||||
|
||||
The freeimage plugin requires a `freeimage` binary. If this binary
|
||||
is not available on the system, it can be downloaded manually from
|
||||
<https://github.com/imageio/imageio-binaries> by either
|
||||
|
||||
- the command line script ``imageio_download_bin freeimage``
|
||||
- the Python method ``imageio.plugins.freeimage.download()``
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
use_ascii : bool
|
||||
Save with ASCII coding. Default True.
|
||||
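Example
-------
A minimal usage sketch; the file names are hypothetical and ``PPM-FI`` is
the name the PPM variant is registered under::

    import imageio

    im = imageio.imread("in.ppm", format="PPM-FI")
    imageio.imwrite("out.ppm", im, format="PPM-FI", use_ascii=False)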
"""
|
||||
|
||||
class Writer(FreeimageFormat.Writer):
|
||||
def _open(self, flags=0, use_ascii=True):
|
||||
# Build flags from kwargs
|
||||
flags = int(flags)
|
||||
if use_ascii:
|
||||
flags |= IO_FLAGS.PNM_SAVE_ASCII
|
||||
# Act as usual, but with modified flags
|
||||
return FreeimageFormat.Writer._open(self, flags)
|
||||
|
||||
|
||||
## Create the formats
|
||||
|
||||
SPECIAL_CLASSES = {
|
||||
"jpeg": FreeimageJpegFormat,
|
||||
"png": FreeimagePngFormat,
|
||||
"bmp": FreeimageBmpFormat,
|
||||
"ppm": FreeimagePnmFormat,
|
||||
"ppmraw": FreeimagePnmFormat,
|
||||
"gif": None, # defined in freeimagemulti
|
||||
"ico": None, # defined in freeimagemulti
|
||||
"mng": None, # defined in freeimagemulti
|
||||
}
|
||||
|
||||
# rename TIFF to make way for the tifffile plugin
|
||||
NAME_MAP = {"TIFF": "FI_TIFF"}
|
||||
|
||||
# This is a dump of supported FreeImage formats on Linux fi version 3.16.0
|
||||
# > imageio.plugins.freeimage.create_freeimage_formats()
|
||||
# > for i in sorted(imageio.plugins.freeimage.fiformats): print('%r,' % (i, ))
|
||||
fiformats = [
|
||||
("BMP", 0, "Windows or OS/2 Bitmap", "bmp"),
|
||||
("CUT", 21, "Dr. Halo", "cut"),
|
||||
("DDS", 24, "DirectX Surface", "dds"),
|
||||
("EXR", 29, "ILM OpenEXR", "exr"),
|
||||
("G3", 27, "Raw fax format CCITT G.3", "g3"),
|
||||
("GIF", 25, "Graphics Interchange Format", "gif"),
|
||||
("HDR", 26, "High Dynamic Range Image", "hdr"),
|
||||
("ICO", 1, "Windows Icon", "ico"),
|
||||
("IFF", 5, "IFF Interleaved Bitmap", "iff,lbm"),
|
||||
("J2K", 30, "JPEG-2000 codestream", "j2k,j2c"),
|
||||
("JNG", 3, "JPEG Network Graphics", "jng"),
|
||||
("JP2", 31, "JPEG-2000 File Format", "jp2"),
|
||||
("JPEG", 2, "JPEG - JFIF Compliant", "jpg,jif,jpeg,jpe"),
|
||||
("JPEG-XR", 36, "JPEG XR image format", "jxr,wdp,hdp"),
|
||||
("KOALA", 4, "C64 Koala Graphics", "koa"),
|
||||
("MNG", 6, "Multiple-image Network Graphics", "mng"),
|
||||
("PBM", 7, "Portable Bitmap (ASCII)", "pbm"),
|
||||
("PBMRAW", 8, "Portable Bitmap (RAW)", "pbm"),
|
||||
("PCD", 9, "Kodak PhotoCD", "pcd"),
|
||||
("PCX", 10, "Zsoft Paintbrush", "pcx"),
|
||||
("PFM", 32, "Portable floatmap", "pfm"),
|
||||
("PGM", 11, "Portable Greymap (ASCII)", "pgm"),
|
||||
("PGMRAW", 12, "Portable Greymap (RAW)", "pgm"),
|
||||
("PICT", 33, "Macintosh PICT", "pct,pict,pic"),
|
||||
("PNG", 13, "Portable Network Graphics", "png"),
|
||||
("PPM", 14, "Portable Pixelmap (ASCII)", "ppm"),
|
||||
("PPMRAW", 15, "Portable Pixelmap (RAW)", "ppm"),
|
||||
("PSD", 20, "Adobe Photoshop", "psd"),
|
||||
("RAS", 16, "Sun Raster Image", "ras"),
|
||||
(
|
||||
"RAW",
|
||||
34,
|
||||
"RAW camera image",
|
||||
"3fr,arw,bay,bmq,cap,cine,cr2,crw,cs1,dc2,"
|
||||
"dcr,drf,dsc,dng,erf,fff,ia,iiq,k25,kc2,kdc,mdc,mef,mos,mrw,nef,nrw,orf,"
|
||||
"pef,ptx,pxn,qtk,raf,raw,rdc,rw2,rwl,rwz,sr2,srf,srw,sti",
|
||||
),
|
||||
("SGI", 28, "SGI Image Format", "sgi,rgb,rgba,bw"),
|
||||
("TARGA", 17, "Truevision Targa", "tga,targa"),
|
||||
("TIFF", 18, "Tagged Image File Format", "tif,tiff"),
|
||||
("WBMP", 19, "Wireless Bitmap", "wap,wbmp,wbm"),
|
||||
("WebP", 35, "Google WebP image format", "webp"),
|
||||
("XBM", 22, "X11 Bitmap Format", "xbm"),
|
||||
("XPM", 23, "X11 Pixmap Format", "xpm"),
|
||||
]
|
||||
|
||||
|
||||
def _create_predefined_freeimage_formats():
|
||||
|
||||
for name, i, des, ext in fiformats:
|
||||
# name = NAME_MAP.get(name, name)
|
||||
# Get class for format
|
||||
FormatClass = SPECIAL_CLASSES.get(name.lower(), FreeimageFormat)
|
||||
if FormatClass:
|
||||
# Create Format and add
|
||||
format = FormatClass(name + "-FI", des, ext, FormatClass._modes)
|
||||
format._fif = i
|
||||
formats.add_format(format)
|
||||
|
||||
|
||||
def create_freeimage_formats():
|
||||
""" By default, imageio registers a list of predefined formats
|
||||
that freeimage can handle. If your version of freeimage can handle
|
||||
more formats, you can call this function to register them.
|
||||
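A minimal call sketch (assumes the freeimage binary is available)::

    import imageio

    imageio.plugins.freeimage.create_freeimage_formats()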
"""
|
||||
fiformats[:] = []
|
||||
|
||||
# Freeimage available?
|
||||
if fi is None: # pragma: no cover
|
||||
return
|
||||
|
||||
# Init
|
||||
lib = fi._lib
|
||||
|
||||
# Create formats
|
||||
for i in range(lib.FreeImage_GetFIFCount()):
|
||||
if lib.FreeImage_IsPluginEnabled(i):
|
||||
# Get info
|
||||
name = lib.FreeImage_GetFormatFromFIF(i).decode("ascii")
|
||||
des = lib.FreeImage_GetFIFDescription(i).decode("ascii")
|
||||
ext = lib.FreeImage_GetFIFExtensionList(i).decode("ascii")
|
||||
fiformats.append((name, i, des, ext))
|
||||
# name = NAME_MAP.get(name, name)
|
||||
# Get class for format
|
||||
FormatClass = SPECIAL_CLASSES.get(name.lower(), FreeimageFormat)
|
||||
if not FormatClass:
|
||||
continue
|
||||
# Create Format and add
|
||||
format = FormatClass(name + "-FI", des, ext, FormatClass._modes)
|
||||
format._fif = i
|
||||
formats.add_format(format, overwrite=True)
|
||||
|
||||
|
||||
_create_predefined_freeimage_formats()
|
330
venv/Lib/site-packages/imageio/plugins/freeimagemulti.py
Normal file
|
@ -0,0 +1,330 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Plugin for multi-image freeimafe formats, like animated GIF and ico.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import numpy as np
|
||||
|
||||
from .. import formats
|
||||
from ..core import Format, image_as_uint
|
||||
from ._freeimage import fi, IO_FLAGS
|
||||
from .freeimage import FreeimageFormat
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FreeimageMulti(FreeimageFormat):
|
||||
""" Base class for freeimage formats that support multiple images.
|
||||
"""
|
||||
|
||||
_modes = "iI"
|
||||
_fif = -1
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self, flags=0):
|
||||
flags = int(flags)
|
||||
# Create bitmap
|
||||
self._bm = fi.create_multipage_bitmap(
|
||||
self.request.filename, self.format.fif, flags
|
||||
)
|
||||
self._bm.load_from_filename(self.request.get_local_filename())
|
||||
|
||||
def _close(self):
|
||||
self._bm.close()
|
||||
|
||||
def _get_length(self):
|
||||
return len(self._bm)
|
||||
|
||||
def _get_data(self, index):
|
||||
sub = self._bm.get_page(index)
|
||||
try:
|
||||
return sub.get_image_data(), sub.get_meta_data()
|
||||
finally:
|
||||
sub.close()
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
index = index or 0
|
||||
if index < 0 or index >= len(self._bm):
|
||||
raise IndexError()
|
||||
sub = self._bm.get_page(index)
|
||||
try:
|
||||
return sub.get_meta_data()
|
||||
finally:
|
||||
sub.close()
|
||||
|
||||
# --
|
||||
|
||||
class Writer(FreeimageFormat.Writer):
|
||||
def _open(self, flags=0):
|
||||
# Set flags
|
||||
self._flags = flags = int(flags)
|
||||
# Instantiate multi-page bitmap
|
||||
self._bm = fi.create_multipage_bitmap(
|
||||
self.request.filename, self.format.fif, flags
|
||||
)
|
||||
self._bm.save_to_filename(self.request.get_local_filename())
|
||||
|
||||
def _close(self):
|
||||
# Close bitmap
|
||||
self._bm.close()
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
# Prepare data
|
||||
if im.ndim == 3 and im.shape[-1] == 1:
|
||||
im = im[:, :, 0]
|
||||
im = image_as_uint(im, bitdepth=8)
|
||||
# Create sub bitmap
|
||||
sub1 = fi.create_bitmap(self._bm._filename, self.format.fif)
|
||||
# Let subclass add data to bitmap, optionally return new
|
||||
sub2 = self._append_bitmap(im, meta, sub1)
|
||||
# Add
|
||||
self._bm.append_bitmap(sub2)
|
||||
sub2.close()
|
||||
if sub1 is not sub2:
|
||||
sub1.close()
|
||||
|
||||
def _append_bitmap(self, im, meta, bitmap):
|
||||
# Set data
|
||||
bitmap.allocate(im)
|
||||
bitmap.set_image_data(im)
|
||||
bitmap.set_meta_data(meta)
|
||||
# Return that same bitmap
|
||||
return bitmap
|
||||
|
||||
def _set_meta_data(self, meta):
|
||||
pass # ignore global meta data
|
||||
|
||||
|
||||
class MngFormat(FreeimageMulti):
|
||||
""" An Mng format based on the Freeimage library.
|
||||
|
||||
Read only. Seems broken.
|
||||
"""
|
||||
|
||||
_fif = 6
|
||||
|
||||
def _can_write(self, request): # pragma: no cover
|
||||
return False
|
||||
|
||||
|
||||
class IcoFormat(FreeimageMulti):
|
||||
""" An ICO format based on the Freeimage library.
|
||||
|
||||
This format supports grayscale, RGB and RGBA images.
|
||||
|
||||
The freeimage plugin requires a `freeimage` binary. If this binary
|
||||
is not available on the system, it can be downloaded by either
|
||||
|
||||
- the command line script ``imageio_download_bin freeimage``
|
||||
- the Python method ``imageio.plugins.freeimage.download()``
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
makealpha : bool
|
||||
Convert to 32-bit and create an alpha channel from the AND-
|
||||
mask when loading. Default False. Note that this returns wrong
|
||||
results if the image was already RGBA.
|
||||
|
||||
"""
|
||||
|
||||
_fif = 1
|
||||
|
||||
class Reader(FreeimageMulti.Reader):
|
||||
def _open(self, flags=0, makealpha=False):
|
||||
# Build flags from kwargs
|
||||
flags = int(flags)
|
||||
if makealpha:
|
||||
flags |= IO_FLAGS.ICO_MAKEALPHA
|
||||
return FreeimageMulti.Reader._open(self, flags)
|
||||
|
||||
|
||||
class GifFormat(FreeimageMulti):
|
||||
""" A format for reading and writing static and animated GIF, based
|
||||
on the Freeimage library.
|
||||
|
||||
Images read with this format are always RGBA. Currently,
|
||||
the alpha channel is ignored when saving RGB images with this
|
||||
format.
|
||||
|
||||
The freeimage plugin requires a `freeimage` binary. If this binary
|
||||
is not available on the system, it can be downloaded by either
|
||||
|
||||
- the command line script ``imageio_download_bin freeimage``
|
||||
- the Python method ``imageio.plugins.freeimage.download()``
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
playback : bool
|
||||
'Play' the GIF to generate each frame (as 32bpp) instead of
|
||||
returning raw frame data when loading. Default True.
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
loop : int
|
||||
The number of iterations. Default 0 (meaning loop indefinitely).
|
||||
duration : {float, list}
|
||||
The duration (in seconds) of each frame. Either specify one value
|
||||
that is used for all frames, or one value for each frame.
|
||||
Note that in the GIF format the duration/delay is expressed in
|
||||
hundredths of a second, which limits the precision of the duration.
|
||||
fps : float
|
||||
The number of frames per second. If duration is not given, the
|
||||
duration for each frame is set to 1/fps. Default 10.
|
||||
palettesize : int
|
||||
The number of colors to quantize the image to. Is rounded to
|
||||
the nearest power of two. Default 256.
|
||||
quantizer : {'wu', 'nq'}
|
||||
The quantization algorithm:
|
||||
* wu - Wu, Xiaolin, Efficient Statistical Computations for
|
||||
Optimal Color Quantization
|
||||
* nq (neuqant) - Dekker A. H., Kohonen neural networks for
|
||||
optimal color quantization
|
||||
subrectangles : bool
|
||||
If True, will try to optimize the GIF by storing only the
|
||||
rectangular parts of each frame that change with respect to the
|
||||
previous. Unfortunately, this option seems currently broken
|
||||
because FreeImage does not handle DisposalMethod correctly.
|
||||
Default False.
|
||||
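Example
-------
A minimal usage sketch; the frame files are hypothetical and ``GIF-FI`` is
the name this format is registered under::

    import imageio

    frames = [imageio.imread("frame%02d.png" % i) for i in range(10)]
    imageio.mimwrite("anim.gif", frames, format="GIF-FI", fps=20, palettesize=64)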
"""
|
||||
|
||||
_fif = 25
|
||||
|
||||
class Reader(FreeimageMulti.Reader):
|
||||
def _open(self, flags=0, playback=True):
|
||||
# Build flags from kwargs
|
||||
flags = int(flags)
|
||||
if playback:
|
||||
flags |= IO_FLAGS.GIF_PLAYBACK
|
||||
FreeimageMulti.Reader._open(self, flags)
|
||||
|
||||
def _get_data(self, index):
|
||||
im, meta = FreeimageMulti.Reader._get_data(self, index)
|
||||
# im = im[:, :, :3] # Drop alpha channel
|
||||
return im, meta
|
||||
|
||||
# -- writer
|
||||
|
||||
class Writer(FreeimageMulti.Writer):
|
||||
|
||||
# todo: subrectangles
|
||||
# todo: global palette
|
||||
|
||||
def _open(
|
||||
self,
|
||||
flags=0,
|
||||
loop=0,
|
||||
duration=None,
|
||||
fps=10,
|
||||
palettesize=256,
|
||||
quantizer="Wu",
|
||||
subrectangles=False,
|
||||
):
|
||||
# Check palettesize
|
||||
if palettesize < 2 or palettesize > 256:
|
||||
raise ValueError("GIF quantize param must be 2..256")
|
||||
if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]:
|
||||
palettesize = 2 ** int(np.log2(palettesize) + 0.999)  # round up to a power of two
|
||||
logger.warning(
|
||||
"Warning: palettesize (%r) modified to a factor of "
|
||||
"two between 2-256." % palettesize
|
||||
)
|
||||
self._palettesize = palettesize
|
||||
# Check quantizer
|
||||
self._quantizer = {"wu": 0, "nq": 1}.get(quantizer.lower(), None)
|
||||
if self._quantizer is None:
|
||||
raise ValueError('Invalid quantizer, must be "wu" or "nq".')
|
||||
# Check frametime
|
||||
if duration is None:
|
||||
self._frametime = [int(1000 / float(fps) + 0.5)]
|
||||
elif isinstance(duration, list):
|
||||
self._frametime = [int(1000 * d) for d in duration]
|
||||
elif isinstance(duration, (float, int)):
|
||||
self._frametime = [int(1000 * duration)]
|
||||
else:
|
||||
raise ValueError("Invalid value for duration: %r" % duration)
|
||||
# Check subrectangles
|
||||
self._subrectangles = bool(subrectangles)
|
||||
self._prev_im = None
|
||||
# Init
|
||||
FreeimageMulti.Writer._open(self, flags)
|
||||
# Set global meta data
|
||||
self._meta = {}
|
||||
self._meta["ANIMATION"] = {
|
||||
# 'GlobalPalette': np.array([0]).astype(np.uint8),
|
||||
"Loop": np.array([loop]).astype(np.uint32),
|
||||
# 'LogicalWidth': np.array([x]).astype(np.uint16),
|
||||
# 'LogicalHeight': np.array([x]).astype(np.uint16),
|
||||
}
|
||||
|
||||
def _append_bitmap(self, im, meta, bitmap):
|
||||
# Prepare meta data
|
||||
meta = meta.copy()
|
||||
meta_a = meta["ANIMATION"] = {}
|
||||
# If this is the first frame, assign it our "global" meta data
|
||||
if len(self._bm) == 0:
|
||||
meta.update(self._meta)
|
||||
meta_a = meta["ANIMATION"]
|
||||
# Set frame time
|
||||
index = len(self._bm)
|
||||
if index < len(self._frametime):
|
||||
ft = self._frametime[index]
|
||||
else:
|
||||
ft = self._frametime[-1]
|
||||
meta_a["FrameTime"] = np.array([ft]).astype(np.uint32)
|
||||
# Check array
|
||||
if im.ndim == 3 and im.shape[-1] == 4:
|
||||
im = im[:, :, :3]
|
||||
# Process subrectangles
|
||||
im_uncropped = im
|
||||
if self._subrectangles and self._prev_im is not None:
|
||||
im, xy = self._get_sub_rectangles(self._prev_im, im)
|
||||
meta_a["DisposalMethod"] = np.array([1]).astype(np.uint8)
|
||||
meta_a["FrameLeft"] = np.array([xy[0]]).astype(np.uint16)
|
||||
meta_a["FrameTop"] = np.array([xy[1]]).astype(np.uint16)
|
||||
self._prev_im = im_uncropped
|
||||
# Set image data
|
||||
sub2 = sub1 = bitmap
|
||||
sub1.allocate(im)
|
||||
sub1.set_image_data(im)
|
||||
# Quantize it if it is RGB
|
||||
if im.ndim == 3 and im.shape[-1] == 3:
|
||||
sub2 = sub1.quantize(self._quantizer, self._palettesize)
|
||||
# If single image, omit animation data
|
||||
if self.request.mode[1] == "i":
|
||||
del meta["ANIMATION"]
|
||||
# Set meta data and return
|
||||
sub2.set_meta_data(meta)
|
||||
return sub2
|
||||
|
||||
def _get_sub_rectangles(self, prev, im):
|
||||
"""
|
||||
Calculate the minimal rectangles that need updating each frame.
|
||||
Returns a two-element tuple containing the cropped images and a
|
||||
list of x-y positions.
|
||||
"""
|
||||
# Get difference, sum over colors
|
||||
diff = np.abs(im - prev)
|
||||
if diff.ndim == 3:
|
||||
diff = diff.sum(2)
|
||||
# Get begin and end for both dimensions
|
||||
X = np.argwhere(diff.sum(0))
|
||||
Y = np.argwhere(diff.sum(1))
|
||||
# Get rect coordinates
|
||||
if X.size and Y.size:
|
||||
x0, x1 = int(X[0]), int(X[-1]) + 1
|
||||
y0, y1 = int(Y[0]), int(Y[-1]) + 1
|
||||
else: # No change ... make it minimal
|
||||
x0, x1 = 0, 2
|
||||
y0, y1 = 0, 2
|
||||
# Cut out and return
|
||||
return im[y0:y1, x0:x1], (x0, y0)
|
||||
|
||||
|
||||
# formats.add_format(MngFormat('MNG', 'Multiple network graphics',
|
||||
# '.mng', 'iI'))
|
||||
formats.add_format(IcoFormat("ICO-FI", "Windows icon", ".ico", "iI"))
|
||||
formats.add_format(
|
||||
GifFormat("GIF-FI", "Static and animated gif (FreeImage)", ".gif", "iI")
|
||||
)
|
76
venv/Lib/site-packages/imageio/plugins/gdal.py
Normal file
|
@ -0,0 +1,76 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Plugin for reading gdal files.
|
||||
"""
|
||||
|
||||
from .. import formats
|
||||
from ..core import Format, has_module
|
||||
|
||||
_gdal = None # lazily loaded in load_lib()
|
||||
|
||||
|
||||
def load_lib():
|
||||
global _gdal
|
||||
try:
|
||||
import osgeo.gdal as _gdal
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"The GDAL format relies on the GDAL package."
|
||||
"Please refer to http://www.gdal.org/"
|
||||
"for further instructions."
|
||||
)
|
||||
return _gdal
|
||||
|
||||
|
||||
GDAL_FORMATS = (".tiff", ".tif", ".img", ".ecw", ".jpg", ".jpeg")
|
||||
|
||||
|
||||
class GdalFormat(Format):
|
||||
|
||||
"""
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
None
|
||||
|
||||
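Example
-------
A minimal usage sketch; the file name is hypothetical and the osgeo/GDAL
package must be installed::

    import imageio

    im = imageio.imread("raster.ecw", format="gdal")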
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
if request.extension in (".ecw",):
|
||||
return True
|
||||
if has_module("osgeo.gdal"):
|
||||
return request.extension in self.extensions
|
||||
|
||||
def _can_write(self, request):
|
||||
return False
|
||||
|
||||
# --
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self):
|
||||
if not _gdal:
|
||||
load_lib()
|
||||
self._ds = _gdal.Open(self.request.get_local_filename())
|
||||
|
||||
def _close(self):
|
||||
del self._ds
|
||||
|
||||
def _get_length(self):
|
||||
return 1
|
||||
|
||||
def _get_data(self, index):
|
||||
if index != 0:
|
||||
raise IndexError("Gdal file contains only one dataset")
|
||||
return self._ds.ReadAsArray(), self._get_meta_data(index)
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
return self._ds.GetMetadata()
|
||||
|
||||
|
||||
# Add this format
|
||||
formats.add_format(
|
||||
GdalFormat(
|
||||
"gdal", "Geospatial Data Abstraction Library", " ".join(GDAL_FORMATS), "iIvV"
|
||||
)
|
||||
)
|
123
venv/Lib/site-packages/imageio/plugins/grab.py
Normal file
|
@ -0,0 +1,123 @@
|
|||
"""
|
||||
PIL-based formats to take screenshots and grab from the clipboard.
|
||||
"""
|
||||
|
||||
import threading
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .. import formats
|
||||
from ..core import Format
|
||||
|
||||
|
||||
class BaseGrabFormat(Format):
|
||||
""" Base format for grab formats.
|
||||
"""
|
||||
|
||||
_pillow_imported = False
|
||||
_ImageGrab = None
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(BaseGrabFormat, self).__init__(*args, **kwargs)
|
||||
self._lock = threading.RLock()
|
||||
|
||||
def _can_write(self, request):
|
||||
return False
|
||||
|
||||
def _init_pillow(self):
|
||||
with self._lock:
|
||||
if not self._pillow_imported:
|
||||
self._pillow_imported = True # more like tried to import
|
||||
import PIL
|
||||
|
||||
if not hasattr(PIL, "__version__"): # pragma: no cover
|
||||
raise ImportError("Imageio Pillow requires " "Pillow, not PIL!")
|
||||
try:
|
||||
from PIL import ImageGrab
|
||||
except ImportError:
|
||||
return None
|
||||
self._ImageGrab = ImageGrab
|
||||
return self._ImageGrab
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self):
|
||||
pass
|
||||
|
||||
def _close(self):
|
||||
pass
|
||||
|
||||
def _get_data(self, index):
|
||||
return self.format._get_data(index)
|
||||
|
||||
|
||||
class ScreenGrabFormat(BaseGrabFormat):
|
||||
""" The ScreenGrabFormat provided a means to grab screenshots using
|
||||
the uri of "<screen>".
|
||||
|
||||
This functionality is provided via Pillow. Note that "<screen>" is
|
||||
only supported on Windows and OS X.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
No parameters.
|
||||
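Example
-------
A minimal usage sketch (Windows or OS X, with Pillow installed)::

    import imageio

    screenshot = imageio.imread("<screen>")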
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
if request.mode[1] not in "i?":
|
||||
return False
|
||||
if request.filename != "<screen>":
|
||||
return False
|
||||
return bool(self._init_pillow())
|
||||
|
||||
def _get_data(self, index):
|
||||
ImageGrab = self._init_pillow()
|
||||
assert ImageGrab
|
||||
|
||||
pil_im = ImageGrab.grab()
|
||||
assert pil_im is not None
|
||||
im = np.asarray(pil_im)
|
||||
return im, {}
|
||||
|
||||
|
||||
class ClipboardGrabFormat(BaseGrabFormat):
|
||||
""" The ClipboardGrabFormat provided a means to grab image data from
|
||||
the clipboard, using the uri "<clipboard>"
|
||||
|
||||
This functionality is provided via Pillow. Note that "<clipboard>" is
|
||||
only supported on Windows.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
No parameters.
|
||||
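Example
-------
A minimal usage sketch (Windows, with Pillow installed and an image on
the clipboard)::

    import imageio

    im = imageio.imread("<clipboard>")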
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
if request.mode[1] not in "i?":
|
||||
return False
|
||||
if request.filename != "<clipboard>":
|
||||
return False
|
||||
return bool(self._init_pillow())
|
||||
|
||||
def _get_data(self, index):
|
||||
ImageGrab = self._init_pillow()
|
||||
assert ImageGrab
|
||||
|
||||
pil_im = ImageGrab.grabclipboard()
|
||||
if pil_im is None:
|
||||
raise RuntimeError(
|
||||
"There seems to be no image data on the " "clipboard now."
|
||||
)
|
||||
im = np.asarray(pil_im)
|
||||
return im, {}
|
||||
|
||||
|
||||
# Register. You register an *instance* of a Format class.
|
||||
format = ScreenGrabFormat(
|
||||
"screengrab", "Grab screenshots (Windows and OS X only)", [], "i"
|
||||
)
|
||||
formats.add_format(format)
|
||||
|
||||
format = ClipboardGrabFormat(
|
||||
"clipboardgrab", "Grab from clipboard (Windows only)", [], "i"
|
||||
)
|
||||
formats.add_format(format)
|
705
venv/Lib/site-packages/imageio/plugins/lytro.py
Normal file
|
@ -0,0 +1,705 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2018, imageio contributors
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
#
|
||||
|
||||
""" Lytro Illum Plugin.
|
||||
Plugin to read Lytro Illum .lfr and .raw files as produced
|
||||
by the Lytro Illum light field camera.
|
||||
"""
|
||||
#
|
||||
#
|
||||
# This code is based on work by
|
||||
# David Uhlig and his lfr_reader
|
||||
# (https://www.iiit.kit.edu/uhlig.php)
|
||||
# Donald Dansereau and his Matlab LF Toolbox
|
||||
# (http://dgd.vision/Tools/LFToolbox/)
|
||||
# and Behnam Esfahbod and his Python LFP-Reader
|
||||
# (https://github.com/behnam/python-lfp-reader/)
|
||||
|
||||
|
||||
import os
|
||||
import json
|
||||
import struct
|
||||
import logging
|
||||
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .. import formats
|
||||
from ..core import Format
|
||||
from .. import imread
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Sensor sizes of the Lytro Illum and the Lytro F01 light field cameras
|
||||
LYTRO_ILLUM_IMAGE_SIZE = (5368, 7728)
|
||||
LYTRO_F01_IMAGE_SIZE = (3280, 3280)
|
||||
|
||||
# Parameter of lfr file format
|
||||
HEADER_LENGTH = 12
|
||||
SIZE_LENGTH = 4 # = 16 - header_length
|
||||
SHA1_LENGTH = 45 # = len("sha1-") + (160 / 4)
|
||||
PADDING_LENGTH = 35 # = (4*16) - header_length - size_length - sha1_length
|
||||
DATA_CHUNKS_ILLUM = 11
|
||||
DATA_CHUNKS_F01 = 3
|
||||
|
||||
|
||||
class LytroFormat(Format):
|
||||
""" Base class for Lytro format.
|
||||
The subclasses LytroLfrFormat, LytroLfpFormat, LytroIllumRawFormat and
|
||||
LytroF01RawFormat implement the Lytro-LFR, Lytro-LFP and Lytro-RAW format
|
||||
for the Illum and original F01 camera respectively.
|
||||
Writing is not supported.
|
||||
"""
|
||||
|
||||
# Only single images are supported.
|
||||
_modes = "i"
|
||||
|
||||
def _can_write(self, request):
|
||||
# Writing of Lytro files is not supported
|
||||
return False
|
||||
|
||||
# -- writer
|
||||
|
||||
class Writer(Format.Writer):
|
||||
def _open(self, flags=0):
|
||||
self._fp = self.request.get_file()
|
||||
|
||||
def _close(self):
|
||||
# Close the writer.
|
||||
# Note that the request object will close self._fp
|
||||
pass
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
# Process the given data and meta data.
|
||||
raise RuntimeError("The lytro format cannot write image data.")
|
||||
|
||||
def _set_meta_data(self, meta):
|
||||
# Process the given meta data (global for all images)
|
||||
# It is not mandatory to support this.
|
||||
raise RuntimeError("The lytro format cannot write meta data.")
|
||||
|
||||
|
||||
class LytroIllumRawFormat(LytroFormat):
|
||||
""" This is the Lytro Illum RAW format.
|
||||
The raw format is a 10-bit image format as used by the Lytro Illum
|
||||
light field camera. The format will read the specified raw file and will
|
||||
try to load a .txt or .json file with the associated meta data.
|
||||
This format does not support writing.
|
||||
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
None
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# Check if mode and extensions are supported by the format
|
||||
if request.mode[1] in (self.modes + "?"):
|
||||
if request.extension in (".raw",):
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def rearrange_bits(array):
|
||||
# Do bit rearrangement for the 10-bit lytro raw format
|
||||
# Normalize output to 1.0 as float64
|
||||
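# Layout sketch, inferred from the slicing below: every five raw
# bytes pack four 10-bit pixels. Bytes 0-3 hold the upper 8 bits of
# pixels 0-3, and byte 4 packs the four 2-bit LSBs (two bits per
# pixel, pixel 0 in the lowest bits).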
t0 = array[0::5]
|
||||
t1 = array[1::5]
|
||||
t2 = array[2::5]
|
||||
t3 = array[3::5]
|
||||
lsb = array[4::5]
|
||||
|
||||
t0 = np.left_shift(t0, 2) + np.bitwise_and(lsb, 3)
|
||||
t1 = np.left_shift(t1, 2) + np.right_shift(np.bitwise_and(lsb, 12), 2)
|
||||
t2 = np.left_shift(t2, 2) + np.right_shift(np.bitwise_and(lsb, 48), 4)
|
||||
t3 = np.left_shift(t3, 2) + np.right_shift(np.bitwise_and(lsb, 192), 6)
|
||||
|
||||
image = np.zeros(LYTRO_ILLUM_IMAGE_SIZE, dtype=np.uint16)
|
||||
image[:, 0::4] = t0.reshape(
|
||||
(LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
|
||||
)
|
||||
image[:, 1::4] = t1.reshape(
|
||||
(LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
|
||||
)
|
||||
image[:, 2::4] = t2.reshape(
|
||||
(LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
|
||||
)
|
||||
image[:, 3::4] = t3.reshape(
|
||||
(LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4)
|
||||
)
|
||||
|
||||
# Normalize data to 1.0 as 64-bit float.
|
||||
# Division is by 1023 as the Lytro Illum saves 10-bit raw data.
|
||||
return np.divide(image, 1023.0).astype(np.float64)
|
||||
|
||||
# -- reader
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self):
|
||||
self._file = self.request.get_file()
|
||||
self._data = None
|
||||
|
||||
def _close(self):
|
||||
# Close the reader.
|
||||
# Note that the request object will close self._file
|
||||
del self._data
|
||||
|
||||
def _get_length(self):
|
||||
# Return the number of images.
|
||||
return 1
|
||||
|
||||
def _get_data(self, index):
|
||||
# Return the data and meta data for the given index
|
||||
|
||||
if index not in [0, "None"]:
|
||||
raise IndexError("Lytro file contains only one dataset")
|
||||
|
||||
# Read all bytes
|
||||
if self._data is None:
|
||||
self._data = self._file.read()
|
||||
|
||||
# Read bytes from string and convert to uint16
|
||||
raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16)
|
||||
|
||||
# Rearrange bits
|
||||
img = LytroIllumRawFormat.rearrange_bits(raw)
|
||||
|
||||
# Return image and meta data
|
||||
return img, self._get_meta_data(index=0)
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
# Get the meta data for the given index. If index is None, it
|
||||
# should return the global meta data.
|
||||
|
||||
if index not in [0, None]:
|
||||
raise IndexError("Lytro meta data file contains only one dataset")
|
||||
|
||||
# Try to read meta data from meta data file corresponding
|
||||
# to the raw data file, extension in [.txt, .TXT, .json, .JSON]
|
||||
filename_base = os.path.splitext(self.request.get_local_filename())[0]
|
||||
|
||||
meta_data = None
|
||||
|
||||
for ext in [".txt", ".TXT", ".json", ".JSON"]:
|
||||
if os.path.isfile(filename_base + ext):
|
||||
meta_data = json.load(open(filename_base + ext))
|
||||
|
||||
if meta_data is not None:
|
||||
return meta_data
|
||||
|
||||
else:
|
||||
logger.warning("No metadata file found for provided raw file.")
|
||||
return {}
|
||||
|
||||
|
||||
class LytroLfrFormat(LytroFormat):
|
||||
""" This is the Lytro Illum LFR format.
|
||||
The lfr is an image and meta data container format as used by the
|
||||
Lytro Illum light field camera.
|
||||
The format will read the specified lfr file.
|
||||
This format does not support writing.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
None
|
||||
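Example
-------
A minimal usage sketch; the file name is hypothetical and ``LYTRO-LFR``
is the name this format is registered under::

    import imageio

    im = imageio.imread("photo.lfr", format="LYTRO-LFR")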
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# Check if mode and extensions are supported by the format
|
||||
if request.mode[1] in (self.modes + "?"):
|
||||
if request.extension in (".lfr",):
|
||||
return True
|
||||
|
||||
# -- reader
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self):
|
||||
self._file = self.request.get_file()
|
||||
self._data = None
|
||||
self._chunks = {}
|
||||
self.metadata = {}
|
||||
self._content = None
|
||||
|
||||
self._find_header()
|
||||
self._find_chunks()
|
||||
self._find_meta()
|
||||
|
||||
try:
|
||||
# Get sha1 dict and check if it is in dictionary of data chunks
|
||||
chunk_dict = self._content["frames"][0]["frame"]
|
||||
if (
|
||||
chunk_dict["metadataRef"] in self._chunks
|
||||
and chunk_dict["imageRef"] in self._chunks
|
||||
and chunk_dict["privateMetadataRef"] in self._chunks
|
||||
):
|
||||
|
||||
# Read raw image data byte buffer
|
||||
data_pos, size = self._chunks[chunk_dict["imageRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
self.raw_image_data = self._file.read(size)
|
||||
|
||||
# Read meta data
|
||||
data_pos, size = self._chunks[chunk_dict["metadataRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
metadata = self._file.read(size)
|
||||
# Add metadata to meta data dict
|
||||
self.metadata["metadata"] = json.loads(metadata.decode("ASCII"))
|
||||
|
||||
# Read private metadata
|
||||
data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
serial_numbers = self._file.read(size)
|
||||
self.serial_numbers = json.loads(serial_numbers.decode("ASCII"))
|
||||
# Add private metadata to meta data dict
|
||||
self.metadata["privateMetadata"] = self.serial_numbers
|
||||
|
||||
# Read image preview thumbnail
|
||||
chunk_dict = self._content["thumbnails"][0]
|
||||
if chunk_dict["imageRef"] in self._chunks:
|
||||
# Read thumbnail image from thumbnail chunk
|
||||
data_pos, size = self._chunks[chunk_dict["imageRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
# Read binary data, read image as jpeg
|
||||
thumbnail_data = self._file.read(size)
|
||||
thumbnail_img = imread(thumbnail_data, format="jpeg")
|
||||
|
||||
thumbnail_height = chunk_dict["height"]
|
||||
thumbnail_width = chunk_dict["width"]
|
||||
|
||||
# Add thumbnail to metadata
|
||||
self.metadata["thumbnail"] = {
|
||||
"image": thumbnail_img,
|
||||
"height": thumbnail_height,
|
||||
"width": thumbnail_width,
|
||||
}
|
||||
|
||||
except KeyError:
|
||||
raise RuntimeError("The specified file is not a valid LFR file.")
|
||||
|
||||
def _close(self):
|
||||
# Close the reader.
|
||||
# Note that the request object will close self._file
|
||||
del self._data
|
||||
|
||||
def _get_length(self):
|
||||
# Return the number of images. Can be np.inf
|
||||
return 1
|
||||
|
||||
def _find_header(self):
|
||||
"""
|
||||
Checks that the file has the correct header and skips it.
|
||||
"""
|
||||
file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01"
|
||||
# Read and check header of file
|
||||
header = self._file.read(HEADER_LENGTH)
|
||||
if header != file_header:
|
||||
raise RuntimeError("The LFR file header is invalid.")
|
||||
|
||||
# Read first bytes to skip header
|
||||
self._file.read(SIZE_LENGTH)
|
||||
|
||||
def _find_chunks(self):
|
||||
"""
|
||||
Gets start position and size of data chunks in file.
|
||||
"""
|
||||
chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00"
|
||||
|
||||
for i in range(0, DATA_CHUNKS_ILLUM):
|
||||
data_pos, size, sha1 = self._get_chunk(chunk_header)
|
||||
self._chunks[sha1] = (data_pos, size)
|
||||
|
||||
def _find_meta(self):
|
||||
"""
|
||||
Gets the data chunk that contains information about the content
|
||||
of other data chunks.
|
||||
"""
|
||||
meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00"
|
||||
data_pos, size, sha1 = self._get_chunk(meta_header)
|
||||
|
||||
# Get content
|
||||
self._file.seek(data_pos, 0)
|
||||
data = self._file.read(size)
|
||||
self._content = json.loads(data.decode("ASCII"))
|
||||
|
||||
def _get_chunk(self, header):
|
||||
"""
|
||||
Checks if chunk has correct header and skips it.
|
||||
Finds start position and length of next chunk and reads
|
||||
sha1-string that identifies the following data chunk.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
header : bytes
|
||||
Byte string that identifies start of chunk.
|
||||
|
||||
Returns
|
||||
-------
|
||||
data_pos : int
|
||||
Start position of data chunk in file.
|
||||
size : int
|
||||
Size of data chunk.
|
||||
sha1 : str
|
||||
Sha1 value of chunk.
|
||||
"""
|
||||
# Read and check header of chunk
|
||||
header_chunk = self._file.read(HEADER_LENGTH)
|
||||
if header_chunk != header:
|
||||
raise RuntimeError("The LFR chunk header is invalid.")
|
||||
|
||||
data_pos = None
|
||||
sha1 = None
|
||||
|
||||
# Read size
|
||||
size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0]
|
||||
if size > 0:
|
||||
# Read sha1
|
||||
sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII"))
|
||||
# Skip fixed null chars
|
||||
self._file.read(PADDING_LENGTH)
|
||||
# Find start of data and skip data
|
||||
data_pos = self._file.tell()
|
||||
self._file.seek(size, 1)
|
||||
# Skip extra null chars
|
||||
ch = self._file.read(1)
|
||||
while ch == b"\0":
|
||||
ch = self._file.read(1)
|
||||
self._file.seek(-1, 1)
|
||||
|
||||
return data_pos, size, sha1
|
||||
|
||||
def _get_data(self, index):
|
||||
# Return the data and meta data for the given index
|
||||
if index not in [0, None]:
|
||||
raise IndexError("Lytro lfr file contains only one dataset")
|
||||
|
||||
# Read bytes from string and convert to uint16
|
||||
raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype(np.uint16)
|
||||
im = LytroIllumRawFormat.rearrange_bits(raw)
|
||||
|
||||
# Return array and dummy meta data
|
||||
return im, self.metadata
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
# Get the meta data for the given index. If index is None,
|
||||
# it returns the global meta data.
|
||||
if index not in [0, None]:
|
||||
raise IndexError("Lytro meta data file contains only one dataset")
|
||||
|
||||
return self.metadata
|
||||
|
||||
|
||||
class LytroF01RawFormat(LytroFormat):
|
||||
""" This is the Lytro RAW format for the original F01 Lytro camera.
|
||||
The raw format is a 12-bit image format as used by the Lytro F01
|
||||
light field camera. The format will read the specified raw file and will
|
||||
try to load a .txt or .json file with the associated meta data.
|
||||
This format does not support writing.
|
||||
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
None
|
||||
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# Check if mode and extensions are supported by the format
|
||||
if request.mode[1] in (self.modes + "?"):
|
||||
if request.extension in (".raw",):
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def rearrange_bits(array):
|
||||
# Do bit rearrangement for the 12-bit lytro raw format
|
||||
# Normalize output to 1.0 as float64
|
||||
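# Layout sketch, inferred from the slicing below: every three raw
# bytes pack two 12-bit pixels. Byte 0 holds the upper 8 bits of
# pixel 0; byte 1 is split, its high nibble supplying the low bits of
# pixel 0 and its low nibble the high bits of pixel 1; byte 2 holds
# the lower 8 bits of pixel 1.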
t0 = array[0::3]
|
||||
t1 = array[1::3]
|
||||
t2 = array[2::3]
|
||||
|
||||
a0 = np.left_shift(t0, 4) + np.right_shift(np.bitwise_and(t1, 240), 4)
|
||||
a1 = np.left_shift(np.bitwise_and(t1, 15), 8) + t2
|
||||
|
||||
image = np.zeros(LYTRO_F01_IMAGE_SIZE, dtype=np.uint16)
|
||||
image[:, 0::2] = a0.reshape(
|
||||
(LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2)
|
||||
)
|
||||
image[:, 1::2] = a1.reshape(
|
||||
(LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2)
|
||||
)
|
||||
|
||||
# Normalize data to 1.0 as 64-bit float.
|
||||
# Division is by 4095 as the Lytro F01 saves 12-bit raw data.
|
||||
return np.divide(image, 4095.0).astype(np.float64)
|
||||
|
||||
# -- reader
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self):
|
||||
self._file = self.request.get_file()
|
||||
self._data = None
|
||||
|
||||
def _close(self):
|
||||
# Close the reader.
|
||||
# Note that the request object will close self._file
|
||||
del self._data
|
||||
|
||||
def _get_length(self):
|
||||
# Return the number of images.
|
||||
return 1
|
||||
|
||||
def _get_data(self, index):
|
||||
# Return the data and meta data for the given index
|
||||
|
||||
if index not in [0, "None"]:
|
||||
raise IndexError("Lytro file contains only one dataset")
|
||||
|
||||
# Read all bytes
|
||||
if self._data is None:
|
||||
self._data = self._file.read()
|
||||
|
||||
# Read bytes from string and convert to uint16
|
||||
raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16)
|
||||
|
||||
# Rearrange bits
|
||||
img = LytroF01RawFormat.rearrange_bits(raw)
|
||||
|
||||
# Return image and meta data
|
||||
return img, self._get_meta_data(index=0)
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
# Get the meta data for the given index. If index is None, it
|
||||
# should return the global meta data.
|
||||
|
||||
if index not in [0, None]:
|
||||
raise IndexError("Lytro meta data file contains only one dataset")
|
||||
|
||||
# Try to read meta data from meta data file corresponding
|
||||
# to the raw data file, extension in [.txt, .TXT, .json, .JSON]
|
||||
filename_base = os.path.splitext(self.request.get_local_filename())[0]
|
||||
|
||||
meta_data = None
|
||||
|
||||
for ext in [".txt", ".TXT", ".json", ".JSON"]:
|
||||
if os.path.isfile(filename_base + ext):
|
||||
meta_data = json.load(open(filename_base + ext))
|
||||
|
||||
if meta_data is not None:
|
||||
return meta_data
|
||||
|
||||
else:
|
||||
logger.warning("No metadata file found for provided raw file.")
|
||||
return {}
|
||||
|
||||
|
||||
class LytroLfpFormat(LytroFormat):
|
||||
""" This is the Lytro Illum LFP format.
|
||||
The lfp is an image and meta data container format as used by the
|
||||
Lytro F01 light field camera.
|
||||
The format will read the specified lfp file.
|
||||
This format does not support writing.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
None
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# Check if mode and extensions are supported by the format
|
||||
if request.mode[1] in (self.modes + "?"):
|
||||
if request.extension in (".lfp",):
|
||||
return True
|
||||
|
||||
# -- reader
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self):
|
||||
self._file = self.request.get_file()
|
||||
self._data = None
|
||||
self._chunks = {}
|
||||
self.metadata = {}
|
||||
self._content = None
|
||||
|
||||
self._find_header()
|
||||
self._find_meta()
|
||||
self._find_chunks()
|
||||
|
||||
try:
|
||||
# Get sha1 dict and check if it is in dictionary of data chunks
|
||||
chunk_dict = self._content["picture"]["frameArray"][0]["frame"]
|
||||
if (
|
||||
chunk_dict["metadataRef"] in self._chunks
|
||||
and chunk_dict["imageRef"] in self._chunks
|
||||
and chunk_dict["privateMetadataRef"] in self._chunks
|
||||
):
|
||||
|
||||
# Read raw image data byte buffer
|
||||
data_pos, size = self._chunks[chunk_dict["imageRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
self.raw_image_data = self._file.read(size)
|
||||
|
||||
# Read meta data
|
||||
data_pos, size = self._chunks[chunk_dict["metadataRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
metadata = self._file.read(size)
|
||||
# Add metadata to meta data dict
|
||||
self.metadata["metadata"] = json.loads(metadata.decode("ASCII"))
|
||||
|
||||
# Read private metadata
|
||||
data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]]
|
||||
self._file.seek(data_pos, 0)
|
||||
serial_numbers = self._file.read(size)
|
||||
self.serial_numbers = json.loads(serial_numbers.decode("ASCII"))
|
||||
# Add private metadata to meta data dict
|
||||
self.metadata["privateMetadata"] = self.serial_numbers
|
||||
|
||||
except KeyError:
|
||||
raise RuntimeError("The specified file is not a valid LFP file.")
|
||||
|
||||
def _close(self):
|
||||
# Close the reader.
|
||||
# Note that the request object will close self._file
|
||||
del self._data
|
||||
|
||||
def _get_length(self):
|
||||
# Return the number of images. Can be np.inf
|
||||
return 1
|
||||
|
||||
def _find_header(self):
|
||||
"""
|
||||
Checks that the file has the correct header and skips it.
|
||||
"""
|
||||
file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01"
|
||||
|
||||
# Read and check header of file
|
||||
header = self._file.read(HEADER_LENGTH)
|
||||
if header != file_header:
|
||||
raise RuntimeError("The LFP file header is invalid.")
|
||||
|
||||
# Read first bytes to skip header
|
||||
self._file.read(SIZE_LENGTH)
|
||||
|
||||
def _find_chunks(self):
|
||||
"""
|
||||
Gets start position and size of data chunks in file.
|
||||
"""
|
||||
chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00"
|
||||
|
||||
for i in range(0, DATA_CHUNKS_F01):
|
||||
data_pos, size, sha1 = self._get_chunk(chunk_header)
|
||||
self._chunks[sha1] = (data_pos, size)
|
||||
|
||||
def _find_meta(self):
|
||||
"""
|
||||
Gets the data chunk that contains information about the content
|
||||
of other data chunks.
|
||||
"""
|
||||
meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00"
|
||||
|
||||
data_pos, size, sha1 = self._get_chunk(meta_header)
|
||||
|
||||
# Get content
|
||||
self._file.seek(data_pos, 0)
|
||||
data = self._file.read(size)
|
||||
self._content = json.loads(data.decode("ASCII"))
|
||||
data = self._file.read(5) # Skip 5
|
||||
|
||||
def _get_chunk(self, header):
|
||||
"""
|
||||
Checks if chunk has correct header and skips it.
|
||||
Finds start position and length of next chunk and reads
|
||||
sha1-string that identifies the following data chunk.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
header : bytes
|
||||
Byte string that identifies start of chunk.
|
||||
|
||||
Returns
|
||||
-------
|
||||
data_pos : int
|
||||
Start position of data chunk in file.
|
||||
size : int
|
||||
Size of data chunk.
|
||||
sha1 : str
|
||||
Sha1 value of chunk.
|
||||
"""
|
||||
# Read and check header of chunk
|
||||
header_chunk = self._file.read(HEADER_LENGTH)
|
||||
if header_chunk != header:
|
||||
raise RuntimeError("The LFP chunk header is invalid.")
|
||||
|
||||
data_pos = None
|
||||
sha1 = None
|
||||
|
||||
# Read size
|
||||
size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0]
|
||||
if size > 0:
|
||||
# Read sha1
|
||||
sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII"))
|
||||
# Skip fixed null chars
|
||||
self._file.read(PADDING_LENGTH)
|
||||
# Find start of data and skip data
|
||||
data_pos = self._file.tell()
|
||||
self._file.seek(size, 1)
|
||||
# Skip extra null chars
|
||||
ch = self._file.read(1)
|
||||
while ch == b"\0":
|
||||
ch = self._file.read(1)
|
||||
self._file.seek(-1, 1)
|
||||
|
||||
return data_pos, size, sha1
|
||||
|
||||
def _get_data(self, index):
|
||||
# Return the data and meta data for the given index
|
||||
if index not in [0, None]:
|
||||
raise IndexError("Lytro lfp file contains only one dataset")
|
||||
|
||||
# Read bytes from string and convert to uint16
|
||||
raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype(np.uint16)
|
||||
im = LytroF01RawFormat.rearrange_bits(raw)
|
||||
|
||||
# Return array and dummy meta data
|
||||
return im, self.metadata
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
# Get the meta data for the given index. If index is None,
|
||||
# it returns the global meta data.
|
||||
if index not in [0, None]:
|
||||
raise IndexError("Lytro meta data file contains only one dataset")
|
||||
|
||||
return self.metadata
|
||||
|
||||
|
||||
# Create the formats
|
||||
SPECIAL_CLASSES = {
|
||||
"lytro-lfr": LytroLfrFormat,
|
||||
"lytro-illum-raw": LytroIllumRawFormat,
|
||||
"lytro-lfp": LytroLfpFormat,
|
||||
"lytro-f01-raw": LytroF01RawFormat,
|
||||
}
|
||||
|
||||
# Supported Formats.
|
||||
# Only single image files supported.
|
||||
file_formats = [
|
||||
("LYTRO-LFR", "Lytro Illum lfr image file", "lfr", "i"),
|
||||
("LYTRO-ILLUM-RAW", "Lytro Illum raw image file", "raw", "i"),
|
||||
("LYTRO-LFP", "Lytro F01 lfp image file", "lfp", "i"),
|
||||
("LYTRO-F01-RAW", "Lytro F01 raw image file", "raw", "i"),
|
||||
]
|
||||
|
||||
|
||||
def _create_predefined_lytro_formats():
|
||||
for name, des, ext, i in file_formats:
|
||||
# Get format class for format
|
||||
format_class = SPECIAL_CLASSES.get(name.lower(), LytroFormat)
|
||||
if format_class:
|
||||
# Create Format and add
|
||||
format = format_class(name, des, ext, i)
|
||||
formats.add_format(format=format)
|
||||
|
||||
|
||||
# Register all created formats.
|
||||
_create_predefined_lytro_formats()
|
96
venv/Lib/site-packages/imageio/plugins/npz.py
Normal file
|
@ -0,0 +1,96 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Storage of image data in npz format. Not a great format, but at least
|
||||
it supports volumetric data. And it's less than 100 lines.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .. import formats
|
||||
from ..core import Format
|
||||
|
||||
|
||||
class NpzFormat(Format):
|
||||
""" NPZ is a file format by numpy that provides storage of array
|
||||
data using gzip compression. This imageio plugin supports data of any
|
||||
shape, and also supports multiple images per file.
|
||||
|
||||
However, the npz format does not provide streaming; all data is
|
||||
read/written at once. Further, there is no support for meta data.
|
||||
|
||||
Beware that the numpy npz format has a bug on a certain combination
|
||||
of Python 2.7 and numpy, which can cause the resulting files to
|
||||
become unreadable on Python 3. Also, this format is not available
|
||||
on Pypy.
|
||||
|
||||
See the BSDF format for a similar (but more fully featured) format.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
None
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
None
|
||||
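Example
-------
A minimal round-trip sketch with a hypothetical file name::

    import numpy as np
    import imageio

    vol = np.zeros((10, 64, 64), np.uint8)
    imageio.volwrite("data.npz", vol)
    assert imageio.volread("data.npz").shape == (10, 64, 64)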
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
# We support any kind of image data
|
||||
return request.extension in self.extensions
|
||||
|
||||
def _can_write(self, request):
|
||||
# We support any kind of image data
|
||||
return request.extension in self.extensions
|
||||
|
||||
# -- reader
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self):
|
||||
# Load npz file, which provides another file like object
|
||||
self._npz = np.load(self.request.get_file())
|
||||
assert isinstance(self._npz, np.lib.npyio.NpzFile)
|
||||
# Get list of names, ordered by name, but smarter
|
||||
sorter = lambda x: x.split("_")[-1]
|
||||
self._names = sorted(self._npz.files, key=sorter)
|
||||
|
||||
def _close(self):
|
||||
self._npz.close()
|
||||
|
||||
def _get_length(self):
|
||||
return len(self._names)
|
||||
|
||||
def _get_data(self, index):
|
||||
# Get data
|
||||
if index < 0 or index >= len(self._names):
|
||||
raise IndexError("Index out of range while reading from nzp")
|
||||
im = self._npz[self._names[index]]
|
||||
# Return array and empty meta data
|
||||
return im, {}
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
# Get the meta data for the given index
|
||||
raise RuntimeError("The npz format does not support meta data.")
|
||||
|
||||
# -- writer
|
||||
|
||||
class Writer(Format.Writer):
|
||||
def _open(self):
|
||||
# Npz is not such a great format. We cannot stream to the file.
|
||||
# So we remember all images and write them to file at the end.
|
||||
self._images = []
|
||||
|
||||
def _close(self):
|
||||
# Write everything
|
||||
np.savez_compressed(self.request.get_file(), *self._images)
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
self._images.append(im)  # discard meta data
|
||||
|
||||
def set_meta_data(self, meta):
|
||||
raise RuntimeError("The npz format does not support meta data.")
|
||||
|
||||
|
||||
# Register
|
||||
format = NpzFormat("npz", "Numpy's compressed array format", "npz", "iIvV")
|
||||
formats.add_format(format)
|
868
venv/Lib/site-packages/imageio/plugins/pillow.py
Normal file
|
@ -0,0 +1,868 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# imageio is distributed under the terms of the (new) BSD License.
|
||||
|
||||
""" Plugin that wraps the the Pillow library.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .. import formats
|
||||
from ..core import Format, image_as_uint
|
||||
|
||||
# Get info about pillow formats without having to import PIL
|
||||
from .pillow_info import pillow_formats, pillow_docs
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# todo: Pillow ImageGrab module supports grabbing the screen on Win and OSX.
|
||||
|
||||
|
||||
GENERIC_DOCS = """
|
||||
Parameters for reading
|
||||
----------------------
|
||||
|
||||
pilmode : str
|
||||
From the Pillow documentation:
|
||||
|
||||
* 'L' (8-bit pixels, grayscale)
|
||||
* 'P' (8-bit pixels, mapped to any other mode using a color palette)
|
||||
* 'RGB' (3x8-bit pixels, true color)
|
||||
* 'RGBA' (4x8-bit pixels, true color with transparency mask)
|
||||
* 'CMYK' (4x8-bit pixels, color separation)
|
||||
* 'YCbCr' (3x8-bit pixels, color video format)
|
||||
* 'I' (32-bit signed integer pixels)
|
||||
* 'F' (32-bit floating point pixels)
|
||||
|
||||
PIL also provides limited support for a few special modes, including
|
||||
'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
|
||||
(true color with premultiplied alpha).
|
||||
|
||||
When translating a color image to grayscale (mode 'L', 'I' or 'F'),
|
||||
the library uses the ITU-R 601-2 luma transform::
|
||||
|
||||
L = R * 299/1000 + G * 587/1000 + B * 114/1000
|
||||
as_gray : bool
|
||||
If True, the image is converted using mode 'F'. When `mode` is
|
||||
not None and `as_gray` is True, the image is first converted
|
||||
according to `mode`, and the result is then "flattened" using
|
||||
mode 'F'.
|
||||
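Example
-------
A minimal usage sketch; the file name is hypothetical::

    import imageio

    im = imageio.imread("photo.jpg", pilmode="RGB", as_gray=False)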
"""


class PillowFormat(Format):
    """
    Base format class for Pillow formats.
    """

    _pillow_imported = False
    _Image = None
    _modes = "i"
    _description = ""

    def __init__(self, *args, **kwargs):
        super(PillowFormat, self).__init__(*args, **kwargs)
        # Used to synchronize _init_pillow(), see #244
        self._lock = threading.RLock()

    @property
    def plugin_id(self):
        """ The PIL plugin id.
        """
        return self._plugin_id  # Set when format is created

    def _init_pillow(self):
        with self._lock:
            if not self._pillow_imported:
                self._pillow_imported = True  # more like tried to import
                import PIL

                if not hasattr(PIL, "__version__"):  # pragma: no cover
                    raise ImportError(
                        "Imageio Pillow plugin requires Pillow, not PIL!"
                    )
                from PIL import Image

                self._Image = Image
            elif self._Image is None:  # pragma: no cover
                raise RuntimeError("Imageio Pillow plugin requires Pillow lib.")
            Image = self._Image

        if self.plugin_id in ("PNG", "JPEG", "BMP", "GIF", "PPM"):
            Image.preinit()
        else:
            Image.init()
        return Image

    def _can_read(self, request):
        Image = self._init_pillow()
        if request.mode[1] in (self.modes + "?"):
            if self.plugin_id in Image.OPEN:
                factory, accept = Image.OPEN[self.plugin_id]
                if accept:
                    if request.firstbytes and accept(request.firstbytes):
                        return True

    def _can_write(self, request):
        Image = self._init_pillow()
        if request.mode[1] in (self.modes + "?"):
            if request.extension in self.extensions:
                if self.plugin_id in Image.SAVE:
                    return True

    class Reader(Format.Reader):
        def _open(self, pilmode=None, as_gray=False):
            Image = self.format._init_pillow()
            try:
                factory, accept = Image.OPEN[self.format.plugin_id]
            except KeyError:
                raise RuntimeError("Format %s cannot read images." % self.format.name)
            self._fp = self._get_file()
            self._im = factory(self._fp, "")
            if hasattr(Image, "_decompression_bomb_check"):
                Image._decompression_bomb_check(self._im.size)
            # Save the raw mode used by the palette of a BMP, because it may not
            # match the number of channels. When the data is read, imageio hands
            # the palette to PIL to handle and clears the rawmode argument.
            # However, there is a bug in PIL with handling animated GIFs that
            # have a different color palette on each frame. That issue is
            # resolved by using the raw palette data, but the rawmode
            # information is then lost, so we store the raw mode for later use.
            if self._im.palette and self._im.palette.dirty:
                self._im.palette.rawmode_saved = self._im.palette.rawmode
            pil_try_read(self._im)
            # Store args
            self._kwargs = dict(
                as_gray=as_gray, is_gray=_palette_is_grayscale(self._im)
            )
            # setting mode=None is not the same as just not providing it
            if pilmode is not None:
                self._kwargs["mode"] = pilmode
            # Set length
            self._length = 1
            if hasattr(self._im, "n_frames"):
                self._length = self._im.n_frames

        def _get_file(self):
            self._we_own_fp = False
            return self.request.get_file()

        def _close(self):
            save_pillow_close(self._im)
            if self._we_own_fp:
                self._fp.close()
            # else: request object handles closing the _fp

        def _get_length(self):
            return self._length

        def _seek(self, index):
            try:
                self._im.seek(index)
            except EOFError:
                raise IndexError("Could not seek to index %i" % index)

        def _get_data(self, index):
            if index >= self._length:
                raise IndexError("Image index %i > %i" % (index, self._length))
            i = self._im.tell()
            if i > index:
                self._seek(index)  # just try
            else:
                while i < index:  # some formats need to be read in sequence
                    i += 1
                    self._seek(i)
            if self._im.palette and self._im.palette.dirty:
                self._im.palette.rawmode_saved = self._im.palette.rawmode
            self._im.getdata()[0]
            im = pil_get_frame(self._im, **self._kwargs)
            return im, self._im.info

        def _get_meta_data(self, index):
            if not (index is None or index == 0):
                raise IndexError()
            return self._im.info

    class Writer(Format.Writer):
        def _open(self):
            Image = self.format._init_pillow()
            try:
                self._save_func = Image.SAVE[self.format.plugin_id]
            except KeyError:
                raise RuntimeError("Format %s cannot write images." % self.format.name)
            self._fp = self.request.get_file()
            self._meta = {}
            self._written = False

        def _close(self):
            pass  # request object handles closing _fp

        def _append_data(self, im, meta):
            if self._written:
                raise RuntimeError(
                    "Format %s only supports single images." % self.format.name
                )
            # Pop unit dimension for grayscale images
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            self._written = True
            self._meta.update(meta)
            img = ndarray_to_pil(
                im, self.format.plugin_id, self._meta.pop("prefer_uint8", True)
            )
            if "bits" in self._meta:
                img = img.quantize()  # Make it a P image, so bits arg is used
            img.save(self._fp, format=self.format.plugin_id, **self._meta)
            save_pillow_close(img)

        def set_meta_data(self, meta):
            self._meta.update(meta)
class PNGFormat(PillowFormat):
    """A PNG format based on Pillow.

    This format supports grayscale, RGB and RGBA images.

    Parameters for reading
    ----------------------
    ignoregamma : bool
        Avoid gamma correction. Default True.
    pilmode : str
        From the Pillow documentation:

        * 'L' (8-bit pixels, grayscale)
        * 'P' (8-bit pixels, mapped to any other mode using a color palette)
        * 'RGB' (3x8-bit pixels, true color)
        * 'RGBA' (4x8-bit pixels, true color with transparency mask)
        * 'CMYK' (4x8-bit pixels, color separation)
        * 'YCbCr' (3x8-bit pixels, color video format)
        * 'I' (32-bit signed integer pixels)
        * 'F' (32-bit floating point pixels)

        PIL also provides limited support for a few special modes, including
        'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
        (true color with premultiplied alpha).

        When translating a color image to grayscale (mode 'L', 'I' or 'F'),
        the library uses the ITU-R 601-2 luma transform::

            L = R * 299/1000 + G * 587/1000 + B * 114/1000
    as_gray : bool
        If True, the image is converted using mode 'F'. When `mode` is
        not None and `as_gray` is True, the image is first converted
        according to `mode`, and the result is then "flattened" using
        mode 'F'.

    Parameters for saving
    ---------------------
    optimize : bool
        If present and true, instructs the PNG writer to make the output file
        as small as possible. This includes extra processing in order to find
        optimal encoder settings.
    transparency:
        This option controls which color in the image to mark as transparent.
    dpi: tuple of two scalars
        The desired dpi in each direction.
    pnginfo: PIL.PngImagePlugin.PngInfo
        Object containing text tags.
    compress_level: int
        ZLIB compression level, a number between 0 and 9: 1 gives best speed,
        9 gives best compression, 0 gives no compression at all. Default is 9.
        When the ``optimize`` option is True, ``compress_level`` has no effect
        (it is set to 9 regardless of the value passed).
    compression: int
        Compatibility with the freeimage PNG format. If given, it overrides
        compress_level.
    icc_profile:
        The ICC Profile to include in the saved file.
    bits (experimental): int
        This option controls how many bits to store. If omitted,
        the PNG writer uses 8 bits (256 colors).
    quantize:
        Compatibility with the freeimage PNG format. If given, it overrides
        bits. In this case, given as a number between 1-256.
    dictionary (experimental): dict
        Set the ZLIB encoder dictionary.
    prefer_uint8: bool
        Let the PNG writer truncate uint16 image arrays to uint8 if their values fall
        within the range [0, 255]. Defaults to true for legacy compatibility, however
        it is recommended to set this to false to avoid unexpected behavior when
        saving e.g. weakly saturated images.
    """

    class Reader(PillowFormat.Reader):
        def _open(self, pilmode=None, as_gray=False, ignoregamma=True):
            return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

        def _get_data(self, index):
            im, info = PillowFormat.Reader._get_data(self, index)
            if not self.request.kwargs.get("ignoregamma", True):
                # The gamma value in the file represents the gamma factor for the
                # hardware on the system where the file was created, and is meant
                # to be able to match the colors with the system on which the
                # image is shown. See also issue #366
                try:
                    gamma = float(info["gamma"])
                except (KeyError, ValueError):
                    pass
                else:
                    scale = float(65536 if im.dtype == np.uint16 else 255)
                    gain = 1.0
                    im[:] = ((im / scale) ** gamma) * scale * gain + 0.4999
            return im, info

    # --

    class Writer(PillowFormat.Writer):
        def _open(self, compression=None, quantize=None, interlaced=False, **kwargs):

            # Better default for compression
            kwargs["compress_level"] = kwargs.get("compress_level", 9)

            if compression is not None:
                if compression < 0 or compression > 9:
                    raise ValueError("Invalid PNG compression level: %r" % compression)
                kwargs["compress_level"] = compression
            if quantize is not None:
                for bits in range(1, 9):
                    if 2 ** bits == quantize:
                        break
                else:
                    raise ValueError(
                        "PNG quantize must be a power of two, not %r" % quantize
                    )
                kwargs["bits"] = bits
            if interlaced:
                logger.warning("PIL PNG writer cannot produce interlaced images.")

            ok_keys = (
                "optimize",
                "transparency",
                "dpi",
                "pnginfo",
                "bits",
                "compress_level",
                "icc_profile",
                "dictionary",
                "prefer_uint8",
            )
            for key in kwargs:
                if key not in ok_keys:
                    raise TypeError("Invalid arg for PNG writer: %r" % key)

            PillowFormat.Writer._open(self)
            self._meta.update(kwargs)

        def _append_data(self, im, meta):
            if str(im.dtype) == "uint16" and (im.ndim == 2 or im.shape[-1] == 1):
                im = image_as_uint(im, bitdepth=16)
            else:
                im = image_as_uint(im, bitdepth=8)
            PillowFormat.Writer._append_data(self, im, meta)
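
# A minimal saving sketch for the PNG parameters documented above
# (array contents and file name are illustrative):
#
#   import numpy as np
#   import imageio
#
#   im = np.random.randint(0, 2 ** 16, (64, 64)).astype(np.uint16)
#   imageio.imwrite("out.png", im, compress_level=6, prefer_uint8=False)
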


class JPEGFormat(PillowFormat):
    """A JPEG format based on Pillow.

    This format supports grayscale, RGB and RGBA images.

    Parameters for reading
    ----------------------
    exifrotate : bool
        Automatically rotate the image according to exif flag. Default True.
    pilmode : str
        From the Pillow documentation:

        * 'L' (8-bit pixels, grayscale)
        * 'P' (8-bit pixels, mapped to any other mode using a color palette)
        * 'RGB' (3x8-bit pixels, true color)
        * 'RGBA' (4x8-bit pixels, true color with transparency mask)
        * 'CMYK' (4x8-bit pixels, color separation)
        * 'YCbCr' (3x8-bit pixels, color video format)
        * 'I' (32-bit signed integer pixels)
        * 'F' (32-bit floating point pixels)

        PIL also provides limited support for a few special modes, including
        'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
        (true color with premultiplied alpha).

        When translating a color image to grayscale (mode 'L', 'I' or 'F'),
        the library uses the ITU-R 601-2 luma transform::

            L = R * 299/1000 + G * 587/1000 + B * 114/1000
    as_gray : bool
        If True, the image is converted using mode 'F'. When `mode` is
        not None and `as_gray` is True, the image is first converted
        according to `mode`, and the result is then "flattened" using
        mode 'F'.

    Parameters for saving
    ---------------------
    quality : scalar
        The compression factor of the saved image (1..100), higher
        numbers result in higher quality but larger file size. Default 75.
    progressive : bool
        Save as a progressive JPEG file (e.g. for images on the web).
        Default False.
    optimize : bool
        On saving, compute optimal Huffman coding tables (can reduce a few
        percent of file size). Default False.
    dpi : tuple of int
        The pixel density, ``(x,y)``.
    icc_profile : object
        If present and true, the image is stored with the provided ICC profile.
        If this parameter is not provided, the image will be saved with no
        profile attached.
    exif : dict
        If present, the image will be stored with the provided raw EXIF data.
    subsampling : str
        Sets the subsampling for the encoder. See Pillow docs for details.
    qtables : object
        Set the qtables for the encoder. See Pillow docs for details.
    """

    class Reader(PillowFormat.Reader):
        def _open(self, pilmode=None, as_gray=False, exifrotate=True):
            return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

        def _get_file(self):
            # Pillow uses seek for JPG, so we cannot directly stream from web
            if self.request.filename.startswith(
                ("http://", "https://")
            ) or ".zip/" in self.request.filename.replace("\\", "/"):
                self._we_own_fp = True
                return open(self.request.get_local_filename(), "rb")
            else:
                self._we_own_fp = False
                return self.request.get_file()

        def _get_data(self, index):
            im, info = PillowFormat.Reader._get_data(self, index)

            # Handle exif
            if "exif" in info:
                from PIL.ExifTags import TAGS

                info["EXIF_MAIN"] = {}
                for tag, value in self._im._getexif().items():
                    decoded = TAGS.get(tag, tag)
                    info["EXIF_MAIN"][decoded] = value

            im = self._rotate(im, info)
            return im, info

        def _rotate(self, im, meta):
            """ Use Orientation information from EXIF meta data to
            orient the image correctly. Similar code as in FreeImage plugin.
            """
            if self.request.kwargs.get("exifrotate", True):
                try:
                    ori = meta["EXIF_MAIN"]["Orientation"]
                except KeyError:  # pragma: no cover
                    pass  # Orientation not available
                else:  # pragma: no cover - we cannot touch all cases
                    # www.impulseadventure.com/photo/exif-orientation.html
                    if ori in [1, 2]:
                        pass
                    if ori in [3, 4]:
                        im = np.rot90(im, 2)
                    if ori in [5, 6]:
                        im = np.rot90(im, 3)
                    if ori in [7, 8]:
                        im = np.rot90(im)
                    if ori in [2, 4, 5, 7]:  # Flipped cases (rare)
                        im = np.fliplr(im)
            return im

    # --

    class Writer(PillowFormat.Writer):
        def _open(self, quality=75, progressive=False, optimize=False, **kwargs):

            # Check quality - in Pillow it should be no higher than 95
            quality = int(quality)
            if quality < 1 or quality > 100:
                raise ValueError("JPEG quality should be between 1 and 100.")
            quality = min(95, max(1, quality))

            kwargs["quality"] = quality
            kwargs["progressive"] = bool(progressive)
            kwargs["optimize"] = bool(optimize)

            PillowFormat.Writer._open(self)
            self._meta.update(kwargs)

        def _append_data(self, im, meta):
            if im.ndim == 3 and im.shape[-1] == 4:
                raise IOError("JPEG does not support alpha channel.")
            im = image_as_uint(im, bitdepth=8)
            PillowFormat.Writer._append_data(self, im, meta)
            return
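
# A minimal saving sketch for the JPEG parameters documented above
# (values and file names are illustrative):
#
#   import imageio
#
#   im = imageio.imread("photo.jpg", exifrotate=True)
#   imageio.imwrite("small.jpg", im, quality=60, optimize=True, progressive=True)
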


class JPEG2000Format(PillowFormat):
    """A JPEG 2000 format based on Pillow.

    This format supports grayscale and RGB images.

    Parameters for reading
    ----------------------
    pilmode : str
        From the Pillow documentation:

        * 'L' (8-bit pixels, grayscale)
        * 'P' (8-bit pixels, mapped to any other mode using a color palette)
        * 'RGB' (3x8-bit pixels, true color)
        * 'RGBA' (4x8-bit pixels, true color with transparency mask)
        * 'CMYK' (4x8-bit pixels, color separation)
        * 'YCbCr' (3x8-bit pixels, color video format)
        * 'I' (32-bit signed integer pixels)
        * 'F' (32-bit floating point pixels)

        PIL also provides limited support for a few special modes, including
        'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
        (true color with premultiplied alpha).

        When translating a color image to grayscale (mode 'L', 'I' or 'F'),
        the library uses the ITU-R 601-2 luma transform::

            L = R * 299/1000 + G * 587/1000 + B * 114/1000
    as_gray : bool
        If True, the image is converted using mode 'F'. When `mode` is
        not None and `as_gray` is True, the image is first converted
        according to `mode`, and the result is then "flattened" using
        mode 'F'.

    Parameters for saving
    ---------------------
    **quality_mode**
        Either `"rates"` or `"dB"` depending on the units you want to use to
        specify image quality.

    **quality**
        Approximate size reduction (if quality mode is `rates`) or a
        signal-to-noise ratio in decibels (if quality mode is `dB`).

    .. note::

        To enable JPEG 2000 support, you need to build and install the OpenJPEG
        library, version 2.0.0 or higher, before building the Python Imaging
        Library.

        Windows users can install the OpenJPEG binaries available on the
        OpenJPEG website, but must add them to their PATH in order to use PIL (if
        you fail to do this, you will get errors about not being able to load the
        ``_imaging`` DLL).

    """

    class Reader(PillowFormat.Reader):
        def _open(self, pilmode=None, as_gray=False):
            return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray)

        def _get_file(self):
            # Pillow uses seek for JPG, so we cannot directly stream from web
            if self.request.filename.startswith(
                ("http://", "https://")
            ) or ".zip/" in self.request.filename.replace("\\", "/"):
                self._we_own_fp = True
                return open(self.request.get_local_filename(), "rb")
            else:
                self._we_own_fp = False
                return self.request.get_file()

        def _get_data(self, index):
            im, info = PillowFormat.Reader._get_data(self, index)

            # Handle exif
            if "exif" in info:
                from PIL.ExifTags import TAGS

                info["EXIF_MAIN"] = {}
                for tag, value in self._im._getexif().items():
                    decoded = TAGS.get(tag, tag)
                    info["EXIF_MAIN"][decoded] = value

            im = self._rotate(im, info)
            return im, info

        def _rotate(self, im, meta):
            """ Use Orientation information from EXIF meta data to
            orient the image correctly. Similar code as in FreeImage plugin.
            """
            if self.request.kwargs.get("exifrotate", True):
                try:
                    ori = meta["EXIF_MAIN"]["Orientation"]
                except KeyError:  # pragma: no cover
                    pass  # Orientation not available
                else:  # pragma: no cover - we cannot touch all cases
                    # www.impulseadventure.com/photo/exif-orientation.html
                    if ori in [1, 2]:
                        pass
                    if ori in [3, 4]:
                        im = np.rot90(im, 2)
                    if ori in [5, 6]:
                        im = np.rot90(im, 3)
                    if ori in [7, 8]:
                        im = np.rot90(im)
                    if ori in [2, 4, 5, 7]:  # Flipped cases (rare)
                        im = np.fliplr(im)
            return im

    # --

    class Writer(PillowFormat.Writer):
        def _open(self, quality_mode="rates", quality=5, **kwargs):

            # Check quality mode and value
            if quality_mode not in {"rates", "dB"}:
                raise ValueError("Quality mode should be either 'rates' or 'dB'")

            quality = float(quality)

            if quality_mode == "rates" and (quality < 1 or quality > 1000):
                raise ValueError(
                    "The quality value {} seems to be an invalid rate!".format(quality)
                )
            elif quality_mode == "dB" and (quality < 15 or quality > 100):
                raise ValueError(
                    "The quality value {} seems to be an invalid PSNR!".format(quality)
                )

            kwargs["quality_mode"] = quality_mode
            kwargs["quality_layers"] = [quality]

            PillowFormat.Writer._open(self)
            self._meta.update(kwargs)

        def _append_data(self, im, meta):
            if im.ndim == 3 and im.shape[-1] == 4:
                raise IOError(
                    "The current implementation of JPEG 2000 does not support alpha channel."
                )
            im = image_as_uint(im, bitdepth=8)
            PillowFormat.Writer._append_data(self, im, meta)
            return
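
# A minimal saving sketch for the JPEG 2000 writer above (a sketch that
# assumes a Pillow build with OpenJPEG and that the ".jp2" extension is
# routed to this format; values are illustrative):
#
#   import numpy as np
#   import imageio
#
#   im = np.zeros((64, 64, 3), np.uint8)
#   imageio.imwrite("out.jp2", im, quality_mode="dB", quality=40)
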


def save_pillow_close(im):
    # see issue #216 and #300
    if hasattr(im, "close"):
        if hasattr(getattr(im, "fp", None), "close"):
            im.close()


## Func from skimage

# This cell contains code from scikit-image, in particular from
# http://github.com/scikit-image/scikit-image/blob/master/
# skimage/io/_plugins/pil_plugin.py
# The scikit-image license applies.


def pil_try_read(im):
    try:
        # this will raise an IOError if the file is not readable
        im.getdata()[0]
    except IOError as e:
        site = "http://pillow.readthedocs.io/en/latest/installation.html"
        site += "#external-libraries"
        pillow_error_message = str(e)
        error_message = (
            'Could not load "%s" \n'
            'Reason: "%s"\n'
            "Please see documentation at: %s"
            % (im.filename, pillow_error_message, site)
        )
        raise ValueError(error_message)


def _palette_is_grayscale(pil_image):
    if pil_image.mode != "P":
        return False
    elif pil_image.info.get("transparency", None):  # see issue #475
        return False
    # get palette as an array with R, G, B columns
    palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
    # Not all palette colors are used; unused colors have junk values.
    start, stop = pil_image.getextrema()
    valid_palette = palette[start : stop + 1]
    # Image is grayscale if channel differences (R - G and G - B)
    # are all zero.
    return np.allclose(np.diff(valid_palette), 0)


def pil_get_frame(im, is_gray=None, as_gray=None, mode=None, dtype=None):
    """
    is_gray: Whether the image *is* gray (by inspecting its palette).
    as_gray: Whether the resulting image must be converted to gray.
    mode: The mode to convert to.
    """

    if is_gray is None:
        is_gray = _palette_is_grayscale(im)

    frame = im

    # Convert ...
    if mode is not None:
        # Mode is explicitly given ...
        if mode != im.mode:
            frame = im.convert(mode)
    elif as_gray:
        pass  # don't do any auto-conversions (but do the explicit one above)
    elif im.mode == "P" and is_gray:
        # Paletted images that are already gray by their palette
        # are converted so that the resulting numpy array is 2D.
        frame = im.convert("L")
    elif im.mode == "P":
        # Paletted images are converted to RGB/RGBA. We jump through some
        # hoops to make this work well.
        if im.info.get("transparency", None) is not None:
            # Let Pillow apply the transparency, see issue #210 and #246
            frame = im.convert("RGBA")
        elif im.palette.mode in ("RGB", "RGBA"):
            # We can do this ourselves. Pillow seems to sometimes screw
            # this up if a multi-gif has a palette for each frame ...
            # Create palette array
            p = np.frombuffer(im.palette.getdata()[1], np.uint8)
            # Restore the raw mode that was saved to be used to parse the palette
            if hasattr(im.palette, "rawmode_saved"):
                im.palette.rawmode = im.palette.rawmode_saved
            mode = im.palette.rawmode if im.palette.rawmode else im.palette.mode
            nchannels = len(mode)
            # Shape it.
            p.shape = -1, nchannels
            if p.shape[1] == 3 or (p.shape[1] == 4 and mode[-1] == "X"):
                p = np.column_stack((p[:, :3], 255 * np.ones(p.shape[0], p.dtype)))
            # Swap the axes if the mode is in BGR and not RGB
            if mode.startswith("BGR"):
                p = p[:, [2, 1, 0]] if p.shape[1] == 3 else p[:, [2, 1, 0, 3]]
            # Apply palette
            frame_paletted = np.array(im, np.uint8)
            try:
                frame = p[frame_paletted]
            except Exception:
                # Ok, let PIL do it. The introduction of the branch that
                # tests `im.info['transparency']` should make this happen
                # much less often, but let's keep it, to be safe.
                frame = im.convert("RGBA")
        else:
            # Let Pillow do it. Unlike skimage, we always convert
            # to RGBA; palettes can be RGBA.
            if True:  # im.format == 'PNG' and 'transparency' in im.info:
                frame = im.convert("RGBA")
            else:
                frame = im.convert("RGB")
    elif "A" in im.mode:
        frame = im.convert("RGBA")
    elif im.mode == "CMYK":
        frame = im.convert("RGB")

    # Apply a post-convert if necessary
    if as_gray:
        frame = frame.convert("F")  # Scipy compat
    elif not isinstance(frame, np.ndarray) and frame.mode == "1":
        # Workaround for crash in PIL. When im is 1-bit, the call array(im)
        # can cause a segfault, or generate garbage. See
        # https://github.com/scipy/scipy/issues/2138 and
        # https://github.com/python-pillow/Pillow/issues/350.
        #
        # This converts im from a 1-bit image to an 8-bit image.
        frame = frame.convert("L")

    # Convert to numpy array
    if im.mode.startswith("I;16"):
        # e.g. int16 PNGs
        shape = im.size
        dtype = ">u2" if im.mode.endswith("B") else "<u2"
        if "S" in im.mode:
            dtype = dtype.replace("u", "i")
        frame = np.frombuffer(frame.tobytes(), dtype).copy()
        frame.shape = shape[::-1]
    else:
        # Use uint16 for PNGs in mode I
        if im.format == "PNG" and im.mode == "I" and dtype is None:
            dtype = "uint16"
        frame = np.array(frame, dtype=dtype)

    return frame


def ndarray_to_pil(arr, format_str=None, prefer_uint8=True):

    from PIL import Image

    if arr.ndim == 3:
        arr = image_as_uint(arr, bitdepth=8)
        mode = {3: "RGB", 4: "RGBA"}[arr.shape[2]]

    elif format_str in ["png", "PNG"]:
        mode = "I;16"
        mode_base = "I"

        if arr.dtype.kind == "f":
            arr = image_as_uint(arr)

        elif prefer_uint8 and arr.max() < 256 and arr.min() >= 0:
            arr = arr.astype(np.uint8)
            mode = mode_base = "L"

        else:
            arr = image_as_uint(arr, bitdepth=16)

    else:
        arr = image_as_uint(arr, bitdepth=8)
        mode = "L"
        mode_base = "L"

    if mode == "I;16" and int(getattr(Image, "__version__", "0").split(".")[0]) < 6:
        # Pillow < v6.0.0 has limited support for the "I;16" mode,
        # requiring us to fall back to this expensive workaround.
        # tobytes actually creates a copy of the image, which is costly.
        array_buffer = arr.tobytes()
        if arr.ndim == 2:
            im = Image.new(mode_base, arr.T.shape)
            im.frombytes(array_buffer, "raw", mode)
        else:
            image_shape = (arr.shape[1], arr.shape[0])
            im = Image.frombytes(mode, image_shape, array_buffer)
        return im
    else:
        return Image.fromarray(arr, mode)


## End of code from scikit-image


from .pillowmulti import GIFFormat, TIFFFormat

IGNORE_FORMATS = "MPEG"

SPECIAL_FORMATS = dict(
    PNG=PNGFormat,
    JPEG=JPEGFormat,
    GIF=GIFFormat,
    TIFF=TIFFFormat,
    JPEG2000=JPEG2000Format,
)


def register_pillow_formats():

    for id, summary, ext in pillow_formats:
        if id in IGNORE_FORMATS:
            continue
        FormatCls = SPECIAL_FORMATS.get(id, PillowFormat)
        summary = FormatCls._description or summary
        format = FormatCls(id + "-PIL", summary, ext, FormatCls._modes)
        format._plugin_id = id
        if FormatCls is PillowFormat or not FormatCls.__doc__:
            format.__doc__ = pillow_docs[id] + GENERIC_DOCS
        formats.add_format(format)


register_pillow_formats()
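
# The loop above registers every Pillow format under its PIL plugin id
# plus a "-PIL" suffix, so a specific plugin can be requested explicitly
# (a sketch; the available ids depend on the Pillow build):
#
#   import imageio
#
#   fmt = imageio.formats["PNG-PIL"]                   # look up by name
#   im = imageio.imread("photo.png", format="PNG-PIL")
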
1045
venv/Lib/site-packages/imageio/plugins/pillow_info.py
Normal file

File diff suppressed because it is too large

364
venv/Lib/site-packages/imageio/plugins/pillowmulti.py
Normal file

@@ -0,0 +1,364 @@
"""
|
||||
PIL formats for multiple images.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .pillow import PillowFormat, ndarray_to_pil, image_as_uint
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
NeuQuant = None # we can implement this when we need it
|
||||
|
||||
|
||||
class TIFFFormat(PillowFormat):
|
||||
_modes = "i" # arg, why bother; people should use the tiffile version
|
||||
_description = "TIFF format (Pillow)"
|
||||
|
||||
|
||||
class GIFFormat(PillowFormat):
|
||||
""" A format for reading and writing static and animated GIF, based
|
||||
on Pillow.
|
||||
|
||||
Images read with this format are always RGBA. Currently,
|
||||
the alpha channel is ignored when saving RGB images with this
|
||||
format.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
None
|
||||
|
||||
Parameters for saving
|
||||
---------------------
|
||||
loop : int
|
||||
The number of iterations. Default 0 (meaning loop indefinitely).
|
||||
duration : {float, list}
|
||||
The duration (in seconds) of each frame. Either specify one value
|
||||
that is used for all frames, or one value for each frame.
|
||||
Note that in the GIF format the duration/delay is expressed in
|
||||
hundredths of a second, which limits the precision of the duration.
|
||||
fps : float
|
||||
The number of frames per second. If duration is not given, the
|
||||
duration for each frame is set to 1/fps. Default 10.
|
||||
palettesize : int
|
||||
The number of colors to quantize the image to. Is rounded to
|
||||
the nearest power of two. Default 256.
|
||||
subrectangles : bool
|
||||
If True, will try and optimize the GIF by storing only the
|
||||
rectangular parts of each frame that change with respect to the
|
||||
previous. Default False.
|
||||
"""
|
||||
|
||||
_modes = "iI"
|
||||
_description = "Static and animated gif (Pillow)"
|
||||
|
||||
class Reader(PillowFormat.Reader):
|
||||
def _open(self, playback=None): # compat with FI format
|
||||
return PillowFormat.Reader._open(self)
|
||||
|
||||
class Writer(PillowFormat.Writer):
|
||||
def _open(
|
||||
self,
|
||||
loop=0,
|
||||
duration=None,
|
||||
fps=10,
|
||||
palettesize=256,
|
||||
quantizer=0,
|
||||
subrectangles=False,
|
||||
):
|
||||
|
||||
# Check palettesize
|
||||
palettesize = int(palettesize)
|
||||
if palettesize < 2 or palettesize > 256:
|
||||
raise ValueError("GIF quantize param must be 2..256")
|
||||
if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]:
|
||||
palettesize = 2 ** int(np.log2(128) + 0.999)
|
||||
logger.warning(
|
||||
"Warning: palettesize (%r) modified to a factor of "
|
||||
"two between 2-256." % palettesize
|
||||
)
|
||||
# Duratrion / fps
|
||||
if duration is None:
|
||||
self._duration = 1.0 / float(fps)
|
||||
elif isinstance(duration, (list, tuple)):
|
||||
self._duration = [float(d) for d in duration]
|
||||
else:
|
||||
self._duration = float(duration)
|
||||
# loop
|
||||
loop = float(loop)
|
||||
if loop <= 0 or loop == float("inf"):
|
||||
loop = 0
|
||||
loop = int(loop)
|
||||
# Subrectangles / dispose
|
||||
subrectangles = bool(subrectangles)
|
||||
self._dispose = 1 if subrectangles else 2
|
||||
# The "0" (median cut) quantizer is by far the best
|
||||
|
||||
fp = self.request.get_file()
|
||||
self._writer = GifWriter(
|
||||
fp, subrectangles, loop, quantizer, int(palettesize)
|
||||
)
|
||||
|
||||
def _close(self):
|
||||
self._writer.close()
|
||||
|
||||
def _append_data(self, im, meta):
|
||||
im = image_as_uint(im, bitdepth=8)
|
||||
if im.ndim == 3 and im.shape[-1] == 1:
|
||||
im = im[:, :, 0]
|
||||
duration = self._duration
|
||||
if isinstance(duration, list):
|
||||
duration = duration[min(len(duration) - 1, self._writer._count)]
|
||||
dispose = self._dispose
|
||||
self._writer.add_image(im, duration, dispose)
|
||||
|
||||
return
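
# A minimal animated-GIF sketch for the saving parameters documented
# above (frame data and file name are illustrative):
#
#   import numpy as np
#   import imageio
#
#   frames = [np.full((32, 32), 8 * i, np.uint8) for i in range(16)]
#   imageio.mimwrite("anim.gif", frames, fps=5, subrectangles=True)
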


intToBin = lambda i: i.to_bytes(2, byteorder="little")


class GifWriter:
    """ Helper class for writing the animated GIF file. This is based on
    code from images2gif.py (part of visvis). The version here is modified
    to allow streamed writing.
    """

    def __init__(
        self,
        file,
        opt_subrectangle=True,
        opt_loop=0,
        opt_quantizer=0,
        opt_palette_size=256,
    ):
        self.fp = file

        self.opt_subrectangle = opt_subrectangle
        self.opt_loop = opt_loop
        self.opt_quantizer = opt_quantizer
        self.opt_palette_size = opt_palette_size

        self._previous_image = None  # as np array
        self._global_palette = None  # as bytes
        self._count = 0

        from PIL.GifImagePlugin import getdata

        self.getdata = getdata

    def add_image(self, im, duration, dispose):

        # Prepare image
        im_rect, rect = im, (0, 0)
        if self.opt_subrectangle:
            im_rect, rect = self.getSubRectangle(im)
        im_pil = self.convertToPIL(im_rect, self.opt_quantizer, self.opt_palette_size)

        # Get palette - apparently, this is the 3rd element of the header
        # (but it has not always been). Best we've got. It's not the same
        # as im_pil.palette.tobytes().
        from PIL.GifImagePlugin import getheader

        palette = getheader(im_pil)[0][3]

        # Write image
        if self._count == 0:
            self.write_header(im_pil, palette, self.opt_loop)
            self._global_palette = palette
        self.write_image(im_pil, palette, rect, duration, dispose)
        # assert len(palette) == len(self._global_palette)

        # Bookkeeping
        self._previous_image = im
        self._count += 1

    def write_header(self, im, globalPalette, loop):
        # Gather info
        header = self.getheaderAnim(im)
        appext = self.getAppExt(loop)
        # Write
        self.fp.write(header)
        self.fp.write(globalPalette)
        self.fp.write(appext)

    def close(self):
        self.fp.write(";".encode("utf-8"))  # end gif

    def write_image(self, im, palette, rect, duration, dispose):

        fp = self.fp

        # Gather local image header and data, using PIL's getdata. That
        # function returns a list of bytes objects, but which parts are
        # what has changed multiple times, so we put together the first
        # parts until we have enough to form the image header.
        data = self.getdata(im)
        imdes = b""
        while data and len(imdes) < 11:
            imdes += data.pop(0)
        assert len(imdes) == 11

        # Make image descriptor suitable for using 256 local color palette
        lid = self.getImageDescriptor(im, rect)
        graphext = self.getGraphicsControlExt(duration, dispose)

        # Write local header
        if (palette != self._global_palette) or (dispose != 2):
            # Use local color palette
            fp.write(graphext)
            fp.write(lid)  # write suitable image descriptor
            fp.write(palette)  # write local color table
            fp.write(b"\x08")  # LZW minimum size code
        else:
            # Use global color palette
            fp.write(graphext)
            fp.write(imdes)  # write suitable image descriptor

        # Write image data
        for d in data:
            fp.write(d)

    def getheaderAnim(self, im):
        """ Get animation header. To replace PIL's getheader()[0].
        """
        bb = b"GIF89a"
        bb += intToBin(im.size[0])
        bb += intToBin(im.size[1])
        bb += b"\x87\x00\x00"
        return bb

    def getImageDescriptor(self, im, xy=None):
        """ Used for the local color table properties per image.
        Otherwise the global color table applies to all frames, irrespective
        of whether additional colors come into play that would require a
        redefined palette. Still a maximum of 256 colors per frame, obviously.

        Written by Ant1 on 2010-08-22.
        Modified by Alex Robinson in January 2011 to implement subrectangles.
        """

        # Default: use full image and place at upper left
        if xy is None:
            xy = (0, 0)

        # Image separator,
        bb = b"\x2C"

        # Image position and size
        bb += intToBin(xy[0])  # Left position
        bb += intToBin(xy[1])  # Top position
        bb += intToBin(im.size[0])  # image width
        bb += intToBin(im.size[1])  # image height

        # packed field: local color table flag1, interlace0, sorted table0,
        # reserved00, lct size111=7=2^(7 + 1)=256.
        bb += b"\x87"

        # LZW minimum size code now comes later, beginning of [imagedata] blocks
        return bb

    def getAppExt(self, loop):
        """ Application extension. This part specifies the number of loops.
        If loop is 0 or inf, the gif loops indefinitely.
        """
        if loop == 1:
            return b""
        if loop == 0:
            loop = 2 ** 16 - 1
        bb = b""
        if loop != 0:  # omit the extension if we would like a nonlooping gif
            bb = b"\x21\xFF\x0B"  # application extension
            bb += b"NETSCAPE2.0"
            bb += b"\x03\x01"
            bb += intToBin(loop)
            bb += b"\x00"  # end
        return bb

    def getGraphicsControlExt(self, duration=0.1, dispose=2):
        """ Graphics Control Extension. A sort of header at the start of
        each image. Specifies duration and transparency.

        Dispose
        -------
        * 0 - No disposal specified.
        * 1 - Do not dispose. The graphic is to be left in place.
        * 2 - Restore to background color. The area used by the graphic
          must be restored to the background color.
        * 3 - Restore to previous. The decoder is required to restore the
          area overwritten by the graphic with what was there prior to
          rendering the graphic.
        * 4-7 - To be defined.
        """

        bb = b"\x21\xF9\x04"
        bb += chr((dispose & 3) << 2).encode("utf-8")
        # low bit 1 == transparency,
        # 2nd bit 1 == user input, next 3 bits, the low two of which are used,
        # are dispose.
        bb += intToBin(int(duration * 100 + 0.5))  # in 100th of seconds
        bb += b"\x00"  # no transparent color
        bb += b"\x00"  # end
        return bb

    def getSubRectangle(self, im):
        """ Calculate the minimal rectangle that needs updating. Returns
        a two-element tuple containing the cropped image and an x-y tuple.

        Calculating the subrectangles takes extra time, obviously. However,
        if the image sizes are reduced, the actual writing of the GIF
        goes faster; in some cases applying this method makes writing the
        GIF faster overall.
        """

        # Cannot do subrectangle for first image
        if self._count == 0:
            return im, (0, 0)

        prev = self._previous_image

        # Get difference, sum over colors
        diff = np.abs(im - prev)
        if diff.ndim == 3:
            diff = diff.sum(2)
        # Get begin and end for both dimensions
        X = np.argwhere(diff.sum(0))
        Y = np.argwhere(diff.sum(1))
        # Get rect coordinates
        if X.size and Y.size:
            x0, x1 = int(X[0]), int(X[-1] + 1)
            y0, y1 = int(Y[0]), int(Y[-1] + 1)
        else:  # No change ... make it minimal
            x0, x1 = 0, 2
            y0, y1 = 0, 2

        return im[y0:y1, x0:x1], (x0, y0)

    def convertToPIL(self, im, quantizer, palette_size=256):
        """Convert image to paletted PIL image.

        PIL used to not do a very good job at quantization, but I guess
        this has improved a lot (at least in Pillow). I don't think we need
        NeuQuant (and we can add it later if we really want).
        """

        im_pil = ndarray_to_pil(im, "gif")

        if quantizer in ("nq", "neuquant"):
            # NeuQuant algorithm
            nq_samplefac = 10  # 10 seems good in general
            im_pil = im_pil.convert("RGBA")  # NQ assumes RGBA
            nqInstance = NeuQuant(im_pil, nq_samplefac)  # Learn colors
            im_pil = nqInstance.quantize(im_pil, colors=palette_size)
        elif quantizer in (0, 1, 2):
            # Adaptive PIL algorithm
            if quantizer == 2:
                im_pil = im_pil.convert("RGBA")
            else:
                im_pil = im_pil.convert("RGB")
            im_pil = im_pil.quantize(colors=palette_size, method=quantizer)
        else:
            raise ValueError("Invalid value for quantizer: %r" % quantizer)
        return im_pil
162
venv/Lib/site-packages/imageio/plugins/simpleitk.py
Normal file

@@ -0,0 +1,162 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" Storage of image data via the ITK or SimpleITK libraries.
"""

from .. import formats
from ..core import Format, has_module

_itk = None  # Defer loading to load_lib() function.


def load_lib():
    global _itk, _read_function, _write_function
    try:
        import itk as _itk

        _read_function = _itk.imread
        _write_function = _itk.imwrite
    except ImportError:
        try:
            import SimpleITK as _itk

            _read_function = _itk.ReadImage
            _write_function = _itk.WriteImage
        except ImportError:
            raise ImportError(
                "itk could not be found. "
                "Please try "
                "  python -m pip install itk "
                "or "
                "  python -m pip install simpleitk "
                "or refer to "
                "  https://itkpythonpackage.readthedocs.io/ "
                "for further instructions."
            )
    return _itk


# Split up into formats that only ITK handles and all supported formats.
ITK_FORMATS = (
    ".gipl",
    ".ipl",
    ".mha",
    ".mhd",
    ".nhdr",
    ".nia",
    ".hdr",
    ".nrrd",
    ".nii",
    ".nii.gz",
    ".img",
    ".img.gz",
    ".vtk",
    ".hdf5",
    ".lsm",
    ".mnc",
    ".mnc2",
    ".mgh",
    ".pic",
)
ALL_FORMATS = ITK_FORMATS + (
    ".bmp",
    ".jpeg",
    ".jpg",
    ".png",
    ".tiff",
    ".tif",
    ".dicom",
    ".dcm",
    ".gdcm",
)


class ItkFormat(Format):
    """ The ItkFormat uses the ITK or SimpleITK library to support a range of
    ITK-related formats. It also supports a few common formats that are
    also supported by the freeimage plugin (e.g. PNG and JPEG).

    This format requires the ``itk`` or ``SimpleITK`` package.

    Parameters for reading
    ----------------------
    None.

    Parameters for saving
    ---------------------
    None.

    """

    def _can_read(self, request):
        # If the request is a format that only this plugin can handle,
        # we report that we can do it; a useful error will be raised
        # when simpleitk is not installed. For the more common formats
        # we only report that we can read if the library is installed.
        if request.extension in ITK_FORMATS:
            return True
        if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
            return request.extension in ALL_FORMATS

    def _can_write(self, request):
        if request.extension in ITK_FORMATS:
            return True
        if has_module("itk.ImageIOBase") or has_module("SimpleITK"):
            return request.extension in ALL_FORMATS

    # -- reader

    class Reader(Format.Reader):
        def _open(self, pixel_type=None, fallback_only=None, **kwargs):
            if not _itk:
                load_lib()
            args = ()
            if pixel_type is not None:
                args += (pixel_type,)
            if fallback_only is not None:
                args += (fallback_only,)
            self._img = _read_function(self.request.get_local_filename(), *args)

        def _get_length(self):
            return 1

        def _close(self):
            pass

        def _get_data(self, index):
            # Get data
            if index != 0:
                error_msg = "Index out of range while reading from itk file"
                raise IndexError(error_msg)

            # Return array and empty meta data
            return _itk.GetArrayFromImage(self._img), {}

        def _get_meta_data(self, index):
            error_msg = "The itk plugin does not support meta data, currently."
            raise RuntimeError(error_msg)

    # -- writer

    class Writer(Format.Writer):
        def _open(self):
            if not _itk:
                load_lib()

        def _close(self):
            pass

        def _append_data(self, im, meta):
            _itk_img = _itk.GetImageFromArray(im)
            _write_function(_itk_img, self.request.get_local_filename())

        def set_meta_data(self, meta):
            error_msg = "The itk plugin does not support meta data, currently."
            raise RuntimeError(error_msg)


# Register
title = "Insight Segmentation and Registration Toolkit (ITK) format"
format = ItkFormat("itk", title, " ".join(ALL_FORMATS), "iIvV")
formats.add_format(format)
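
# A minimal usage sketch for the itk format registered above (a sketch,
# assuming `itk` or `SimpleITK` is installed; "scan.mha" is illustrative):
#
#   import imageio
#
#   vol = imageio.volread("scan.mha", format="itk")       # 3D ndarray
#   imageio.volwrite("scan_copy.mha", vol, format="itk")
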
469
venv/Lib/site-packages/imageio/plugins/spe.py
Normal file

@@ -0,0 +1,469 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" SPE file reader
"""

import os
import logging

import numpy as np

from .. import formats
from ..core import Format


logger = logging.getLogger(__name__)


class Spec:
    """SPE file specification data

    Tuples of (offset, datatype, count), where offset is the offset in the SPE
    file and datatype is the datatype as used in `numpy.fromfile()`.

    `data_start` is the offset of actual image data.

    `dtypes` translates SPE datatypes (0...3, 8) to numpy ones, e.g. dtypes[0]
    is dtype("<f") (which is np.float32).

    `controllers` maps the `type` metadata to a human-readable name.

    `readout_modes` maps the `readoutMode` metadata to something human-readable,
    although this may not be accurate since there is next to no documentation
    to be found.
    """

    basic = {
        "datatype": (108, "<h"),  # dtypes
        "xdim": (42, "<H"),
        "ydim": (656, "<H"),
        "xml_footer_offset": (678, "<Q"),
        "NumFrames": (1446, "<i"),
        "file_header_ver": (1992, "<f"),
    }

    metadata = {
        # ROI information
        "NumROI": (1510, "<h"),
        "ROIs": (
            1512,
            np.dtype(
                [
                    ("startx", "<H"),
                    ("endx", "<H"),
                    ("groupx", "<H"),
                    ("starty", "<H"),
                    ("endy", "<H"),
                    ("groupy", "<H"),
                ]
            ),
            10,
        ),
        # chip-related sizes
        "xDimDet": (6, "<H"),
        "yDimDet": (18, "<H"),
        "VChipXdim": (14, "<h"),
        "VChipYdim": (16, "<h"),
        # other stuff
        "controller_version": (0, "<h"),
        "logic_output": (2, "<h"),
        "amp_high_cap_low_noise": (4, "<H"),  # enum?
        "mode": (8, "<h"),  # enum?
        "exposure_sec": (10, "<f"),
        "date": (20, "<10S"),
        "detector_temp": (36, "<f"),
        "detector_type": (40, "<h"),
        "st_diode": (44, "<h"),
        "delay_time": (46, "<f"),
        # shutter_control: normal, disabled open, disabled closed
        # But which one is which?
        "shutter_control": (50, "<H"),
        "absorb_live": (52, "<h"),
        "absorb_mode": (54, "<H"),
        "can_do_virtual_chip": (56, "<h"),
        "threshold_min_live": (58, "<h"),
        "threshold_min_val": (60, "<f"),
        "threshold_max_live": (64, "<h"),
        "threshold_max_val": (66, "<f"),
        "time_local": (172, "<7S"),
        "time_utc": (179, "<7S"),
        "adc_offset": (188, "<H"),
        "adc_rate": (190, "<H"),
        "adc_type": (192, "<H"),
        "adc_resolution": (194, "<H"),
        "adc_bit_adjust": (196, "<H"),
        "gain": (198, "<H"),
        "comments": (200, "<80S", 5),
        "geometric": (600, "<H"),  # flags
        "sw_version": (688, "<16S"),
        "spare_4": (742, "<436S"),
        "XPrePixels": (98, "<h"),
        "XPostPixels": (100, "<h"),
        "YPrePixels": (102, "<h"),
        "YPostPixels": (104, "<h"),
        "readout_time": (672, "<f"),
        "xml_footer_offset": (678, "<Q"),
        "type": (704, "<h"),  # controllers
        "clockspeed_us": (1428, "<f"),
        "readout_mode": (1480, "<H"),  # readout_modes
        "window_size": (1482, "<H"),
        "file_header_ver": (1992, "<f"),
    }

    data_start = 4100

    dtypes = {
        0: np.dtype(np.float32),
        1: np.dtype(np.int32),
        2: np.dtype(np.int16),
        3: np.dtype(np.uint16),
        8: np.dtype(np.uint32),
    }

    controllers = [
        "new120 (Type II)",
        "old120 (Type I)",
        "ST130",
        "ST121",
        "ST138",
        "DC131 (PentaMax)",
        "ST133 (MicroMax/Roper)",
        "ST135 (GPIB)",
        "VTCCD",
        "ST116 (GPIB)",
        "OMA3 (GPIB)",
        "OMA4",
    ]

    # This was gathered from random places on the internet and own experiments
    # with the camera. May not be accurate.
    readout_modes = ["full frame", "frame transfer", "kinetics"]

    # Do not decode the following metadata keys into strings, but leave them
    # as byte arrays
    no_decode = ["spare_4"]
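
# How the (offset, dtype[, count]) tuples above are used, in essence
# (a standalone sketch; "rec.spe" is an illustrative file name):
#
#   import numpy as np
#
#   with open("rec.spe", "rb") as f:
#       offset, fmt = Spec.basic["NumFrames"]
#       f.seek(offset)
#       num_frames = int(np.fromfile(f, dtype=fmt, count=1)[0])
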
|
||||
|
||||
|
||||
class SpeFormat(Format):
|
||||
""" Some CCD camera software produces images in the Princeton Instruments
|
||||
SPE file format. This plugin supports reading such files.
|
||||
|
||||
Parameters for reading
|
||||
----------------------
|
||||
char_encoding : str
|
||||
Character encoding used to decode strings in the metadata. Defaults
|
||||
to "latin1".
|
||||
check_filesize : bool
|
||||
The number of frames in the file is stored in the file header. However,
|
||||
this number may be wrong for certain software. If this is `True`
|
||||
(default), derive the number of frames also from the file size and
|
||||
raise a warning if the two values do not match.
|
||||
|
||||
Metadata for reading
|
||||
--------------------
|
||||
ROIs : list of dict
|
||||
Regions of interest used for recording images. Each dict has the
|
||||
"top_left" key containing x and y coordinates of the top left corner,
|
||||
the "bottom_right" key with x and y coordinates of the bottom right
|
||||
corner, and the "bin" key with number of binned pixels in x and y
|
||||
directions.
|
||||
comments : list of str
|
||||
The SPE format allows for 5 comment strings of 80 characters each.
|
||||
controller_version : int
|
||||
Hardware version
|
||||
logic_output : int
|
||||
Definition of output BNC
|
||||
amp_hi_cap_low_noise : int
|
||||
Amp switching mode
|
||||
mode : int
|
||||
Timing mode
|
||||
exp_sec : float
|
||||
Alternative exposure in seconds
|
||||
date : str
|
||||
Date string
|
||||
detector_temp : float
|
||||
Detector temperature
|
||||
detector_type : int
|
||||
CCD / diode array type
|
||||
st_diode : int
|
||||
Trigger diode
|
||||
delay_time : float
|
||||
Used with async mode
|
||||
shutter_control : int
|
||||
Normal, disabled open, or disabled closed
|
||||
absorb_live : bool
|
||||
on / off
|
||||
absorb_mode : int
|
||||
Reference strip or file
|
||||
can_do_virtual_chip : bool
|
||||
True or False whether chip can do virtual chip
|
||||
threshold_min_live : bool
|
||||
on / off
|
||||
threshold_min_val : float
|
||||
Threshold minimum value
|
||||
threshold_max_live : bool
|
||||
on / off
|
||||
threshold_max_val : float
|
||||
Threshold maximum value
|
||||
time_local : str
|
||||
Experiment local time
|
||||
time_utc : str
|
||||
Experiment UTC time
|
||||
adc_offset : int
|
||||
ADC offset
|
||||
adc_rate : int
|
||||
ADC rate
|
||||
adc_type : int
|
||||
ADC type
|
||||
adc_resolution : int
|
||||
ADC resolution
|
||||
adc_bit_adjust : int
|
||||
ADC bit adjust
|
||||
gain : int
|
||||
gain
|
||||
sw_version : str
|
||||
Version of software which created this file
|
||||
spare_4 : bytes
|
||||
Reserved space
|
||||
readout_time : float
|
||||
Experiment readout time
|
||||
type : str
|
||||
Controller type
|
||||
clockspeed_us : float
|
||||
Vertical clock speed in microseconds
|
||||
readout_mode : {"full frame", "frame transfer", "kinetics", ""}
|
||||
Readout mode. Empty string means that this was not set by the
|
||||
Software.
|
||||
window_size : int
|
||||
Window size for Kinetics mode
|
||||
file_header_ver : float
|
||||
File header version
|
||||
chip_size : [int, int]
|
||||
x and y dimensions of the camera chip
|
||||
virt_chip_size : [int, int]
|
||||
Virtual chip x and y dimensions
|
||||
pre_pixels : [int, int]
|
||||
Pre pixels in x and y dimensions
|
||||
post_pixels : [int, int],
|
||||
Post pixels in x and y dimensions
|
||||
geometric : list of {"rotate", "reverse", "flip"}
|
||||
Geometric operations
|
||||
"""
|
||||
|
||||
def _can_read(self, request):
|
||||
return (
|
||||
request.mode[1] in self.modes + "?" and request.extension in self.extensions
|
||||
)
|
||||
|
||||
def _can_write(self, request):
|
||||
return False
|
||||
|
||||
class Reader(Format.Reader):
|
||||
def _open(self, char_encoding="latin1", check_filesize=True):
|
||||
self._file = self.request.get_file()
|
||||
self._char_encoding = char_encoding
|
||||
|
||||
info = self._parse_header(Spec.basic)
|
||||
self._file_header_ver = info["file_header_ver"]
|
||||
self._dtype = Spec.dtypes[info["datatype"]]
|
||||
self._shape = (info["ydim"], info["xdim"])
|
||||
self._len = info["NumFrames"]
|
||||
|
||||
if check_filesize:
|
||||
# Some software writes incorrect `NumFrames` metadata.
|
||||
# To determine the number of frames, check the size of the data
|
||||
# segment -- until the end of the file for SPE<3, until the
|
||||
# xml footer for SPE>=3.
|
||||
data_end = (
|
||||
info["xml_footer_offset"]
|
||||
if info["file_header_ver"] >= 3
|
||||
else os.path.getsize(self.request.get_local_filename())
|
||||
)
|
||||
l = data_end - Spec.data_start
|
||||
l //= self._shape[0] * self._shape[1] * self._dtype.itemsize
|
||||
if l != self._len:
|
||||
logger.warning(
|
||||
"The file header of %s claims there are %s frames, "
|
||||
"but there are actually %s frames.",
|
||||
self.request.filename,
|
||||
self._len,
|
||||
l,
|
||||
)
|
||||
self._len = min(l, self._len)
|
||||
|
||||
self._meta = None
|
||||
|
||||
def _get_meta_data(self, index):
|
||||
if self._meta is None:
|
||||
if self._file_header_ver < 3:
|
||||
self._init_meta_data_pre_v3()
|
||||
else:
|
||||
self._init_meta_data_post_v3()
|
||||
return self._meta
|
||||
|
||||
def _close(self):
|
||||
# The file should be closed by `self.request`
|
||||
pass
|
||||
|
||||
def _init_meta_data_pre_v3(self):
|
||||
self._meta = self._parse_header(Spec.metadata)
|
||||
|
||||
nr = self._meta.pop("NumROI", None)
|
||||
nr = 1 if nr < 1 else nr
|
||||
self._meta["ROIs"] = roi_array_to_dict(self._meta["ROIs"][:nr])
|
||||
|
||||
# chip sizes
|
||||
self._meta["chip_size"] = [
|
||||
self._meta.pop("xDimDet", None),
|
||||
self._meta.pop("yDimDet", None),
|
||||
]
|
||||
self._meta["virt_chip_size"] = [
|
||||
self._meta.pop("VChipXdim", None),
|
||||
self._meta.pop("VChipYdim", None),
|
||||
]
|
||||
self._meta["pre_pixels"] = [
|
||||
self._meta.pop("XPrePixels", None),
|
||||
self._meta.pop("YPrePixels", None),
|
||||
]
|
||||
self._meta["post_pixels"] = [
|
||||
self._meta.pop("XPostPixels", None),
|
||||
self._meta.pop("YPostPixels", None),
|
||||
]
|
||||
|
||||
# comments
|
||||
self._meta["comments"] = [str(c) for c in self._meta["comments"]]
|
||||
|
||||
# geometric operations
|
||||
g = []
|
||||
f = self._meta.pop("geometric", 0)
|
||||
if f & 1:
|
||||
g.append("rotate")
|
||||
if f & 2:
|
||||
g.append("reverse")
|
||||
if f & 4:
|
||||
g.append("flip")
|
||||
self._meta["geometric"] = g

            # Make some additional information more human-readable
            t = self._meta["type"]
            if 1 <= t <= len(Spec.controllers):
                self._meta["type"] = Spec.controllers[t - 1]
            else:
                self._meta["type"] = ""
            m = self._meta["readout_mode"]
            if 1 <= m <= len(Spec.readout_modes):
                self._meta["readout_mode"] = Spec.readout_modes[m - 1]
            else:
                self._meta["readout_mode"] = ""

            # bools
            for k in (
                "absorb_live",
                "can_do_virtual_chip",
                "threshold_min_live",
                "threshold_max_live",
            ):
                self._meta[k] = bool(self._meta[k])

            # frame shape
            self._meta["frame_shape"] = self._shape

        def _parse_header(self, spec):
            ret = {}
            # Decode each string from the numpy array read by np.fromfile
            decode = np.vectorize(lambda x: x.decode(self._char_encoding))

            for name, sp in spec.items():
                self._file.seek(sp[0])
                cnt = 1 if len(sp) < 3 else sp[2]
                v = np.fromfile(self._file, dtype=sp[1], count=cnt)
                if v.dtype.kind == "S" and name not in Spec.no_decode:
                    # Do not raise on string decoding failures; only warn
                    try:
                        v = decode(v)
                    except Exception:
                        logger.warning(
                            'Failed to decode "{}" metadata '
                            "string. Check `char_encoding` "
                            "parameter.".format(name)
                        )

                try:
                    # For convenience, if the array contains only one single
                    # entry, return this entry itself.
                    v = v.item()
                except ValueError:
                    v = np.squeeze(v)
                ret[name] = v
            return ret

        def _init_meta_data_post_v3(self):
            info = self._parse_header(Spec.basic)
            self._file.seek(info["xml_footer_offset"])
            xml = self._file.read()
            self._meta = {"__xml": xml}

        def _get_length(self):
            if self.request.mode[1] in "vV":
                return 1
            else:
                return self._len

        def _get_data(self, index):
            if index < 0:
                raise IndexError("Image index %i < 0" % index)
            if index >= self._len:
                raise IndexError("Image index %i >= %i" % (index, self._len))

            if self.request.mode[1] in "vV":
                if index != 0:
                    raise IndexError("Index has to be 0 in v and V modes")
                self._file.seek(Spec.data_start)
                data = np.fromfile(
                    self._file,
                    dtype=self._dtype,
                    count=self._shape[0] * self._shape[1] * self._len,
                )
                data = data.reshape((self._len,) + self._shape)
            else:
                self._file.seek(
                    Spec.data_start
                    + index * self._shape[0] * self._shape[1] * self._dtype.itemsize
                )
                data = np.fromfile(
                    self._file, dtype=self._dtype, count=self._shape[0] * self._shape[1]
                )
                data = data.reshape(self._shape)
            return data, self._get_meta_data(index)


def roi_array_to_dict(a):
    """Convert the `ROIs` structured arrays to :py:class:`dict`

    Parameters
    ----------
    a : numpy.ndarray
        Structured array containing ROI data

    Returns
    -------
    list of dict
        One dict per ROI. Keys are "top_left", "bottom_right", and "bin";
        values are two-element lists whose first element is the x axis value
        and whose second element is the y axis value.
    """
    l = []
    a = a[["startx", "starty", "endx", "endy", "groupx", "groupy"]]
    for sx, sy, ex, ey, gx, gy in a:
        d = {
            "top_left": [int(sx), int(sy)],
            "bottom_right": [int(ex), int(ey)],
            "bin": [int(gx), int(gy)],
        }
        l.append(d)
    return l


fmt = SpeFormat("spe", "SPE file format", ".spe", "iIvV")
formats.add_format(fmt, overwrite=True)
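
# A minimal usage sketch (not part of the plugin itself; "sample.spe" is a
# hypothetical file name). Once this module is imported, the format is
# registered and the regular imageio API applies:
if __name__ == "__main__":  # pragma: no cover
    import imageio

    reader = imageio.get_reader("sample.spe")
    frame = reader.get_data(0)  # first frame as a 2D numpy array
    meta = reader.get_meta_data()  # e.g. "geometric", "chip_size" for SPE<3
    print(frame.shape, meta.get("geometric"))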

344
venv/Lib/site-packages/imageio/plugins/swf.py
Normal file
@@ -0,0 +1,344 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" SWF plugin. Most of the actual work is done in _swf.py.
"""

import os
import zlib
import logging
from io import BytesIO

import numpy as np

from .. import formats
from ..core import Format, read_n_bytes, image_as_uint


logger = logging.getLogger(__name__)

_swf = None  # lazily loaded in load_lib()


def load_lib():
    global _swf
    from . import _swf

    return _swf


class SWFFormat(Format):
    """ Shockwave flash (SWF) is a media format designed for rich and
    interactive animations. This plugin makes use of this format to
    store a series of images in a lossless format with good compression
    (zlib). The resulting images can be shown as an animation using
    a flash player (such as the browser).

    SWF stores images in RGBA format. RGB or grayscale images are
    automatically converted. SWF does not support meta data.

    Parameters for reading
    ----------------------
    loop : bool
        If True, the video will rewind as soon as a frame is requested
        beyond the last frame. Otherwise, IndexError is raised. Default False.

    Parameters for saving
    ---------------------
    fps : int
        The speed to play the animation. Default 12.
    loop : bool
        If True, add a tag to the end of the file to play again from
        the first frame. Most flash players will then play the movie
        in a loop. Note that the imageio SWF Reader does not check this
        tag. Default True.
    html : bool
        If the output is a file on the file system, write an html file
        (in HTML5) that shows the animation. Default False.
    compress : bool
        Whether to compress the swf file. Default False. You probably don't
        want to use this: it does not decrease the file size, since the
        images are already compressed, and it results in slower read and
        write times. The only purpose of this feature is to create
        compressed SWF files, so that we can test the functionality to
        read them.
    """

    def _can_read(self, request):
        if request.mode[1] in (self.modes + "?"):
            tmp = request.firstbytes[0:3].decode("ascii", "ignore")
            if tmp in ("FWS", "CWS"):
                return True

    def _can_write(self, request):
        if request.mode[1] in (self.modes + "?"):
            if request.extension in self.extensions:
                return True

    # -- reader

    class Reader(Format.Reader):
        def _open(self, loop=False):
            if not _swf:
                load_lib()

            self._arg_loop = bool(loop)

            self._fp = self.request.get_file()

            # Check file ...
            tmp = self.request.firstbytes[0:3].decode("ascii", "ignore")
            if tmp == "FWS":
                pass  # OK
            elif tmp == "CWS":
                # Compressed, we need to decompress
                bb = self._fp.read()
                bb = bb[:8] + zlib.decompress(bb[8:])
                # Wrap up in a file object
                self._fp = BytesIO(bb)
            else:
                raise IOError("This does not look like a valid SWF file")

            # Skip first bytes. This also tests support for seeking ...
            try:
                self._fp.seek(8)
                self._streaming_mode = False
            except Exception:
                self._streaming_mode = True
                self._fp_read(8)

            # Skip header
            # Note that the number of frames is there, which we could
            # potentially use, but the number of frames does not necessarily
            # correspond to the number of images.
            nbits = _swf.bits2int(self._fp_read(1), 5)
            nbits = 5 + nbits * 4
            Lrect = nbits / 8.0
            if Lrect % 1:
                Lrect += 1
            Lrect = int(Lrect)
            self._fp_read(Lrect + 3)

            # Now the rest is basically tags ...
            self._imlocs = []  # tuples (loc, sze, T, L1)
            if not self._streaming_mode:
                # Collect locations of frames, while skipping through the data.
                # This does not read any of the tag *data*.
                try:
                    while True:
                        isimage, sze, T, L1 = self._read_one_tag()
                        loc = self._fp.tell()
                        if isimage:
                            # Still need to check if the format is right
                            format = ord(self._fp_read(3)[2:])
                            if format == 5:  # RGB or RGBA lossless
                                self._imlocs.append((loc, sze, T, L1))
                        self._fp.seek(loc + sze)  # Skip over tag
                except IndexError:
                    pass  # done reading

        def _fp_read(self, n):
            return read_n_bytes(self._fp, n)

        def _close(self):
            pass

        def _get_length(self):
            if self._streaming_mode:
                return np.inf
            else:
                return len(self._imlocs)

        def _get_data(self, index):
            # Check index
            if index < 0:
                raise IndexError("Index in swf file must be >= 0")
            if not self._streaming_mode:
                if self._arg_loop and self._imlocs:
                    index = index % len(self._imlocs)
                if index >= len(self._imlocs):
                    raise IndexError("Index out of bounds")

            if self._streaming_mode:
                # Walk over tags until we find an image
                while True:
                    isimage, sze, T, L1 = self._read_one_tag()
                    bb = self._fp_read(sze)  # always read data
                    if isimage:
                        im = _swf.read_pixels(bb, 0, T, L1)  # can be None
                        if im is not None:
                            return im, {}

            else:
                # Go to corresponding location, read data, and convert to image
                loc, sze, T, L1 = self._imlocs[index]
                self._fp.seek(loc)
                bb = self._fp_read(sze)
                # read_pixels should return an ndarray, since we checked the format
                im = _swf.read_pixels(bb, 0, T, L1)
                return im, {}

        def _read_one_tag(self):
            """
            Return (True, size, T, L1) if the tag is an image that we can read.
            Return (False, size, T, L1) for any other tag.
            """

            # Get head
            head = self._fp_read(6)
            if not head:  # pragma: no cover
                raise IndexError("Reached end of swf movie")

            # Determine type and length
            T, L1, L2 = _swf.get_type_and_len(head)
            if not L2:  # pragma: no cover
                raise RuntimeError("Invalid tag length, could not proceed")

            # Read data
            isimage = False
            sze = L2 - 6
            # bb = self._fp_read(L2 - 6)

            # Parse tag
            if T == 0:
                raise IndexError("Reached end of swf movie")
            elif T in [20, 36]:
                isimage = True
                # im = _swf.read_pixels(bb, 0, T, L1)  # can be None
            elif T in [6, 21, 35, 90]:  # pragma: no cover
                logger.warning("Ignoring JPEG image: cannot read JPEG.")
            else:
                pass  # Not an image tag

            # Done. Return tag info; the caller reads or skips the data.
            return isimage, sze, T, L1

        def _get_meta_data(self, index):
            return {}  # This format does not support meta data

    # -- writer

    class Writer(Format.Writer):
        def _open(self, fps=12, loop=True, html=False, compress=False):
            if not _swf:
                load_lib()

            self._arg_fps = int(fps)
            self._arg_loop = bool(loop)
            self._arg_html = bool(html)
            self._arg_compress = bool(compress)

            self._fp = self.request.get_file()
            self._framecounter = 0
            self._framesize = (100, 100)

            # For compress, we use an in-memory file object
            if self._arg_compress:
                self._fp_real = self._fp
                self._fp = BytesIO()

        def _close(self):
            self._complete()
            # Get size of (uncompressed) file
            sze = self._fp.tell()
            # Set nframes; this is in the potentially compressed region
            self._fp.seek(self._location_to_save_nframes)
            self._fp.write(_swf.int2uint16(self._framecounter))
            # Compress body?
            if self._arg_compress:
                bb = self._fp.getvalue()
                self._fp = self._fp_real
                self._fp.write(bb[:8])
                self._fp.write(zlib.compress(bb[8:]))
                sze = self._fp.tell()  # renew sze value
            # Set size
            self._fp.seek(4)
            self._fp.write(_swf.int2uint32(sze))
            self._fp = None  # Disable

            # Write html?
            if self._arg_html and os.path.isfile(self.request.filename):
                dirname, fname = os.path.split(self.request.filename)
                filename = os.path.join(dirname, fname[:-4] + ".html")
                w, h = self._framesize
                html = HTML % (fname, w, h, fname)
                with open(filename, "wb") as f:
                    f.write(html.encode("utf-8"))

        def _write_header(self, framesize, fps):
            self._framesize = framesize
            # Called as soon as we know the framesize, i.e. on the first frame
            bb = b""
            bb += "FC"[self._arg_compress].encode("ascii")
            bb += "WS".encode("ascii")  # signature bytes
            bb += _swf.int2uint8(8)  # version
            bb += "0000".encode("ascii")  # FileLength (leave open for now)
            bb += (
                _swf.Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes()
            )
            bb += _swf.int2uint8(0) + _swf.int2uint8(fps)  # FrameRate
            self._location_to_save_nframes = len(bb)
            bb += "00".encode("ascii")  # nframes (leave open for now)
            self._fp.write(bb)

            # Write some initial tags
            taglist = _swf.FileAttributesTag(), _swf.SetBackgroundTag(0, 0, 0)
            for tag in taglist:
                self._fp.write(tag.get_tag())

        def _complete(self):
            # What if no images were saved?
            if not self._framecounter:
                self._write_header((10, 10), self._arg_fps)
            # Write stop tag if we do not loop
            if not self._arg_loop:
                self._fp.write(_swf.DoActionTag("stop").get_tag())
            # Finish with end tag
            self._fp.write("\x00\x00".encode("ascii"))

        def _append_data(self, im, meta):
            # Correct shape and type
            if im.ndim == 3 and im.shape[-1] == 1:
                im = im[:, :, 0]
            im = image_as_uint(im, bitdepth=8)
            # Get frame size
            wh = im.shape[1], im.shape[0]
            # Write header on first frame
            isfirstframe = False
            if self._framecounter == 0:
                isfirstframe = True
                self._write_header(wh, self._arg_fps)
            # Create tags
            bm = _swf.BitmapTag(im)
            sh = _swf.ShapeTag(bm.id, (0, 0), wh)
            po = _swf.PlaceObjectTag(1, sh.id, move=(not isfirstframe))
            sf = _swf.ShowFrameTag()
            # Write tags
            for tag in [bm, sh, po, sf]:
                self._fp.write(tag.get_tag())
            self._framecounter += 1

        def set_meta_data(self, meta):
            pass


HTML = """
<!DOCTYPE html>
<html>
<head>
    <title>Show Flash animation %s</title>
</head>
<body>
    <embed width="%i" height="%i" src="%s">
</body>
</html>
"""

# Register. You register an *instance* of a Format class. Here we specify:
format = SWFFormat(
    "swf",  # short name
    "Shockwave flash",  # one line descr.
    ".swf",  # list of extensions as a space separated string
    "I",  # modes, characters in iIvV
)
formats.add_format(format)
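
# A minimal writing sketch (not part of the plugin itself; the output name
# "anim.swf" and the random frames are hypothetical):
if __name__ == "__main__":  # pragma: no cover
    import imageio

    frames = [
        np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8) for _ in range(10)
    ]
    # ``fps`` and ``loop`` are the saving parameters documented above
    imageio.mimwrite("anim.swf", frames, fps=12, loop=True)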

327
venv/Lib/site-packages/imageio/plugins/tifffile.py
Normal file
@@ -0,0 +1,327 @@
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.

""" Storage of image data in tiff format.
"""

import datetime

from .. import formats
from ..core import Format

import numpy as np

_tifffile = None  # Defer loading to load_lib() function.


def load_lib():
    global _tifffile
    try:
        import tifffile as _tifffile
    except ImportError:
        from . import _tifffile
    return _tifffile


TIFF_FORMATS = (".tif", ".tiff", ".stk", ".lsm")
WRITE_METADATA_KEYS = (
    "photometric",
    "planarconfig",
    "resolution",
    "description",
    "compress",
    "predictor",
    "volume",
    "writeshape",
    "extratags",
    "datetime",
)
READ_METADATA_KEYS = (
    "planar_configuration",
    "is_fluoview",
    "is_nih",
    "is_contig",
    "is_micromanager",
    "is_ome",
    "is_lsm",
    "is_palette",
    "is_reduced",
    "is_rgb",
    "is_sgi",
    "is_shaped",
    "is_stk",
    "is_tiled",
    "is_mdgel",
    "resolution_unit",
    "compression",
    "predictor",
    "is_mediacy",
    "orientation",
    "description",
    "description1",
    "is_imagej",
    "software",
)


class TiffFormat(Format):
    """ Provides support for a wide range of Tiff images.

    Images that contain multiple pages can be read using ``imageio.mimread()``
    to read the individual pages, or ``imageio.volread()`` to obtain a
    single (higher dimensional) array.

    Parameters for reading
    ----------------------
    offset : int
        Optional start position of embedded file. By default this is
        the current file position.
    size : int
        Optional size of embedded file. By default this is the number
        of bytes from the 'offset' to the end of the file.
    multifile : bool
        If True (default), series may include pages from multiple files.
        Currently applies to OME-TIFF only.
    multifile_close : bool
        If True (default), keep the handles of other files in multifile
        series closed. This is inefficient when few files refer to
        many pages. If False, the C runtime may run out of resources.

    Parameters for saving
    ---------------------
    bigtiff : bool
        If True, the BigTIFF format is used.
    byteorder : {'<', '>'}
        The endianness of the data in the file.
        By default this is the system's native byte order.
    software : str
        Name of the software used to create the image.
        Saved with the first page only.

    Metadata for reading
    --------------------
    planar_configuration : {'contig', 'planar'}
        Specifies if samples are stored contiguous or in separate planes.
        By default this setting is inferred from the data shape.
        'contig': last dimension contains samples.
        'planar': third last dimension contains samples.
    resolution_unit : (float, float) or ((int, int), (int, int))
        X and Y resolution in dots per inch as float or rational numbers.
    compression : int
        Value indicating the compression algorithm used, e.g. 5 is LZW,
        7 is JPEG, 8 is deflate.
        If 1, data are uncompressed.
    predictor : int
        Value 2 indicates horizontal differencing was used before compression,
        while 3 indicates floating point horizontal differencing.
        If 1, no prediction scheme was used before compression.
    orientation : {'top_left', 'bottom_right', ...}
        Orientation of the image array.
    is_rgb : bool
        True if page contains a RGB image.
    is_contig : bool
        True if page contains a contiguous image.
    is_tiled : bool
        True if page contains tiled image.
    is_palette : bool
        True if page contains a palette-colored image and not OME or STK.
    is_reduced : bool
        True if page is a reduced image of another image.
    is_shaped : bool
        True if page contains shape in image_description tag.
    is_fluoview : bool
        True if page contains FluoView MM_STAMP tag.
    is_nih : bool
        True if page contains NIH image header.
    is_micromanager : bool
        True if page contains Micro-Manager metadata.
    is_ome : bool
        True if page contains OME-XML in image_description tag.
    is_sgi : bool
        True if page contains SGI image and tile depth tags.
    is_stk : bool
        True if page contains UIC2Tag tag.
    is_mdgel : bool
        True if page contains md_file_tag tag.
    is_mediacy : bool
        True if page contains Media Cybernetics Id tag.
    is_lsm : bool
        True if page contains LSM CZ_LSM_INFO tag.
    description : str
        Image description
    description1 : str
        Additional description
    is_imagej : None or str
        ImageJ metadata
    software : str
        Software used to create the TIFF file
    datetime : datetime.datetime
        Creation date and time

    Metadata for writing
    --------------------
    photometric : {'minisblack', 'miniswhite', 'rgb'}
        The color space of the image data.
        By default this setting is inferred from the data shape.
    planarconfig : {'contig', 'planar'}
        Specifies if samples are stored contiguous or in separate planes.
        By default this setting is inferred from the data shape.
        'contig': last dimension contains samples.
        'planar': third last dimension contains samples.
    resolution : (float, float) or ((int, int), (int, int))
        X and Y resolution in dots per inch as float or rational numbers.
    description : str
        The subject of the image. Saved with the first page only.
    compress : int
        Values from 0 to 9 controlling the level of zlib (deflate) compression.
        If 0, data are written uncompressed (default).
    predictor : bool
        If True, horizontal differencing is applied before compression.
        Note that using an int literal 1 actually means no prediction scheme
        will be used.
    volume : bool
        If True, volume data are stored in one tile (if applicable) using
        the SGI image_depth and tile_depth tags.
        Image width and depth must be multiples of 16.
        Few programs can read this format, e.g. MeVisLab.
    writeshape : bool
        If True, write the data shape to the image_description tag
        if necessary and no other description is given.
    extratags : sequence of tuples
        Additional tags as [(code, dtype, count, value, writeonce)].

        code : int
            The TIFF tag Id.
        dtype : str
            Data type of items in 'value' in Python struct format.
            One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
        count : int
            Number of data values. Not used for string values.
        value : sequence
            'Count' values compatible with 'dtype'.
        writeonce : bool
            If True, the tag is written to the first page only.
    """

    def _can_read(self, request):
        # We support any kind of image data
        return request.extension in self.extensions

    def _can_write(self, request):
        # We support any kind of image data
        return request.extension in self.extensions

    # -- reader

    class Reader(Format.Reader):
        def _open(self, **kwargs):
            if not _tifffile:
                load_lib()
            # Allow loading from http; tifffile uses seek, so download first
            if self.request.filename.startswith(("http://", "https://")):
                self._f = f = open(self.request.get_local_filename(), "rb")
            else:
                self._f = None
                f = self.request.get_file()
            self._tf = _tifffile.TiffFile(f, **kwargs)

            # metadata is the same for all images
            self._meta = {}

        def _close(self):
            self._tf.close()
            if self._f is not None:
                self._f.close()

        def _get_length(self):
            if self.request.mode[1] in "vV":
                return 1  # or can there be pages in pages or something?
            else:
                return len(self._tf.pages)

        def _get_data(self, index):
            if self.request.mode[1] in "vV":
                # Read data as single 3D (+ color channels) array
                if index != 0:
                    raise IndexError('Tiff supports no more than 1 "volume" per file')
                im = self._tf.asarray()  # request as singleton image
                meta = self._meta
            else:
                # Read as 2D image
                if index < 0 or index >= self._get_length():
                    raise IndexError("Index out of range while reading from tiff file")
                im = self._tf.pages[index].asarray()
                meta = self._meta or self._get_meta_data(index)
            # Return array and meta data
            return im, meta

        def _get_meta_data(self, index):
            page = self._tf.pages[index or 0]
            for key in READ_METADATA_KEYS:
                try:
                    self._meta[key] = getattr(page, key)
                except Exception:
                    pass

            # tifffile <= 0.12.1 uses "datetime"; newer versions use "DateTime"
            for key in ("datetime", "DateTime"):
                try:
                    self._meta["datetime"] = datetime.datetime.strptime(
                        page.tags[key].value, "%Y:%m:%d %H:%M:%S"
                    )
                    break
                except Exception:
                    pass
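            # For example, a hypothetical tag value of "2020:01:31 13:45:00"
            # parses to datetime.datetime(2020, 1, 31, 13, 45).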

            return self._meta

    # -- writer

    class Writer(Format.Writer):
        def _open(self, bigtiff=None, byteorder=None, software=None):
            if not _tifffile:
                load_lib()

            try:
                self._tf = _tifffile.TiffWriter(
                    self.request.get_file(), bigtiff, byteorder, software=software
                )
                self._software = None
            except TypeError:
                # In tifffile >= 0.15, the `software` arg is passed to
                # TiffWriter.save
                self._tf = _tifffile.TiffWriter(
                    self.request.get_file(), bigtiff, byteorder
                )
                self._software = software

            self._meta = {}

        def _close(self):
            self._tf.close()

        def _append_data(self, im, meta):
            if meta:
                self.set_meta_data(meta)
            # No need to check self.request.mode; tifffile figures out whether
            # this is a single page, or all page data at once.
            if self._software is None:
                self._tf.save(np.asanyarray(im), **self._meta)
            else:
                # tifffile >= 0.15
                self._tf.save(np.asanyarray(im), software=self._software, **self._meta)

        def set_meta_data(self, meta):
            self._meta = {}
            for (key, value) in meta.items():
                if key in WRITE_METADATA_KEYS:
                    # Special case: a previously read `predictor` int value of
                    # 1 (=NONE) is translated to the False expected by
                    # TiffWriter.save
                    if key == "predictor" and not isinstance(value, bool):
                        self._meta[key] = value > 1
                    else:
                        self._meta[key] = value
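                    # For example, a previously read predictor of 2 (horizontal
                    # differencing) becomes True here, while 1 (no prediction)
                    # becomes False.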


# Register
format = TiffFormat("tiff", "TIFF format", TIFF_FORMATS, "iIvV")
formats.add_format(format)
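
# A minimal round-trip sketch (not part of the plugin itself; the file name
# "stack.tif" is hypothetical):
if __name__ == "__main__":  # pragma: no cover
    import imageio

    vol = np.zeros((4, 32, 32), dtype=np.uint16)
    imageio.mimwrite("stack.tif", vol)  # written as one page per 2D slice
    pages = imageio.mimread("stack.tif")  # list of four 2D arrays
    volume = imageio.volread("stack.tif")  # the same data as one 3D array
    print(len(pages), volume.shape)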