diff --git a/DBHelper.py b/DBHelper.py index dcaed833d..dd73cddcb 100644 --- a/DBHelper.py +++ b/DBHelper.py @@ -1,5 +1,4 @@ import pyrebase -import firebase_admin firebaseConfig = { 'apiKey': "AIzaSyAdL0W5HscjEDFPK4BDi6Cnc7FLa30GPYY", @@ -11,106 +10,117 @@ firebaseConfig = { 'appId': "1:163692530359:web:b6dc7ccfc56a79afb11b32", 'measurementId': "G-EPWP2LK89Q", 'serviceAccount': 'vehicleantitheftrecognition-firebase-adminsdk-krrgw-05da515de5.json' - } +} firebase = pyrebase.initialize_app(firebaseConfig) db = firebase.database() auth = firebase.auth() storage = firebase.storage() -class DBHelper: - # Create account function which creates a new authentication info. - def createAccount(username, password, confirmpassword): - email = username + "@hotmail.com" - if password == confirmpassword: - auth.create_user_with_email_and_password(email,password) - print("Account sucessfully created.") - else: - print("Confirmed password doesn't match to other password.") +# Create account function which creates a new authentication info. +def create_account(username, password, confirmpassword): + email = username + "@hotmail.com" + if password == confirmpassword: + auth.create_user_with_email_and_password(email, password) + print("Account sucessfully created.") + else: + print("Confirmed password doesn't match to other password.") - # Login function which verifies the given authentication info. - def login(username, password): - email = username + "@hotmail.com" - try: - auth.sign_in_with_email_and_password(email, password) - print("Successfully Logged in.") - except: - print("Invalid username or password.") - # Uploads the data of specified user uploaded into firebase. - def uploadData(userID, firstname, lastname, email, phone, address): - data = {"First Name": firstname, "Last Name": lastname, "E-Mail": email, "Phone": phone, "Address": address} - db.child("Users").child(userID).set(data) +# Login function which verifies the given authentication info. 
+def login(username, password): + email = username + "@hotmail.com" + try: + auth.sign_in_with_email_and_password(email, password) + print("Successfully Logged in.") + except: + print("Invalid username or password.") - # Removes the data of specified user uploaded into firebase. - def removeData(userID): - db.child("Users").child(userID).remove() - # Returns the first name or else an empty string. - def getFirstName(userID): - firstname = "" - users = db.child("Users").get() - for user in users.each(): - if user.key() == userID: - firstname = user.val()["First Name"] - return firstname +# Uploads the data of specified user uploaded into firebase. +def upload_data(userID, firstname, lastname, email, phone, address): + data = {"First Name": firstname, "Last Name": lastname, "E-Mail": email, "Phone": phone, "Address": address} + db.child("Users").child(userID).set(data) - # Returns the last name or else an empty string. - def getLastName(userID): - lastname = "" - users = db.child("Users").get() - for user in users.each(): - if user.key() == userID: - lastname = user.val()["Last Name"] - return lastname - # Returns the e-mail or else an empty string. - def getEmail(userID): - email = "" - users = db.child("Users").get() - for user in users.each(): - if user.key() == userID: - email = user.val()["E-Mail"] - return email +# Removes the data of specified user uploaded into firebase. +def remove_data(userID): + db.child("Users").child(userID).remove() - # Returns the phone or else an empty string. - def getPhone(userID): - phone = "" - users = db.child("Users").get() - for user in users.each(): - if user.key() == userID: - phone = user.val()["Phone"] - return phone - # Returns the address or else an empty string. - def getAddress(userID): - address = "" - users = db.child("Users").get() - for user in users.each(): - if user.key() == userID: - address = user.val()["Address"] - return address +# Returns the first name or else an empty string. 
+def get_firstname(userID): + firstname = "" + users = db.child("Users").get() + for user in users.each(): + if user.key() == userID: + firstname = user.val()["First Name"] + return firstname - # Uploads the photo of user, input should be something like "example.png" - def uploadUserPhoto(userphoto): - userphoto_str = str(userphoto) - storage.child("Photos_of_Users/" + str(userphoto)).put("Photos_of_Users/" + str(userphoto)) - # Uploads the photo of thief, input should be something like "example.png" - def uploadThiefPhoto(userphoto): - userphoto_str = str(userphoto) - storage.child("Photos_of_Thieves/" + str(userphoto)).put("Photos_of_Thieves/" + str(userphoto)) +# Returns the last name or else an empty string. +def get_lastname(userID): + lastname = "" + users = db.child("Users").get() + for user in users.each(): + if user.key() == userID: + lastname = user.val()["Last Name"] + return lastname - # Downloads all the user photos. - def downloadAllUserphotos(self): - storage.child("Photos_of_Users").download("Storage_from_Database") - # Downloads all the thief photos. - def downloadAllThiefphotos(self): - storage.child("Photos_of_Thieves").download("Storage_from_Thieves") +# Returns the e-mail or else an empty string. +def get_email(userID): + email = "" + users = db.child("Users").get() + for user in users.each(): + if user.key() == userID: + email = user.val()["E-Mail"] + return email - # Deletes photo of the specified user. - def deleteUserPhoto(userphoto): - storage.delete('Photos_of_Users/' + userphoto) +# Returns the phone or else an empty string. +def get_phone(userID): + phone = "" + users = db.child("Users").get() + for user in users.each(): + if user.key() == userID: + phone = user.val()["Phone"] + return phone + + +# Returns the address or else an empty string. 
+def get_address(userID): + address = "" + users = db.child("Users").get() + for user in users.each(): + if user.key() == userID: + address = user.val()["Address"] + return address + + +# Uploads the photo of user, input should be something like "example.png" +def upload_user_photo(userphoto): + userphoto_str = str(userphoto) + storage.child("Photos_of_Users/" + str(userphoto)).put("Photos_of_Users/" + str(userphoto)) + + +# Uploads the photo of thief, input should be something like "example.png" +def upload_thief_photo(userphoto): + userphoto_str = str(userphoto) + storage.child("Photos_of_Thieves/" + str(userphoto)).put("Photos_of_Thieves/" + str(userphoto)) + + +# Downloads all the user photos. +def download_all_user_photos(self): + storage.child("Photos_of_Users").download("Storage_from_Database") + + +# Downloads all the thief photos. +def download_all_thief_photos(self): + storage.child("Photos_of_Thieves").download("Storage_from_Thieves") + + +# Deletes photo of the specified user. 
+def delete_user_photo(userphoto): + storage.delete('Photos_of_Users/' + userphoto) diff --git a/Facial_Recognition_Software.py b/Facial_Recognition_Software.py index cf9058869..a2b67a42c 100644 --- a/Facial_Recognition_Software.py +++ b/Facial_Recognition_Software.py @@ -1,5 +1,5 @@ import cv2 -import numpy as np +import numpy as np import sys,os,numpy from glob import glob @@ -24,4 +24,4 @@ if len(faceRects) > 0: cv2.imwrite('output.jpg',img) cv2.imshow("face_image",img) -cv2.waitKey(0) +cv2.waitKey(0) \ No newline at end of file diff --git a/__pycache__/DBHelper.cpython-36.pyc b/__pycache__/DBHelper.cpython-36.pyc new file mode 100644 index 000000000..8f6ceb6f2 Binary files /dev/null and b/__pycache__/DBHelper.cpython-36.pyc differ diff --git a/output.jpg b/output.jpg new file mode 100644 index 000000000..0b7365c7f Binary files /dev/null and b/output.jpg differ diff --git a/venv/Lib/site-packages/PIL/BdfFontFile.py b/venv/Lib/site-packages/PIL/BdfFontFile.py new file mode 100644 index 000000000..102b72e1d --- /dev/null +++ b/venv/Lib/site-packages/PIL/BdfFontFile.py @@ -0,0 +1,110 @@ +# +# The Python Imaging Library +# $Id$ +# +# bitmap distribution font (bdf) file parser +# +# history: +# 1996-05-16 fl created (as bdf2pil) +# 1997-08-25 fl converted to FontFile driver +# 2001-05-25 fl removed bogus __init__ call +# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev) +# 2003-04-22 fl more robustification (from Graham Dumpleton) +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1997-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +""" +Parse X Bitmap Distribution Format (BDF) +""" + + +from . 
import FontFile, Image + +bdf_slant = { + "R": "Roman", + "I": "Italic", + "O": "Oblique", + "RI": "Reverse Italic", + "RO": "Reverse Oblique", + "OT": "Other", +} + +bdf_spacing = {"P": "Proportional", "M": "Monospaced", "C": "Cell"} + + +def bdf_char(f): + # skip to STARTCHAR + while True: + s = f.readline() + if not s: + return None + if s[:9] == b"STARTCHAR": + break + id = s[9:].strip().decode("ascii") + + # load symbol properties + props = {} + while True: + s = f.readline() + if not s or s[:6] == b"BITMAP": + break + i = s.find(b" ") + props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") + + # load bitmap + bitmap = [] + while True: + s = f.readline() + if not s or s[:7] == b"ENDCHAR": + break + bitmap.append(s[:-1]) + bitmap = b"".join(bitmap) + + [x, y, l, d] = [int(p) for p in props["BBX"].split()] + [dx, dy] = [int(p) for p in props["DWIDTH"].split()] + + bbox = (dx, dy), (l, -d - y, x + l, -d), (0, 0, x, y) + + try: + im = Image.frombytes("1", (x, y), bitmap, "hex", "1") + except ValueError: + # deal with zero-width characters + im = Image.new("1", (x, y)) + + return id, int(props["ENCODING"]), bbox, im + + +class BdfFontFile(FontFile.FontFile): + """Font file plugin for the X11 BDF format.""" + + def __init__(self, fp): + super().__init__() + + s = fp.readline() + if s[:13] != b"STARTFONT 2.1": + raise SyntaxError("not a valid BDF file") + + props = {} + comments = [] + + while True: + s = fp.readline() + if not s or s[:13] == b"ENDPROPERTIES": + break + i = s.find(b" ") + props[s[:i].decode("ascii")] = s[i + 1 : -1].decode("ascii") + if s[:i] in [b"COMMENT", b"COPYRIGHT"]: + if s.find(b"LogicalFontDescription") < 0: + comments.append(s[i + 1 : -1].decode("ascii")) + + while True: + c = bdf_char(fp) + if not c: + break + id, ch, (xy, dst, src), im = c + if 0 <= ch < len(self.glyph): + self.glyph[ch] = xy, dst, src, im diff --git a/venv/Lib/site-packages/PIL/BlpImagePlugin.py b/venv/Lib/site-packages/PIL/BlpImagePlugin.py new file mode 100644 
index 000000000..cb8a08e20 --- /dev/null +++ b/venv/Lib/site-packages/PIL/BlpImagePlugin.py @@ -0,0 +1,420 @@ +""" +Blizzard Mipmap Format (.blp) +Jerome Leclanche + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ + +BLP1 files, used mostly in Warcraft III, are not fully supported. +All types of BLP2 files used in World of Warcraft are supported. + +The BLP file structure consists of a header, up to 16 mipmaps of the +texture + +Texture sizes must be powers of two, though the two dimensions do +not have to be equal; 512x256 is valid, but 512x200 is not. +The first mipmap (mipmap #0) is the full size image; each subsequent +mipmap halves both dimensions. The final mipmap should be 1x1. + +BLP files come in many different flavours: +* JPEG-compressed (type == 0) - only supported for BLP1. +* RAW images (type == 1, encoding == 1). Each mipmap is stored as an + array of 8-bit values, one per pixel, left to right, top to bottom. + Each value is an index to the palette. +* DXT-compressed (type == 1, encoding == 2): +- DXT1 compression is used if alpha_encoding == 0. + - An additional alpha bit is used if alpha_depth == 1. + - DXT3 compression is used if alpha_encoding == 1. + - DXT5 compression is used if alpha_encoding == 7. +""" + +import struct +from io import BytesIO + +from . import Image, ImageFile + +BLP_FORMAT_JPEG = 0 + +BLP_ENCODING_UNCOMPRESSED = 1 +BLP_ENCODING_DXT = 2 +BLP_ENCODING_UNCOMPRESSED_RAW_BGRA = 3 + +BLP_ALPHA_ENCODING_DXT1 = 0 +BLP_ALPHA_ENCODING_DXT3 = 1 +BLP_ALPHA_ENCODING_DXT5 = 7 + + +def unpack_565(i): + return (((i >> 11) & 0x1F) << 3, ((i >> 5) & 0x3F) << 2, (i & 0x1F) << 3) + + +def decode_dxt1(data, alpha=False): + """ + input: one "row" of data (i.e. 
will produce 4*width pixels) + """ + + blocks = len(data) // 8 # number of blocks in row + ret = (bytearray(), bytearray(), bytearray(), bytearray()) + + for block in range(blocks): + # Decode next 8-byte block. + idx = block * 8 + color0, color1, bits = struct.unpack_from("> 2 + + a = 0xFF + if control == 0: + r, g, b = r0, g0, b0 + elif control == 1: + r, g, b = r1, g1, b1 + elif control == 2: + if color0 > color1: + r = (2 * r0 + r1) // 3 + g = (2 * g0 + g1) // 3 + b = (2 * b0 + b1) // 3 + else: + r = (r0 + r1) // 2 + g = (g0 + g1) // 2 + b = (b0 + b1) // 2 + elif control == 3: + if color0 > color1: + r = (2 * r1 + r0) // 3 + g = (2 * g1 + g0) // 3 + b = (2 * b1 + b0) // 3 + else: + r, g, b, a = 0, 0, 0, 0 + + if alpha: + ret[j].extend([r, g, b, a]) + else: + ret[j].extend([r, g, b]) + + return ret + + +def decode_dxt3(data): + """ + input: one "row" of data (i.e. will produce 4*width pixels) + """ + + blocks = len(data) // 16 # number of blocks in row + ret = (bytearray(), bytearray(), bytearray(), bytearray()) + + for block in range(blocks): + idx = block * 16 + block = data[idx : idx + 16] + # Decode next 16-byte block. + bits = struct.unpack_from("<8B", block) + color0, color1 = struct.unpack_from(">= 4 + else: + high = True + a &= 0xF + a *= 17 # We get a value between 0 and 15 + + color_code = (code >> 2 * (4 * j + i)) & 0x03 + + if color_code == 0: + r, g, b = r0, g0, b0 + elif color_code == 1: + r, g, b = r1, g1, b1 + elif color_code == 2: + r = (2 * r0 + r1) // 3 + g = (2 * g0 + g1) // 3 + b = (2 * b0 + b1) // 3 + elif color_code == 3: + r = (2 * r1 + r0) // 3 + g = (2 * g1 + g0) // 3 + b = (2 * b1 + b0) // 3 + + ret[j].extend([r, g, b, a]) + + return ret + + +def decode_dxt5(data): + """ + input: one "row" of data (i.e. 
will produce 4 * width pixels) + """ + + blocks = len(data) // 16 # number of blocks in row + ret = (bytearray(), bytearray(), bytearray(), bytearray()) + + for block in range(blocks): + idx = block * 16 + block = data[idx : idx + 16] + # Decode next 16-byte block. + a0, a1 = struct.unpack_from("> alphacode_index) & 0x07 + elif alphacode_index == 15: + alphacode = (alphacode2 >> 15) | ((alphacode1 << 1) & 0x06) + else: # alphacode_index >= 18 and alphacode_index <= 45 + alphacode = (alphacode1 >> (alphacode_index - 16)) & 0x07 + + if alphacode == 0: + a = a0 + elif alphacode == 1: + a = a1 + elif a0 > a1: + a = ((8 - alphacode) * a0 + (alphacode - 1) * a1) // 7 + elif alphacode == 6: + a = 0 + elif alphacode == 7: + a = 255 + else: + a = ((6 - alphacode) * a0 + (alphacode - 1) * a1) // 5 + + color_code = (code >> 2 * (4 * j + i)) & 0x03 + + if color_code == 0: + r, g, b = r0, g0, b0 + elif color_code == 1: + r, g, b = r1, g1, b1 + elif color_code == 2: + r = (2 * r0 + r1) // 3 + g = (2 * g0 + g1) // 3 + b = (2 * b0 + b1) // 3 + elif color_code == 3: + r = (2 * r1 + r0) // 3 + g = (2 * g1 + g0) // 3 + b = (2 * b1 + b0) // 3 + + ret[j].extend([r, g, b, a]) + + return ret + + +class BLPFormatError(NotImplementedError): + pass + + +class BlpImageFile(ImageFile.ImageFile): + """ + Blizzard Mipmap Format + """ + + format = "BLP" + format_description = "Blizzard Mipmap Format" + + def _open(self): + self.magic = self.fp.read(4) + self._read_blp_header() + + if self.magic == b"BLP1": + decoder = "BLP1" + self.mode = "RGB" + elif self.magic == b"BLP2": + decoder = "BLP2" + self.mode = "RGBA" if self._blp_alpha_depth else "RGB" + else: + raise BLPFormatError("Bad BLP magic %r" % (self.magic)) + + self.tile = [(decoder, (0, 0) + self.size, 0, (self.mode, 0, 1))] + + def _read_blp_header(self): + (self._blp_compression,) = struct.unpack(" mode, rawmode + 1: ("P", "P;1"), + 4: ("P", "P;4"), + 8: ("P", "P"), + 16: ("RGB", "BGR;15"), + 24: ("RGB", "BGR"), + 32: ("RGB", "BGRX"), 
+} + + +def _accept(prefix): + return prefix[:2] == b"BM" + + +def _dib_accept(prefix): + return i32(prefix[:4]) in [12, 40, 64, 108, 124] + + +# ============================================================================= +# Image plugin for the Windows BMP format. +# ============================================================================= +class BmpImageFile(ImageFile.ImageFile): + """ Image plugin for the Windows Bitmap format (BMP) """ + + # ------------------------------------------------------------- Description + format_description = "Windows Bitmap" + format = "BMP" + + # -------------------------------------------------- BMP Compression values + COMPRESSIONS = {"RAW": 0, "RLE8": 1, "RLE4": 2, "BITFIELDS": 3, "JPEG": 4, "PNG": 5} + for k, v in COMPRESSIONS.items(): + vars()[k] = v + + def _bitmap(self, header=0, offset=0): + """ Read relevant info about the BMP """ + read, seek = self.fp.read, self.fp.seek + if header: + seek(header) + file_info = {} + # read bmp header size @offset 14 (this is part of the header size) + file_info["header_size"] = i32(read(4)) + file_info["direction"] = -1 + + # -------------------- If requested, read header at a specific position + # read the rest of the bmp header, without its size + header_data = ImageFile._safe_read(self.fp, file_info["header_size"] - 4) + + # -------------------------------------------------- IBM OS/2 Bitmap v1 + # ----- This format has different offsets because of width/height types + if file_info["header_size"] == 12: + file_info["width"] = i16(header_data[0:2]) + file_info["height"] = i16(header_data[2:4]) + file_info["planes"] = i16(header_data[4:6]) + file_info["bits"] = i16(header_data[6:8]) + file_info["compression"] = self.RAW + file_info["palette_padding"] = 3 + + # --------------------------------------------- Windows Bitmap v2 to v5 + # v3, OS/2 v2, v4, v5 + elif file_info["header_size"] in (40, 64, 108, 124): + file_info["y_flip"] = i8(header_data[7]) == 0xFF + file_info["direction"] 
= 1 if file_info["y_flip"] else -1 + file_info["width"] = i32(header_data[0:4]) + file_info["height"] = ( + i32(header_data[4:8]) + if not file_info["y_flip"] + else 2 ** 32 - i32(header_data[4:8]) + ) + file_info["planes"] = i16(header_data[8:10]) + file_info["bits"] = i16(header_data[10:12]) + file_info["compression"] = i32(header_data[12:16]) + # byte size of pixel data + file_info["data_size"] = i32(header_data[16:20]) + file_info["pixels_per_meter"] = ( + i32(header_data[20:24]), + i32(header_data[24:28]), + ) + file_info["colors"] = i32(header_data[28:32]) + file_info["palette_padding"] = 4 + self.info["dpi"] = tuple( + int(x / 39.3701 + 0.5) for x in file_info["pixels_per_meter"] + ) + if file_info["compression"] == self.BITFIELDS: + if len(header_data) >= 52: + for idx, mask in enumerate( + ["r_mask", "g_mask", "b_mask", "a_mask"] + ): + file_info[mask] = i32(header_data[36 + idx * 4 : 40 + idx * 4]) + else: + # 40 byte headers only have the three components in the + # bitfields masks, ref: + # https://msdn.microsoft.com/en-us/library/windows/desktop/dd183376(v=vs.85).aspx + # See also + # https://github.com/python-pillow/Pillow/issues/1293 + # There is a 4th component in the RGBQuad, in the alpha + # location, but it is listed as a reserved component, + # and it is not generally an alpha channel + file_info["a_mask"] = 0x0 + for mask in ["r_mask", "g_mask", "b_mask"]: + file_info[mask] = i32(read(4)) + file_info["rgb_mask"] = ( + file_info["r_mask"], + file_info["g_mask"], + file_info["b_mask"], + ) + file_info["rgba_mask"] = ( + file_info["r_mask"], + file_info["g_mask"], + file_info["b_mask"], + file_info["a_mask"], + ) + else: + raise OSError("Unsupported BMP header type (%d)" % file_info["header_size"]) + + # ------------------ Special case : header is reported 40, which + # ---------------------- is shorter than real size for bpp >= 16 + self._size = file_info["width"], file_info["height"] + + # ------- If color count was not found in the header, 
compute from bits + file_info["colors"] = ( + file_info["colors"] + if file_info.get("colors", 0) + else (1 << file_info["bits"]) + ) + + # ------------------------------- Check abnormal values for DOS attacks + if file_info["width"] * file_info["height"] > 2 ** 31: + raise OSError("Unsupported BMP Size: (%dx%d)" % self.size) + + # ---------------------- Check bit depth for unusual unsupported values + self.mode, raw_mode = BIT2MODE.get(file_info["bits"], (None, None)) + if self.mode is None: + raise OSError("Unsupported BMP pixel depth (%d)" % file_info["bits"]) + + # ---------------- Process BMP with Bitfields compression (not palette) + if file_info["compression"] == self.BITFIELDS: + SUPPORTED = { + 32: [ + (0xFF0000, 0xFF00, 0xFF, 0x0), + (0xFF0000, 0xFF00, 0xFF, 0xFF000000), + (0xFF, 0xFF00, 0xFF0000, 0xFF000000), + (0x0, 0x0, 0x0, 0x0), + (0xFF000000, 0xFF0000, 0xFF00, 0x0), + ], + 24: [(0xFF0000, 0xFF00, 0xFF)], + 16: [(0xF800, 0x7E0, 0x1F), (0x7C00, 0x3E0, 0x1F)], + } + MASK_MODES = { + (32, (0xFF0000, 0xFF00, 0xFF, 0x0)): "BGRX", + (32, (0xFF000000, 0xFF0000, 0xFF00, 0x0)): "XBGR", + (32, (0xFF, 0xFF00, 0xFF0000, 0xFF000000)): "RGBA", + (32, (0xFF0000, 0xFF00, 0xFF, 0xFF000000)): "BGRA", + (32, (0x0, 0x0, 0x0, 0x0)): "BGRA", + (24, (0xFF0000, 0xFF00, 0xFF)): "BGR", + (16, (0xF800, 0x7E0, 0x1F)): "BGR;16", + (16, (0x7C00, 0x3E0, 0x1F)): "BGR;15", + } + if file_info["bits"] in SUPPORTED: + if ( + file_info["bits"] == 32 + and file_info["rgba_mask"] in SUPPORTED[file_info["bits"]] + ): + raw_mode = MASK_MODES[(file_info["bits"], file_info["rgba_mask"])] + self.mode = "RGBA" if "A" in raw_mode else self.mode + elif ( + file_info["bits"] in (24, 16) + and file_info["rgb_mask"] in SUPPORTED[file_info["bits"]] + ): + raw_mode = MASK_MODES[(file_info["bits"], file_info["rgb_mask"])] + else: + raise OSError("Unsupported BMP bitfields layout") + else: + raise OSError("Unsupported BMP bitfields layout") + elif file_info["compression"] == self.RAW: + if 
file_info["bits"] == 32 and header == 22: # 32-bit .cur offset + raw_mode, self.mode = "BGRA", "RGBA" + else: + raise OSError("Unsupported BMP compression (%d)" % file_info["compression"]) + + # --------------- Once the header is processed, process the palette/LUT + if self.mode == "P": # Paletted for 1, 4 and 8 bit images + + # ---------------------------------------------------- 1-bit images + if not (0 < file_info["colors"] <= 65536): + raise OSError("Unsupported BMP Palette size (%d)" % file_info["colors"]) + else: + padding = file_info["palette_padding"] + palette = read(padding * file_info["colors"]) + greyscale = True + indices = ( + (0, 255) + if file_info["colors"] == 2 + else list(range(file_info["colors"])) + ) + + # ----------------- Check if greyscale and ignore palette if so + for ind, val in enumerate(indices): + rgb = palette[ind * padding : ind * padding + 3] + if rgb != o8(val) * 3: + greyscale = False + + # ------- If all colors are grey, white or black, ditch palette + if greyscale: + self.mode = "1" if file_info["colors"] == 2 else "L" + raw_mode = self.mode + else: + self.mode = "P" + self.palette = ImagePalette.raw( + "BGRX" if padding == 4 else "BGR", palette + ) + + # ---------------------------- Finally set the tile data for the plugin + self.info["compression"] = file_info["compression"] + self.tile = [ + ( + "raw", + (0, 0, file_info["width"], file_info["height"]), + offset or self.fp.tell(), + ( + raw_mode, + ((file_info["width"] * file_info["bits"] + 31) >> 3) & (~3), + file_info["direction"], + ), + ) + ] + + def _open(self): + """ Open file, check magic number and read header """ + # read 14 bytes: magic number, filesize, reserved, header final offset + head_data = self.fp.read(14) + # choke if the file does not have the required magic bytes + if not _accept(head_data): + raise SyntaxError("Not a BMP file") + # read the start position of the BMP image data (u32) + offset = i32(head_data[10:14]) + # load bitmap information 
(offset=raster info) + self._bitmap(offset=offset) + + +# ============================================================================= +# Image plugin for the DIB format (BMP alias) +# ============================================================================= +class DibImageFile(BmpImageFile): + + format = "DIB" + format_description = "Windows Bitmap" + + def _open(self): + self._bitmap() + + +# +# -------------------------------------------------------------------- +# Write BMP file + + +SAVE = { + "1": ("1", 1, 2), + "L": ("L", 8, 256), + "P": ("P", 8, 256), + "RGB": ("BGR", 24, 0), + "RGBA": ("BGRA", 32, 0), +} + + +def _dib_save(im, fp, filename): + _save(im, fp, filename, False) + + +def _save(im, fp, filename, bitmap_header=True): + try: + rawmode, bits, colors = SAVE[im.mode] + except KeyError as e: + raise OSError("cannot write mode %s as BMP" % im.mode) from e + + info = im.encoderinfo + + dpi = info.get("dpi", (96, 96)) + + # 1 meter == 39.3701 inches + ppm = tuple(map(lambda x: int(x * 39.3701 + 0.5), dpi)) + + stride = ((im.size[0] * bits + 7) // 8 + 3) & (~3) + header = 40 # or 64 for OS/2 version 2 + image = stride * im.size[1] + + # bitmap header + if bitmap_header: + offset = 14 + header + colors * 4 + file_size = offset + image + if file_size > 2 ** 32 - 1: + raise ValueError("File size is too large for the BMP format") + fp.write( + b"BM" # file type (magic) + + o32(file_size) # file size + + o32(0) # reserved + + o32(offset) # image data offset + ) + + # bitmap info header + fp.write( + o32(header) # info header size + + o32(im.size[0]) # width + + o32(im.size[1]) # height + + o16(1) # planes + + o16(bits) # depth + + o32(0) # compression (0=uncompressed) + + o32(image) # size of bitmap + + o32(ppm[0]) # resolution + + o32(ppm[1]) # resolution + + o32(colors) # colors used + + o32(colors) # colors important + ) + + fp.write(b"\0" * (header - 40)) # padding (for OS/2 format) + + if im.mode == "1": + for i in (0, 255): + fp.write(o8(i) * 4) + 
elif im.mode == "L": + for i in range(256): + fp.write(o8(i) * 4) + elif im.mode == "P": + fp.write(im.im.getpalette("RGB", "BGRX")) + + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, stride, -1))]) + + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(BmpImageFile.format, BmpImageFile, _accept) +Image.register_save(BmpImageFile.format, _save) + +Image.register_extension(BmpImageFile.format, ".bmp") + +Image.register_mime(BmpImageFile.format, "image/bmp") + +Image.register_open(DibImageFile.format, DibImageFile, _dib_accept) +Image.register_save(DibImageFile.format, _dib_save) + +Image.register_extension(DibImageFile.format, ".dib") + +Image.register_mime(DibImageFile.format, "image/bmp") diff --git a/venv/Lib/site-packages/PIL/BufrStubImagePlugin.py b/venv/Lib/site-packages/PIL/BufrStubImagePlugin.py new file mode 100644 index 000000000..48f21e1b3 --- /dev/null +++ b/venv/Lib/site-packages/PIL/BufrStubImagePlugin.py @@ -0,0 +1,73 @@ +# +# The Python Imaging Library +# $Id$ +# +# BUFR stub adapter +# +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific BUFR image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix): + return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC" + + +class BufrStubImageFile(ImageFile.StubImageFile): + + format = "BUFR" + format_description = "BUFR" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(4)): + raise SyntaxError("Not a BUFR file") + + self.fp.seek(offset) + + # make something up + self.mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise OSError("BUFR save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept) +Image.register_save(BufrStubImageFile.format, _save) + +Image.register_extension(BufrStubImageFile.format, ".bufr") diff --git a/venv/Lib/site-packages/PIL/ContainerIO.py b/venv/Lib/site-packages/PIL/ContainerIO.py new file mode 100644 index 000000000..45e80b39a --- /dev/null +++ b/venv/Lib/site-packages/PIL/ContainerIO.py @@ -0,0 +1,120 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a class to read from a container file +# +# History: +# 1995-06-18 fl Created +# 1995-09-07 fl Added readline(), readlines() +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1995 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + + +import io + + +class ContainerIO: + """ + A file object that provides read access to a part of an existing + file (for example a TAR file). + """ + + def __init__(self, file, offset, length): + """ + Create file object. + + :param file: Existing file. + :param offset: Start of region, in bytes. + :param length: Size of region, in bytes. 
+ """ + self.fh = file + self.pos = 0 + self.offset = offset + self.length = length + self.fh.seek(offset) + + ## + # Always false. + + def isatty(self): + return False + + def seek(self, offset, mode=io.SEEK_SET): + """ + Move file pointer. + + :param offset: Offset in bytes. + :param mode: Starting position. Use 0 for beginning of region, 1 + for current offset, and 2 for end of region. You cannot move + the pointer outside the defined region. + """ + if mode == 1: + self.pos = self.pos + offset + elif mode == 2: + self.pos = self.length + offset + else: + self.pos = offset + # clamp + self.pos = max(0, min(self.pos, self.length)) + self.fh.seek(self.offset + self.pos) + + def tell(self): + """ + Get current file pointer. + + :returns: Offset from start of region, in bytes. + """ + return self.pos + + def read(self, n=0): + """ + Read data. + + :param n: Number of bytes to read. If omitted or zero, + read until end of region. + :returns: An 8-bit string. + """ + if n: + n = min(n, self.length - self.pos) + else: + n = self.length - self.pos + if not n: # EOF + return b"" if "b" in self.fh.mode else "" + self.pos = self.pos + n + return self.fh.read(n) + + def readline(self): + """ + Read a line of text. + + :returns: An 8-bit string. + """ + s = b"" if "b" in self.fh.mode else "" + newline_character = b"\n" if "b" in self.fh.mode else "\n" + while True: + c = self.read(1) + if not c: + break + s = s + c + if c == newline_character: + break + return s + + def readlines(self): + """ + Read multiple lines of text. + + :returns: A list of 8-bit strings. + """ + lines = [] + while True: + s = self.readline() + if not s: + break + lines.append(s) + return lines diff --git a/venv/Lib/site-packages/PIL/CurImagePlugin.py b/venv/Lib/site-packages/PIL/CurImagePlugin.py new file mode 100644 index 000000000..3a1b6d2e5 --- /dev/null +++ b/venv/Lib/site-packages/PIL/CurImagePlugin.py @@ -0,0 +1,74 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# Windows Cursor support for PIL +# +# notes: +# uses BmpImagePlugin.py to read the bitmap data. +# +# history: +# 96-05-27 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +from . import BmpImagePlugin, Image +from ._binary import i8, i16le as i16, i32le as i32 + +# +# -------------------------------------------------------------------- + + +def _accept(prefix): + return prefix[:4] == b"\0\0\2\0" + + +## +# Image plugin for Windows Cursor files. + + +class CurImageFile(BmpImagePlugin.BmpImageFile): + + format = "CUR" + format_description = "Windows Cursor" + + def _open(self): + + offset = self.fp.tell() + + # check magic + s = self.fp.read(6) + if not _accept(s): + raise SyntaxError("not a CUR file") + + # pick the largest cursor in the file + m = b"" + for i in range(i16(s[4:])): + s = self.fp.read(16) + if not m: + m = s + elif i8(s[0]) > i8(m[0]) and i8(s[1]) > i8(m[1]): + m = s + if not m: + raise TypeError("No cursors were found") + + # load as bitmap + self._bitmap(i32(m[12:]) + offset) + + # patch up the bitmap height + self._size = self.size[0], self.size[1] // 2 + d, e, o, a = self.tile[0] + self.tile[0] = d, (0, 0) + self.size, o, a + + return + + +# +# -------------------------------------------------------------------- + +Image.register_open(CurImageFile.format, CurImageFile, _accept) + +Image.register_extension(CurImageFile.format, ".cur") diff --git a/venv/Lib/site-packages/PIL/DcxImagePlugin.py b/venv/Lib/site-packages/PIL/DcxImagePlugin.py new file mode 100644 index 000000000..de21db8f0 --- /dev/null +++ b/venv/Lib/site-packages/PIL/DcxImagePlugin.py @@ -0,0 +1,89 @@ +# +# The Python Imaging Library. +# $Id$ +# +# DCX file handling +# +# DCX is a container file format defined by Intel, commonly used +# for fax applications. 
Each DCX file consists of a directory +# (a list of file offsets) followed by a set of (usually 1-bit) +# PCX files. +# +# History: +# 1995-09-09 fl Created +# 1996-03-20 fl Properly derived from PcxImageFile. +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 2002-07-30 fl Fixed file handling +# +# Copyright (c) 1997-98 by Secret Labs AB. +# Copyright (c) 1995-96 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +from . import Image +from ._binary import i32le as i32 +from .PcxImagePlugin import PcxImageFile + +MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then? + + +def _accept(prefix): + return len(prefix) >= 4 and i32(prefix) == MAGIC + + +## +# Image plugin for the Intel DCX format. + + +class DcxImageFile(PcxImageFile): + + format = "DCX" + format_description = "Intel DCX" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # Header + s = self.fp.read(4) + if not _accept(s): + raise SyntaxError("not a DCX file") + + # Component directory + self._offset = [] + for i in range(1024): + offset = i32(self.fp.read(4)) + if not offset: + break + self._offset.append(offset) + + self.__fp = self.fp + self.frame = None + self.n_frames = len(self._offset) + self.is_animated = self.n_frames > 1 + self.seek(0) + + def seek(self, frame): + if not self._seek_check(frame): + return + self.frame = frame + self.fp = self.__fp + self.fp.seek(self._offset[frame]) + PcxImageFile._open(self) + + def tell(self): + return self.frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +Image.register_open(DcxImageFile.format, DcxImageFile, _accept) + +Image.register_extension(DcxImageFile.format, ".dcx") diff --git a/venv/Lib/site-packages/PIL/DdsImagePlugin.py b/venv/Lib/site-packages/PIL/DdsImagePlugin.py new file mode 100644 index 000000000..9ba6e0ff8 --- /dev/null +++ b/venv/Lib/site-packages/PIL/DdsImagePlugin.py 
@@ -0,0 +1,178 @@ +""" +A Pillow loader for .dds files (S3TC-compressed aka DXTC) +Jerome Leclanche + +Documentation: + https://web.archive.org/web/20170802060935/http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_compression_s3tc.txt + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ +""" + +import struct +from io import BytesIO + +from . import Image, ImageFile + +# Magic ("DDS ") +DDS_MAGIC = 0x20534444 + +# DDS flags +DDSD_CAPS = 0x1 +DDSD_HEIGHT = 0x2 +DDSD_WIDTH = 0x4 +DDSD_PITCH = 0x8 +DDSD_PIXELFORMAT = 0x1000 +DDSD_MIPMAPCOUNT = 0x20000 +DDSD_LINEARSIZE = 0x80000 +DDSD_DEPTH = 0x800000 + +# DDS caps +DDSCAPS_COMPLEX = 0x8 +DDSCAPS_TEXTURE = 0x1000 +DDSCAPS_MIPMAP = 0x400000 + +DDSCAPS2_CUBEMAP = 0x200 +DDSCAPS2_CUBEMAP_POSITIVEX = 0x400 +DDSCAPS2_CUBEMAP_NEGATIVEX = 0x800 +DDSCAPS2_CUBEMAP_POSITIVEY = 0x1000 +DDSCAPS2_CUBEMAP_NEGATIVEY = 0x2000 +DDSCAPS2_CUBEMAP_POSITIVEZ = 0x4000 +DDSCAPS2_CUBEMAP_NEGATIVEZ = 0x8000 +DDSCAPS2_VOLUME = 0x200000 + +# Pixel Format +DDPF_ALPHAPIXELS = 0x1 +DDPF_ALPHA = 0x2 +DDPF_FOURCC = 0x4 +DDPF_PALETTEINDEXED8 = 0x20 +DDPF_RGB = 0x40 +DDPF_LUMINANCE = 0x20000 + + +# dds.h + +DDS_FOURCC = DDPF_FOURCC +DDS_RGB = DDPF_RGB +DDS_RGBA = DDPF_RGB | DDPF_ALPHAPIXELS +DDS_LUMINANCE = DDPF_LUMINANCE +DDS_LUMINANCEA = DDPF_LUMINANCE | DDPF_ALPHAPIXELS +DDS_ALPHA = DDPF_ALPHA +DDS_PAL8 = DDPF_PALETTEINDEXED8 + +DDS_HEADER_FLAGS_TEXTURE = DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH | DDSD_PIXELFORMAT +DDS_HEADER_FLAGS_MIPMAP = DDSD_MIPMAPCOUNT +DDS_HEADER_FLAGS_VOLUME = DDSD_DEPTH +DDS_HEADER_FLAGS_PITCH = DDSD_PITCH +DDS_HEADER_FLAGS_LINEARSIZE = DDSD_LINEARSIZE + +DDS_HEIGHT = DDSD_HEIGHT +DDS_WIDTH = DDSD_WIDTH + +DDS_SURFACE_FLAGS_TEXTURE = DDSCAPS_TEXTURE +DDS_SURFACE_FLAGS_MIPMAP = DDSCAPS_COMPLEX | DDSCAPS_MIPMAP +DDS_SURFACE_FLAGS_CUBEMAP = DDSCAPS_COMPLEX + +DDS_CUBEMAP_POSITIVEX = DDSCAPS2_CUBEMAP | 
DDSCAPS2_CUBEMAP_POSITIVEX +DDS_CUBEMAP_NEGATIVEX = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEX +DDS_CUBEMAP_POSITIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEY +DDS_CUBEMAP_NEGATIVEY = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEY +DDS_CUBEMAP_POSITIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_POSITIVEZ +DDS_CUBEMAP_NEGATIVEZ = DDSCAPS2_CUBEMAP | DDSCAPS2_CUBEMAP_NEGATIVEZ + + +# DXT1 +DXT1_FOURCC = 0x31545844 + +# DXT3 +DXT3_FOURCC = 0x33545844 + +# DXT5 +DXT5_FOURCC = 0x35545844 + + +# dxgiformat.h + +DXGI_FORMAT_BC7_TYPELESS = 97 +DXGI_FORMAT_BC7_UNORM = 98 +DXGI_FORMAT_BC7_UNORM_SRGB = 99 + + +class DdsImageFile(ImageFile.ImageFile): + format = "DDS" + format_description = "DirectDraw Surface" + + def _open(self): + magic, header_size = struct.unpack(" 0: + s = fp.read(min(lengthfile, 100 * 1024)) + if not s: + break + lengthfile -= len(s) + f.write(s) + + # Build Ghostscript command + command = [ + "gs", + "-q", # quiet mode + "-g%dx%d" % size, # set output geometry (pixels) + "-r%fx%f" % res, # set input DPI (dots per inch) + "-dBATCH", # exit after processing + "-dNOPAUSE", # don't pause between pages + "-dSAFER", # safe mode + "-sDEVICE=ppmraw", # ppm driver + "-sOutputFile=%s" % outfile, # output file + # adjust for image origin + "-c", + "%d %d translate" % (-bbox[0], -bbox[1]), + "-f", + infile, # input file + # showpage (see https://bugs.ghostscript.com/show_bug.cgi?id=698272) + "-c", + "showpage", + ] + + if gs_windows_binary is not None: + if not gs_windows_binary: + raise OSError("Unable to locate Ghostscript on paths") + command[0] = gs_windows_binary + + # push data through Ghostscript + try: + startupinfo = None + if sys.platform.startswith("win"): + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + subprocess.check_call(command, startupinfo=startupinfo) + out_im = Image.open(outfile) + out_im.load() + finally: + try: + os.unlink(outfile) + if infile_temp: + os.unlink(infile_temp) + except 
OSError: + pass + + im = out_im.im.copy() + out_im.close() + return im + + +class PSFile: + """ + Wrapper for bytesio object that treats either CR or LF as end of line. + """ + + def __init__(self, fp): + self.fp = fp + self.char = None + + def seek(self, offset, whence=io.SEEK_SET): + self.char = None + self.fp.seek(offset, whence) + + def readline(self): + s = self.char or b"" + self.char = None + + c = self.fp.read(1) + while c not in b"\r\n": + s = s + c + c = self.fp.read(1) + + self.char = self.fp.read(1) + # line endings can be 1 or 2 of \r \n, in either order + if self.char in b"\r\n": + self.char = None + + return s.decode("latin-1") + + +def _accept(prefix): + return prefix[:4] == b"%!PS" or (len(prefix) >= 4 and i32(prefix) == 0xC6D3D0C5) + + +## +# Image plugin for Encapsulated Postscript. This plugin supports only +# a few variants of this format. + + +class EpsImageFile(ImageFile.ImageFile): + """EPS File Parser for the Python Imaging Library""" + + format = "EPS" + format_description = "Encapsulated Postscript" + + mode_map = {1: "L", 2: "LAB", 3: "RGB", 4: "CMYK"} + + def _open(self): + (length, offset) = self._find_offset(self.fp) + + # Rewrap the open file pointer in something that will + # convert line endings and decode to latin-1. + fp = PSFile(self.fp) + + # go to offset - start of "%!PS" + fp.seek(offset) + + box = None + + self.mode = "RGB" + self._size = 1, 1 # FIXME: huh? + + # + # Load EPS header + + s_raw = fp.readline() + s = s_raw.strip("\r\n") + + while s_raw: + if s: + if len(s) > 255: + raise SyntaxError("not an EPS file") + + try: + m = split.match(s) + except re.error as e: + raise SyntaxError("not an EPS file") from e + + if m: + k, v = m.group(1, 2) + self.info[k] = v + if k == "BoundingBox": + try: + # Note: The DSC spec says that BoundingBox + # fields should be integers, but some drivers + # put floating point values there anyway. 
+ box = [int(float(i)) for i in v.split()] + self._size = box[2] - box[0], box[3] - box[1] + self.tile = [ + ("eps", (0, 0) + self.size, offset, (length, box)) + ] + except Exception: + pass + + else: + m = field.match(s) + if m: + k = m.group(1) + + if k == "EndComments": + break + if k[:8] == "PS-Adobe": + self.info[k[:8]] = k[9:] + else: + self.info[k] = "" + elif s[0] == "%": + # handle non-DSC Postscript comments that some + # tools mistakenly put in the Comments section + pass + else: + raise OSError("bad EPS header") + + s_raw = fp.readline() + s = s_raw.strip("\r\n") + + if s and s[:1] != "%": + break + + # + # Scan for an "ImageData" descriptor + + while s[:1] == "%": + + if len(s) > 255: + raise SyntaxError("not an EPS file") + + if s[:11] == "%ImageData:": + # Encoded bitmapped image. + x, y, bi, mo = s[11:].split(None, 7)[:4] + + if int(bi) != 8: + break + try: + self.mode = self.mode_map[int(mo)] + except ValueError: + break + + self._size = int(x), int(y) + return + + s = fp.readline().strip("\r\n") + if not s: + break + + if not box: + raise OSError("cannot determine EPS bounding box") + + def _find_offset(self, fp): + + s = fp.read(160) + + if s[:4] == b"%!PS": + # for HEAD without binary preview + fp.seek(0, io.SEEK_END) + length = fp.tell() + offset = 0 + elif i32(s[0:4]) == 0xC6D3D0C5: + # FIX for: Some EPS file not handled correctly / issue #302 + # EPS can contain binary data + # or start directly with latin coding + # more info see: + # https://web.archive.org/web/20160528181353/http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf + offset = i32(s[4:8]) + length = i32(s[8:12]) + else: + raise SyntaxError("not an EPS file") + + return (length, offset) + + def load(self, scale=1): + # Load EPS via Ghostscript + if not self.tile: + return + self.im = Ghostscript(self.tile, self.size, self.fp, scale) + self.mode = self.im.mode + self._size = self.im.size + self.tile = [] + + def load_seek(self, *args, **kwargs): + # we can't 
incrementally load, so force ImageFile.parser to + # use our custom load method by defining this method. + pass + + +# +# -------------------------------------------------------------------- + + +def _save(im, fp, filename, eps=1): + """EPS Writer for the Python Imaging Library.""" + + # + # make sure image data is available + im.load() + + # + # determine postscript image mode + if im.mode == "L": + operator = (8, 1, "image") + elif im.mode == "RGB": + operator = (8, 3, "false 3 colorimage") + elif im.mode == "CMYK": + operator = (8, 4, "false 4 colorimage") + else: + raise ValueError("image mode is not supported") + + base_fp = fp + wrapped_fp = False + if fp != sys.stdout: + fp = io.TextIOWrapper(fp, encoding="latin-1") + wrapped_fp = True + + try: + if eps: + # + # write EPS header + fp.write("%!PS-Adobe-3.0 EPSF-3.0\n") + fp.write("%%Creator: PIL 0.1 EpsEncode\n") + # fp.write("%%CreationDate: %s"...) + fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size) + fp.write("%%Pages: 1\n") + fp.write("%%EndComments\n") + fp.write("%%Page: 1 1\n") + fp.write("%%ImageData: %d %d " % im.size) + fp.write('%d %d 0 1 1 "%s"\n' % operator) + + # + # image header + fp.write("gsave\n") + fp.write("10 dict begin\n") + fp.write("/buf %d string def\n" % (im.size[0] * operator[1])) + fp.write("%d %d scale\n" % im.size) + fp.write("%d %d 8\n" % im.size) # <= bits + fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1])) + fp.write("{ currentfile buf readhexstring pop } bind\n") + fp.write(operator[2] + "\n") + if hasattr(fp, "flush"): + fp.flush() + + ImageFile._save(im, base_fp, [("eps", (0, 0) + im.size, 0, None)]) + + fp.write("\n%%%%EndBinary\n") + fp.write("grestore end\n") + if hasattr(fp, "flush"): + fp.flush() + finally: + if wrapped_fp: + fp.detach() + + +# +# -------------------------------------------------------------------- + + +Image.register_open(EpsImageFile.format, EpsImageFile, _accept) + +Image.register_save(EpsImageFile.format, _save) + 
+Image.register_extensions(EpsImageFile.format, [".ps", ".eps"]) + +Image.register_mime(EpsImageFile.format, "application/postscript") diff --git a/venv/Lib/site-packages/PIL/ExifTags.py b/venv/Lib/site-packages/PIL/ExifTags.py new file mode 100644 index 000000000..f1c037e51 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ExifTags.py @@ -0,0 +1,318 @@ +# +# The Python Imaging Library. +# $Id$ +# +# EXIF tags +# +# Copyright (c) 2003 by Secret Labs AB +# +# See the README file for information on usage and redistribution. +# + +""" +This module provides constants and clear-text names for various +well-known EXIF tags. +""" + + +TAGS = { + # possibly incomplete + 0x000B: "ProcessingSoftware", + 0x00FE: "NewSubfileType", + 0x00FF: "SubfileType", + 0x0100: "ImageWidth", + 0x0101: "ImageLength", + 0x0102: "BitsPerSample", + 0x0103: "Compression", + 0x0106: "PhotometricInterpretation", + 0x0107: "Thresholding", + 0x0108: "CellWidth", + 0x0109: "CellLength", + 0x010A: "FillOrder", + 0x010D: "DocumentName", + 0x010E: "ImageDescription", + 0x010F: "Make", + 0x0110: "Model", + 0x0111: "StripOffsets", + 0x0112: "Orientation", + 0x0115: "SamplesPerPixel", + 0x0116: "RowsPerStrip", + 0x0117: "StripByteCounts", + 0x0118: "MinSampleValue", + 0x0119: "MaxSampleValue", + 0x011A: "XResolution", + 0x011B: "YResolution", + 0x011C: "PlanarConfiguration", + 0x011D: "PageName", + 0x0120: "FreeOffsets", + 0x0121: "FreeByteCounts", + 0x0122: "GrayResponseUnit", + 0x0123: "GrayResponseCurve", + 0x0124: "T4Options", + 0x0125: "T6Options", + 0x0128: "ResolutionUnit", + 0x0129: "PageNumber", + 0x012D: "TransferFunction", + 0x0131: "Software", + 0x0132: "DateTime", + 0x013B: "Artist", + 0x013C: "HostComputer", + 0x013D: "Predictor", + 0x013E: "WhitePoint", + 0x013F: "PrimaryChromaticities", + 0x0140: "ColorMap", + 0x0141: "HalftoneHints", + 0x0142: "TileWidth", + 0x0143: "TileLength", + 0x0144: "TileOffsets", + 0x0145: "TileByteCounts", + 0x014A: "SubIFDs", + 0x014C: "InkSet", + 0x014D: 
"InkNames", + 0x014E: "NumberOfInks", + 0x0150: "DotRange", + 0x0151: "TargetPrinter", + 0x0152: "ExtraSamples", + 0x0153: "SampleFormat", + 0x0154: "SMinSampleValue", + 0x0155: "SMaxSampleValue", + 0x0156: "TransferRange", + 0x0157: "ClipPath", + 0x0158: "XClipPathUnits", + 0x0159: "YClipPathUnits", + 0x015A: "Indexed", + 0x015B: "JPEGTables", + 0x015F: "OPIProxy", + 0x0200: "JPEGProc", + 0x0201: "JpegIFOffset", + 0x0202: "JpegIFByteCount", + 0x0203: "JpegRestartInterval", + 0x0205: "JpegLosslessPredictors", + 0x0206: "JpegPointTransforms", + 0x0207: "JpegQTables", + 0x0208: "JpegDCTables", + 0x0209: "JpegACTables", + 0x0211: "YCbCrCoefficients", + 0x0212: "YCbCrSubSampling", + 0x0213: "YCbCrPositioning", + 0x0214: "ReferenceBlackWhite", + 0x02BC: "XMLPacket", + 0x1000: "RelatedImageFileFormat", + 0x1001: "RelatedImageWidth", + 0x1002: "RelatedImageLength", + 0x4746: "Rating", + 0x4749: "RatingPercent", + 0x800D: "ImageID", + 0x828D: "CFARepeatPatternDim", + 0x828E: "CFAPattern", + 0x828F: "BatteryLevel", + 0x8298: "Copyright", + 0x829A: "ExposureTime", + 0x829D: "FNumber", + 0x83BB: "IPTCNAA", + 0x8649: "ImageResources", + 0x8769: "ExifOffset", + 0x8773: "InterColorProfile", + 0x8822: "ExposureProgram", + 0x8824: "SpectralSensitivity", + 0x8825: "GPSInfo", + 0x8827: "ISOSpeedRatings", + 0x8828: "OECF", + 0x8829: "Interlace", + 0x882A: "TimeZoneOffset", + 0x882B: "SelfTimerMode", + 0x9000: "ExifVersion", + 0x9003: "DateTimeOriginal", + 0x9004: "DateTimeDigitized", + 0x9101: "ComponentsConfiguration", + 0x9102: "CompressedBitsPerPixel", + 0x9201: "ShutterSpeedValue", + 0x9202: "ApertureValue", + 0x9203: "BrightnessValue", + 0x9204: "ExposureBiasValue", + 0x9205: "MaxApertureValue", + 0x9206: "SubjectDistance", + 0x9207: "MeteringMode", + 0x9208: "LightSource", + 0x9209: "Flash", + 0x920A: "FocalLength", + 0x920B: "FlashEnergy", + 0x920C: "SpatialFrequencyResponse", + 0x920D: "Noise", + 0x9211: "ImageNumber", + 0x9212: "SecurityClassification", + 0x9213: 
"ImageHistory", + 0x9214: "SubjectLocation", + 0x9215: "ExposureIndex", + 0x9216: "TIFF/EPStandardID", + 0x927C: "MakerNote", + 0x9286: "UserComment", + 0x9290: "SubsecTime", + 0x9291: "SubsecTimeOriginal", + 0x9292: "SubsecTimeDigitized", + 0x9400: "AmbientTemperature", + 0x9401: "Humidity", + 0x9402: "Pressure", + 0x9403: "WaterDepth", + 0x9404: "Acceleration", + 0x9405: "CameraElevationAngle", + 0x9C9B: "XPTitle", + 0x9C9C: "XPComment", + 0x9C9D: "XPAuthor", + 0x9C9E: "XPKeywords", + 0x9C9F: "XPSubject", + 0xA000: "FlashPixVersion", + 0xA001: "ColorSpace", + 0xA002: "ExifImageWidth", + 0xA003: "ExifImageHeight", + 0xA004: "RelatedSoundFile", + 0xA005: "ExifInteroperabilityOffset", + 0xA20B: "FlashEnergy", + 0xA20C: "SpatialFrequencyResponse", + 0xA20E: "FocalPlaneXResolution", + 0xA20F: "FocalPlaneYResolution", + 0xA210: "FocalPlaneResolutionUnit", + 0xA214: "SubjectLocation", + 0xA215: "ExposureIndex", + 0xA217: "SensingMethod", + 0xA300: "FileSource", + 0xA301: "SceneType", + 0xA302: "CFAPattern", + 0xA401: "CustomRendered", + 0xA402: "ExposureMode", + 0xA403: "WhiteBalance", + 0xA404: "DigitalZoomRatio", + 0xA405: "FocalLengthIn35mmFilm", + 0xA406: "SceneCaptureType", + 0xA407: "GainControl", + 0xA408: "Contrast", + 0xA409: "Saturation", + 0xA40A: "Sharpness", + 0xA40B: "DeviceSettingDescription", + 0xA40C: "SubjectDistanceRange", + 0xA420: "ImageUniqueID", + 0xA430: "CameraOwnerName", + 0xA431: "BodySerialNumber", + 0xA432: "LensSpecification", + 0xA433: "LensMake", + 0xA434: "LensModel", + 0xA435: "LensSerialNumber", + 0xA500: "Gamma", + 0xC4A5: "PrintImageMatching", + 0xC612: "DNGVersion", + 0xC613: "DNGBackwardVersion", + 0xC614: "UniqueCameraModel", + 0xC615: "LocalizedCameraModel", + 0xC616: "CFAPlaneColor", + 0xC617: "CFALayout", + 0xC618: "LinearizationTable", + 0xC619: "BlackLevelRepeatDim", + 0xC61A: "BlackLevel", + 0xC61B: "BlackLevelDeltaH", + 0xC61C: "BlackLevelDeltaV", + 0xC61D: "WhiteLevel", + 0xC61E: "DefaultScale", + 0xC61F: 
"DefaultCropOrigin", + 0xC620: "DefaultCropSize", + 0xC621: "ColorMatrix1", + 0xC622: "ColorMatrix2", + 0xC623: "CameraCalibration1", + 0xC624: "CameraCalibration2", + 0xC625: "ReductionMatrix1", + 0xC626: "ReductionMatrix2", + 0xC627: "AnalogBalance", + 0xC628: "AsShotNeutral", + 0xC629: "AsShotWhiteXY", + 0xC62A: "BaselineExposure", + 0xC62B: "BaselineNoise", + 0xC62C: "BaselineSharpness", + 0xC62D: "BayerGreenSplit", + 0xC62E: "LinearResponseLimit", + 0xC62F: "CameraSerialNumber", + 0xC630: "LensInfo", + 0xC631: "ChromaBlurRadius", + 0xC632: "AntiAliasStrength", + 0xC633: "ShadowScale", + 0xC634: "DNGPrivateData", + 0xC635: "MakerNoteSafety", + 0xC65A: "CalibrationIlluminant1", + 0xC65B: "CalibrationIlluminant2", + 0xC65C: "BestQualityScale", + 0xC65D: "RawDataUniqueID", + 0xC68B: "OriginalRawFileName", + 0xC68C: "OriginalRawFileData", + 0xC68D: "ActiveArea", + 0xC68E: "MaskedAreas", + 0xC68F: "AsShotICCProfile", + 0xC690: "AsShotPreProfileMatrix", + 0xC691: "CurrentICCProfile", + 0xC692: "CurrentPreProfileMatrix", + 0xC6BF: "ColorimetricReference", + 0xC6F3: "CameraCalibrationSignature", + 0xC6F4: "ProfileCalibrationSignature", + 0xC6F6: "AsShotProfileName", + 0xC6F7: "NoiseReductionApplied", + 0xC6F8: "ProfileName", + 0xC6F9: "ProfileHueSatMapDims", + 0xC6FA: "ProfileHueSatMapData1", + 0xC6FB: "ProfileHueSatMapData2", + 0xC6FC: "ProfileToneCurve", + 0xC6FD: "ProfileEmbedPolicy", + 0xC6FE: "ProfileCopyright", + 0xC714: "ForwardMatrix1", + 0xC715: "ForwardMatrix2", + 0xC716: "PreviewApplicationName", + 0xC717: "PreviewApplicationVersion", + 0xC718: "PreviewSettingsName", + 0xC719: "PreviewSettingsDigest", + 0xC71A: "PreviewColorSpace", + 0xC71B: "PreviewDateTime", + 0xC71C: "RawImageDigest", + 0xC71D: "OriginalRawFileDigest", + 0xC71E: "SubTileBlockSize", + 0xC71F: "RowInterleaveFactor", + 0xC725: "ProfileLookTableDims", + 0xC726: "ProfileLookTableData", + 0xC740: "OpcodeList1", + 0xC741: "OpcodeList2", + 0xC74E: "OpcodeList3", + 0xC761: "NoiseProfile", +} 
+"""Maps EXIF tags to tag names.""" + + +GPSTAGS = { + 0: "GPSVersionID", + 1: "GPSLatitudeRef", + 2: "GPSLatitude", + 3: "GPSLongitudeRef", + 4: "GPSLongitude", + 5: "GPSAltitudeRef", + 6: "GPSAltitude", + 7: "GPSTimeStamp", + 8: "GPSSatellites", + 9: "GPSStatus", + 10: "GPSMeasureMode", + 11: "GPSDOP", + 12: "GPSSpeedRef", + 13: "GPSSpeed", + 14: "GPSTrackRef", + 15: "GPSTrack", + 16: "GPSImgDirectionRef", + 17: "GPSImgDirection", + 18: "GPSMapDatum", + 19: "GPSDestLatitudeRef", + 20: "GPSDestLatitude", + 21: "GPSDestLongitudeRef", + 22: "GPSDestLongitude", + 23: "GPSDestBearingRef", + 24: "GPSDestBearing", + 25: "GPSDestDistanceRef", + 26: "GPSDestDistance", + 27: "GPSProcessingMethod", + 28: "GPSAreaInformation", + 29: "GPSDateStamp", + 30: "GPSDifferential", + 31: "GPSHPositioningError", +} +"""Maps EXIF GPS tags to tag names.""" diff --git a/venv/Lib/site-packages/PIL/FitsStubImagePlugin.py b/venv/Lib/site-packages/PIL/FitsStubImagePlugin.py new file mode 100644 index 000000000..c2ce8651c --- /dev/null +++ b/venv/Lib/site-packages/PIL/FitsStubImagePlugin.py @@ -0,0 +1,76 @@ +# +# The Python Imaging Library +# $Id$ +# +# FITS stub adapter +# +# Copyright (c) 1998-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific FITS image handler. + + :param handler: Handler object. + """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix): + return prefix[:6] == b"SIMPLE" + + +class FITSStubImageFile(ImageFile.StubImageFile): + + format = "FITS" + format_description = "FITS" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(6)): + raise SyntaxError("Not a FITS file") + + # FIXME: add more sanity checks here; mandatory header items + # include SIMPLE, BITPIX, NAXIS, etc. 
+ + self.fp.seek(offset) + + # make something up + self.mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise OSError("FITS save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(FITSStubImageFile.format, FITSStubImageFile, _accept) +Image.register_save(FITSStubImageFile.format, _save) + +Image.register_extensions(FITSStubImageFile.format, [".fit", ".fits"]) diff --git a/venv/Lib/site-packages/PIL/FliImagePlugin.py b/venv/Lib/site-packages/PIL/FliImagePlugin.py new file mode 100644 index 000000000..f09d62ce3 --- /dev/null +++ b/venv/Lib/site-packages/PIL/FliImagePlugin.py @@ -0,0 +1,169 @@ +# +# The Python Imaging Library. +# $Id$ +# +# FLI/FLC file handling. +# +# History: +# 95-09-01 fl Created +# 97-01-03 fl Fixed parser, setup decoder tile +# 98-07-15 fl Renamed offset attribute to avoid name clash +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1995-97. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile, ImagePalette +from ._binary import i8, i16le as i16, i32le as i32, o8 + +# +# decoder + + +def _accept(prefix): + return len(prefix) >= 6 and i16(prefix[4:6]) in [0xAF11, 0xAF12] + + +## +# Image plugin for the FLI/FLC animation format. Use the seek +# method to load individual frames. 
+ + +class FliImageFile(ImageFile.ImageFile): + + format = "FLI" + format_description = "Autodesk FLI/FLC Animation" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # HEAD + s = self.fp.read(128) + if not ( + _accept(s) + and i16(s[14:16]) in [0, 3] # flags + and s[20:22] == b"\x00\x00" # reserved + ): + raise SyntaxError("not an FLI/FLC file") + + # frames + self.n_frames = i16(s[6:8]) + self.is_animated = self.n_frames > 1 + + # image characteristics + self.mode = "P" + self._size = i16(s[8:10]), i16(s[10:12]) + + # animation speed + duration = i32(s[16:20]) + magic = i16(s[4:6]) + if magic == 0xAF11: + duration = (duration * 1000) // 70 + self.info["duration"] = duration + + # look for palette + palette = [(a, a, a) for a in range(256)] + + s = self.fp.read(16) + + self.__offset = 128 + + if i16(s[4:6]) == 0xF100: + # prefix chunk; ignore it + self.__offset = self.__offset + i32(s) + s = self.fp.read(16) + + if i16(s[4:6]) == 0xF1FA: + # look for palette chunk + s = self.fp.read(6) + if i16(s[4:6]) == 11: + self._palette(palette, 2) + elif i16(s[4:6]) == 4: + self._palette(palette, 0) + + palette = [o8(r) + o8(g) + o8(b) for (r, g, b) in palette] + self.palette = ImagePalette.raw("RGB", b"".join(palette)) + + # set things up to decode first frame + self.__frame = -1 + self.__fp = self.fp + self.__rewind = self.fp.tell() + self.seek(0) + + def _palette(self, palette, shift): + # load palette + + i = 0 + for e in range(i16(self.fp.read(2))): + s = self.fp.read(2) + i = i + i8(s[0]) + n = i8(s[1]) + if n == 0: + n = 256 + s = self.fp.read(n * 3) + for n in range(0, len(s), 3): + r = i8(s[n]) << shift + g = i8(s[n + 1]) << shift + b = i8(s[n + 2]) << shift + palette[i] = (r, g, b) + i += 1 + + def seek(self, frame): + if not self._seek_check(frame): + return + if frame < self.__frame: + self._seek(0) + + for f in range(self.__frame + 1, frame + 1): + self._seek(f) + + def _seek(self, frame): + if frame == 0: + self.__frame = -1 + 
self.__fp.seek(self.__rewind) + self.__offset = 128 + else: + # ensure that the previous frame was loaded + self.load() + + if frame != self.__frame + 1: + raise ValueError("cannot seek to frame %d" % frame) + self.__frame = frame + + # move to next frame + self.fp = self.__fp + self.fp.seek(self.__offset) + + s = self.fp.read(4) + if not s: + raise EOFError + + framesize = i32(s) + + self.decodermaxblock = framesize + self.tile = [("fli", (0, 0) + self.size, self.__offset, None)] + + self.__offset += framesize + + def tell(self): + return self.__frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# +# registry + +Image.register_open(FliImageFile.format, FliImageFile, _accept) + +Image.register_extensions(FliImageFile.format, [".fli", ".flc"]) diff --git a/venv/Lib/site-packages/PIL/FontFile.py b/venv/Lib/site-packages/PIL/FontFile.py new file mode 100644 index 000000000..3ebd90730 --- /dev/null +++ b/venv/Lib/site-packages/PIL/FontFile.py @@ -0,0 +1,111 @@ +# +# The Python Imaging Library +# $Id$ +# +# base class for raster font file parsers +# +# history: +# 1997-06-05 fl created +# 1997-08-19 fl restrict image width +# +# Copyright (c) 1997-1998 by Secret Labs AB +# Copyright (c) 1997-1998 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + + +import os + +from . 
import Image, _binary + +WIDTH = 800 + + +def puti16(fp, values): + """Write network order (big-endian) 16-bit sequence""" + for v in values: + if v < 0: + v += 65536 + fp.write(_binary.o16be(v)) + + +class FontFile: + """Base class for raster font file handlers.""" + + bitmap = None + + def __init__(self): + + self.info = {} + self.glyph = [None] * 256 + + def __getitem__(self, ix): + return self.glyph[ix] + + def compile(self): + """Create metrics and bitmap""" + + if self.bitmap: + return + + # create bitmap large enough to hold all data + h = w = maxwidth = 0 + lines = 1 + for glyph in self: + if glyph: + d, dst, src, im = glyph + h = max(h, src[3] - src[1]) + w = w + (src[2] - src[0]) + if w > WIDTH: + lines += 1 + w = src[2] - src[0] + maxwidth = max(maxwidth, w) + + xsize = maxwidth + ysize = lines * h + + if xsize == 0 and ysize == 0: + return "" + + self.ysize = h + + # paste glyphs into bitmap + self.bitmap = Image.new("1", (xsize, ysize)) + self.metrics = [None] * 256 + x = y = 0 + for i in range(256): + glyph = self[i] + if glyph: + d, dst, src, im = glyph + xx = src[2] - src[0] + # yy = src[3] - src[1] + x0, y0 = x, y + x = x + xx + if x > WIDTH: + x, y = 0, y + h + x0, y0 = x, y + x = xx + s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0 + self.bitmap.paste(im.crop(src), s) + self.metrics[i] = d, dst, s + + def save(self, filename): + """Save font""" + + self.compile() + + # font data + self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG") + + # font metrics + with open(os.path.splitext(filename)[0] + ".pil", "wb") as fp: + fp.write(b"PILfont\n") + fp.write((";;;;;;%d;\n" % self.ysize).encode("ascii")) # HACK!!! 
+ fp.write(b"DATA\n") + for id in range(256): + m = self.metrics[id] + if not m: + puti16(fp, [0] * 10) + else: + puti16(fp, m[0] + m[1] + m[2]) diff --git a/venv/Lib/site-packages/PIL/FpxImagePlugin.py b/venv/Lib/site-packages/PIL/FpxImagePlugin.py new file mode 100644 index 000000000..bbee9e24d --- /dev/null +++ b/venv/Lib/site-packages/PIL/FpxImagePlugin.py @@ -0,0 +1,242 @@ +# +# THIS IS WORK IN PROGRESS +# +# The Python Imaging Library. +# $Id$ +# +# FlashPix support for PIL +# +# History: +# 97-01-25 fl Created (reads uncompressed RGB images only) +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# +import olefile + +from . import Image, ImageFile +from ._binary import i8, i32le as i32 + +# we map from colour field tuples to (mode, rawmode) descriptors +MODES = { + # opacity + (0x00007FFE): ("A", "L"), + # monochrome + (0x00010000,): ("L", "L"), + (0x00018000, 0x00017FFE): ("RGBA", "LA"), + # photo YCC + (0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"), + (0x00028000, 0x00028001, 0x00028002, 0x00027FFE): ("RGBA", "YCCA;P"), + # standard RGB (NIFRGB) + (0x00030000, 0x00030001, 0x00030002): ("RGB", "RGB"), + (0x00038000, 0x00038001, 0x00038002, 0x00037FFE): ("RGBA", "RGBA"), +} + + +# +# -------------------------------------------------------------------- + + +def _accept(prefix): + return prefix[:8] == olefile.MAGIC + + +## +# Image plugin for the FlashPix images. 
+ + +class FpxImageFile(ImageFile.ImageFile): + + format = "FPX" + format_description = "FlashPix" + + def _open(self): + # + # read the OLE directory and see if this is a likely + # to be a FlashPix file + + try: + self.ole = olefile.OleFileIO(self.fp) + except OSError as e: + raise SyntaxError("not an FPX file; invalid OLE file") from e + + if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B": + raise SyntaxError("not an FPX file; bad root CLSID") + + self._open_index(1) + + def _open_index(self, index=1): + # + # get the Image Contents Property Set + + prop = self.ole.getproperties( + ["Data Object Store %06d" % index, "\005Image Contents"] + ) + + # size (highest resolution) + + self._size = prop[0x1000002], prop[0x1000003] + + size = max(self.size) + i = 1 + while size > 64: + size = size / 2 + i += 1 + self.maxid = i - 1 + + # mode. instead of using a single field for this, flashpix + # requires you to specify the mode for each channel in each + # resolution subimage, and leaves it to the decoder to make + # sure that they all match. for now, we'll cheat and assume + # that this is always the case. 
+ + id = self.maxid << 16 + + s = prop[0x2000002 | id] + + colors = [] + bands = i32(s, 4) + if bands > 4: + raise OSError("Invalid number of bands") + for i in range(bands): + # note: for now, we ignore the "uncalibrated" flag + colors.append(i32(s, 8 + i * 4) & 0x7FFFFFFF) + + self.mode, self.rawmode = MODES[tuple(colors)] + + # load JPEG tables, if any + self.jpeg = {} + for i in range(256): + id = 0x3000001 | (i << 16) + if id in prop: + self.jpeg[i] = prop[id] + + self._open_subimage(1, self.maxid) + + def _open_subimage(self, index=1, subimage=0): + # + # setup tile descriptors for a given subimage + + stream = [ + "Data Object Store %06d" % index, + "Resolution %04d" % subimage, + "Subimage 0000 Header", + ] + + fp = self.ole.openstream(stream) + + # skip prefix + fp.read(28) + + # header stream + s = fp.read(36) + + size = i32(s, 4), i32(s, 8) + # tilecount = i32(s, 12) + tilesize = i32(s, 16), i32(s, 20) + # channels = i32(s, 24) + offset = i32(s, 28) + length = i32(s, 32) + + if size != self.size: + raise OSError("subimage mismatch") + + # get tile descriptors + fp.seek(28 + offset) + s = fp.read(i32(s, 12) * length) + + x = y = 0 + xsize, ysize = size + xtile, ytile = tilesize + self.tile = [] + + for i in range(0, len(s), length): + + compression = i32(s, i + 8) + + if compression == 0: + self.tile.append( + ( + "raw", + (x, y, x + xtile, y + ytile), + i32(s, i) + 28, + (self.rawmode), + ) + ) + + elif compression == 1: + + # FIXME: the fill decoder is not implemented + self.tile.append( + ( + "fill", + (x, y, x + xtile, y + ytile), + i32(s, i) + 28, + (self.rawmode, s[12:16]), + ) + ) + + elif compression == 2: + + internal_color_conversion = i8(s[14]) + jpeg_tables = i8(s[15]) + rawmode = self.rawmode + + if internal_color_conversion: + # The image is stored as usual (usually YCbCr). + if rawmode == "RGBA": + # For "RGBA", data is stored as YCbCrA based on + # negative RGB. 
The following trick works around + # this problem : + jpegmode, rawmode = "YCbCrK", "CMYK" + else: + jpegmode = None # let the decoder decide + + else: + # The image is stored as defined by rawmode + jpegmode = rawmode + + self.tile.append( + ( + "jpeg", + (x, y, x + xtile, y + ytile), + i32(s, i) + 28, + (rawmode, jpegmode), + ) + ) + + # FIXME: jpeg tables are tile dependent; the prefix + # data must be placed in the tile descriptor itself! + + if jpeg_tables: + self.tile_prefix = self.jpeg[jpeg_tables] + + else: + raise OSError("unknown/invalid compression") + + x = x + xtile + if x >= xsize: + x, y = 0, y + ytile + if y >= ysize: + break # isn't really required + + self.stream = stream + self.fp = None + + def load(self): + + if not self.fp: + self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"]) + + return ImageFile.ImageFile.load(self) + + +# +# -------------------------------------------------------------------- + + +Image.register_open(FpxImageFile.format, FpxImageFile, _accept) + +Image.register_extension(FpxImageFile.format, ".fpx") diff --git a/venv/Lib/site-packages/PIL/FtexImagePlugin.py b/venv/Lib/site-packages/PIL/FtexImagePlugin.py new file mode 100644 index 000000000..096ccacac --- /dev/null +++ b/venv/Lib/site-packages/PIL/FtexImagePlugin.py @@ -0,0 +1,106 @@ +""" +A Pillow loader for .ftc and .ftu files (FTEX) +Jerome Leclanche + +The contents of this file are hereby released in the public domain (CC0) +Full text of the CC0 license: + https://creativecommons.org/publicdomain/zero/1.0/ + +Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001 + +The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a +packed custom format called FTEX. This file format uses file extensions FTC +and FTU. +* FTC files are compressed textures (using standard texture compression). +* FTU files are not compressed. +Texture File Format +The FTC and FTU texture files both use the same format. 
This +has the following structure: +{header} +{format_directory} +{data} +Where: +{header} = { + u32:magic, + u32:version, + u32:width, + u32:height, + u32:mipmap_count, + u32:format_count +} + +* The "magic" number is "FTEX". +* "width" and "height" are the dimensions of the texture. +* "mipmap_count" is the number of mipmaps in the texture. +* "format_count" is the number of texture formats (different versions of the +same texture) in this file. + +{format_directory} = format_count * { u32:format, u32:where } + +The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB +uncompressed textures. +The texture data for a format starts at the position "where" in the file. + +Each set of texture data in the file has the following structure: +{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } } +* "mipmap_size" is the number of bytes in that mip level. For compressed +textures this is the size of the texture data compressed with DXT1. For 24 bit +uncompressed textures, this is 3 * width * height. Following this are the image +bytes for that mipmap level. + +Note: All data is stored in little-Endian (Intel) byte order. +""" + +import struct +from io import BytesIO + +from . import Image, ImageFile + +MAGIC = b"FTEX" +FORMAT_DXT1 = 0 +FORMAT_UNCOMPRESSED = 1 + + +class FtexImageFile(ImageFile.ImageFile): + format = "FTEX" + format_description = "Texture File Format (IW2:EOC)" + + def _open(self): + struct.unpack("= 8 and i32(prefix[:4]) >= 20 and i32(prefix[4:8]) in (1, 2) + + +## +# Image plugin for the GIMP brush format. 
+ + +class GbrImageFile(ImageFile.ImageFile): + + format = "GBR" + format_description = "GIMP brush file" + + def _open(self): + header_size = i32(self.fp.read(4)) + version = i32(self.fp.read(4)) + if header_size < 20: + raise SyntaxError("not a GIMP brush") + if version not in (1, 2): + raise SyntaxError("Unsupported GIMP brush version: %s" % version) + + width = i32(self.fp.read(4)) + height = i32(self.fp.read(4)) + color_depth = i32(self.fp.read(4)) + if width <= 0 or height <= 0: + raise SyntaxError("not a GIMP brush") + if color_depth not in (1, 4): + raise SyntaxError("Unsupported GIMP brush color depth: %s" % color_depth) + + if version == 1: + comment_length = header_size - 20 + else: + comment_length = header_size - 28 + magic_number = self.fp.read(4) + if magic_number != b"GIMP": + raise SyntaxError("not a GIMP brush, bad magic number") + self.info["spacing"] = i32(self.fp.read(4)) + + comment = self.fp.read(comment_length)[:-1] + + if color_depth == 1: + self.mode = "L" + else: + self.mode = "RGBA" + + self._size = width, height + + self.info["comment"] = comment + + # Image might not be small + Image._decompression_bomb_check(self.size) + + # Data is an uncompressed block of w * h * bytes/pixel + self._data_size = width * height * color_depth + + def load(self): + if self.im: + # Already loaded + return + + self.im = Image.core.new(self.mode, self.size) + self.frombytes(self.fp.read(self._data_size)) + + +# +# registry + + +Image.register_open(GbrImageFile.format, GbrImageFile, _accept) +Image.register_extension(GbrImageFile.format, ".gbr") diff --git a/venv/Lib/site-packages/PIL/GdImageFile.py b/venv/Lib/site-packages/PIL/GdImageFile.py new file mode 100644 index 000000000..0c4574f9e --- /dev/null +++ b/venv/Lib/site-packages/PIL/GdImageFile.py @@ -0,0 +1,89 @@ +# +# The Python Imaging Library. +# $Id$ +# +# GD file handling +# +# History: +# 1996-04-12 fl Created +# +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1996 by Fredrik Lundh. 
+# +# See the README file for information on usage and redistribution. +# + + +""" +.. note:: + This format cannot be automatically recognized, so the + class is not registered for use with :py:func:`PIL.Image.open()`. To open a + gd file, use the :py:func:`PIL.GdImageFile.open()` function instead. + +.. warning:: + THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This + implementation is provided for convenience and demonstrational + purposes only. +""" + + +from . import ImageFile, ImagePalette, UnidentifiedImageError +from ._binary import i8, i16be as i16, i32be as i32 + + +class GdImageFile(ImageFile.ImageFile): + """ + Image plugin for the GD uncompressed format. Note that this format + is not supported by the standard :py:func:`PIL.Image.open()` function. To use + this plugin, you have to import the :py:mod:`PIL.GdImageFile` module and + use the :py:func:`PIL.GdImageFile.open()` function. + """ + + format = "GD" + format_description = "GD uncompressed images" + + def _open(self): + + # Header + s = self.fp.read(1037) + + if not i16(s[:2]) in [65534, 65535]: + raise SyntaxError("Not a valid GD 2.x .gd file") + + self.mode = "L" # FIXME: "P" + self._size = i16(s[2:4]), i16(s[4:6]) + + trueColor = i8(s[6]) + trueColorOffset = 2 if trueColor else 0 + + # transparency index + tindex = i32(s[7 + trueColorOffset : 7 + trueColorOffset + 4]) + if tindex < 256: + self.info["transparency"] = tindex + + self.palette = ImagePalette.raw( + "XBGR", s[7 + trueColorOffset + 4 : 7 + trueColorOffset + 4 + 256 * 4] + ) + + self.tile = [ + ("raw", (0, 0) + self.size, 7 + trueColorOffset + 4 + 256 * 4, ("L", 0, 1)) + ] + + +def open(fp, mode="r"): + """ + Load texture from a GD image file. + + :param filename: GD file name, or an opened file handle. + :param mode: Optional mode. In this version, if the mode argument + is given, it must be "r". + :returns: An image instance. + :raises OSError: If the image could not be read. 
+ """ + if mode != "r": + raise ValueError("bad mode") + + try: + return GdImageFile(fp) + except SyntaxError as e: + raise UnidentifiedImageError("cannot identify this image file") from e diff --git a/venv/Lib/site-packages/PIL/GifImagePlugin.py b/venv/Lib/site-packages/PIL/GifImagePlugin.py new file mode 100644 index 000000000..653051bb8 --- /dev/null +++ b/venv/Lib/site-packages/PIL/GifImagePlugin.py @@ -0,0 +1,885 @@ +# +# The Python Imaging Library. +# $Id$ +# +# GIF file handling +# +# History: +# 1995-09-01 fl Created +# 1996-12-14 fl Added interlace support +# 1996-12-30 fl Added animation support +# 1997-01-05 fl Added write support, fixed local colour map bug +# 1997-02-23 fl Make sure to load raster data in getdata() +# 1997-07-05 fl Support external decoder (0.4) +# 1998-07-09 fl Handle all modes when saving (0.5) +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 2001-04-16 fl Added rewind support (seek to frame 0) (0.6) +# 2001-04-17 fl Added palette optimization (0.7) +# 2002-06-06 fl Added transparency support for save (0.8) +# 2004-02-24 fl Disable interlacing for small images +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1995-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import itertools +import math +import os +import subprocess + +from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence +from ._binary import i8, i16le as i16, o8, o16le as o16 + +# -------------------------------------------------------------------- +# Identify/read GIF files + + +def _accept(prefix): + return prefix[:6] in [b"GIF87a", b"GIF89a"] + + +## +# Image plugin for GIF images. This plugin supports both GIF87 and +# GIF89 images. 
+ + +class GifImageFile(ImageFile.ImageFile): + + format = "GIF" + format_description = "Compuserve GIF" + _close_exclusive_fp_after_loading = False + + global_palette = None + + def data(self): + s = self.fp.read(1) + if s and i8(s): + return self.fp.read(i8(s)) + return None + + def _open(self): + + # Screen + s = self.fp.read(13) + if not _accept(s): + raise SyntaxError("not a GIF file") + + self.info["version"] = s[:6] + self._size = i16(s[6:]), i16(s[8:]) + self.tile = [] + flags = i8(s[10]) + bits = (flags & 7) + 1 + + if flags & 128: + # get global palette + self.info["background"] = i8(s[11]) + # check if palette contains colour indices + p = self.fp.read(3 << bits) + for i in range(0, len(p), 3): + if not (i // 3 == i8(p[i]) == i8(p[i + 1]) == i8(p[i + 2])): + p = ImagePalette.raw("RGB", p) + self.global_palette = self.palette = p + break + + self.__fp = self.fp # FIXME: hack + self.__rewind = self.fp.tell() + self._n_frames = None + self._is_animated = None + self._seek(0) # get ready to read first frame + + @property + def n_frames(self): + if self._n_frames is None: + current = self.tell() + try: + while True: + self.seek(self.tell() + 1) + except EOFError: + self._n_frames = self.tell() + 1 + self.seek(current) + return self._n_frames + + @property + def is_animated(self): + if self._is_animated is None: + if self._n_frames is not None: + self._is_animated = self._n_frames != 1 + else: + current = self.tell() + + try: + self.seek(1) + self._is_animated = True + except EOFError: + self._is_animated = False + + self.seek(current) + return self._is_animated + + def seek(self, frame): + if not self._seek_check(frame): + return + if frame < self.__frame: + if frame != 0: + self.im = None + self._seek(0) + + last_frame = self.__frame + for f in range(self.__frame + 1, frame + 1): + try: + self._seek(f) + except EOFError as e: + self.seek(last_frame) + raise EOFError("no more images in GIF file") from e + + def _seek(self, frame): + + if frame == 0: + # 
rewind + self.__offset = 0 + self.dispose = None + self.dispose_extent = [0, 0, 0, 0] # x0, y0, x1, y1 + self.__frame = -1 + self.__fp.seek(self.__rewind) + self._prev_im = None + self.disposal_method = 0 + else: + # ensure that the previous frame was loaded + if not self.im: + self.load() + + if frame != self.__frame + 1: + raise ValueError("cannot seek to frame %d" % frame) + self.__frame = frame + + self.tile = [] + + self.fp = self.__fp + if self.__offset: + # backup to last frame + self.fp.seek(self.__offset) + while self.data(): + pass + self.__offset = 0 + + if self.dispose: + self.im.paste(self.dispose, self.dispose_extent) + + from copy import copy + + self.palette = copy(self.global_palette) + + info = {} + while True: + + s = self.fp.read(1) + if not s or s == b";": + break + + elif s == b"!": + # + # extensions + # + s = self.fp.read(1) + block = self.data() + if i8(s) == 249: + # + # graphic control extension + # + flags = i8(block[0]) + if flags & 1: + info["transparency"] = i8(block[3]) + info["duration"] = i16(block[1:3]) * 10 + + # disposal method - find the value of bits 4 - 6 + dispose_bits = 0b00011100 & flags + dispose_bits = dispose_bits >> 2 + if dispose_bits: + # only set the dispose if it is not + # unspecified. 
I'm not sure if this is + # correct, but it seems to prevent the last + # frame from looking odd for some animations + self.disposal_method = dispose_bits + elif i8(s) == 254: + # + # comment extension + # + while block: + if "comment" in info: + info["comment"] += block + else: + info["comment"] = block + block = self.data() + continue + elif i8(s) == 255: + # + # application extension + # + info["extension"] = block, self.fp.tell() + if block[:11] == b"NETSCAPE2.0": + block = self.data() + if len(block) >= 3 and i8(block[0]) == 1: + info["loop"] = i16(block[1:3]) + while self.data(): + pass + + elif s == b",": + # + # local image + # + s = self.fp.read(9) + + # extent + x0, y0 = i16(s[0:]), i16(s[2:]) + x1, y1 = x0 + i16(s[4:]), y0 + i16(s[6:]) + if x1 > self.size[0] or y1 > self.size[1]: + self._size = max(x1, self.size[0]), max(y1, self.size[1]) + self.dispose_extent = x0, y0, x1, y1 + flags = i8(s[8]) + + interlace = (flags & 64) != 0 + + if flags & 128: + bits = (flags & 7) + 1 + self.palette = ImagePalette.raw("RGB", self.fp.read(3 << bits)) + + # image data + bits = i8(self.fp.read(1)) + self.__offset = self.fp.tell() + self.tile = [ + ("gif", (x0, y0, x1, y1), self.__offset, (bits, interlace)) + ] + break + + else: + pass + # raise OSError, "illegal GIF tag `%x`" % i8(s) + + try: + if self.disposal_method < 2: + # do not dispose or none specified + self.dispose = None + elif self.disposal_method == 2: + # replace with background colour + Image._decompression_bomb_check(self.size) + self.dispose = Image.core.fill("P", self.size, self.info["background"]) + else: + # replace with previous contents + if self.im: + self.dispose = self.im.copy() + + # only dispose the extent in this frame + if self.dispose: + self.dispose = self._crop(self.dispose, self.dispose_extent) + except (AttributeError, KeyError): + pass + + if not self.tile: + # self.__fp = None + raise EOFError + + for k in ["transparency", "duration", "comment", "extension", "loop"]: + if k in info: + 
self.info[k] = info[k] + elif k in self.info: + del self.info[k] + + self.mode = "L" + if self.palette: + self.mode = "P" + + def tell(self): + return self.__frame + + def load_end(self): + ImageFile.ImageFile.load_end(self) + + # if the disposal method is 'do not dispose', transparent + # pixels should show the content of the previous frame + if self._prev_im and self.disposal_method == 1: + # we do this by pasting the updated area onto the previous + # frame which we then use as the current image content + updated = self._crop(self.im, self.dispose_extent) + self._prev_im.paste(updated, self.dispose_extent, updated.convert("RGBA")) + self.im = self._prev_im + self._prev_im = self.im.copy() + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# -------------------------------------------------------------------- +# Write GIF files + + +RAWMODE = {"1": "L", "L": "L", "P": "P"} + + +def _normalize_mode(im, initial_call=False): + """ + Takes an image (or frame), returns an image in a mode that is appropriate + for saving in a Gif. + + It may return the original image, or it may return an image converted to + palette or 'L' mode. + + UNDONE: What is the point of mucking with the initial call palette, for + an image that shouldn't have a palette, or it would be a mode 'P' and + get returned in the RAWMODE clause. + + :param im: Image object + :param initial_call: Default false, set to true for a single frame. + :returns: Image object + """ + if im.mode in RAWMODE: + im.load() + return im + if Image.getmodebase(im.mode) == "RGB": + if initial_call: + palette_size = 256 + if im.palette: + palette_size = len(im.palette.getdata()[1]) // 3 + return im.convert("P", palette=Image.ADAPTIVE, colors=palette_size) + else: + return im.convert("P") + return im.convert("L") + + +def _normalize_palette(im, palette, info): + """ + Normalizes the palette for image. 
+ - Sets the palette to the incoming palette, if provided. + - Ensures that there's a palette for L mode images + - Optimizes the palette if necessary/desired. + + :param im: Image object + :param palette: bytes object containing the source palette, or .... + :param info: encoderinfo + :returns: Image object + """ + source_palette = None + if palette: + # a bytes palette + if isinstance(palette, (bytes, bytearray, list)): + source_palette = bytearray(palette[:768]) + if isinstance(palette, ImagePalette.ImagePalette): + source_palette = bytearray( + itertools.chain.from_iterable( + zip( + palette.palette[:256], + palette.palette[256:512], + palette.palette[512:768], + ) + ) + ) + + if im.mode == "P": + if not source_palette: + source_palette = im.im.getpalette("RGB")[:768] + else: # L-mode + if not source_palette: + source_palette = bytearray(i // 3 for i in range(768)) + im.palette = ImagePalette.ImagePalette("RGB", palette=source_palette) + + used_palette_colors = _get_optimize(im, info) + if used_palette_colors is not None: + return im.remap_palette(used_palette_colors, source_palette) + + im.palette.palette = source_palette + return im + + +def _write_single_frame(im, fp, palette): + im_out = _normalize_mode(im, True) + for k, v in im_out.info.items(): + im.encoderinfo.setdefault(k, v) + im_out = _normalize_palette(im_out, palette, im.encoderinfo) + + for s in _get_global_header(im_out, im.encoderinfo): + fp.write(s) + + # local image header + flags = 0 + if get_interlace(im): + flags = flags | 64 + _write_local_header(fp, im, (0, 0), flags) + + im_out.encoderconfig = (8, get_interlace(im)) + ImageFile._save(im_out, fp, [("gif", (0, 0) + im.size, 0, RAWMODE[im_out.mode])]) + + fp.write(b"\0") # end of image data + + +def _write_multiple_frames(im, fp, palette): + + duration = im.encoderinfo.get("duration", im.info.get("duration")) + disposal = im.encoderinfo.get("disposal", im.info.get("disposal")) + + im_frames = [] + frame_count = 0 + background_im = None + 
for imSequence in itertools.chain([im], im.encoderinfo.get("append_images", [])): + for im_frame in ImageSequence.Iterator(imSequence): + # a copy is required here since seek can still mutate the image + im_frame = _normalize_mode(im_frame.copy()) + if frame_count == 0: + for k, v in im_frame.info.items(): + im.encoderinfo.setdefault(k, v) + im_frame = _normalize_palette(im_frame, palette, im.encoderinfo) + + encoderinfo = im.encoderinfo.copy() + if isinstance(duration, (list, tuple)): + encoderinfo["duration"] = duration[frame_count] + if isinstance(disposal, (list, tuple)): + encoderinfo["disposal"] = disposal[frame_count] + frame_count += 1 + + if im_frames: + # delta frame + previous = im_frames[-1] + if encoderinfo.get("disposal") == 2: + if background_im is None: + background = _get_background( + im, + im.encoderinfo.get("background", im.info.get("background")), + ) + background_im = Image.new("P", im_frame.size, background) + background_im.putpalette(im_frames[0]["im"].palette) + base_im = background_im + else: + base_im = previous["im"] + if _get_palette_bytes(im_frame) == _get_palette_bytes(base_im): + delta = ImageChops.subtract_modulo(im_frame, base_im) + else: + delta = ImageChops.subtract_modulo( + im_frame.convert("RGB"), base_im.convert("RGB") + ) + bbox = delta.getbbox() + if not bbox: + # This frame is identical to the previous frame + if duration: + previous["encoderinfo"]["duration"] += encoderinfo["duration"] + continue + else: + bbox = None + im_frames.append({"im": im_frame, "bbox": bbox, "encoderinfo": encoderinfo}) + + if len(im_frames) > 1: + for frame_data in im_frames: + im_frame = frame_data["im"] + if not frame_data["bbox"]: + # global header + for s in _get_global_header(im_frame, frame_data["encoderinfo"]): + fp.write(s) + offset = (0, 0) + else: + # compress difference + frame_data["encoderinfo"]["include_color_table"] = True + + im_frame = im_frame.crop(frame_data["bbox"]) + offset = frame_data["bbox"][:2] + _write_frame_data(fp, 
im_frame, offset, frame_data["encoderinfo"]) + return True + elif "duration" in im.encoderinfo and isinstance( + im.encoderinfo["duration"], (list, tuple) + ): + # Since multiple frames will not be written, add together the frame durations + im.encoderinfo["duration"] = sum(im.encoderinfo["duration"]) + + +def _save_all(im, fp, filename): + _save(im, fp, filename, save_all=True) + + +def _save(im, fp, filename, save_all=False): + # header + if "palette" in im.encoderinfo or "palette" in im.info: + palette = im.encoderinfo.get("palette", im.info.get("palette")) + else: + palette = None + im.encoderinfo["optimize"] = im.encoderinfo.get("optimize", True) + + if not save_all or not _write_multiple_frames(im, fp, palette): + _write_single_frame(im, fp, palette) + + fp.write(b";") # end of file + + if hasattr(fp, "flush"): + fp.flush() + + +def get_interlace(im): + interlace = im.encoderinfo.get("interlace", 1) + + # workaround for @PIL153 + if min(im.size) < 16: + interlace = 0 + + return interlace + + +def _write_local_header(fp, im, offset, flags): + transparent_color_exists = False + try: + transparency = im.encoderinfo["transparency"] + except KeyError: + pass + else: + transparency = int(transparency) + # optimize the block away if transparent color is not used + transparent_color_exists = True + + used_palette_colors = _get_optimize(im, im.encoderinfo) + if used_palette_colors is not None: + # adjust the transparency index after optimize + try: + transparency = used_palette_colors.index(transparency) + except ValueError: + transparent_color_exists = False + + if "duration" in im.encoderinfo: + duration = int(im.encoderinfo["duration"] / 10) + else: + duration = 0 + + disposal = int(im.encoderinfo.get("disposal", 0)) + + if transparent_color_exists or duration != 0 or disposal: + packed_flag = 1 if transparent_color_exists else 0 + packed_flag |= disposal << 2 + if not transparent_color_exists: + transparency = 0 + + fp.write( + b"!" 
+ + o8(249) # extension intro + + o8(4) # length + + o8(packed_flag) # packed fields + + o16(duration) # duration + + o8(transparency) # transparency index + + o8(0) + ) + + if "comment" in im.encoderinfo and 1 <= len(im.encoderinfo["comment"]): + fp.write(b"!" + o8(254)) # extension intro + comment = im.encoderinfo["comment"] + if isinstance(comment, str): + comment = comment.encode() + for i in range(0, len(comment), 255): + subblock = comment[i : i + 255] + fp.write(o8(len(subblock)) + subblock) + fp.write(o8(0)) + if "loop" in im.encoderinfo: + number_of_loops = im.encoderinfo["loop"] + fp.write( + b"!" + + o8(255) # extension intro + + o8(11) + + b"NETSCAPE2.0" + + o8(3) + + o8(1) + + o16(number_of_loops) # number of loops + + o8(0) + ) + include_color_table = im.encoderinfo.get("include_color_table") + if include_color_table: + palette_bytes = _get_palette_bytes(im) + color_table_size = _get_color_table_size(palette_bytes) + if color_table_size: + flags = flags | 128 # local color table flag + flags = flags | color_table_size + + fp.write( + b"," + + o16(offset[0]) # offset + + o16(offset[1]) + + o16(im.size[0]) # size + + o16(im.size[1]) + + o8(flags) # flags + ) + if include_color_table and color_table_size: + fp.write(_get_header_palette(palette_bytes)) + fp.write(o8(8)) # bits + + +def _save_netpbm(im, fp, filename): + + # Unused by default. + # To use, uncomment the register_save call at the end of the file. + # + # If you need real GIF compression and/or RGB quantization, you + # can use the external NETPBM/PBMPLUS utilities. See comments + # below for information on how to enable this. 
+ tempfile = im._dump() + + try: + with open(filename, "wb") as f: + if im.mode != "RGB": + subprocess.check_call( + ["ppmtogif", tempfile], stdout=f, stderr=subprocess.DEVNULL + ) + else: + # Pipe ppmquant output into ppmtogif + # "ppmquant 256 %s | ppmtogif > %s" % (tempfile, filename) + quant_cmd = ["ppmquant", "256", tempfile] + togif_cmd = ["ppmtogif"] + quant_proc = subprocess.Popen( + quant_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL + ) + togif_proc = subprocess.Popen( + togif_cmd, + stdin=quant_proc.stdout, + stdout=f, + stderr=subprocess.DEVNULL, + ) + + # Allow ppmquant to receive SIGPIPE if ppmtogif exits + quant_proc.stdout.close() + + retcode = quant_proc.wait() + if retcode: + raise subprocess.CalledProcessError(retcode, quant_cmd) + + retcode = togif_proc.wait() + if retcode: + raise subprocess.CalledProcessError(retcode, togif_cmd) + finally: + try: + os.unlink(tempfile) + except OSError: + pass + + +# Force optimization so that we can test performance against +# cases where it took lots of memory and time previously. +_FORCE_OPTIMIZE = False + + +def _get_optimize(im, info): + """ + Palette optimization is a potentially expensive operation. + + This function determines if the palette should be optimized using + some heuristics, then returns the list of palette entries in use. + + :param im: Image object + :param info: encoderinfo + :returns: list of indexes of palette entries in use, or None + """ + if im.mode in ("P", "L") and info and info.get("optimize", 0): + # Potentially expensive operation. + + # The palette saves 3 bytes per color not used, but palette + # lengths are restricted to 3*(2**N) bytes. Max saving would + # be 768 -> 6 bytes if we went all the way down to 2 colors. + # * If we're over 128 colors, we can't save any space. + # * If there aren't any holes, it's not worth collapsing. + # * If we have a 'large' image, the palette is in the noise. 
+ + # create the new palette if not every color is used + optimise = _FORCE_OPTIMIZE or im.mode == "L" + if optimise or im.width * im.height < 512 * 512: + # check which colors are used + used_palette_colors = [] + for i, count in enumerate(im.histogram()): + if count: + used_palette_colors.append(i) + + if optimise or ( + len(used_palette_colors) <= 128 + and max(used_palette_colors) > len(used_palette_colors) + ): + return used_palette_colors + + +def _get_color_table_size(palette_bytes): + # calculate the palette size for the header + if not palette_bytes: + return 0 + elif len(palette_bytes) < 9: + return 1 + else: + return math.ceil(math.log(len(palette_bytes) // 3, 2)) - 1 + + +def _get_header_palette(palette_bytes): + """ + Returns the palette, null padded to the next power of 2 (*3) bytes + suitable for direct inclusion in the GIF header + + :param palette_bytes: Unpadded palette bytes, in RGBRGB form + :returns: Null padded palette + """ + color_table_size = _get_color_table_size(palette_bytes) + + # add the missing amount of bytes + # the palette has to be 2< 0: + palette_bytes += o8(0) * 3 * actual_target_size_diff + return palette_bytes + + +def _get_palette_bytes(im): + """ + Gets the palette for inclusion in the gif header + + :param im: Image object + :returns: Bytes, len<=768 suitable for inclusion in gif header + """ + return im.palette.palette + + +def _get_background(im, infoBackground): + background = 0 + if infoBackground: + background = infoBackground + if isinstance(background, tuple): + # WebPImagePlugin stores an RGBA value in info["background"] + # So it must be converted to the same format as GifImagePlugin's + # info["background"] - a global color table index + background = im.palette.getcolor(background) + return background + + +def _get_global_header(im, info): + """Return a list of strings representing a GIF header""" + + # Header Block + # http://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp + + version = b"87a" + for 
extensionKey in ["transparency", "duration", "loop", "comment"]: + if info and extensionKey in info: + if (extensionKey == "duration" and info[extensionKey] == 0) or ( + extensionKey == "comment" and not (1 <= len(info[extensionKey]) <= 255) + ): + continue + version = b"89a" + break + else: + if im.info.get("version") == b"89a": + version = b"89a" + + background = _get_background(im, info.get("background")) + + palette_bytes = _get_palette_bytes(im) + color_table_size = _get_color_table_size(palette_bytes) + + return [ + b"GIF" # signature + + version # version + + o16(im.size[0]) # canvas width + + o16(im.size[1]), # canvas height + # Logical Screen Descriptor + # size of global color table + global color table flag + o8(color_table_size + 128), # packed fields + # background + reserved/aspect + o8(background) + o8(0), + # Global Color Table + _get_header_palette(palette_bytes), + ] + + +def _write_frame_data(fp, im_frame, offset, params): + try: + im_frame.encoderinfo = params + + # local image header + _write_local_header(fp, im_frame, offset, 0) + + ImageFile._save( + im_frame, fp, [("gif", (0, 0) + im_frame.size, 0, RAWMODE[im_frame.mode])] + ) + + fp.write(b"\0") # end of image data + finally: + del im_frame.encoderinfo + + +# -------------------------------------------------------------------- +# Legacy GIF utilities + + +def getheader(im, palette=None, info=None): + """ + Legacy Method to get Gif data from image. + + Warning:: May modify image data. + + :param im: Image object + :param palette: bytes object containing the source palette, or .... 
+ :param info: encoderinfo + :returns: tuple of(list of header items, optimized palette) + + """ + used_palette_colors = _get_optimize(im, info) + + if info is None: + info = {} + + if "background" not in info and "background" in im.info: + info["background"] = im.info["background"] + + im_mod = _normalize_palette(im, palette, info) + im.palette = im_mod.palette + im.im = im_mod.im + header = _get_global_header(im, info) + + return header, used_palette_colors + + +# To specify duration, add the time in milliseconds to getdata(), +# e.g. getdata(im_frame, duration=1000) +def getdata(im, offset=(0, 0), **params): + """ + Legacy Method + + Return a list of strings representing this image. + The first string is a local image header, the rest contains + encoded image data. + + :param im: Image object + :param offset: Tuple of (x, y) pixels. Defaults to (0,0) + :param \\**params: E.g. duration or other encoder info parameters + :returns: List of Bytes containing gif encoded frame data + + """ + + class Collector: + data = [] + + def write(self, data): + self.data.append(data) + + im.load() # make sure raster data is available + + fp = Collector() + + _write_frame_data(fp, im, offset, params) + + return fp.data + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(GifImageFile.format, GifImageFile, _accept) +Image.register_save(GifImageFile.format, _save) +Image.register_save_all(GifImageFile.format, _save_all) +Image.register_extension(GifImageFile.format, ".gif") +Image.register_mime(GifImageFile.format, "image/gif") + +# +# Uncomment the following line if you wish to use NETPBM/PBMPLUS +# instead of the built-in "uncompressed" GIF encoder + +# Image.register_save(GifImageFile.format, _save_netpbm) diff --git a/venv/Lib/site-packages/PIL/GimpGradientFile.py b/venv/Lib/site-packages/PIL/GimpGradientFile.py new file mode 100644 index 000000000..7ab7f9990 --- /dev/null +++ 
b/venv/Lib/site-packages/PIL/GimpGradientFile.py @@ -0,0 +1,140 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read (and render) GIMP gradient files +# +# History: +# 97-08-23 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +""" +Stuff to translate curve segments to palette values (derived from +the corresponding code in GIMP, written by Federico Mena Quintero. +See the GIMP distribution for more information.) +""" + + +from math import log, pi, sin, sqrt + +from ._binary import o8 + +EPSILON = 1e-10 +"""""" # Enable auto-doc for data member + + +def linear(middle, pos): + if pos <= middle: + if middle < EPSILON: + return 0.0 + else: + return 0.5 * pos / middle + else: + pos = pos - middle + middle = 1.0 - middle + if middle < EPSILON: + return 1.0 + else: + return 0.5 + 0.5 * pos / middle + + +def curved(middle, pos): + return pos ** (log(0.5) / log(max(middle, EPSILON))) + + +def sine(middle, pos): + return (sin((-pi / 2.0) + pi * linear(middle, pos)) + 1.0) / 2.0 + + +def sphere_increasing(middle, pos): + return sqrt(1.0 - (linear(middle, pos) - 1.0) ** 2) + + +def sphere_decreasing(middle, pos): + return 1.0 - sqrt(1.0 - linear(middle, pos) ** 2) + + +SEGMENTS = [linear, curved, sine, sphere_increasing, sphere_decreasing] +"""""" # Enable auto-doc for data member + + +class GradientFile: + + gradient = None + + def getpalette(self, entries=256): + + palette = [] + + ix = 0 + x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] + + for i in range(entries): + + x = i / (entries - 1) + + while x1 < x: + ix += 1 + x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix] + + w = x1 - x0 + + if w < EPSILON: + scale = segment(0.5, 0.5) + else: + scale = segment((xm - x0) / w, (x - x0) / w) + + # expand to RGBA + r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5)) + g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5)) + b = 
o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5)) + a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5)) + + # add to palette + palette.append(r + g + b + a) + + return b"".join(palette), "RGBA" + + +class GimpGradientFile(GradientFile): + """File handler for GIMP's gradient format.""" + + def __init__(self, fp): + + if fp.readline()[:13] != b"GIMP Gradient": + raise SyntaxError("not a GIMP gradient file") + + line = fp.readline() + + # GIMP 1.2 gradient files don't contain a name, but GIMP 1.3 files do + if line.startswith(b"Name: "): + line = fp.readline().strip() + + count = int(line) + + gradient = [] + + for i in range(count): + + s = fp.readline().split() + w = [float(x) for x in s[:11]] + + x0, x1 = w[0], w[2] + xm = w[1] + rgb0 = w[3:7] + rgb1 = w[7:11] + + segment = SEGMENTS[int(s[11])] + cspace = int(s[12]) + + if cspace != 0: + raise OSError("cannot handle HSV colour space") + + gradient.append((x0, x1, xm, rgb0, rgb1, segment)) + + self.gradient = gradient diff --git a/venv/Lib/site-packages/PIL/GimpPaletteFile.py b/venv/Lib/site-packages/PIL/GimpPaletteFile.py new file mode 100644 index 000000000..10fd3ad81 --- /dev/null +++ b/venv/Lib/site-packages/PIL/GimpPaletteFile.py @@ -0,0 +1,56 @@ +# +# Python Imaging Library +# $Id$ +# +# stuff to read GIMP palette files +# +# History: +# 1997-08-23 fl Created +# 2004-09-07 fl Support GIMP 2.0 palette files. +# +# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. +# Copyright (c) Fredrik Lundh 1997-2004. +# +# See the README file for information on usage and redistribution. 
+# + +import re + +from ._binary import o8 + + +class GimpPaletteFile: + """File handler for GIMP's palette format.""" + + rawmode = "RGB" + + def __init__(self, fp): + + self.palette = [o8(i) * 3 for i in range(256)] + + if fp.readline()[:12] != b"GIMP Palette": + raise SyntaxError("not a GIMP palette file") + + for i in range(256): + + s = fp.readline() + if not s: + break + + # skip fields and comment lines + if re.match(br"\w+:|#", s): + continue + if len(s) > 100: + raise SyntaxError("bad palette file") + + v = tuple(map(int, s.split()[:3])) + if len(v) != 3: + raise ValueError("bad palette entry") + + self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2]) + + self.palette = b"".join(self.palette) + + def getpalette(self): + + return self.palette, self.rawmode diff --git a/venv/Lib/site-packages/PIL/GribStubImagePlugin.py b/venv/Lib/site-packages/PIL/GribStubImagePlugin.py new file mode 100644 index 000000000..515c272f7 --- /dev/null +++ b/venv/Lib/site-packages/PIL/GribStubImagePlugin.py @@ -0,0 +1,74 @@ +# +# The Python Imaging Library +# $Id$ +# +# GRIB stub adapter +# +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile +from ._binary import i8 + +_handler = None + + +def register_handler(handler): + """ + Install application-specific GRIB image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix): + return prefix[0:4] == b"GRIB" and i8(prefix[7]) == 1 + + +class GribStubImageFile(ImageFile.StubImageFile): + + format = "GRIB" + format_description = "GRIB" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(8)): + raise SyntaxError("Not a GRIB file") + + self.fp.seek(offset) + + # make something up + self.mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise OSError("GRIB save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept) +Image.register_save(GribStubImageFile.format, _save) + +Image.register_extension(GribStubImageFile.format, ".grib") diff --git a/venv/Lib/site-packages/PIL/Hdf5StubImagePlugin.py b/venv/Lib/site-packages/PIL/Hdf5StubImagePlugin.py new file mode 100644 index 000000000..362f2d399 --- /dev/null +++ b/venv/Lib/site-packages/PIL/Hdf5StubImagePlugin.py @@ -0,0 +1,73 @@ +# +# The Python Imaging Library +# $Id$ +# +# HDF5 stub adapter +# +# Copyright (c) 2000-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile + +_handler = None + + +def register_handler(handler): + """ + Install application-specific HDF5 image handler. + + :param handler: Handler object. 
+ """ + global _handler + _handler = handler + + +# -------------------------------------------------------------------- +# Image adapter + + +def _accept(prefix): + return prefix[:8] == b"\x89HDF\r\n\x1a\n" + + +class HDF5StubImageFile(ImageFile.StubImageFile): + + format = "HDF5" + format_description = "HDF5" + + def _open(self): + + offset = self.fp.tell() + + if not _accept(self.fp.read(8)): + raise SyntaxError("Not an HDF file") + + self.fp.seek(offset) + + # make something up + self.mode = "F" + self._size = 1, 1 + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + +def _save(im, fp, filename): + if _handler is None or not hasattr("_handler", "save"): + raise OSError("HDF5 save handler not installed") + _handler.save(im, fp, filename) + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept) +Image.register_save(HDF5StubImageFile.format, _save) + +Image.register_extensions(HDF5StubImageFile.format, [".h5", ".hdf"]) diff --git a/venv/Lib/site-packages/PIL/IcnsImagePlugin.py b/venv/Lib/site-packages/PIL/IcnsImagePlugin.py new file mode 100644 index 000000000..7023855ba --- /dev/null +++ b/venv/Lib/site-packages/PIL/IcnsImagePlugin.py @@ -0,0 +1,384 @@ +# +# The Python Imaging Library. +# $Id$ +# +# macOS icns file decoder, based on icns.py by Bob Ippolito. +# +# history: +# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies. +# +# Copyright (c) 2004 by Bob Ippolito. +# Copyright (c) 2004 by Secret Labs. +# Copyright (c) 2004 by Fredrik Lundh. +# Copyright (c) 2014 by Alastair Houghton. +# +# See the README file for information on usage and redistribution. 
+# + +import io +import os +import shutil +import struct +import subprocess +import sys +import tempfile + +from PIL import Image, ImageFile, PngImagePlugin, features +from PIL._binary import i8 + +enable_jpeg2k = features.check_codec("jpg_2000") +if enable_jpeg2k: + from PIL import Jpeg2KImagePlugin + +HEADERSIZE = 8 + + +def nextheader(fobj): + return struct.unpack(">4sI", fobj.read(HEADERSIZE)) + + +def read_32t(fobj, start_length, size): + # The 128x128 icon seems to have an extra header for some reason. + (start, length) = start_length + fobj.seek(start) + sig = fobj.read(4) + if sig != b"\x00\x00\x00\x00": + raise SyntaxError("Unknown signature, expecting 0x00000000") + return read_32(fobj, (start + 4, length - 4), size) + + +def read_32(fobj, start_length, size): + """ + Read a 32bit RGB icon resource. Seems to be either uncompressed or + an RLE packbits-like scheme. + """ + (start, length) = start_length + fobj.seek(start) + pixel_size = (size[0] * size[2], size[1] * size[2]) + sizesq = pixel_size[0] * pixel_size[1] + if length == sizesq * 3: + # uncompressed ("RGBRGBGB") + indata = fobj.read(length) + im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1) + else: + # decode image + im = Image.new("RGB", pixel_size, None) + for band_ix in range(3): + data = [] + bytesleft = sizesq + while bytesleft > 0: + byte = fobj.read(1) + if not byte: + break + byte = i8(byte) + if byte & 0x80: + blocksize = byte - 125 + byte = fobj.read(1) + for i in range(blocksize): + data.append(byte) + else: + blocksize = byte + 1 + data.append(fobj.read(blocksize)) + bytesleft -= blocksize + if bytesleft <= 0: + break + if bytesleft != 0: + raise SyntaxError("Error reading channel [%r left]" % bytesleft) + band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1) + im.im.putband(band.im, band_ix) + return {"RGB": im} + + +def read_mk(fobj, start_length, size): + # Alpha masks seem to be uncompressed + start = start_length[0] + fobj.seek(start) + 
pixel_size = (size[0] * size[2], size[1] * size[2]) + sizesq = pixel_size[0] * pixel_size[1] + band = Image.frombuffer("L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1) + return {"A": band} + + +def read_png_or_jpeg2000(fobj, start_length, size): + (start, length) = start_length + fobj.seek(start) + sig = fobj.read(12) + if sig[:8] == b"\x89PNG\x0d\x0a\x1a\x0a": + fobj.seek(start) + im = PngImagePlugin.PngImageFile(fobj) + return {"RGBA": im} + elif ( + sig[:4] == b"\xff\x4f\xff\x51" + or sig[:4] == b"\x0d\x0a\x87\x0a" + or sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a" + ): + if not enable_jpeg2k: + raise ValueError( + "Unsupported icon subimage format (rebuild PIL " + "with JPEG 2000 support to fix this)" + ) + # j2k, jpc or j2c + fobj.seek(start) + jp2kstream = fobj.read(length) + f = io.BytesIO(jp2kstream) + im = Jpeg2KImagePlugin.Jpeg2KImageFile(f) + if im.mode != "RGBA": + im = im.convert("RGBA") + return {"RGBA": im} + else: + raise ValueError("Unsupported icon subimage format") + + +class IcnsFile: + + SIZES = { + (512, 512, 2): [(b"ic10", read_png_or_jpeg2000)], + (512, 512, 1): [(b"ic09", read_png_or_jpeg2000)], + (256, 256, 2): [(b"ic14", read_png_or_jpeg2000)], + (256, 256, 1): [(b"ic08", read_png_or_jpeg2000)], + (128, 128, 2): [(b"ic13", read_png_or_jpeg2000)], + (128, 128, 1): [ + (b"ic07", read_png_or_jpeg2000), + (b"it32", read_32t), + (b"t8mk", read_mk), + ], + (64, 64, 1): [(b"icp6", read_png_or_jpeg2000)], + (32, 32, 2): [(b"ic12", read_png_or_jpeg2000)], + (48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)], + (32, 32, 1): [ + (b"icp5", read_png_or_jpeg2000), + (b"il32", read_32), + (b"l8mk", read_mk), + ], + (16, 16, 2): [(b"ic11", read_png_or_jpeg2000)], + (16, 16, 1): [ + (b"icp4", read_png_or_jpeg2000), + (b"is32", read_32), + (b"s8mk", read_mk), + ], + } + + def __init__(self, fobj): + """ + fobj is a file-like object as an icns resource + """ + # signature : (start, length) + self.dct = dct = {} + self.fobj = fobj + sig, filesize = 
nextheader(fobj) + if sig != b"icns": + raise SyntaxError("not an icns file") + i = HEADERSIZE + while i < filesize: + sig, blocksize = nextheader(fobj) + if blocksize <= 0: + raise SyntaxError("invalid block header") + i += HEADERSIZE + blocksize -= HEADERSIZE + dct[sig] = (i, blocksize) + fobj.seek(blocksize, io.SEEK_CUR) + i += blocksize + + def itersizes(self): + sizes = [] + for size, fmts in self.SIZES.items(): + for (fmt, reader) in fmts: + if fmt in self.dct: + sizes.append(size) + break + return sizes + + def bestsize(self): + sizes = self.itersizes() + if not sizes: + raise SyntaxError("No 32bit icon resources found") + return max(sizes) + + def dataforsize(self, size): + """ + Get an icon resource as {channel: array}. Note that + the arrays are bottom-up like windows bitmaps and will likely + need to be flipped or transposed in some way. + """ + dct = {} + for code, reader in self.SIZES[size]: + desc = self.dct.get(code) + if desc is not None: + dct.update(reader(self.fobj, desc, size)) + return dct + + def getimage(self, size=None): + if size is None: + size = self.bestsize() + if len(size) == 2: + size = (size[0], size[1], 1) + channels = self.dataforsize(size) + + im = channels.get("RGBA", None) + if im: + return im + + im = channels.get("RGB").copy() + try: + im.putalpha(channels["A"]) + except KeyError: + pass + return im + + +## +# Image plugin for Mac OS icons. + + +class IcnsImageFile(ImageFile.ImageFile): + """ + PIL image support for Mac OS .icns files. + Chooses the best resolution, but will possibly load + a different size image if you mutate the size attribute + before calling 'load'. + + The info dictionary has a key 'sizes' that is a list + of sizes that the icns file has. 
+ """ + + format = "ICNS" + format_description = "Mac OS icns resource" + + def _open(self): + self.icns = IcnsFile(self.fp) + self.mode = "RGBA" + self.info["sizes"] = self.icns.itersizes() + self.best_size = self.icns.bestsize() + self.size = ( + self.best_size[0] * self.best_size[2], + self.best_size[1] * self.best_size[2], + ) + + @property + def size(self): + return self._size + + @size.setter + def size(self, value): + info_size = value + if info_size not in self.info["sizes"] and len(info_size) == 2: + info_size = (info_size[0], info_size[1], 1) + if ( + info_size not in self.info["sizes"] + and len(info_size) == 3 + and info_size[2] == 1 + ): + simple_sizes = [ + (size[0] * size[2], size[1] * size[2]) for size in self.info["sizes"] + ] + if value in simple_sizes: + info_size = self.info["sizes"][simple_sizes.index(value)] + if info_size not in self.info["sizes"]: + raise ValueError("This is not one of the allowed sizes of this image") + self._size = value + + def load(self): + if len(self.size) == 3: + self.best_size = self.size + self.size = ( + self.best_size[0] * self.best_size[2], + self.best_size[1] * self.best_size[2], + ) + + Image.Image.load(self) + if self.im and self.im.size == self.size: + # Already loaded + return + self.load_prepare() + # This is likely NOT the best way to do it, but whatever. + im = self.icns.getimage(self.best_size) + + # If this is a PNG or JPEG 2000, it won't be loaded yet + im.load() + + self.im = im.im + self.mode = im.mode + self.size = im.size + self.load_end() + + +def _save(im, fp, filename): + """ + Saves the image as a series of PNG files, + that are then converted to a .icns file + using the macOS command line utility 'iconutil'. + + macOS only. 
+ """ + if hasattr(fp, "flush"): + fp.flush() + + # create the temporary set of pngs + with tempfile.TemporaryDirectory(".iconset") as iconset: + provided_images = { + im.width: im for im in im.encoderinfo.get("append_images", []) + } + last_w = None + second_path = None + for w in [16, 32, 128, 256, 512]: + prefix = "icon_{}x{}".format(w, w) + + first_path = os.path.join(iconset, prefix + ".png") + if last_w == w: + shutil.copyfile(second_path, first_path) + else: + im_w = provided_images.get(w, im.resize((w, w), Image.LANCZOS)) + im_w.save(first_path) + + second_path = os.path.join(iconset, prefix + "@2x.png") + im_w2 = provided_images.get(w * 2, im.resize((w * 2, w * 2), Image.LANCZOS)) + im_w2.save(second_path) + last_w = w * 2 + + # iconutil -c icns -o {} {} + + fp_only = not filename + if fp_only: + f, filename = tempfile.mkstemp(".icns") + os.close(f) + convert_cmd = ["iconutil", "-c", "icns", "-o", filename, iconset] + convert_proc = subprocess.Popen( + convert_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL + ) + + convert_proc.stdout.close() + + retcode = convert_proc.wait() + + if retcode: + raise subprocess.CalledProcessError(retcode, convert_cmd) + + if fp_only: + with open(filename, "rb") as f: + fp.write(f.read()) + + +Image.register_open(IcnsImageFile.format, IcnsImageFile, lambda x: x[:4] == b"icns") +Image.register_extension(IcnsImageFile.format, ".icns") + +if sys.platform == "darwin": + Image.register_save(IcnsImageFile.format, _save) + + Image.register_mime(IcnsImageFile.format, "image/icns") + + +if __name__ == "__main__": + + if len(sys.argv) < 2: + print("Syntax: python IcnsImagePlugin.py [file]") + sys.exit() + + with open(sys.argv[1], "rb") as fp: + imf = IcnsImageFile(fp) + for size in imf.info["sizes"]: + imf.size = size + imf.save("out-%s-%s-%s.png" % size) + with Image.open(sys.argv[1]) as im: + im.save("out.png") + if sys.platform == "windows": + os.startfile("out.png") diff --git a/venv/Lib/site-packages/PIL/IcoImagePlugin.py 
b/venv/Lib/site-packages/PIL/IcoImagePlugin.py new file mode 100644 index 000000000..e4a74321b --- /dev/null +++ b/venv/Lib/site-packages/PIL/IcoImagePlugin.py @@ -0,0 +1,324 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Windows Icon support for PIL +# +# History: +# 96-05-27 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis +# . +# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki +# +# Icon format references: +# * https://en.wikipedia.org/wiki/ICO_(file_format) +# * https://msdn.microsoft.com/en-us/library/ms997538.aspx + + +import struct +import warnings +from io import BytesIO +from math import ceil, log + +from . import BmpImagePlugin, Image, ImageFile, PngImagePlugin +from ._binary import i8, i16le as i16, i32le as i32 + +# +# -------------------------------------------------------------------- + +_MAGIC = b"\0\0\1\0" + + +def _save(im, fp, filename): + fp.write(_MAGIC) # (2+2) + sizes = im.encoderinfo.get( + "sizes", + [(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)], + ) + width, height = im.size + sizes = filter( + lambda x: False + if (x[0] > width or x[1] > height or x[0] > 256 or x[1] > 256) + else True, + sizes, + ) + sizes = list(sizes) + fp.write(struct.pack("=8bpp) + "reserved": i8(s[3]), + "planes": i16(s[4:]), + "bpp": i16(s[6:]), + "size": i32(s[8:]), + "offset": i32(s[12:]), + } + + # See Wikipedia + for j in ("width", "height"): + if not icon_header[j]: + icon_header[j] = 256 + + # See Wikipedia notes about color depth. 
+ # We need this just to differ images with equal sizes + icon_header["color_depth"] = ( + icon_header["bpp"] + or ( + icon_header["nb_color"] != 0 + and ceil(log(icon_header["nb_color"], 2)) + ) + or 256 + ) + + icon_header["dim"] = (icon_header["width"], icon_header["height"]) + icon_header["square"] = icon_header["width"] * icon_header["height"] + + self.entry.append(icon_header) + + self.entry = sorted(self.entry, key=lambda x: x["color_depth"]) + # ICO images are usually squares + # self.entry = sorted(self.entry, key=lambda x: x['width']) + self.entry = sorted(self.entry, key=lambda x: x["square"]) + self.entry.reverse() + + def sizes(self): + """ + Get a list of all available icon sizes and color depths. + """ + return {(h["width"], h["height"]) for h in self.entry} + + def getentryindex(self, size, bpp=False): + for (i, h) in enumerate(self.entry): + if size == h["dim"] and (bpp is False or bpp == h["color_depth"]): + return i + return 0 + + def getimage(self, size, bpp=False): + """ + Get an image from the icon + """ + return self.frame(self.getentryindex(size, bpp)) + + def frame(self, idx): + """ + Get an image from frame idx + """ + + header = self.entry[idx] + + self.buf.seek(header["offset"]) + data = self.buf.read(8) + self.buf.seek(header["offset"]) + + if data[:8] == PngImagePlugin._MAGIC: + # png frame + im = PngImagePlugin.PngImageFile(self.buf) + else: + # XOR + AND mask bmp frame + im = BmpImagePlugin.DibImageFile(self.buf) + Image._decompression_bomb_check(im.size) + + # change tile dimension to only encompass XOR image + im._size = (im.size[0], int(im.size[1] / 2)) + d, e, o, a = im.tile[0] + im.tile[0] = d, (0, 0) + im.size, o, a + + # figure out where AND mask image starts + mode = a[0] + bpp = 8 + for k, v in BmpImagePlugin.BIT2MODE.items(): + if mode == v[1]: + bpp = k + break + + if 32 == bpp: + # 32-bit color depth icon image allows semitransparent areas + # PIL's DIB format ignores transparency bits, recover them. 
+ # The DIB is packed in BGRX byte order where X is the alpha + # channel. + + # Back up to start of bmp data + self.buf.seek(o) + # extract every 4th byte (eg. 3,7,11,15,...) + alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4] + + # convert to an 8bpp grayscale image + mask = Image.frombuffer( + "L", # 8bpp + im.size, # (w, h) + alpha_bytes, # source chars + "raw", # raw decoder + ("L", 0, -1), # 8bpp inverted, unpadded, reversed + ) + else: + # get AND image from end of bitmap + w = im.size[0] + if (w % 32) > 0: + # bitmap row data is aligned to word boundaries + w += 32 - (im.size[0] % 32) + + # the total mask data is + # padded row size * height / bits per char + + and_mask_offset = o + int(im.size[0] * im.size[1] * (bpp / 8.0)) + total_bytes = int((w * im.size[1]) / 8) + + self.buf.seek(and_mask_offset) + mask_data = self.buf.read(total_bytes) + + # convert raw data to image + mask = Image.frombuffer( + "1", # 1 bpp + im.size, # (w, h) + mask_data, # source chars + "raw", # raw decoder + ("1;I", int(w / 8), -1), # 1bpp inverted, padded, reversed + ) + + # now we have two images, im is XOR image and mask is AND image + + # apply mask image as alpha channel + im = im.convert("RGBA") + im.putalpha(mask) + + return im + + +## +# Image plugin for Windows Icon files. + + +class IcoImageFile(ImageFile.ImageFile): + """ + PIL read-only image support for Microsoft Windows .ico files. + + By default the largest resolution image in the file will be loaded. This + can be changed by altering the 'size' attribute before calling 'load'. + + The info dictionary has a key 'sizes' that is a list of the sizes available + in the icon file. + + Handles classic, XP and Vista icon formats. + + When saving, PNG compression is used. Support for this was only added in + Windows Vista. + + This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis + . 
+ https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki + """ + + format = "ICO" + format_description = "Windows Icon" + + def _open(self): + self.ico = IcoFile(self.fp) + self.info["sizes"] = self.ico.sizes() + self.size = self.ico.entry[0]["dim"] + self.load() + + @property + def size(self): + return self._size + + @size.setter + def size(self, value): + if value not in self.info["sizes"]: + raise ValueError("This is not one of the allowed sizes of this image") + self._size = value + + def load(self): + if self.im and self.im.size == self.size: + # Already loaded + return + im = self.ico.getimage(self.size) + # if tile is PNG, it won't really be loaded yet + im.load() + self.im = im.im + self.mode = im.mode + if im.size != self.size: + warnings.warn("Image was not the expected size") + + index = self.ico.getentryindex(self.size) + sizes = list(self.info["sizes"]) + sizes[index] = im.size + self.info["sizes"] = set(sizes) + + self.size = im.size + + def load_seek(self): + # Flag the ImageFile.Parser so that it + # just does all the decode at the end. + pass + + +# +# -------------------------------------------------------------------- + + +Image.register_open(IcoImageFile.format, IcoImageFile, _accept) +Image.register_save(IcoImageFile.format, _save) +Image.register_extension(IcoImageFile.format, ".ico") + +Image.register_mime(IcoImageFile.format, "image/x-icon") diff --git a/venv/Lib/site-packages/PIL/ImImagePlugin.py b/venv/Lib/site-packages/PIL/ImImagePlugin.py new file mode 100644 index 000000000..d940899b0 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImImagePlugin.py @@ -0,0 +1,377 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IFUNC IM file handling for PIL +# +# history: +# 1995-09-01 fl Created. 
+# 1997-01-03 fl Save palette images +# 1997-01-08 fl Added sequence support +# 1997-01-23 fl Added P and RGB save support +# 1997-05-31 fl Read floating point images +# 1997-06-22 fl Save floating point images +# 1997-08-27 fl Read and save 1-bit images +# 1998-06-25 fl Added support for RGB+LUT images +# 1998-07-02 fl Added support for YCC images +# 1998-07-15 fl Renamed offset attribute to avoid name clash +# 1998-12-29 fl Added I;16 support +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) +# 2003-09-26 fl Added LA/PA support +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2001 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + + +import os +import re + +from . import Image, ImageFile, ImagePalette +from ._binary import i8 + +# -------------------------------------------------------------------- +# Standard tags + +COMMENT = "Comment" +DATE = "Date" +EQUIPMENT = "Digitalization equipment" +FRAMES = "File size (no of images)" +LUT = "Lut" +NAME = "Name" +SCALE = "Scale (x,y)" +SIZE = "Image size (x*y)" +MODE = "Image type" + +TAGS = { + COMMENT: 0, + DATE: 0, + EQUIPMENT: 0, + FRAMES: 0, + LUT: 0, + NAME: 0, + SCALE: 0, + SIZE: 0, + MODE: 0, +} + +OPEN = { + # ifunc93/p3cfunc formats + "0 1 image": ("1", "1"), + "L 1 image": ("1", "1"), + "Greyscale image": ("L", "L"), + "Grayscale image": ("L", "L"), + "RGB image": ("RGB", "RGB;L"), + "RLB image": ("RGB", "RLB"), + "RYB image": ("RGB", "RLB"), + "B1 image": ("1", "1"), + "B2 image": ("P", "P;2"), + "B4 image": ("P", "P;4"), + "X 24 image": ("RGB", "RGB"), + "L 32 S image": ("I", "I;32"), + "L 32 F image": ("F", "F;32"), + # old p3cfunc formats + "RGB3 image": ("RGB", "RGB;T"), + "RYB3 image": ("RGB", "RYB;T"), + # extensions + "LA image": ("LA", "LA;L"), + "PA image": ("LA", "PA;L"), + "RGBA image": ("RGBA", "RGBA;L"), + "RGBX image": ("RGBX", "RGBX;L"), + "CMYK image": ("CMYK", "CMYK;L"), + "YCC image": ("YCbCr", "YCbCr;L"), +} + 
+# ifunc95 extensions +for i in ["8", "8S", "16", "16S", "32", "32F"]: + OPEN["L %s image" % i] = ("F", "F;%s" % i) + OPEN["L*%s image" % i] = ("F", "F;%s" % i) +for i in ["16", "16L", "16B"]: + OPEN["L %s image" % i] = ("I;%s" % i, "I;%s" % i) + OPEN["L*%s image" % i] = ("I;%s" % i, "I;%s" % i) +for i in ["32S"]: + OPEN["L %s image" % i] = ("I", "I;%s" % i) + OPEN["L*%s image" % i] = ("I", "I;%s" % i) +for i in range(2, 33): + OPEN["L*%s image" % i] = ("F", "F;%s" % i) + + +# -------------------------------------------------------------------- +# Read IM directory + +split = re.compile(br"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$") + + +def number(s): + try: + return int(s) + except ValueError: + return float(s) + + +## +# Image plugin for the IFUNC IM file format. + + +class ImImageFile(ImageFile.ImageFile): + + format = "IM" + format_description = "IFUNC Image Memory" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # Quick rejection: if there's not an LF among the first + # 100 bytes, this is (probably) not a text header. + + if b"\n" not in self.fp.read(100): + raise SyntaxError("not an IM file") + self.fp.seek(0) + + n = 0 + + # Default values + self.info[MODE] = "L" + self.info[SIZE] = (512, 512) + self.info[FRAMES] = 1 + + self.rawmode = "L" + + while True: + + s = self.fp.read(1) + + # Some versions of IFUNC uses \n\r instead of \r\n... 
+ if s == b"\r": + continue + + if not s or s == b"\0" or s == b"\x1A": + break + + # FIXME: this may read whole file if not a text file + s = s + self.fp.readline() + + if len(s) > 100: + raise SyntaxError("not an IM file") + + if s[-2:] == b"\r\n": + s = s[:-2] + elif s[-1:] == b"\n": + s = s[:-1] + + try: + m = split.match(s) + except re.error as e: + raise SyntaxError("not an IM file") from e + + if m: + + k, v = m.group(1, 2) + + # Don't know if this is the correct encoding, + # but a decent guess (I guess) + k = k.decode("latin-1", "replace") + v = v.decode("latin-1", "replace") + + # Convert value as appropriate + if k in [FRAMES, SCALE, SIZE]: + v = v.replace("*", ",") + v = tuple(map(number, v.split(","))) + if len(v) == 1: + v = v[0] + elif k == MODE and v in OPEN: + v, self.rawmode = OPEN[v] + + # Add to dictionary. Note that COMMENT tags are + # combined into a list of strings. + if k == COMMENT: + if k in self.info: + self.info[k].append(v) + else: + self.info[k] = [v] + else: + self.info[k] = v + + if k in TAGS: + n += 1 + + else: + + raise SyntaxError( + "Syntax error in IM header: " + s.decode("ascii", "replace") + ) + + if not n: + raise SyntaxError("Not an IM file") + + # Basic attributes + self._size = self.info[SIZE] + self.mode = self.info[MODE] + + # Skip forward to start of image data + while s and s[0:1] != b"\x1A": + s = self.fp.read(1) + if not s: + raise SyntaxError("File truncated") + + if LUT in self.info: + # convert lookup table to palette or lut attribute + palette = self.fp.read(768) + greyscale = 1 # greyscale palette + linear = 1 # linear greyscale palette + for i in range(256): + if palette[i] == palette[i + 256] == palette[i + 512]: + if i8(palette[i]) != i: + linear = 0 + else: + greyscale = 0 + if self.mode in ["L", "LA", "P", "PA"]: + if greyscale: + if not linear: + self.lut = [i8(c) for c in palette[:256]] + else: + if self.mode in ["L", "P"]: + self.mode = self.rawmode = "P" + elif self.mode in ["LA", "PA"]: + self.mode = 
"PA" + self.rawmode = "PA;L" + self.palette = ImagePalette.raw("RGB;L", palette) + elif self.mode == "RGB": + if not greyscale or not linear: + self.lut = [i8(c) for c in palette] + + self.frame = 0 + + self.__offset = offs = self.fp.tell() + + self.__fp = self.fp # FIXME: hack + + if self.rawmode[:2] == "F;": + + # ifunc95 formats + try: + # use bit decoder (if necessary) + bits = int(self.rawmode[2:]) + if bits not in [8, 16, 32]: + self.tile = [("bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1))] + return + except ValueError: + pass + + if self.rawmode in ["RGB;T", "RYB;T"]: + # Old LabEye/3PC files. Would be very surprised if anyone + # ever stumbled upon such a file ;-) + size = self.size[0] * self.size[1] + self.tile = [ + ("raw", (0, 0) + self.size, offs, ("G", 0, -1)), + ("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)), + ("raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1)), + ] + else: + # LabEye/IFUNC files + self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))] + + @property + def n_frames(self): + return self.info[FRAMES] + + @property + def is_animated(self): + return self.info[FRAMES] > 1 + + def seek(self, frame): + if not self._seek_check(frame): + return + + self.frame = frame + + if self.mode == "1": + bits = 1 + else: + bits = 8 * len(self.mode) + + size = ((self.size[0] * bits + 7) // 8) * self.size[1] + offs = self.__offset + frame * size + + self.fp = self.__fp + + self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))] + + def tell(self): + return self.frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# +# -------------------------------------------------------------------- +# Save IM files + + +SAVE = { + # mode: (im type, raw mode) + "1": ("0 1", "1"), + "L": ("Greyscale", "L"), + "LA": ("LA", "LA;L"), + "P": ("Greyscale", "P"), + "PA": ("LA", "PA;L"), + "I": ("L 32S", "I;32S"), + "I;16": ("L 
16", "I;16"), + "I;16L": ("L 16L", "I;16L"), + "I;16B": ("L 16B", "I;16B"), + "F": ("L 32F", "F;32F"), + "RGB": ("RGB", "RGB;L"), + "RGBA": ("RGBA", "RGBA;L"), + "RGBX": ("RGBX", "RGBX;L"), + "CMYK": ("CMYK", "CMYK;L"), + "YCbCr": ("YCC", "YCbCr;L"), +} + + +def _save(im, fp, filename): + + try: + image_type, rawmode = SAVE[im.mode] + except KeyError as e: + raise ValueError("Cannot save %s images as IM" % im.mode) from e + + frames = im.encoderinfo.get("frames", 1) + + fp.write(("Image type: %s image\r\n" % image_type).encode("ascii")) + if filename: + # Each line must be 100 characters or less, + # or: SyntaxError("not an IM file") + # 8 characters are used for "Name: " and "\r\n" + # Keep just the filename, ditch the potentially overlong path + name, ext = os.path.splitext(os.path.basename(filename)) + name = "".join([name[: 92 - len(ext)], ext]) + + fp.write(("Name: %s\r\n" % name).encode("ascii")) + fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode("ascii")) + fp.write(("File size (no of images): %d\r\n" % frames).encode("ascii")) + if im.mode in ["P", "PA"]: + fp.write(b"Lut: 1\r\n") + fp.write(b"\000" * (511 - fp.tell()) + b"\032") + if im.mode in ["P", "PA"]: + fp.write(im.im.getpalette("RGB", "RGB;L")) # 768 bytes + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))]) + + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(ImImageFile.format, ImImageFile) +Image.register_save(ImImageFile.format, _save) + +Image.register_extension(ImImageFile.format, ".im") diff --git a/venv/Lib/site-packages/PIL/Image.py b/venv/Lib/site-packages/PIL/Image.py new file mode 100644 index 000000000..c1419744a --- /dev/null +++ b/venv/Lib/site-packages/PIL/Image.py @@ -0,0 +1,3462 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# the Image class wrapper +# +# partial release history: +# 1995-09-09 fl Created +# 1996-03-11 fl PIL release 0.0 (proof of concept) +# 1996-04-30 fl PIL release 0.1b1 +# 1999-07-28 fl PIL release 1.0 final +# 2000-06-07 fl PIL release 1.1 +# 2000-10-20 fl PIL release 1.1.1 +# 2001-05-07 fl PIL release 1.1.2 +# 2002-03-15 fl PIL release 1.1.3 +# 2003-05-10 fl PIL release 1.1.4 +# 2005-03-28 fl PIL release 1.1.5 +# 2006-12-02 fl PIL release 1.1.6 +# 2009-11-15 fl PIL release 1.1.7 +# +# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-2009 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +import atexit +import builtins +import io +import logging +import math +import numbers +import os +import struct +import sys +import tempfile +import warnings +import xml.etree.ElementTree +from collections.abc import Callable, MutableMapping +from pathlib import Path + +# VERSION was removed in Pillow 6.0.0. +# PILLOW_VERSION is deprecated and will be removed in a future release. +# Use __version__ instead. +from . import ( + ImageMode, + TiffTags, + UnidentifiedImageError, + __version__, + _plugins, + _raise_version_warning, +) +from ._binary import i8, i32le +from ._util import deferred_error, isPath + +if sys.version_info >= (3, 7): + + def __getattr__(name): + if name == "PILLOW_VERSION": + _raise_version_warning() + return __version__ + raise AttributeError("module '{}' has no attribute '{}'".format(__name__, name)) + + +else: + + from . import PILLOW_VERSION + + # Silence warning + assert PILLOW_VERSION + + +logger = logging.getLogger(__name__) + + +class DecompressionBombWarning(RuntimeWarning): + pass + + +class DecompressionBombError(Exception): + pass + + +# Limit to around a quarter gigabyte for a 24 bit (3 bpp) image +MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 // 4 // 3) + + +try: + # If the _imaging C module is not present, Pillow will not load. 
+ # Note that other modules should not refer to _imaging directly; + # import Image and use the Image.core variable instead. + # Also note that Image.core is not a publicly documented interface, + # and should be considered private and subject to change. + from . import _imaging as core + + if __version__ != getattr(core, "PILLOW_VERSION", None): + raise ImportError( + "The _imaging extension was built for another version of Pillow or PIL:\n" + "Core version: %s\n" + "Pillow version: %s" % (getattr(core, "PILLOW_VERSION", None), __version__) + ) + +except ImportError as v: + core = deferred_error(ImportError("The _imaging C module is not installed.")) + # Explanations for ways that we know we might have an import error + if str(v).startswith("Module use of python"): + # The _imaging C module is present, but not compiled for + # the right version (windows only). Print a warning, if + # possible. + warnings.warn( + "The _imaging extension was built for another version of Python.", + RuntimeWarning, + ) + elif str(v).startswith("The _imaging extension"): + warnings.warn(str(v), RuntimeWarning) + # Fail here anyway. Don't let people run with a mostly broken Pillow. + # see docs/porting.rst + raise + + +# works everywhere, win for pypy, not cpython +USE_CFFI_ACCESS = hasattr(sys, "pypy_version_info") +try: + import cffi +except ImportError: + cffi = None + + +def isImageType(t): + """ + Checks if an object is an image object. + + .. warning:: + + This function is for internal use only. 
+ + :param t: object to check if it's an image + :returns: True if the object is an image + """ + return hasattr(t, "im") + + +# +# Constants + +NONE = 0 + +# transpose +FLIP_LEFT_RIGHT = 0 +FLIP_TOP_BOTTOM = 1 +ROTATE_90 = 2 +ROTATE_180 = 3 +ROTATE_270 = 4 +TRANSPOSE = 5 +TRANSVERSE = 6 + +# transforms (also defined in Imaging.h) +AFFINE = 0 +EXTENT = 1 +PERSPECTIVE = 2 +QUAD = 3 +MESH = 4 + +# resampling filters (also defined in Imaging.h) +NEAREST = NONE = 0 +BOX = 4 +BILINEAR = LINEAR = 2 +HAMMING = 5 +BICUBIC = CUBIC = 3 +LANCZOS = ANTIALIAS = 1 + +_filters_support = {BOX: 0.5, BILINEAR: 1.0, HAMMING: 1.0, BICUBIC: 2.0, LANCZOS: 3.0} + + +# dithers +NEAREST = NONE = 0 +ORDERED = 1 # Not yet implemented +RASTERIZE = 2 # Not yet implemented +FLOYDSTEINBERG = 3 # default + +# palettes/quantizers +WEB = 0 +ADAPTIVE = 1 + +MEDIANCUT = 0 +MAXCOVERAGE = 1 +FASTOCTREE = 2 +LIBIMAGEQUANT = 3 + +# categories +NORMAL = 0 +SEQUENCE = 1 +CONTAINER = 2 + +if hasattr(core, "DEFAULT_STRATEGY"): + DEFAULT_STRATEGY = core.DEFAULT_STRATEGY + FILTERED = core.FILTERED + HUFFMAN_ONLY = core.HUFFMAN_ONLY + RLE = core.RLE + FIXED = core.FIXED + + +# -------------------------------------------------------------------- +# Registries + +ID = [] +OPEN = {} +MIME = {} +SAVE = {} +SAVE_ALL = {} +EXTENSION = {} +DECODERS = {} +ENCODERS = {} + +# -------------------------------------------------------------------- +# Modes supported by this version + +_MODEINFO = { + # NOTE: this table will be removed in future versions. use + # getmode* functions or ImageMode descriptors instead. 
+ # official modes + "1": ("L", "L", ("1",)), + "L": ("L", "L", ("L",)), + "I": ("L", "I", ("I",)), + "F": ("L", "F", ("F",)), + "P": ("P", "L", ("P",)), + "RGB": ("RGB", "L", ("R", "G", "B")), + "RGBX": ("RGB", "L", ("R", "G", "B", "X")), + "RGBA": ("RGB", "L", ("R", "G", "B", "A")), + "CMYK": ("RGB", "L", ("C", "M", "Y", "K")), + "YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")), + "LAB": ("RGB", "L", ("L", "A", "B")), + "HSV": ("RGB", "L", ("H", "S", "V")), + # Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and + # BGR;24. Use these modes only if you know exactly what you're + # doing... +} + +if sys.byteorder == "little": + _ENDIAN = "<" +else: + _ENDIAN = ">" + +_MODE_CONV = { + # official modes + "1": ("|b1", None), # Bits need to be extended to bytes + "L": ("|u1", None), + "LA": ("|u1", 2), + "I": (_ENDIAN + "i4", None), + "F": (_ENDIAN + "f4", None), + "P": ("|u1", None), + "RGB": ("|u1", 3), + "RGBX": ("|u1", 4), + "RGBA": ("|u1", 4), + "CMYK": ("|u1", 4), + "YCbCr": ("|u1", 3), + "LAB": ("|u1", 3), # UNDONE - unsigned |u1i1i1 + "HSV": ("|u1", 3), + # I;16 == I;16L, and I;32 == I;32L + "I;16": ("u2", None), + "I;16L": ("i2", None), + "I;16LS": ("u4", None), + "I;32L": ("i4", None), + "I;32LS": ("= 1: + return + + try: + from . import BmpImagePlugin + + assert BmpImagePlugin + except ImportError: + pass + try: + from . import GifImagePlugin + + assert GifImagePlugin + except ImportError: + pass + try: + from . import JpegImagePlugin + + assert JpegImagePlugin + except ImportError: + pass + try: + from . import PpmImagePlugin + + assert PpmImagePlugin + except ImportError: + pass + try: + from . import PngImagePlugin + + assert PngImagePlugin + except ImportError: + pass + # try: + # import TiffImagePlugin + # assert TiffImagePlugin + # except ImportError: + # pass + + _initialized = 1 + + +def init(): + """ + Explicitly initializes the Python Imaging Library. This function + loads all available file format drivers. 
+ """ + + global _initialized + if _initialized >= 2: + return 0 + + for plugin in _plugins: + try: + logger.debug("Importing %s", plugin) + __import__("PIL.%s" % plugin, globals(), locals(), []) + except ImportError as e: + logger.debug("Image: failed to import %s: %s", plugin, e) + + if OPEN or SAVE: + _initialized = 2 + return 1 + + +# -------------------------------------------------------------------- +# Codec factories (used by tobytes/frombytes and ImageFile.load) + + +def _getdecoder(mode, decoder_name, args, extra=()): + + # tweak arguments + if args is None: + args = () + elif not isinstance(args, tuple): + args = (args,) + + try: + decoder = DECODERS[decoder_name] + except KeyError: + pass + else: + return decoder(mode, *args + extra) + + try: + # get decoder + decoder = getattr(core, decoder_name + "_decoder") + except AttributeError as e: + raise OSError("decoder %s not available" % decoder_name) from e + return decoder(mode, *args + extra) + + +def _getencoder(mode, encoder_name, args, extra=()): + + # tweak arguments + if args is None: + args = () + elif not isinstance(args, tuple): + args = (args,) + + try: + encoder = ENCODERS[encoder_name] + except KeyError: + pass + else: + return encoder(mode, *args + extra) + + try: + # get encoder + encoder = getattr(core, encoder_name + "_encoder") + except AttributeError as e: + raise OSError("encoder %s not available" % encoder_name) from e + return encoder(mode, *args + extra) + + +# -------------------------------------------------------------------- +# Simple expression analyzer + + +def coerce_e(value): + return value if isinstance(value, _E) else _E(value) + + +class _E: + def __init__(self, data): + self.data = data + + def __add__(self, other): + return _E((self.data, "__add__", coerce_e(other).data)) + + def __mul__(self, other): + return _E((self.data, "__mul__", coerce_e(other).data)) + + +def _getscaleoffset(expr): + stub = ["stub"] + data = expr(_E(stub)).data + try: + (a, b, c) = data # 
simplified syntax + if a is stub and b == "__mul__" and isinstance(c, numbers.Number): + return c, 0.0 + if a is stub and b == "__add__" and isinstance(c, numbers.Number): + return 1.0, c + except TypeError: + pass + try: + ((a, b, c), d, e) = data # full syntax + if ( + a is stub + and b == "__mul__" + and isinstance(c, numbers.Number) + and d == "__add__" + and isinstance(e, numbers.Number) + ): + return c, e + except TypeError: + pass + raise ValueError("illegal expression") + + +# -------------------------------------------------------------------- +# Implementation wrapper + + +class Image: + """ + This class represents an image object. To create + :py:class:`~PIL.Image.Image` objects, use the appropriate factory + functions. There's hardly ever any reason to call the Image constructor + directly. + + * :py:func:`~PIL.Image.open` + * :py:func:`~PIL.Image.new` + * :py:func:`~PIL.Image.frombytes` + """ + + format = None + format_description = None + _close_exclusive_fp_after_loading = True + + def __init__(self): + # FIXME: take "new" parameters / other image? + # FIXME: turn mode and size into delegating properties? + self.im = None + self.mode = "" + self._size = (0, 0) + self.palette = None + self.info = {} + self.category = NORMAL + self.readonly = 0 + self.pyaccess = None + self._exif = None + + @property + def width(self): + return self.size[0] + + @property + def height(self): + return self.size[1] + + @property + def size(self): + return self._size + + def _new(self, im): + new = Image() + new.im = im + new.mode = im.mode + new._size = im.size + if im.mode in ("P", "PA"): + if self.palette: + new.palette = self.palette.copy() + else: + from . 
import ImagePalette + + new.palette = ImagePalette.ImagePalette() + new.info = self.info.copy() + return new + + # Context manager support + def __enter__(self): + return self + + def __exit__(self, *args): + if hasattr(self, "fp") and getattr(self, "_exclusive_fp", False): + if hasattr(self, "_close__fp"): + self._close__fp() + if self.fp: + self.fp.close() + self.fp = None + + def close(self): + """ + Closes the file pointer, if possible. + + This operation will destroy the image core and release its memory. + The image data will be unusable afterward. + + This function is only required to close images that have not + had their file read and closed by the + :py:meth:`~PIL.Image.Image.load` method. See + :ref:`file-handling` for more information. + """ + try: + if hasattr(self, "_close__fp"): + self._close__fp() + self.fp.close() + self.fp = None + except Exception as msg: + logger.debug("Error closing: %s", msg) + + if getattr(self, "map", None): + self.map = None + + # Instead of simply setting to None, we're setting up a + # deferred error that will better explain that the core image + # object is gone. + self.im = deferred_error(ValueError("Operation on closed image")) + + def _copy(self): + self.load() + self.im = self.im.copy() + self.pyaccess = None + self.readonly = 0 + + def _ensure_mutable(self): + if self.readonly: + self._copy() + else: + self.load() + + def _dump(self, file=None, format=None, **options): + suffix = "" + if format: + suffix = "." 
+ format + + if not file: + f, filename = tempfile.mkstemp(suffix) + os.close(f) + else: + filename = file + if not filename.endswith(suffix): + filename = filename + suffix + + self.load() + + if not format or format == "PPM": + self.im.save_ppm(filename) + else: + self.save(filename, format, **options) + + return filename + + def __eq__(self, other): + return ( + self.__class__ is other.__class__ + and self.mode == other.mode + and self.size == other.size + and self.info == other.info + and self.category == other.category + and self.readonly == other.readonly + and self.getpalette() == other.getpalette() + and self.tobytes() == other.tobytes() + ) + + def __repr__(self): + return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % ( + self.__class__.__module__, + self.__class__.__name__, + self.mode, + self.size[0], + self.size[1], + id(self), + ) + + def _repr_png_(self): + """ iPython display hook support + + :returns: png version of the image as bytes + """ + b = io.BytesIO() + self.save(b, "PNG") + return b.getvalue() + + @property + def __array_interface__(self): + # numpy array interface support + new = {} + shape, typestr = _conv_type_shape(self) + new["shape"] = shape + new["typestr"] = typestr + new["version"] = 3 + if self.mode == "1": + # Binary images need to be extended from bits to bytes + # See: https://github.com/python-pillow/Pillow/issues/350 + new["data"] = self.tobytes("raw", "L") + else: + new["data"] = self.tobytes() + return new + + def __getstate__(self): + return [self.info, self.mode, self.size, self.getpalette(), self.tobytes()] + + def __setstate__(self, state): + Image.__init__(self) + self.tile = [] + info, mode, size, palette, data = state + self.info = info + self.mode = mode + self._size = size + self.im = core.new(mode, size) + if mode in ("L", "LA", "P", "PA") and palette: + self.putpalette(palette) + self.frombytes(data) + + def tobytes(self, encoder_name="raw", *args): + """ + Return image as a bytes object. + + .. 
warning:: + + This method returns the raw image data from the internal + storage. For compressed image data (e.g. PNG, JPEG) use + :meth:`~.save`, with a BytesIO parameter for in-memory + data. + + :param encoder_name: What encoder to use. The default is to + use the standard "raw" encoder. + :param args: Extra arguments to the encoder. + :rtype: A bytes object. + """ + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if encoder_name == "raw" and args == (): + args = self.mode + + self.load() + + # unpack data + e = _getencoder(self.mode, encoder_name, args) + e.setimage(self.im) + + bufsize = max(65536, self.size[0] * 4) # see RawEncode.c + + data = [] + while True: + l, s, d = e.encode(bufsize) + data.append(d) + if s: + break + if s < 0: + raise RuntimeError("encoder error %d in tobytes" % s) + + return b"".join(data) + + def tostring(self, *args, **kw): + raise NotImplementedError( + "tostring() has been removed. Please call tobytes() instead." + ) + + def tobitmap(self, name="image"): + """ + Returns the image converted to an X11 bitmap. + + .. note:: This method only works for mode "1" images. + + :param name: The name prefix to use for the bitmap variables. + :returns: A string containing an X11 bitmap. + :raises ValueError: If the mode is not "1" + """ + + self.load() + if self.mode != "1": + raise ValueError("not a bitmap") + data = self.tobytes("xbm") + return b"".join( + [ + ("#define %s_width %d\n" % (name, self.size[0])).encode("ascii"), + ("#define %s_height %d\n" % (name, self.size[1])).encode("ascii"), + ("static char %s_bits[] = {\n" % name).encode("ascii"), + data, + b"};", + ] + ) + + def frombytes(self, data, decoder_name="raw", *args): + """ + Loads this image with pixel data from a bytes object. + + This method is similar to the :py:func:`~PIL.Image.frombytes` function, + but loads data into this image instead of creating a new image object. 
+ """ + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + # default format + if decoder_name == "raw" and args == (): + args = self.mode + + # unpack data + d = _getdecoder(self.mode, decoder_name, args) + d.setimage(self.im) + s = d.decode(data) + + if s[0] >= 0: + raise ValueError("not enough image data") + if s[1] != 0: + raise ValueError("cannot decode image data") + + def fromstring(self, *args, **kw): + raise NotImplementedError( + "fromstring() has been removed. Please call frombytes() instead." + ) + + def load(self): + """ + Allocates storage for the image and loads the pixel data. In + normal cases, you don't need to call this method, since the + Image class automatically loads an opened image when it is + accessed for the first time. + + If the file associated with the image was opened by Pillow, then this + method will close it. The exception to this is if the image has + multiple frames, in which case the file will be left open for seek + operations. See :ref:`file-handling` for more information. + + :returns: An image access object. + :rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess` + """ + if self.im and self.palette and self.palette.dirty: + # realize palette + self.im.putpalette(*self.palette.getdata()) + self.palette.dirty = 0 + self.palette.mode = "RGB" + self.palette.rawmode = None + if "transparency" in self.info: + if isinstance(self.info["transparency"], int): + self.im.putpalettealpha(self.info["transparency"], 0) + else: + self.im.putpalettealphas(self.info["transparency"]) + self.palette.mode = "RGBA" + + if self.im: + if cffi and USE_CFFI_ACCESS: + if self.pyaccess: + return self.pyaccess + from . import PyAccess + + self.pyaccess = PyAccess.new(self, self.readonly) + if self.pyaccess: + return self.pyaccess + return self.im.pixel_access(self.readonly) + + def verify(self): + """ + Verifies the contents of a file. 
For data read from a file, this + method attempts to determine if the file is broken, without + actually decoding the image data. If this method finds any + problems, it raises suitable exceptions. If you need to load + the image after using this method, you must reopen the image + file. + """ + pass + + def convert(self, mode=None, matrix=None, dither=None, palette=WEB, colors=256): + """ + Returns a converted copy of this image. For the "P" mode, this + method translates pixels through the palette. If mode is + omitted, a mode is chosen so that all information in the image + and the palette can be represented without a palette. + + The current version supports all possible conversions between + "L", "RGB" and "CMYK." The **matrix** argument only supports "L" + and "RGB". + + When translating a color image to greyscale (mode "L"), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + + The default method of converting a greyscale ("L") or "RGB" + image into a bilevel (mode "1") image uses Floyd-Steinberg + dither to approximate the original image luminosity levels. If + dither is :data:`NONE`, all values larger than 128 are set to 255 (white), + all other values to 0 (black). To use other thresholds, use the + :py:meth:`~PIL.Image.Image.point` method. + + When converting from "RGBA" to "P" without a **matrix** argument, + this passes the operation to :py:meth:`~PIL.Image.Image.quantize`, + and **dither** and **palette** are ignored. + + :param mode: The requested mode. See: :ref:`concept-modes`. + :param matrix: An optional conversion matrix. If given, this + should be 4- or 12-tuple containing floating point values. + :param dither: Dithering method, used when converting from + mode "RGB" to "P" or from "RGB" or "L" to "1". + Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default). + Note that this is not used when **matrix** is supplied. 
+ :param palette: Palette to use when converting from mode "RGB" + to "P". Available palettes are :data:`WEB` or :data:`ADAPTIVE`. + :param colors: Number of colors to use for the :data:`ADAPTIVE` palette. + Defaults to 256. + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + self.load() + + if not mode and self.mode == "P": + # determine default mode + if self.palette: + mode = self.palette.mode + else: + mode = "RGB" + if not mode or (mode == self.mode and not matrix): + return self.copy() + + has_transparency = self.info.get("transparency") is not None + if matrix: + # matrix conversion + if mode not in ("L", "RGB"): + raise ValueError("illegal conversion") + im = self.im.convert_matrix(mode, matrix) + new = self._new(im) + if has_transparency and self.im.bands == 3: + transparency = new.info["transparency"] + + def convert_transparency(m, v): + v = m[0] * v[0] + m[1] * v[1] + m[2] * v[2] + m[3] * 0.5 + return max(0, min(255, int(v))) + + if mode == "L": + transparency = convert_transparency(matrix, transparency) + elif len(mode) == 3: + transparency = tuple( + [ + convert_transparency( + matrix[i * 4 : i * 4 + 4], transparency + ) + for i in range(0, len(transparency)) + ] + ) + new.info["transparency"] = transparency + return new + + if mode == "P" and self.mode == "RGBA": + return self.quantize(colors) + + trns = None + delete_trns = False + # transparency handling + if has_transparency: + if self.mode in ("1", "L", "I", "RGB") and mode == "RGBA": + # Use transparent conversion to promote from transparent + # color to an alpha channel. + new_im = self._new( + self.im.convert_transparent(mode, self.info["transparency"]) + ) + del new_im.info["transparency"] + return new_im + elif self.mode in ("L", "RGB", "P") and mode in ("L", "RGB", "P"): + t = self.info["transparency"] + if isinstance(t, bytes): + # Dragons. 
This can't be represented by a single color + warnings.warn( + "Palette images with Transparency expressed in bytes should be " + "converted to RGBA images" + ) + delete_trns = True + else: + # get the new transparency color. + # use existing conversions + trns_im = Image()._new(core.new(self.mode, (1, 1))) + if self.mode == "P": + trns_im.putpalette(self.palette) + if isinstance(t, tuple): + try: + t = trns_im.palette.getcolor(t) + except Exception as e: + raise ValueError( + "Couldn't allocate a palette color for transparency" + ) from e + trns_im.putpixel((0, 0), t) + + if mode in ("L", "RGB"): + trns_im = trns_im.convert(mode) + else: + # can't just retrieve the palette number, got to do it + # after quantization. + trns_im = trns_im.convert("RGB") + trns = trns_im.getpixel((0, 0)) + + elif self.mode == "P" and mode == "RGBA": + t = self.info["transparency"] + delete_trns = True + + if isinstance(t, bytes): + self.im.putpalettealphas(t) + elif isinstance(t, int): + self.im.putpalettealpha(t, 0) + else: + raise ValueError("Transparency for P mode should be bytes or int") + + if mode == "P" and palette == ADAPTIVE: + im = self.im.quantize(colors) + new = self._new(im) + from . import ImagePalette + + new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB")) + if delete_trns: + # This could possibly happen if we requantize to fewer colors. + # The transparency would be totally off in that case. + del new.info["transparency"] + if trns is not None: + try: + new.info["transparency"] = new.palette.getcolor(trns) + except Exception: + # if we can't make a transparent color, don't leave the old + # transparency hanging around to mess us up. 
+ del new.info["transparency"] + warnings.warn("Couldn't allocate palette entry for transparency") + return new + + # colorspace conversion + if dither is None: + dither = FLOYDSTEINBERG + + try: + im = self.im.convert(mode, dither) + except ValueError: + try: + # normalize source image and try again + im = self.im.convert(getmodebase(self.mode)) + im = im.convert(mode, dither) + except KeyError as e: + raise ValueError("illegal conversion") from e + + new_im = self._new(im) + if delete_trns: + # crash fail if we leave a bytes transparency in an rgb/l mode. + del new_im.info["transparency"] + if trns is not None: + if new_im.mode == "P": + try: + new_im.info["transparency"] = new_im.palette.getcolor(trns) + except Exception: + del new_im.info["transparency"] + warnings.warn("Couldn't allocate palette entry for transparency") + else: + new_im.info["transparency"] = trns + return new_im + + def quantize(self, colors=256, method=None, kmeans=0, palette=None, dither=1): + """ + Convert the image to 'P' mode with the specified number + of colors. + + :param colors: The desired number of colors, <= 256 + :param method: :data:`MEDIANCUT` (median cut), + :data:`MAXCOVERAGE` (maximum coverage), + :data:`FASTOCTREE` (fast octree), + :data:`LIBIMAGEQUANT` (libimagequant; check support using + :py:func:`PIL.features.check_feature` + with ``feature="libimagequant"``). + :param kmeans: Integer + :param palette: Quantize to the palette of given + :py:class:`PIL.Image.Image`. + :param dither: Dithering method, used when converting from + mode "RGB" to "P" or from "RGB" or "L" to "1". + Available methods are :data:`NONE` or :data:`FLOYDSTEINBERG` (default). + Default: 1 (legacy setting) + :returns: A new image + + """ + + self.load() + + if method is None: + # defaults: + method = 0 + if self.mode == "RGBA": + method = 2 + + if self.mode == "RGBA" and method not in (2, 3): + # Caller specified an invalid mode. 
+ raise ValueError( + "Fast Octree (method == 2) and libimagequant (method == 3) " + "are the only valid methods for quantizing RGBA images" + ) + + if palette: + # use palette from reference image + palette.load() + if palette.mode != "P": + raise ValueError("bad mode for palette image") + if self.mode != "RGB" and self.mode != "L": + raise ValueError( + "only RGB or L mode images can be quantized to a palette" + ) + im = self.im.convert("P", dither, palette.im) + return self._new(im) + + im = self._new(self.im.quantize(colors, method, kmeans)) + + from . import ImagePalette + + mode = im.im.getpalettemode() + im.palette = ImagePalette.ImagePalette(mode, im.im.getpalette(mode, mode)) + + return im + + def copy(self): + """ + Copies this image. Use this method if you wish to paste things + into an image, but still retain the original. + + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. + """ + self.load() + return self._new(self.im.copy()) + + __copy__ = copy + + def crop(self, box=None): + """ + Returns a rectangular region from this image. The box is a + 4-tuple defining the left, upper, right, and lower pixel + coordinate. See :ref:`coordinate-system`. + + Note: Prior to Pillow 3.4.0, this was a lazy operation. + + :param box: The crop rectangle, as a (left, upper, right, lower)-tuple. + :rtype: :py:class:`~PIL.Image.Image` + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if box is None: + return self.copy() + + self.load() + return self._new(self._crop(self.im, box)) + + def _crop(self, im, box): + """ + Returns a rectangular region from the core image object im. + + This is equivalent to calling im.crop((x0, y0, x1, y1)), but + includes additional sanity checks. + + :param im: a core image object + :param box: The crop rectangle, as a (left, upper, right, lower)-tuple. + :returns: A core image object. 
+ """ + + x0, y0, x1, y1 = map(int, map(round, box)) + + absolute_values = (abs(x1 - x0), abs(y1 - y0)) + + _decompression_bomb_check(absolute_values) + + return im.crop((x0, y0, x1, y1)) + + def draft(self, mode, size): + """ + Configures the image file loader so it returns a version of the + image that as closely as possible matches the given mode and + size. For example, you can use this method to convert a color + JPEG to greyscale while loading it. + + If any changes are made, returns a tuple with the chosen ``mode`` and + ``box`` with coordinates of the original image within the altered one. + + Note that this method modifies the :py:class:`~PIL.Image.Image` object + in place. If the image has already been loaded, this method has no + effect. + + Note: This method is not implemented for most images. It is + currently implemented only for JPEG and MPO images. + + :param mode: The requested mode. + :param size: The requested size. + """ + pass + + def _expand(self, xmargin, ymargin=None): + if ymargin is None: + ymargin = xmargin + self.load() + return self._new(self.im.expand(xmargin, ymargin, 0)) + + def filter(self, filter): + """ + Filters this image using the given filter. For a list of + available filters, see the :py:mod:`~PIL.ImageFilter` module. + + :param filter: Filter kernel. + :returns: An :py:class:`~PIL.Image.Image` object. """ + + from . import ImageFilter + + self.load() + + if isinstance(filter, Callable): + filter = filter() + if not hasattr(filter, "filter"): + raise TypeError( + "filter argument should be ImageFilter.Filter instance or class" + ) + + multiband = isinstance(filter, ImageFilter.MultibandFilter) + if self.im.bands == 1 or multiband: + return self._new(filter.filter(self.im)) + + ims = [] + for c in range(self.im.bands): + ims.append(self._new(filter.filter(self.im.getband(c)))) + return merge(self.mode, ims) + + def getbands(self): + """ + Returns a tuple containing the name of each band in this image. 
+ For example, **getbands** on an RGB image returns ("R", "G", "B"). + + :returns: A tuple containing band names. + :rtype: tuple + """ + return ImageMode.getmode(self.mode).bands + + def getbbox(self): + """ + Calculates the bounding box of the non-zero regions in the + image. + + :returns: The bounding box is returned as a 4-tuple defining the + left, upper, right, and lower pixel coordinate. See + :ref:`coordinate-system`. If the image is completely empty, this + method returns None. + + """ + + self.load() + return self.im.getbbox() + + def getcolors(self, maxcolors=256): + """ + Returns a list of colors used in this image. + + :param maxcolors: Maximum number of colors. If this number is + exceeded, this method returns None. The default limit is + 256 colors. + :returns: An unsorted list of (count, pixel) values. + """ + + self.load() + if self.mode in ("1", "L", "P"): + h = self.im.histogram() + out = [] + for i in range(256): + if h[i]: + out.append((h[i], i)) + if len(out) > maxcolors: + return None + return out + return self.im.getcolors(maxcolors) + + def getdata(self, band=None): + """ + Returns the contents of this image as a sequence object + containing pixel values. The sequence object is flattened, so + that values for line one follow directly after the values of + line zero, and so on. + + Note that the sequence object returned by this method is an + internal PIL data type, which only supports certain sequence + operations. To convert it to an ordinary sequence (e.g. for + printing), use **list(im.getdata())**. + + :param band: What band to return. The default is to return + all bands. To return a single band, pass in the index + value (e.g. 0 to get the "R" band from an "RGB" image). + :returns: A sequence-like object. + """ + + self.load() + if band is not None: + return self.im.getband(band) + return self.im # could be abused + + def getextrema(self): + """ + Gets the the minimum and maximum pixel values for each band in + the image. 
+ + :returns: For a single-band image, a 2-tuple containing the + minimum and maximum pixel value. For a multi-band image, + a tuple containing one 2-tuple for each band. + """ + + self.load() + if self.im.bands > 1: + extrema = [] + for i in range(self.im.bands): + extrema.append(self.im.getband(i).getextrema()) + return tuple(extrema) + return self.im.getextrema() + + def getexif(self): + if self._exif is None: + self._exif = Exif() + + exif_info = self.info.get("exif") + if exif_info is None and "Raw profile type exif" in self.info: + exif_info = bytes.fromhex( + "".join(self.info["Raw profile type exif"].split("\n")[3:]) + ) + self._exif.load(exif_info) + + # XMP tags + if 0x0112 not in self._exif: + xmp_tags = self.info.get("XML:com.adobe.xmp") + if xmp_tags: + root = xml.etree.ElementTree.fromstring(xmp_tags) + for elem in root.iter(): + if elem.tag.endswith("}Description"): + orientation = elem.attrib.get( + "{http://ns.adobe.com/tiff/1.0/}Orientation" + ) + if orientation: + self._exif[0x0112] = int(orientation) + break + + return self._exif + + def getim(self): + """ + Returns a capsule that points to the internal image memory. + + :returns: A capsule object. + """ + + self.load() + return self.im.ptr + + def getpalette(self): + """ + Returns the image palette as a list. + + :returns: A list of color values [r, g, b, ...], or None if the + image has no palette. + """ + + self.load() + try: + return list(self.im.getpalette()) + except ValueError: + return None # no palette + + def getpixel(self, xy): + """ + Returns the pixel value at a given position. + + :param xy: The coordinate, given as (x, y). See + :ref:`coordinate-system`. + :returns: The pixel value. If the image is a multi-layer image, + this method returns a tuple. 
+ """ + + self.load() + if self.pyaccess: + return self.pyaccess.getpixel(xy) + return self.im.getpixel(xy) + + def getprojection(self): + """ + Get projection to x and y axes + + :returns: Two sequences, indicating where there are non-zero + pixels along the X-axis and the Y-axis, respectively. + """ + + self.load() + x, y = self.im.getprojection() + return [i8(c) for c in x], [i8(c) for c in y] + + def histogram(self, mask=None, extrema=None): + """ + Returns a histogram for the image. The histogram is returned as + a list of pixel counts, one for each pixel value in the source + image. If the image has more than one band, the histograms for + all bands are concatenated (for example, the histogram for an + "RGB" image contains 768 values). + + A bilevel image (mode "1") is treated as a greyscale ("L") image + by this method. + + If a mask is provided, the method returns a histogram for those + parts of the image where the mask image is non-zero. The mask + image must have the same size as the image, and be either a + bi-level image (mode "1") or a greyscale image ("L"). + + :param mask: An optional mask. + :param extrema: An optional tuple of manually-specified extrema. + :returns: A list containing pixel counts. + """ + self.load() + if mask: + mask.load() + return self.im.histogram((0, 0), mask.im) + if self.mode in ("I", "F"): + if extrema is None: + extrema = self.getextrema() + return self.im.histogram(extrema) + return self.im.histogram() + + def entropy(self, mask=None, extrema=None): + """ + Calculates and returns the entropy for the image. + + A bilevel image (mode "1") is treated as a greyscale ("L") + image by this method. + + If a mask is provided, the method employs the histogram for + those parts of the image where the mask image is non-zero. + The mask image must have the same size as the image, and be + either a bi-level image (mode "1") or a greyscale image ("L"). + + :param mask: An optional mask. 
+        :param extrema: An optional tuple of manually-specified extrema.
+        :returns: A float value representing the image entropy
+        """
+        self.load()
+        if mask:
+            mask.load()
+            return self.im.entropy((0, 0), mask.im)
+        if self.mode in ("I", "F"):
+            if extrema is None:
+                extrema = self.getextrema()
+            return self.im.entropy(extrema)
+        return self.im.entropy()
+
+    def offset(self, xoffset, yoffset=None):
+        raise NotImplementedError(
+            "offset() has been removed. Please call ImageChops.offset() instead."
+        )
+
+    def paste(self, im, box=None, mask=None):
+        """
+        Pastes another image into this image. The box argument is either
+        a 2-tuple giving the upper left corner, a 4-tuple defining the
+        left, upper, right, and lower pixel coordinate, or None (same as
+        (0, 0)). See :ref:`coordinate-system`. If a 4-tuple is given, the size
+        of the pasted image must match the size of the region.
+
+        If the modes don't match, the pasted image is converted to the mode of
+        this image (see the :py:meth:`~PIL.Image.Image.convert` method for
+        details).
+
+        Instead of an image, the source can be an integer or tuple
+        containing pixel values. The method then fills the region
+        with the given color. When creating RGB images, you can
+        also use color strings as supported by the ImageColor module.
+
+        If a mask is given, this method updates only the regions
+        indicated by the mask. You can use either "1", "L" or "RGBA"
+        images (in the latter case, the alpha band is used as mask).
+        Where the mask is 255, the given image is copied as is. Where
+        the mask is 0, the current value is preserved. Intermediate
+        values will mix the two images together, including their alpha
+        channels if they have them.
+
+        See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
+        combine images with respect to their alpha channels.
+
+        :param im: Source image or pixel value (integer or tuple).
+        :param box: An optional 4-tuple giving the region to paste into.
+ If a 2-tuple is used instead, it's treated as the upper left + corner. If omitted or None, the source is pasted into the + upper left corner. + + If an image is given as the second argument and there is no + third, the box defaults to (0, 0), and the second argument + is interpreted as a mask image. + :param mask: An optional mask image. + """ + + if isImageType(box) and mask is None: + # abbreviated paste(im, mask) syntax + mask = box + box = None + + if box is None: + box = (0, 0) + + if len(box) == 2: + # upper left corner given; get size from image or mask + if isImageType(im): + size = im.size + elif isImageType(mask): + size = mask.size + else: + # FIXME: use self.size here? + raise ValueError("cannot determine region size; use 4-item box") + box += (box[0] + size[0], box[1] + size[1]) + + if isinstance(im, str): + from . import ImageColor + + im = ImageColor.getcolor(im, self.mode) + + elif isImageType(im): + im.load() + if self.mode != im.mode: + if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"): + # should use an adapter for this! + im = im.convert(self.mode) + im = im.im + + self._ensure_mutable() + + if mask: + mask.load() + self.im.paste(im, box, mask.im) + else: + self.im.paste(im, box) + + def alpha_composite(self, im, dest=(0, 0), source=(0, 0)): + """ 'In-place' analog of Image.alpha_composite. Composites an image + onto this image. + + :param im: image to composite over this one + :param dest: Optional 2 tuple (left, top) specifying the upper + left corner in this (destination) image. + :param source: Optional 2 (left, top) tuple for the upper left + corner in the overlay source image, or 4 tuple (left, top, right, + bottom) for the bounds of the source rectangle + + Performance Note: Not currently implemented in-place in the core layer. 
+ """ + + if not isinstance(source, (list, tuple)): + raise ValueError("Source must be a tuple") + if not isinstance(dest, (list, tuple)): + raise ValueError("Destination must be a tuple") + if not len(source) in (2, 4): + raise ValueError("Source must be a 2 or 4-tuple") + if not len(dest) == 2: + raise ValueError("Destination must be a 2-tuple") + if min(source) < 0: + raise ValueError("Source must be non-negative") + if min(dest) < 0: + raise ValueError("Destination must be non-negative") + + if len(source) == 2: + source = source + im.size + + # over image, crop if it's not the whole thing. + if source == (0, 0) + im.size: + overlay = im + else: + overlay = im.crop(source) + + # target for the paste + box = dest + (dest[0] + overlay.width, dest[1] + overlay.height) + + # destination image. don't copy if we're using the whole image. + if box == (0, 0) + self.size: + background = self + else: + background = self.crop(box) + + result = alpha_composite(background, overlay) + self.paste(result, box) + + def point(self, lut, mode=None): + """ + Maps this image through a lookup table or function. + + :param lut: A lookup table, containing 256 (or 65536 if + self.mode=="I" and mode == "L") values per band in the + image. A function can be used instead, it should take a + single argument. The function is called once for each + possible pixel value, and the resulting table is applied to + all bands of the image. + :param mode: Output mode (default is same as input). In the + current version, this can only be used if the source image + has mode "L" or "P", and the output has mode "1" or the + source image mode is "I" and the output mode is "L". + :returns: An :py:class:`~PIL.Image.Image` object. 
+ """ + + self.load() + + if isinstance(lut, ImagePointHandler): + return lut.point(self) + + if callable(lut): + # if it isn't a list, it should be a function + if self.mode in ("I", "I;16", "F"): + # check if the function can be used with point_transform + # UNDONE wiredfool -- I think this prevents us from ever doing + # a gamma function point transform on > 8bit images. + scale, offset = _getscaleoffset(lut) + return self._new(self.im.point_transform(scale, offset)) + # for other modes, convert the function to a table + lut = [lut(i) for i in range(256)] * self.im.bands + + if self.mode == "F": + # FIXME: _imaging returns a confusing error message for this case + raise ValueError("point operation not supported for this mode") + + return self._new(self.im.point(lut, mode)) + + def putalpha(self, alpha): + """ + Adds or replaces the alpha layer in this image. If the image + does not have an alpha layer, it's converted to "LA" or "RGBA". + The new layer must be either "L" or "1". + + :param alpha: The new alpha layer. This can either be an "L" or "1" + image having the same size as this image, or an integer or + other color value. 
+ """ + + self._ensure_mutable() + + if self.mode not in ("LA", "PA", "RGBA"): + # attempt to promote self to a matching alpha mode + try: + mode = getmodebase(self.mode) + "A" + try: + self.im.setmode(mode) + except (AttributeError, ValueError) as e: + # do things the hard way + im = self.im.convert(mode) + if im.mode not in ("LA", "PA", "RGBA"): + raise ValueError from e # sanity check + self.im = im + self.pyaccess = None + self.mode = self.im.mode + except (KeyError, ValueError) as e: + raise ValueError("illegal image mode") from e + + if self.mode in ("LA", "PA"): + band = 1 + else: + band = 3 + + if isImageType(alpha): + # alpha layer + if alpha.mode not in ("1", "L"): + raise ValueError("illegal image mode") + alpha.load() + if alpha.mode == "1": + alpha = alpha.convert("L") + else: + # constant alpha + try: + self.im.fillband(band, alpha) + except (AttributeError, ValueError): + # do things the hard way + alpha = new("L", self.size, alpha) + else: + return + + self.im.putband(alpha.im, band) + + def putdata(self, data, scale=1.0, offset=0.0): + """ + Copies pixel data to this image. This method copies data from a + sequence object into the image, starting at the upper left + corner (0, 0), and continuing until either the image or the + sequence ends. The scale and offset values are used to adjust + the sequence values: **pixel = value*scale + offset**. + + :param data: A sequence object. + :param scale: An optional scale value. The default is 1.0. + :param offset: An optional offset value. The default is 0.0. + """ + + self._ensure_mutable() + + self.im.putdata(data, scale, offset) + + def putpalette(self, data, rawmode="RGB"): + """ + Attaches a palette to this image. The image must be a "P", + "PA", "L" or "LA" image, and the palette sequence must contain + 768 integer values, where each group of three values represent + the red, green, and blue values for the corresponding pixel + index. Instead of an integer sequence, you can use an 8-bit + string. 
+ + :param data: A palette sequence (either a list or a string). + :param rawmode: The raw mode of the palette. + """ + from . import ImagePalette + + if self.mode not in ("L", "LA", "P", "PA"): + raise ValueError("illegal image mode") + self.load() + if isinstance(data, ImagePalette.ImagePalette): + palette = ImagePalette.raw(data.rawmode, data.palette) + else: + if not isinstance(data, bytes): + data = bytes(data) + palette = ImagePalette.raw(rawmode, data) + self.mode = "PA" if "A" in self.mode else "P" + self.palette = palette + self.palette.mode = "RGB" + self.load() # install new palette + + def putpixel(self, xy, value): + """ + Modifies the pixel at the given position. The color is given as + a single numerical value for single-band images, and a tuple for + multi-band images. In addition to this, RGB and RGBA tuples are + accepted for P images. + + Note that this method is relatively slow. For more extensive changes, + use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw` + module instead. + + See: + + * :py:meth:`~PIL.Image.Image.paste` + * :py:meth:`~PIL.Image.Image.putdata` + * :py:mod:`~PIL.ImageDraw` + + :param xy: The pixel coordinate, given as (x, y). See + :ref:`coordinate-system`. + :param value: The pixel value. + """ + + if self.readonly: + self._copy() + self.load() + + if self.pyaccess: + return self.pyaccess.putpixel(xy, value) + + if ( + self.mode == "P" + and isinstance(value, (list, tuple)) + and len(value) in [3, 4] + ): + # RGB or RGBA value for a P image + value = self.palette.getcolor(value) + return self.im.putpixel(xy, value) + + def remap_palette(self, dest_map, source_palette=None): + """ + Rewrites the image to reorder the palette. + + :param dest_map: A list of indexes into the original palette. + e.g. [1,0] would swap a two item palette, and list(range(256)) + is the identity transform. + :param source_palette: Bytes or None. + :returns: An :py:class:`~PIL.Image.Image` object. + + """ + from . 
import ImagePalette + + if self.mode not in ("L", "P"): + raise ValueError("illegal image mode") + + if source_palette is None: + if self.mode == "P": + real_source_palette = self.im.getpalette("RGB")[:768] + else: # L-mode + real_source_palette = bytearray(i // 3 for i in range(768)) + else: + real_source_palette = source_palette + + palette_bytes = b"" + new_positions = [0] * 256 + + # pick only the used colors from the palette + for i, oldPosition in enumerate(dest_map): + palette_bytes += real_source_palette[oldPosition * 3 : oldPosition * 3 + 3] + new_positions[oldPosition] = i + + # replace the palette color id of all pixel with the new id + + # Palette images are [0..255], mapped through a 1 or 3 + # byte/color map. We need to remap the whole image + # from palette 1 to palette 2. New_positions is + # an array of indexes into palette 1. Palette 2 is + # palette 1 with any holes removed. + + # We're going to leverage the convert mechanism to use the + # C code to remap the image from palette 1 to palette 2, + # by forcing the source image into 'L' mode and adding a + # mapping 'L' mode palette, then converting back to 'L' + # sans palette thus converting the image bytes, then + # assigning the optimized RGB palette. + + # perf reference, 9500x4000 gif, w/~135 colors + # 14 sec prepatch, 1 sec postpatch with optimization forced. + + mapping_palette = bytearray(new_positions) + + m_im = self.copy() + m_im.mode = "P" + + m_im.palette = ImagePalette.ImagePalette( + "RGB", palette=mapping_palette * 3, size=768 + ) + # possibly set palette dirty, then + # m_im.putpalette(mapping_palette, 'L') # converts to 'P' + # or just force it. + # UNDONE -- this is part of the general issue with palettes + m_im.im.putpalette(*m_im.palette.getdata()) + + m_im = m_im.convert("L") + + # Internally, we require 768 bytes for a palette. 
+ new_palette_bytes = palette_bytes + (768 - len(palette_bytes)) * b"\x00" + m_im.putpalette(new_palette_bytes) + m_im.palette = ImagePalette.ImagePalette( + "RGB", palette=palette_bytes, size=len(palette_bytes) + ) + + return m_im + + def _get_safe_box(self, size, resample, box): + """Expands the box so it includes adjacent pixels + that may be used by resampling with the given resampling filter. + """ + filter_support = _filters_support[resample] - 0.5 + scale_x = (box[2] - box[0]) / size[0] + scale_y = (box[3] - box[1]) / size[1] + support_x = filter_support * scale_x + support_y = filter_support * scale_y + + return ( + max(0, int(box[0] - support_x)), + max(0, int(box[1] - support_y)), + min(self.size[0], math.ceil(box[2] + support_x)), + min(self.size[1], math.ceil(box[3] + support_y)), + ) + + def resize(self, size, resample=BICUBIC, box=None, reducing_gap=None): + """ + Returns a resized copy of this image. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param resample: An optional resampling filter. This can be + one of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BOX`, + :py:data:`PIL.Image.BILINEAR`, :py:data:`PIL.Image.HAMMING`, + :py:data:`PIL.Image.BICUBIC` or :py:data:`PIL.Image.LANCZOS`. + Default filter is :py:data:`PIL.Image.BICUBIC`. + If the image has mode "1" or "P", it is + always set to :py:data:`PIL.Image.NEAREST`. + See: :ref:`concept-filters`. + :param box: An optional 4-tuple of floats providing + the source image region to be scaled. + The values must be within (0, 0, width, height) rectangle. + If omitted or None, the entire source is used. + :param reducing_gap: Apply optimization by resizing the image + in two steps. First, reducing the image by integer times + using :py:meth:`~PIL.Image.Image.reduce`. + Second, resizing using regular resampling. The last step + changes size no less than by ``reducing_gap`` times. 
+ ``reducing_gap`` may be None (no first step is performed) + or should be greater than 1.0. The bigger ``reducing_gap``, + the closer the result to the fair resampling. + The smaller ``reducing_gap``, the faster resizing. + With ``reducing_gap`` greater or equal to 3.0, the result is + indistinguishable from fair resampling in most cases. + The default value is None (no optimization). + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING): + message = "Unknown resampling filter ({}).".format(resample) + + filters = [ + "{} ({})".format(filter[1], filter[0]) + for filter in ( + (NEAREST, "Image.NEAREST"), + (LANCZOS, "Image.LANCZOS"), + (BILINEAR, "Image.BILINEAR"), + (BICUBIC, "Image.BICUBIC"), + (BOX, "Image.BOX"), + (HAMMING, "Image.HAMMING"), + ) + ] + raise ValueError( + message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1] + ) + + if reducing_gap is not None and reducing_gap < 1.0: + raise ValueError("reducing_gap must be 1.0 or greater") + + size = tuple(size) + + if box is None: + box = (0, 0) + self.size + else: + box = tuple(box) + + if self.size == size and box == (0, 0) + self.size: + return self.copy() + + if self.mode in ("1", "P"): + resample = NEAREST + + if self.mode in ["LA", "RGBA"]: + im = self.convert(self.mode[:-1] + "a") + im = im.resize(size, resample, box) + return im.convert(self.mode) + + self.load() + + if reducing_gap is not None and resample != NEAREST: + factor_x = int((box[2] - box[0]) / size[0] / reducing_gap) or 1 + factor_y = int((box[3] - box[1]) / size[1] / reducing_gap) or 1 + if factor_x > 1 or factor_y > 1: + reduce_box = self._get_safe_box(size, resample, box) + factor = (factor_x, factor_y) + if callable(self.reduce): + self = self.reduce(factor, box=reduce_box) + else: + self = Image.reduce(self, factor, box=reduce_box) + box = ( + (box[0] - reduce_box[0]) / factor_x, + (box[1] - reduce_box[1]) / factor_y, + (box[2] - reduce_box[0]) 
/ factor_x, + (box[3] - reduce_box[1]) / factor_y, + ) + + return self._new(self.im.resize(size, resample, box)) + + def reduce(self, factor, box=None): + """ + Returns a copy of the image reduced by `factor` times. + If the size of the image is not dividable by the `factor`, + the resulting size will be rounded up. + + :param factor: A greater than 0 integer or tuple of two integers + for width and height separately. + :param box: An optional 4-tuple of ints providing + the source image region to be reduced. + The values must be within (0, 0, width, height) rectangle. + If omitted or None, the entire source is used. + """ + if not isinstance(factor, (list, tuple)): + factor = (factor, factor) + + if box is None: + box = (0, 0) + self.size + else: + box = tuple(box) + + if factor == (1, 1) and box == (0, 0) + self.size: + return self.copy() + + if self.mode in ["LA", "RGBA"]: + im = self.convert(self.mode[:-1] + "a") + im = im.reduce(factor, box) + return im.convert(self.mode) + + self.load() + + return self._new(self.im.reduce(factor, box)) + + def rotate( + self, + angle, + resample=NEAREST, + expand=0, + center=None, + translate=None, + fillcolor=None, + ): + """ + Returns a rotated copy of this image. This method returns a + copy of this image, rotated the given number of degrees counter + clockwise around its centre. + + :param angle: In degrees counter clockwise. + :param resample: An optional resampling filter. This can be + one of :py:data:`PIL.Image.NEAREST` (use nearest neighbour), + :py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2 + environment), or :py:data:`PIL.Image.BICUBIC` + (cubic spline interpolation in a 4x4 environment). + If omitted, or if the image has mode "1" or "P", it is + set to :py:data:`PIL.Image.NEAREST`. See :ref:`concept-filters`. + :param expand: Optional expansion flag. If true, expands the output + image to make it large enough to hold the entire rotated image. 
+ If false or omitted, make the output image the same size as the + input image. Note that the expand flag assumes rotation around + the center and no translation. + :param center: Optional center of rotation (a 2-tuple). Origin is + the upper left corner. Default is the center of the image. + :param translate: An optional post-rotate translation (a 2-tuple). + :param fillcolor: An optional color for area outside the rotated image. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + angle = angle % 360.0 + + # Fast paths regardless of filter, as long as we're not + # translating or changing the center. + if not (center or translate): + if angle == 0: + return self.copy() + if angle == 180: + return self.transpose(ROTATE_180) + if angle == 90 and expand: + return self.transpose(ROTATE_90) + if angle == 270 and expand: + return self.transpose(ROTATE_270) + + # Calculate the affine matrix. Note that this is the reverse + # transformation (from destination image to source) because we + # want to interpolate the (discrete) destination pixel from + # the local area around the (floating) source pixel. + + # The matrix we actually want (note that it operates from the right): + # (1, 0, tx) (1, 0, cx) ( cos a, sin a, 0) (1, 0, -cx) + # (0, 1, ty) * (0, 1, cy) * (-sin a, cos a, 0) * (0, 1, -cy) + # (0, 0, 1) (0, 0, 1) ( 0, 0, 1) (0, 0, 1) + + # The reverse matrix is thus: + # (1, 0, cx) ( cos -a, sin -a, 0) (1, 0, -cx) (1, 0, -tx) + # (0, 1, cy) * (-sin -a, cos -a, 0) * (0, 1, -cy) * (0, 1, -ty) + # (0, 0, 1) ( 0, 0, 1) (0, 0, 1) (0, 0, 1) + + # In any case, the final translation may be updated at the end to + # compensate for the expand flag. + + w, h = self.size + + if translate is None: + post_trans = (0, 0) + else: + post_trans = translate + if center is None: + # FIXME These should be rounded to ints? 
+ rotn_center = (w / 2.0, h / 2.0) + else: + rotn_center = center + + angle = -math.radians(angle) + matrix = [ + round(math.cos(angle), 15), + round(math.sin(angle), 15), + 0.0, + round(-math.sin(angle), 15), + round(math.cos(angle), 15), + 0.0, + ] + + def transform(x, y, matrix): + (a, b, c, d, e, f) = matrix + return a * x + b * y + c, d * x + e * y + f + + matrix[2], matrix[5] = transform( + -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix + ) + matrix[2] += rotn_center[0] + matrix[5] += rotn_center[1] + + if expand: + # calculate output size + xx = [] + yy = [] + for x, y in ((0, 0), (w, 0), (w, h), (0, h)): + x, y = transform(x, y, matrix) + xx.append(x) + yy.append(y) + nw = math.ceil(max(xx)) - math.floor(min(xx)) + nh = math.ceil(max(yy)) - math.floor(min(yy)) + + # We multiply a translation matrix from the right. Because of its + # special form, this is the same as taking the image of the + # translation vector as new translation vector. + matrix[2], matrix[5] = transform(-(nw - w) / 2.0, -(nh - h) / 2.0, matrix) + w, h = nw, nh + + return self.transform((w, h), AFFINE, matrix, resample, fillcolor=fillcolor) + + def save(self, fp, format=None, **params): + """ + Saves this image under the given filename. If no format is + specified, the format to use is determined from the filename + extension, if possible. + + Keyword options can be used to provide additional instructions + to the writer. If a writer doesn't recognise an option, it is + silently ignored. The available options are described in the + :doc:`image format documentation + <../handbook/image-file-formats>` for each writer. + + You can use a file object instead of a filename. In this case, + you must always specify the format. The file object must + implement the ``seek``, ``tell``, and ``write`` + methods, and be opened in binary mode. + + :param fp: A filename (string), pathlib.Path object or file object. + :param format: Optional format override. 
If omitted, the + format to use is determined from the filename extension. + If a file object was used instead of a filename, this + parameter should always be used. + :param params: Extra parameters to the image writer. + :returns: None + :exception ValueError: If the output format could not be determined + from the file name. Use the format option to solve this. + :exception OSError: If the file could not be written. The file + may have been created, and may contain partial data. + """ + + filename = "" + open_fp = False + if isPath(fp): + filename = fp + open_fp = True + elif isinstance(fp, Path): + filename = str(fp) + open_fp = True + if not filename and hasattr(fp, "name") and isPath(fp.name): + # only set the name for metadata purposes + filename = fp.name + + # may mutate self! + self._ensure_mutable() + + save_all = params.pop("save_all", False) + self.encoderinfo = params + self.encoderconfig = () + + preinit() + + ext = os.path.splitext(filename)[1].lower() + + if not format: + if ext not in EXTENSION: + init() + try: + format = EXTENSION[ext] + except KeyError as e: + raise ValueError("unknown file extension: {}".format(ext)) from e + + if format.upper() not in SAVE: + init() + if save_all: + save_handler = SAVE_ALL[format.upper()] + else: + save_handler = SAVE[format.upper()] + + if open_fp: + if params.get("append", False): + # Open also for reading ("+"), because TIFF save_all + # writer needs to go back and edit the written data. + fp = builtins.open(filename, "r+b") + else: + fp = builtins.open(filename, "w+b") + + try: + save_handler(self, fp, filename) + finally: + # do what we can to clean up + if open_fp: + fp.close() + + def seek(self, frame): + """ + Seeks to the given frame in this sequence file. If you seek + beyond the end of the sequence, the method raises an + ``EOFError`` exception. When a sequence file is opened, the + library automatically seeks to frame 0. + + See :py:meth:`~PIL.Image.Image.tell`. 
+ + :param frame: Frame number, starting at 0. + :exception EOFError: If the call attempts to seek beyond the end + of the sequence. + """ + + # overridden by file handlers + if frame != 0: + raise EOFError + + def show(self, title=None, command=None): + """ + Displays this image. This method is mainly intended for debugging purposes. + + This method calls :py:func:`PIL.ImageShow.show` internally. You can use + :py:func:`PIL.ImageShow.register` to override its default behaviour. + + The image is first saved to a temporary file. By default, it will be in + PNG format. + + On Unix, the image is then opened using the **display**, **eog** or + **xv** utility, depending on which one can be found. + + On macOS, the image is opened with the native Preview application. + + On Windows, the image is opened with the standard PNG display utility. + + :param title: Optional title to use for the image window, where possible. + """ + + if command is not None: + warnings.warn( + "The command parameter is deprecated and will be removed in a future " + "release. Use a subclass of ImageShow.Viewer instead.", + DeprecationWarning, + ) + + _show(self, title=title, command=command) + + def split(self): + """ + Split this image into individual bands. This method returns a + tuple of individual image bands from an image. For example, + splitting an "RGB" image creates three new images each + containing a copy of one of the original bands (red, green, + blue). + + If you need only one band, :py:meth:`~PIL.Image.Image.getchannel` + method can be more convenient and faster. + + :returns: A tuple containing bands. + """ + + self.load() + if self.im.bands == 1: + ims = [self.copy()] + else: + ims = map(self._new, self.im.split()) + return tuple(ims) + + def getchannel(self, channel): + """ + Returns an image containing a single channel of the source image. + + :param channel: What channel to return. 
Could be index + (0 for "R" channel of "RGB") or channel name + ("A" for alpha channel of "RGBA"). + :returns: An image in "L" mode. + + .. versionadded:: 4.3.0 + """ + self.load() + + if isinstance(channel, str): + try: + channel = self.getbands().index(channel) + except ValueError as e: + raise ValueError('The image has no channel "{}"'.format(channel)) from e + + return self._new(self.im.getband(channel)) + + def tell(self): + """ + Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`. + + :returns: Frame number, starting with 0. + """ + return 0 + + def thumbnail(self, size, resample=BICUBIC, reducing_gap=2.0): + """ + Make this image into a thumbnail. This method modifies the + image to contain a thumbnail version of itself, no larger than + the given size. This method calculates an appropriate thumbnail + size to preserve the aspect of the image, calls the + :py:meth:`~PIL.Image.Image.draft` method to configure the file reader + (where applicable), and finally resizes the image. + + Note that this function modifies the :py:class:`~PIL.Image.Image` + object in place. If you need to use the full resolution image as well, + apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original + image. + + :param size: Requested size. + :param resample: Optional resampling filter. This can be one + of :py:data:`PIL.Image.NEAREST`, :py:data:`PIL.Image.BILINEAR`, + :py:data:`PIL.Image.BICUBIC`, or :py:data:`PIL.Image.LANCZOS`. + If omitted, it defaults to :py:data:`PIL.Image.BICUBIC`. + (was :py:data:`PIL.Image.NEAREST` prior to version 2.5.0). + See: :ref:`concept-filters`. + :param reducing_gap: Apply optimization by resizing the image + in two steps. First, reducing the image by integer times + using :py:meth:`~PIL.Image.Image.reduce` or + :py:meth:`~PIL.Image.Image.draft` for JPEG images. + Second, resizing using regular resampling. The last step + changes size no less than by ``reducing_gap`` times. 
+ ``reducing_gap`` may be None (no first step is performed) + or should be greater than 1.0. The bigger ``reducing_gap``, + the closer the result to the fair resampling. + The smaller ``reducing_gap``, the faster resizing. + With ``reducing_gap`` greater or equal to 3.0, the result is + indistinguishable from fair resampling in most cases. + The default value is 2.0 (very close to fair resampling + while still being faster in many cases). + :returns: None + """ + + x, y = map(math.floor, size) + if x >= self.width and y >= self.height: + return + + def round_aspect(number, key): + return max(min(math.floor(number), math.ceil(number), key=key), 1) + + # preserve aspect ratio + aspect = self.width / self.height + if x / y >= aspect: + x = round_aspect(y * aspect, key=lambda n: abs(aspect - n / y)) + else: + y = round_aspect( + x / aspect, key=lambda n: 0 if n == 0 else abs(aspect - x / n) + ) + size = (x, y) + + box = None + if reducing_gap is not None: + res = self.draft(None, (size[0] * reducing_gap, size[1] * reducing_gap)) + if res is not None: + box = res[1] + + if self.size != size: + im = self.resize(size, resample, box=box, reducing_gap=reducing_gap) + + self.im = im.im + self._size = size + self.mode = self.im.mode + + self.readonly = 0 + self.pyaccess = None + + # FIXME: the different transform methods need further explanation + # instead of bloating the method docs, add a separate chapter. + def transform( + self, size, method, data=None, resample=NEAREST, fill=1, fillcolor=None + ): + """ + Transforms this image. This method creates a new image with the + given size, and the same mode as the original, and copies data + to the new image using the given transform. + + :param size: The output size. + :param method: The transformation method. 
This is one of + :py:data:`PIL.Image.EXTENT` (cut out a rectangular subregion), + :py:data:`PIL.Image.AFFINE` (affine transform), + :py:data:`PIL.Image.PERSPECTIVE` (perspective transform), + :py:data:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or + :py:data:`PIL.Image.MESH` (map a number of source quadrilaterals + in one operation). + + It may also be an :py:class:`~PIL.Image.ImageTransformHandler` + object:: + + class Example(Image.ImageTransformHandler): + def transform(size, method, data, resample, fill=1): + # Return result + + It may also be an object with a :py:meth:`~method.getdata` method + that returns a tuple supplying new **method** and **data** values:: + + class Example: + def getdata(self): + method = Image.EXTENT + data = (0, 0, 100, 100) + return method, data + :param data: Extra data to the transformation method. + :param resample: Optional resampling filter. It can be one of + :py:data:`PIL.Image.NEAREST` (use nearest neighbour), + :py:data:`PIL.Image.BILINEAR` (linear interpolation in a 2x2 + environment), or :py:data:`PIL.Image.BICUBIC` (cubic spline + interpolation in a 4x4 environment). If omitted, or if the image + has mode "1" or "P", it is set to :py:data:`PIL.Image.NEAREST`. + See: :ref:`concept-filters`. + :param fill: If **method** is an + :py:class:`~PIL.Image.ImageTransformHandler` object, this is one of + the arguments passed to it. Otherwise, it is unused. + :param fillcolor: Optional fill color for the area outside the + transform in the output image. + :returns: An :py:class:`~PIL.Image.Image` object. 
+ """ + + if self.mode == "LA": + return ( + self.convert("La") + .transform(size, method, data, resample, fill, fillcolor) + .convert("LA") + ) + + if self.mode == "RGBA": + return ( + self.convert("RGBa") + .transform(size, method, data, resample, fill, fillcolor) + .convert("RGBA") + ) + + if isinstance(method, ImageTransformHandler): + return method.transform(size, self, resample=resample, fill=fill) + + if hasattr(method, "getdata"): + # compatibility w. old-style transform objects + method, data = method.getdata() + + if data is None: + raise ValueError("missing method data") + + im = new(self.mode, size, fillcolor) + im.info = self.info.copy() + if method == MESH: + # list of quads + for box, quad in data: + im.__transformer(box, self, QUAD, quad, resample, fillcolor is None) + else: + im.__transformer( + (0, 0) + size, self, method, data, resample, fillcolor is None + ) + + return im + + def __transformer(self, box, image, method, data, resample=NEAREST, fill=1): + w = box[2] - box[0] + h = box[3] - box[1] + + if method == AFFINE: + data = data[0:6] + + elif method == EXTENT: + # convert extent to an affine transform + x0, y0, x1, y1 = data + xs = (x1 - x0) / w + ys = (y1 - y0) / h + method = AFFINE + data = (xs, 0, x0, 0, ys, y0) + + elif method == PERSPECTIVE: + data = data[0:8] + + elif method == QUAD: + # quadrilateral warp. data specifies the four corners + # given as NW, SW, SE, and NE. 
+ nw = data[0:2] + sw = data[2:4] + se = data[4:6] + ne = data[6:8] + x0, y0 = nw + As = 1.0 / w + At = 1.0 / h + data = ( + x0, + (ne[0] - x0) * As, + (sw[0] - x0) * At, + (se[0] - sw[0] - ne[0] + x0) * As * At, + y0, + (ne[1] - y0) * As, + (sw[1] - y0) * At, + (se[1] - sw[1] - ne[1] + y0) * As * At, + ) + + else: + raise ValueError("unknown transformation method") + + if resample not in (NEAREST, BILINEAR, BICUBIC): + if resample in (BOX, HAMMING, LANCZOS): + message = { + BOX: "Image.BOX", + HAMMING: "Image.HAMMING", + LANCZOS: "Image.LANCZOS/Image.ANTIALIAS", + }[resample] + " ({}) cannot be used.".format(resample) + else: + message = "Unknown resampling filter ({}).".format(resample) + + filters = [ + "{} ({})".format(filter[1], filter[0]) + for filter in ( + (NEAREST, "Image.NEAREST"), + (BILINEAR, "Image.BILINEAR"), + (BICUBIC, "Image.BICUBIC"), + ) + ] + raise ValueError( + message + " Use " + ", ".join(filters[:-1]) + " or " + filters[-1] + ) + + image.load() + + self.load() + + if image.mode in ("1", "P"): + resample = NEAREST + + self.im.transform2(box, image.im, method, data, resample, fill) + + def transpose(self, method): + """ + Transpose image (flip or rotate in 90 degree steps) + + :param method: One of :py:data:`PIL.Image.FLIP_LEFT_RIGHT`, + :py:data:`PIL.Image.FLIP_TOP_BOTTOM`, :py:data:`PIL.Image.ROTATE_90`, + :py:data:`PIL.Image.ROTATE_180`, :py:data:`PIL.Image.ROTATE_270`, + :py:data:`PIL.Image.TRANSPOSE` or :py:data:`PIL.Image.TRANSVERSE`. + :returns: Returns a flipped or rotated copy of this image. + """ + + self.load() + return self._new(self.im.transpose(method)) + + def effect_spread(self, distance): + """ + Randomly spread pixels in an image. + + :param distance: Distance to spread pixels. + """ + self.load() + return self._new(self.im.effect_spread(distance)) + + def toqimage(self): + """Returns a QImage copy of this image""" + from . 
import ImageQt + + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.toqimage(self) + + def toqpixmap(self): + """Returns a QPixmap copy of this image""" + from . import ImageQt + + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.toqpixmap(self) + + +# -------------------------------------------------------------------- +# Abstract handlers. + + +class ImagePointHandler: + # used as a mixin by point transforms (for use with im.point) + pass + + +class ImageTransformHandler: + # used as a mixin by geometry transforms (for use with im.transform) + pass + + +# -------------------------------------------------------------------- +# Factories + +# +# Debugging + + +def _wedge(): + """Create greyscale wedge (for debugging only)""" + + return Image()._new(core.wedge("L")) + + +def _check_size(size): + """ + Common check to enforce type and sanity check on size tuples + + :param size: Should be a 2 tuple of (width, height) + :returns: True, or raises a ValueError + """ + + if not isinstance(size, (list, tuple)): + raise ValueError("Size must be a tuple") + if len(size) != 2: + raise ValueError("Size must be a tuple of length 2") + if size[0] < 0 or size[1] < 0: + raise ValueError("Width and height must be >= 0") + + return True + + +def new(mode, size, color=0): + """ + Creates a new image with the given mode and size. + + :param mode: The mode to use for the new image. See: + :ref:`concept-modes`. + :param size: A 2-tuple, containing (width, height) in pixels. + :param color: What color to use for the image. Default is black. + If given, this should be a single integer or floating point value + for single-band modes, and a tuple for multi-band modes (one value + per band). When creating RGB images, you can also use color + strings as supported by the ImageColor module. If the color is + None, the image is not initialised. 
+ :returns: An :py:class:`~PIL.Image.Image` object. + """ + + _check_size(size) + + if color is None: + # don't initialize + return Image()._new(core.new(mode, size)) + + if isinstance(color, str): + # css3-style specifier + + from . import ImageColor + + color = ImageColor.getcolor(color, mode) + + im = Image() + if mode == "P" and isinstance(color, (list, tuple)) and len(color) in [3, 4]: + # RGB or RGBA value for a P image + from . import ImagePalette + + im.palette = ImagePalette.ImagePalette() + color = im.palette.getcolor(color) + return im._new(core.fill(mode, size, color)) + + +def frombytes(mode, size, data, decoder_name="raw", *args): + """ + Creates a copy of an image memory from pixel data in a buffer. + + In its simplest form, this function takes three arguments + (mode, size, and unpacked pixel data). + + You can also use any pixel decoder supported by PIL. For more + information on available decoders, see the section + :ref:`Writing Your Own File Decoder `. + + Note that this function decodes pixel data only, not entire images. + If you have an entire image in a string, wrap it in a + :py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load + it. + + :param mode: The image mode. See: :ref:`concept-modes`. + :param size: The image size. + :param data: A byte buffer containing raw data for the given mode. + :param decoder_name: What decoder to use. + :param args: Additional parameters for the given decoder. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + _check_size(size) + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if decoder_name == "raw" and args == (): + args = mode + + im = new(mode, size) + im.frombytes(data, decoder_name, args) + return im + + +def fromstring(*args, **kw): + raise NotImplementedError( + "fromstring() has been removed. Please call frombytes() instead." 
+ ) + + +def frombuffer(mode, size, data, decoder_name="raw", *args): + """ + Creates an image memory referencing pixel data in a byte buffer. + + This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data + in the byte buffer, where possible. This means that changes to the + original buffer object are reflected in this image). Not all modes can + share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK". + + Note that this function decodes pixel data only, not entire images. + If you have an entire image file in a string, wrap it in a + **BytesIO** object, and use :py:func:`~PIL.Image.open` to load it. + + In the current version, the default parameters used for the "raw" decoder + differs from that used for :py:func:`~PIL.Image.frombytes`. This is a + bug, and will probably be fixed in a future release. The current release + issues a warning if you do this; to disable the warning, you should provide + the full set of parameters. See below for details. + + :param mode: The image mode. See: :ref:`concept-modes`. + :param size: The image size. + :param data: A bytes or other buffer object containing raw + data for the given mode. + :param decoder_name: What decoder to use. + :param args: Additional parameters for the given decoder. For the + default encoder ("raw"), it's recommended that you provide the + full set of parameters:: + + frombuffer(mode, size, data, "raw", mode, 0, 1) + + :returns: An :py:class:`~PIL.Image.Image` object. + + .. 
versionadded:: 1.1.4 + """ + + _check_size(size) + + # may pass tuple instead of argument list + if len(args) == 1 and isinstance(args[0], tuple): + args = args[0] + + if decoder_name == "raw": + if args == (): + args = mode, 0, 1 + if args[0] in _MAPMODES: + im = new(mode, (1, 1)) + im = im._new(core.map_buffer(data, size, decoder_name, 0, args)) + im.readonly = 1 + return im + + return frombytes(mode, size, data, decoder_name, args) + + +def fromarray(obj, mode=None): + """ + Creates an image memory from an object exporting the array interface + (using the buffer protocol). + + If **obj** is not contiguous, then the tobytes method is called + and :py:func:`~PIL.Image.frombuffer` is used. + + If you have an image in NumPy:: + + from PIL import Image + import numpy as np + im = Image.open('hopper.jpg') + a = np.asarray(im) + + Then this can be used to convert it to a Pillow image:: + + im = Image.fromarray(a) + + :param obj: Object with array interface + :param mode: Mode to use (will be determined from type if None) + See: :ref:`concept-modes`. + :returns: An image object. + + .. versionadded:: 1.1.6 + """ + arr = obj.__array_interface__ + shape = arr["shape"] + ndim = len(shape) + strides = arr.get("strides", None) + if mode is None: + try: + typekey = (1, 1) + shape[2:], arr["typestr"] + except KeyError as e: + raise TypeError("Cannot handle this data type") from e + try: + mode, rawmode = _fromarray_typemap[typekey] + except KeyError as e: + raise TypeError("Cannot handle this data type: %s, %s" % typekey) from e + else: + rawmode = mode + if mode in ["1", "L", "I", "P", "F"]: + ndmax = 2 + elif mode == "RGB": + ndmax = 3 + else: + ndmax = 4 + if ndim > ndmax: + raise ValueError("Too many dimensions: %d > %d." 
% (ndim, ndmax)) + + size = 1 if ndim == 1 else shape[1], shape[0] + if strides is not None: + if hasattr(obj, "tobytes"): + obj = obj.tobytes() + else: + obj = obj.tostring() + + return frombuffer(mode, size, obj, "raw", rawmode, 0, 1) + + +def fromqimage(im): + """Creates an image instance from a QImage image""" + from . import ImageQt + + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.fromqimage(im) + + +def fromqpixmap(im): + """Creates an image instance from a QPixmap image""" + from . import ImageQt + + if not ImageQt.qt_is_installed: + raise ImportError("Qt bindings are not installed") + return ImageQt.fromqpixmap(im) + + +_fromarray_typemap = { + # (shape, typestr) => mode, rawmode + # first two members of shape are set to one + ((1, 1), "|b1"): ("1", "1;8"), + ((1, 1), "|u1"): ("L", "L"), + ((1, 1), "|i1"): ("I", "I;8"), + ((1, 1), "u2"): ("I", "I;16B"), + ((1, 1), "i2"): ("I", "I;16BS"), + ((1, 1), "u4"): ("I", "I;32B"), + ((1, 1), "i4"): ("I", "I;32BS"), + ((1, 1), "f4"): ("F", "F;32BF"), + ((1, 1), "f8"): ("F", "F;64BF"), + ((1, 1, 2), "|u1"): ("LA", "LA"), + ((1, 1, 3), "|u1"): ("RGB", "RGB"), + ((1, 1, 4), "|u1"): ("RGBA", "RGBA"), +} + +# shortcuts +_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I") +_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F") + + +def _decompression_bomb_check(size): + if MAX_IMAGE_PIXELS is None: + return + + pixels = size[0] * size[1] + + if pixels > 2 * MAX_IMAGE_PIXELS: + raise DecompressionBombError( + "Image size (%d pixels) exceeds limit of %d pixels, " + "could be decompression bomb DOS attack." % (pixels, 2 * MAX_IMAGE_PIXELS) + ) + + if pixels > MAX_IMAGE_PIXELS: + warnings.warn( + "Image size (%d pixels) exceeds limit of %d pixels, " + "could be decompression bomb DOS attack." % (pixels, MAX_IMAGE_PIXELS), + DecompressionBombWarning, + ) + + +def open(fp, mode="r"): + """ + Opens and identifies the given image file. 
+ + This is a lazy operation; this function identifies the file, but + the file remains open and the actual image data is not read from + the file until you try to process the data (or call the + :py:meth:`~PIL.Image.Image.load` method). See + :py:func:`~PIL.Image.new`. See :ref:`file-handling`. + + :param fp: A filename (string), pathlib.Path object or a file object. + The file object must implement :py:meth:`~file.read`, + :py:meth:`~file.seek`, and :py:meth:`~file.tell` methods, + and be opened in binary mode. + :param mode: The mode. If given, this argument must be "r". + :returns: An :py:class:`~PIL.Image.Image` object. + :exception FileNotFoundError: If the file cannot be found. + :exception PIL.UnidentifiedImageError: If the image cannot be opened and + identified. + :exception ValueError: If the ``mode`` is not "r", or if a ``StringIO`` + instance is used for ``fp``. + """ + + if mode != "r": + raise ValueError("bad mode %r" % mode) + elif isinstance(fp, io.StringIO): + raise ValueError( + "StringIO cannot be used to open an image. " + "Binary data must be used instead." + ) + + exclusive_fp = False + filename = "" + if isinstance(fp, Path): + filename = str(fp.resolve()) + elif isPath(fp): + filename = fp + + if filename: + fp = builtins.open(filename, "rb") + exclusive_fp = True + + try: + fp.seek(0) + except (AttributeError, io.UnsupportedOperation): + fp = io.BytesIO(fp.read()) + exclusive_fp = True + + prefix = fp.read(16) + + preinit() + + accept_warnings = [] + + def _open_core(fp, filename, prefix): + for i in ID: + try: + factory, accept = OPEN[i] + result = not accept or accept(prefix) + if type(result) in [str, bytes]: + accept_warnings.append(result) + elif result: + fp.seek(0) + im = factory(fp, filename) + _decompression_bomb_check(im.size) + return im + except (SyntaxError, IndexError, TypeError, struct.error): + # Leave disabled by default, spams the logs with image + # opening failures that are entirely expected. 
+ # logger.debug("", exc_info=True) + continue + except BaseException: + if exclusive_fp: + fp.close() + raise + return None + + im = _open_core(fp, filename, prefix) + + if im is None: + if init(): + im = _open_core(fp, filename, prefix) + + if im: + im._exclusive_fp = exclusive_fp + return im + + if exclusive_fp: + fp.close() + for message in accept_warnings: + warnings.warn(message) + raise UnidentifiedImageError( + "cannot identify image file %r" % (filename if filename else fp) + ) + + +# +# Image processing. + + +def alpha_composite(im1, im2): + """ + Alpha composite im2 over im1. + + :param im1: The first image. Must have mode RGBA. + :param im2: The second image. Must have mode RGBA, and the same size as + the first image. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + im1.load() + im2.load() + return im1._new(core.alpha_composite(im1.im, im2.im)) + + +def blend(im1, im2, alpha): + """ + Creates a new image by interpolating between two input images, using + a constant alpha.:: + + out = image1 * (1.0 - alpha) + image2 * alpha + + :param im1: The first image. + :param im2: The second image. Must have the same mode and size as + the first image. + :param alpha: The interpolation alpha factor. If alpha is 0.0, a + copy of the first image is returned. If alpha is 1.0, a copy of + the second image is returned. There are no restrictions on the + alpha value. If necessary, the result is clipped to fit into + the allowed output range. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + im1.load() + im2.load() + return im1._new(core.blend(im1.im, im2.im, alpha)) + + +def composite(image1, image2, mask): + """ + Create composite image by blending images using a transparency mask. + + :param image1: The first image. + :param image2: The second image. Must have the same mode and + size as the first image. + :param mask: A mask image. This image can have mode + "1", "L", or "RGBA", and must have the same size as the + other two images. 
+ """ + + image = image2.copy() + image.paste(image1, None, mask) + return image + + +def eval(image, *args): + """ + Applies the function (which should take one argument) to each pixel + in the given image. If the image has more than one band, the same + function is applied to each band. Note that the function is + evaluated once for each possible pixel value, so you cannot use + random components or other generators. + + :param image: The input image. + :param function: A function object, taking one integer argument. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + return image.point(args[0]) + + +def merge(mode, bands): + """ + Merge a set of single band images into a new multiband image. + + :param mode: The mode to use for the output image. See: + :ref:`concept-modes`. + :param bands: A sequence containing one single-band image for + each band in the output image. All bands must have the + same size. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + + if getmodebands(mode) != len(bands) or "*" in mode: + raise ValueError("wrong number of bands") + for band in bands[1:]: + if band.mode != getmodetype(mode): + raise ValueError("mode mismatch") + if band.size != bands[0].size: + raise ValueError("size mismatch") + for band in bands: + band.load() + return bands[0]._new(core.merge(mode, *[b.im for b in bands])) + + +# -------------------------------------------------------------------- +# Plugin registry + + +def register_open(id, factory, accept=None): + """ + Register an image file plugin. This function should not be used + in application code. + + :param id: An image format identifier. + :param factory: An image file factory method. + :param accept: An optional function that can be used to quickly + reject images having another format. + """ + id = id.upper() + ID.append(id) + OPEN[id] = factory, accept + + +def register_mime(id, mimetype): + """ + Registers an image MIME type. This function should not be used + in application code. 
+ + :param id: An image format identifier. + :param mimetype: The image MIME type for this format. + """ + MIME[id.upper()] = mimetype + + +def register_save(id, driver): + """ + Registers an image save function. This function should not be + used in application code. + + :param id: An image format identifier. + :param driver: A function to save images in this format. + """ + SAVE[id.upper()] = driver + + +def register_save_all(id, driver): + """ + Registers an image function to save all the frames + of a multiframe format. This function should not be + used in application code. + + :param id: An image format identifier. + :param driver: A function to save images in this format. + """ + SAVE_ALL[id.upper()] = driver + + +def register_extension(id, extension): + """ + Registers an image extension. This function should not be + used in application code. + + :param id: An image format identifier. + :param extension: An extension used for this format. + """ + EXTENSION[extension.lower()] = id.upper() + + +def register_extensions(id, extensions): + """ + Registers image extensions. This function should not be + used in application code. + + :param id: An image format identifier. + :param extensions: A list of extensions used for this format. + """ + for extension in extensions: + register_extension(id, extension) + + +def registered_extensions(): + """ + Returns a dictionary containing all file extensions belonging + to registered plugins + """ + if not EXTENSION: + init() + return EXTENSION + + +def register_decoder(name, decoder): + """ + Registers an image decoder. This function should not be + used in application code. + + :param name: The name of the decoder + :param decoder: A callable(mode, args) that returns an + ImageFile.PyDecoder object + + .. versionadded:: 4.1.0 + """ + DECODERS[name] = decoder + + +def register_encoder(name, encoder): + """ + Registers an image encoder. This function should not be + used in application code. 
+ + :param name: The name of the encoder + :param encoder: A callable(mode, args) that returns an + ImageFile.PyEncoder object + + .. versionadded:: 4.1.0 + """ + ENCODERS[name] = encoder + + +# -------------------------------------------------------------------- +# Simple display support. + + +def _show(image, **options): + options["_internal_pillow"] = True + _showxv(image, **options) + + +def _showxv(image, title=None, **options): + from . import ImageShow + + if "_internal_pillow" in options: + del options["_internal_pillow"] + else: + warnings.warn( + "_showxv is deprecated and will be removed in a future release. " + "Use Image.show instead.", + DeprecationWarning, + ) + ImageShow.show(image, title, **options) + + +# -------------------------------------------------------------------- +# Effects + + +def effect_mandelbrot(size, extent, quality): + """ + Generate a Mandelbrot set covering the given extent. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param extent: The extent to cover, as a 4-tuple: + (x0, y0, x1, y2). + :param quality: Quality. + """ + return Image()._new(core.effect_mandelbrot(size, extent, quality)) + + +def effect_noise(size, sigma): + """ + Generate Gaussian noise centered around 128. + + :param size: The requested size in pixels, as a 2-tuple: + (width, height). + :param sigma: Standard deviation of noise. + """ + return Image()._new(core.effect_noise(size, sigma)) + + +def linear_gradient(mode): + """ + Generate 256x256 linear gradient from black to white, top to bottom. + + :param mode: Input mode. + """ + return Image()._new(core.linear_gradient(mode)) + + +def radial_gradient(mode): + """ + Generate 256x256 radial gradient from black to white, centre to edge. + + :param mode: Input mode. 
+ """ + return Image()._new(core.radial_gradient(mode)) + + +# -------------------------------------------------------------------- +# Resources + + +def _apply_env_variables(env=None): + if env is None: + env = os.environ + + for var_name, setter in [ + ("PILLOW_ALIGNMENT", core.set_alignment), + ("PILLOW_BLOCK_SIZE", core.set_block_size), + ("PILLOW_BLOCKS_MAX", core.set_blocks_max), + ]: + if var_name not in env: + continue + + var = env[var_name].lower() + + units = 1 + for postfix, mul in [("k", 1024), ("m", 1024 * 1024)]: + if var.endswith(postfix): + units = mul + var = var[: -len(postfix)] + + try: + var = int(var) * units + except ValueError: + warnings.warn("{} is not int".format(var_name)) + continue + + try: + setter(var) + except ValueError as e: + warnings.warn("{}: {}".format(var_name, e)) + + +_apply_env_variables() +atexit.register(core.clear_cache) + + +class Exif(MutableMapping): + endian = "<" + + def __init__(self): + self._data = {} + self._ifds = {} + self._info = None + self._loaded_exif = None + + def _fixup(self, value): + try: + if len(value) == 1 and isinstance(value, tuple): + return value[0] + except Exception: + pass + return value + + def _fixup_dict(self, src_dict): + # Helper function for _getexif() + # returns a dict with any single item tuples/lists as individual values + return {k: self._fixup(v) for k, v in src_dict.items()} + + def _get_ifd_dict(self, tag): + try: + # an offset pointer to the location of the nested embedded IFD. + # It should be a long, but may be corrupted. + self.fp.seek(self[tag]) + except (KeyError, TypeError): + pass + else: + from . import TiffImagePlugin + + info = TiffImagePlugin.ImageFileDirectory_v2(self.head) + info.load(self.fp) + return self._fixup_dict(info) + + def load(self, data): + # Extract EXIF information. This is highly experimental, + # and is likely to be replaced with something better in a future + # version. 
+ + # The EXIF record consists of a TIFF file embedded in a JPEG + # application marker (!). + if data == self._loaded_exif: + return + self._loaded_exif = data + self._data.clear() + self._ifds.clear() + self._info = None + if not data: + return + + if data.startswith(b"Exif\x00\x00"): + data = data[6:] + self.fp = io.BytesIO(data) + self.head = self.fp.read(8) + # process dictionary + from . import TiffImagePlugin + + self._info = TiffImagePlugin.ImageFileDirectory_v2(self.head) + self.endian = self._info._endian + self.fp.seek(self._info.next) + self._info.load(self.fp) + + # get EXIF extension + ifd = self._get_ifd_dict(0x8769) + if ifd: + self._data.update(ifd) + self._ifds[0x8769] = ifd + + def tobytes(self, offset=8): + from . import TiffImagePlugin + + if self.endian == "<": + head = b"II\x2A\x00\x08\x00\x00\x00" + else: + head = b"MM\x00\x2A\x00\x00\x00\x08" + ifd = TiffImagePlugin.ImageFileDirectory_v2(ifh=head) + for tag, value in self.items(): + ifd[tag] = value + return b"Exif\x00\x00" + head + ifd.tobytes(offset) + + def get_ifd(self, tag): + if tag not in self._ifds and tag in self: + if tag in [0x8825, 0xA005]: + # gpsinfo, interop + self._ifds[tag] = self._get_ifd_dict(tag) + elif tag == 0x927C: # makernote + from .TiffImagePlugin import ImageFileDirectory_v2 + + if self[0x927C][:8] == b"FUJIFILM": + exif_data = self[0x927C] + ifd_offset = i32le(exif_data[8:12]) + ifd_data = exif_data[ifd_offset:] + + makernote = {} + for i in range(0, struct.unpack(" 4: + (offset,) = struct.unpack("H", ifd_data[:2])[0]): + ifd_tag, typ, count, data = struct.unpack( + ">HHL4s", ifd_data[i * 12 + 2 : (i + 1) * 12 + 2] + ) + if ifd_tag == 0x1101: + # CameraInfo + (offset,) = struct.unpack(">L", data) + self.fp.seek(offset) + + camerainfo = {"ModelID": self.fp.read(4)} + + self.fp.read(4) + # Seconds since 2000 + camerainfo["TimeStamp"] = i32le(self.fp.read(12)) + + self.fp.read(4) + camerainfo["InternalSerialNumber"] = self.fp.read(4) + + self.fp.read(12) + parallax 
= self.fp.read(4) + handler = ImageFileDirectory_v2._load_dispatch[ + TiffTags.FLOAT + ][1] + camerainfo["Parallax"] = handler( + ImageFileDirectory_v2(), parallax, False + ) + + self.fp.read(4) + camerainfo["Category"] = self.fp.read(2) + + makernote = {0x1101: dict(self._fixup_dict(camerainfo))} + self._ifds[0x927C] = makernote + return self._ifds.get(tag, {}) + + def __str__(self): + if self._info is not None: + # Load all keys into self._data + for tag in self._info.keys(): + self[tag] + + return str(self._data) + + def __len__(self): + keys = set(self._data) + if self._info is not None: + keys.update(self._info) + return len(keys) + + def __getitem__(self, tag): + if self._info is not None and tag not in self._data and tag in self._info: + self._data[tag] = self._fixup(self._info[tag]) + if tag == 0x8825: + self._data[tag] = self.get_ifd(tag) + del self._info[tag] + return self._data[tag] + + def __contains__(self, tag): + return tag in self._data or (self._info is not None and tag in self._info) + + def __setitem__(self, tag, value): + if self._info is not None and tag in self._info: + del self._info[tag] + self._data[tag] = value + + def __delitem__(self, tag): + if self._info is not None and tag in self._info: + del self._info[tag] + del self._data[tag] + + def __iter__(self): + keys = set(self._data) + if self._info is not None: + keys.update(self._info) + return iter(keys) diff --git a/venv/Lib/site-packages/PIL/ImageChops.py b/venv/Lib/site-packages/PIL/ImageChops.py new file mode 100644 index 000000000..c1a2574e4 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageChops.py @@ -0,0 +1,328 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# standard channel operations +# +# History: +# 1996-03-24 fl Created +# 1996-08-13 fl Added logical operations (for "1" images) +# 2000-10-12 fl Added offset method (from Image.py) +# +# Copyright (c) 1997-2000 by Secret Labs AB +# Copyright (c) 1996-2000 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +def constant(image, value): + """Fill a channel with a given grey level. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.new("L", image.size, value) + + +def duplicate(image): + """Copy a channel. Alias for :py:meth:`PIL.Image.Image.copy`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return image.copy() + + +def invert(image): + """ + Invert an image (channel). + + .. code-block:: python + + out = MAX - image + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image.load() + return image._new(image.im.chop_invert()) + + +def lighter(image1, image2): + """ + Compares the two images, pixel by pixel, and returns a new image containing + the lighter values. + + .. code-block:: python + + out = max(image1, image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_lighter(image2.im)) + + +def darker(image1, image2): + """ + Compares the two images, pixel by pixel, and returns a new image containing + the darker values. + + .. code-block:: python + + out = min(image1, image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_darker(image2.im)) + + +def difference(image1, image2): + """ + Returns the absolute value of the pixel-by-pixel difference between the two + images. + + .. 
code-block:: python + + out = abs(image1 - image2) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_difference(image2.im)) + + +def multiply(image1, image2): + """ + Superimposes two images on top of each other. + + If you multiply an image with a solid black image, the result is black. If + you multiply with a solid white image, the image is unaffected. + + .. code-block:: python + + out = image1 * image2 / MAX + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_multiply(image2.im)) + + +def screen(image1, image2): + """ + Superimposes two inverted images on top of each other. + + .. code-block:: python + + out = MAX - ((MAX - image1) * (MAX - image2) / MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_screen(image2.im)) + + +def soft_light(image1, image2): + """ + Superimposes two images on top of each other using the Soft Light algorithm + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_soft_light(image2.im)) + + +def hard_light(image1, image2): + """ + Superimposes two images on top of each other using the Hard Light algorithm + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_hard_light(image2.im)) + + +def overlay(image1, image2): + """ + Superimposes two images on top of each other using the Overlay algorithm + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_overlay(image2.im)) + + +def add(image1, image2, scale=1.0, offset=0): + """ + Adds two images, dividing the result by scale and adding the + offset. If omitted, scale defaults to 1.0, and offset to 0.0. + + .. 
code-block:: python + + out = ((image1 + image2) / scale + offset) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_add(image2.im, scale, offset)) + + +def subtract(image1, image2, scale=1.0, offset=0): + """ + Subtracts two images, dividing the result by scale and adding the offset. + If omitted, scale defaults to 1.0, and offset to 0.0. + + .. code-block:: python + + out = ((image1 - image2) / scale + offset) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_subtract(image2.im, scale, offset)) + + +def add_modulo(image1, image2): + """Add two images, without clipping the result. + + .. code-block:: python + + out = ((image1 + image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_add_modulo(image2.im)) + + +def subtract_modulo(image1, image2): + """Subtract two images, without clipping the result. + + .. code-block:: python + + out = ((image1 - image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_subtract_modulo(image2.im)) + + +def logical_and(image1, image2): + """Logical AND between two images. + + Both of the images must have mode "1". If you would like to perform a + logical AND on an image with a mode other than "1", try + :py:meth:`~PIL.ImageChops.multiply` instead, using a black-and-white mask + as the second image. + + .. code-block:: python + + out = ((image1 and image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_and(image2.im)) + + +def logical_or(image1, image2): + """Logical OR between two images. + + Both of the images must have mode "1". + + .. 
code-block:: python + + out = ((image1 or image2) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_or(image2.im)) + + +def logical_xor(image1, image2): + """Logical XOR between two images. + + Both of the images must have mode "1". + + .. code-block:: python + + out = ((bool(image1) != bool(image2)) % MAX) + + :rtype: :py:class:`~PIL.Image.Image` + """ + + image1.load() + image2.load() + return image1._new(image1.im.chop_xor(image2.im)) + + +def blend(image1, image2, alpha): + """Blend images using constant transparency weight. Alias for + :py:meth:`PIL.Image.Image.blend`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.blend(image1, image2, alpha) + + +def composite(image1, image2, mask): + """Create composite using transparency mask. Alias for + :py:meth:`PIL.Image.Image.composite`. + + :rtype: :py:class:`~PIL.Image.Image` + """ + + return Image.composite(image1, image2, mask) + + +def offset(image, xoffset, yoffset=None): + """Returns a copy of the image where data has been offset by the given + distances. Data wraps around the edges. If **yoffset** is omitted, it + is assumed to be equal to **xoffset**. + + :param xoffset: The horizontal distance. + :param yoffset: The vertical distance. If omitted, both + distances are set to the same value. + :rtype: :py:class:`~PIL.Image.Image` + """ + + if yoffset is None: + yoffset = xoffset + image.load() + return image._new(image.im.offset(xoffset, yoffset)) diff --git a/venv/Lib/site-packages/PIL/ImageCms.py b/venv/Lib/site-packages/PIL/ImageCms.py new file mode 100644 index 000000000..1c4ce5a08 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageCms.py @@ -0,0 +1,990 @@ +# The Python Imaging Library. +# $Id$ + +# Optional color management support, based on Kevin Cazabon's PyCMS +# library. + +# History: + +# 2009-03-08 fl Added to PIL. 
+ +# Copyright (C) 2002-2003 Kevin Cazabon +# Copyright (c) 2009 by Fredrik Lundh +# Copyright (c) 2013 by Eric Soroos + +# See the README file for information on usage and redistribution. See +# below for the original description. + +import sys + +from PIL import Image + +try: + from PIL import _imagingcms +except ImportError as ex: + # Allow error import for doc purposes, but error out when accessing + # anything in core. + from ._util import deferred_error + + _imagingcms = deferred_error(ex) + +DESCRIPTION = """ +pyCMS + + a Python / PIL interface to the littleCMS ICC Color Management System + Copyright (C) 2002-2003 Kevin Cazabon + kevin@cazabon.com + http://www.cazabon.com + + pyCMS home page: http://www.cazabon.com/pyCMS + littleCMS home page: http://www.littlecms.com + (littleCMS is Copyright (C) 1998-2001 Marti Maria) + + Originally released under LGPL. Graciously donated to PIL in + March 2009, for distribution under the standard PIL license + + The pyCMS.py module provides a "clean" interface between Python/PIL and + pyCMSdll, taking care of some of the more complex handling of the direct + pyCMSdll functions, as well as error-checking and making sure that all + relevant data is kept together. + + While it is possible to call pyCMSdll functions directly, it's not highly + recommended. + + Version History: + + 1.0.0 pil Oct 2013 Port to LCMS 2. + + 0.1.0 pil mod March 10, 2009 + + Renamed display profile to proof profile. The proof + profile is the profile of the device that is being + simulated, not the profile of the device which is + actually used to display/print the final simulation + (that'd be the output profile) - also see LCMSAPI.txt + input colorspace -> using 'renderingIntent' -> proof + colorspace -> using 'proofRenderingIntent' -> output + colorspace + + Added LCMS FLAGS support. + Added FLAGS["SOFTPROOFING"] as default flag for + buildProofTransform (otherwise the proof profile/intent + would be ignored). 
+ + 0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms + + 0.0.2 alpha Jan 6, 2002 + + Added try/except statements around type() checks of + potential CObjects... Python won't let you use type() + on them, and raises a TypeError (stupid, if you ask + me!) + + Added buildProofTransformFromOpenProfiles() function. + Additional fixes in DLL, see DLL code for details. + + 0.0.1 alpha first public release, Dec. 26, 2002 + + Known to-do list with current version (of Python interface, not pyCMSdll): + + none + +""" + +VERSION = "1.0.0 pil" + +# --------------------------------------------------------------------. + +core = _imagingcms + +# +# intent/direction values + +INTENT_PERCEPTUAL = 0 +INTENT_RELATIVE_COLORIMETRIC = 1 +INTENT_SATURATION = 2 +INTENT_ABSOLUTE_COLORIMETRIC = 3 + +DIRECTION_INPUT = 0 +DIRECTION_OUTPUT = 1 +DIRECTION_PROOF = 2 + +# +# flags + +FLAGS = { + "MATRIXINPUT": 1, + "MATRIXOUTPUT": 2, + "MATRIXONLY": (1 | 2), + "NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot + # Don't create prelinearization tables on precalculated transforms + # (internal use): + "NOPRELINEARIZATION": 16, + "GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink) + "NOTCACHE": 64, # Inhibit 1-pixel cache + "NOTPRECALC": 256, + "NULLTRANSFORM": 512, # Don't transform anyway + "HIGHRESPRECALC": 1024, # Use more memory to give better accuracy + "LOWRESPRECALC": 2048, # Use less memory to minimize resources + "WHITEBLACKCOMPENSATION": 8192, + "BLACKPOINTCOMPENSATION": 8192, + "GAMUTCHECK": 4096, # Out of Gamut alarm + "SOFTPROOFING": 16384, # Do softproofing + "PRESERVEBLACK": 32768, # Black preservation + "NODEFAULTRESOURCEDEF": 16777216, # CRD special + "GRIDPOINTS": lambda n: ((n) & 0xFF) << 16, # Gridpoints +} + +_MAX_FLAG = 0 +for flag in FLAGS.values(): + if isinstance(flag, int): + _MAX_FLAG = _MAX_FLAG | flag + + +# --------------------------------------------------------------------. 
+# Experimental PIL-level API +# --------------------------------------------------------------------. + +## +# Profile. + + +class ImageCmsProfile: + def __init__(self, profile): + """ + :param profile: Either a string representing a filename, + a file like object containing a profile or a + low-level profile object + + """ + + if isinstance(profile, str): + self._set(core.profile_open(profile), profile) + elif hasattr(profile, "read"): + self._set(core.profile_frombytes(profile.read())) + elif isinstance(profile, _imagingcms.CmsProfile): + self._set(profile) + else: + raise TypeError("Invalid type for Profile") + + def _set(self, profile, filename=None): + self.profile = profile + self.filename = filename + if profile: + self.product_name = None # profile.product_name + self.product_info = None # profile.product_info + else: + self.product_name = None + self.product_info = None + + def tobytes(self): + """ + Returns the profile in a format suitable for embedding in + saved images. + + :returns: a bytes object containing the ICC profile. + """ + + return core.profile_tobytes(self.profile) + + +class ImageCmsTransform(Image.ImagePointHandler): + + """ + Transform. This can be used with the procedural API, or with the standard + :py:func:`~PIL.Image.Image.point` method. + + Will return the output profile in the ``output.info['icc_profile']``. 
+ """ + + def __init__( + self, + input, + output, + input_mode, + output_mode, + intent=INTENT_PERCEPTUAL, + proof=None, + proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, + flags=0, + ): + if proof is None: + self.transform = core.buildTransform( + input.profile, output.profile, input_mode, output_mode, intent, flags + ) + else: + self.transform = core.buildProofTransform( + input.profile, + output.profile, + proof.profile, + input_mode, + output_mode, + intent, + proof_intent, + flags, + ) + # Note: inputMode and outputMode are for pyCMS compatibility only + self.input_mode = self.inputMode = input_mode + self.output_mode = self.outputMode = output_mode + + self.output_profile = output + + def point(self, im): + return self.apply(im) + + def apply(self, im, imOut=None): + im.load() + if imOut is None: + imOut = Image.new(self.output_mode, im.size, None) + self.transform.apply(im.im.id, imOut.im.id) + imOut.info["icc_profile"] = self.output_profile.tobytes() + return imOut + + def apply_in_place(self, im): + im.load() + if im.mode != self.output_mode: + raise ValueError("mode mismatch") # wrong output mode + self.transform.apply(im.im.id, im.im.id) + im.info["icc_profile"] = self.output_profile.tobytes() + return im + + +def get_display_profile(handle=None): + """ (experimental) Fetches the profile for the current display device. + :returns: ``None`` if the profile is not known. + """ + + if sys.platform != "win32": + return None + + from PIL import ImageWin + + if isinstance(handle, ImageWin.HDC): + profile = core.get_display_profile_win32(handle, 1) + else: + profile = core.get_display_profile_win32(handle or 0) + if profile is None: + return None + return ImageCmsProfile(profile) + + +# --------------------------------------------------------------------. +# pyCMS compatible layer +# --------------------------------------------------------------------. + + +class PyCMSError(Exception): + + """ (pyCMS) Exception class. + This is used for all errors in the pyCMS API. 
""" + + pass + + +def profileToProfile( + im, + inputProfile, + outputProfile, + renderingIntent=INTENT_PERCEPTUAL, + outputMode=None, + inPlace=False, + flags=0, +): + """ + (pyCMS) Applies an ICC transformation to a given image, mapping from + ``inputProfile`` to ``outputProfile``. + + If the input or output profiles specified are not valid filenames, a + :exc:`PyCMSError` will be raised. If ``inPlace`` is ``True`` and + ``outputMode != im.mode``, a :exc:`PyCMSError` will be raised. + If an error occurs during application of the profiles, + a :exc:`PyCMSError` will be raised. + If ``outputMode`` is not a mode supported by the ``outputProfile`` (or by pyCMS), + a :exc:`PyCMSError` will be raised. + + This function applies an ICC transformation to im from ``inputProfile``'s + color space to ``outputProfile``'s color space using the specified rendering + intent to decide how to handle out-of-gamut colors. + + ``outputMode`` can be used to specify that a color mode conversion is to + be done using these profiles, but the specified profiles must be able + to handle that mode. I.e., if converting im from RGB to CMYK using + profiles, the input profile must handle RGB data, and the output + profile must handle CMYK data. + + :param im: An open :py:class:`~PIL.Image.Image` object (i.e. Image.new(...) + or Image.open(...), etc.) + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this image, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + profile you wish to use for this image, or a profile object + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the transform + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. 
+ :param outputMode: A valid PIL mode for the output image (i.e. "RGB", + "CMYK", etc.). Note: if rendering the image "inPlace", outputMode + MUST be the same mode as the input, or omitted completely. If + omitted, the outputMode will be the same as the mode of the input + image (im.mode) + :param inPlace: Boolean. If ``True``, the original image is modified in-place, + and ``None`` is returned. If ``False`` (default), a new + :py:class:`~PIL.Image.Image` object is returned with the transform applied. + :param flags: Integer (0-...) specifying additional flags + :returns: Either None or a new :py:class:`~PIL.Image.Image` object, depending on + the value of ``inPlace`` + :exception PyCMSError: + """ + + if outputMode is None: + outputMode = im.mode + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + raise PyCMSError("renderingIntent must be an integer between 0 and 3") + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + transform = ImageCmsTransform( + inputProfile, + outputProfile, + im.mode, + outputMode, + renderingIntent, + flags=flags, + ) + if inPlace: + transform.apply_in_place(im) + imOut = None + else: + imOut = transform.apply(im) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + return imOut + + +def getOpenProfile(profileFilename): + """ + (pyCMS) Opens an ICC profile file. + + The PyCMSProfile object can be passed back into pyCMS for use in creating + transforms and such (as in ImageCms.buildTransformFromOpenProfiles()). + + If ``profileFilename`` is not a valid filename for an ICC profile, + a :exc:`PyCMSError` will be raised. 
+ + :param profileFilename: String, as a valid filename path to the ICC profile + you wish to open, or a file-like object. + :returns: A CmsProfile class object. + :exception PyCMSError: + """ + + try: + return ImageCmsProfile(profileFilename) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def buildTransform( + inputProfile, + outputProfile, + inMode, + outMode, + renderingIntent=INTENT_PERCEPTUAL, + flags=0, +): + """ + (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the + ``outputProfile``. Use applyTransform to apply the transform to a given + image. + + If the input or output profiles specified are not valid filenames, a + :exc:`PyCMSError` will be raised. If an error occurs during creation + of the transform, a :exc:`PyCMSError` will be raised. + + If ``inMode`` or ``outMode`` are not a mode supported by the ``outputProfile`` + (or by pyCMS), a :exc:`PyCMSError` will be raised. + + This function builds and returns an ICC transform from the ``inputProfile`` + to the ``outputProfile`` using the ``renderingIntent`` to determine what to do + with out-of-gamut colors. It will ONLY work for converting images that + are in ``inMode`` to images that are in ``outMode`` color format (PIL mode, + i.e. "RGB", "RGBA", "CMYK", etc.). + + Building the transform is a fair part of the overhead in + ImageCms.profileToProfile(), so if you're planning on converting multiple + images using the same input/output settings, this can save you time. + Once you have a transform object, it can be used with + ImageCms.applyProfile() to convert images without the need to re-compute + the lookup table for the transform. + + The reason pyCMS returns a class object rather than a handle directly + to the transform is that it needs to keep track of the PIL input/output + modes that the transform is meant for. 
These attributes are stored in + the ``inMode`` and ``outMode`` attributes of the object (which can be + manually overridden if you really want to, but I don't know of any + time that would be of use, or would even work). + + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this transform, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + profile you wish to use for this transform, or a profile object + :param inMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param outMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the transform + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param flags: Integer (0-...) specifying additional flags + :returns: A CmsTransform class object. 
+ :exception PyCMSError: + """ + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + raise PyCMSError("renderingIntent must be an integer between 0 and 3") + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + return ImageCmsTransform( + inputProfile, outputProfile, inMode, outMode, renderingIntent, flags=flags + ) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def buildProofTransform( + inputProfile, + outputProfile, + proofProfile, + inMode, + outMode, + renderingIntent=INTENT_PERCEPTUAL, + proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC, + flags=FLAGS["SOFTPROOFING"], +): + """ + (pyCMS) Builds an ICC transform mapping from the ``inputProfile`` to the + ``outputProfile``, but tries to simulate the result that would be + obtained on the ``proofProfile`` device. + + If the input, output, or proof profiles specified are not valid + filenames, a :exc:`PyCMSError` will be raised. + + If an error occurs during creation of the transform, + a :exc:`PyCMSError` will be raised. + + If ``inMode`` or ``outMode`` are not a mode supported by the ``outputProfile`` + (or by pyCMS), a :exc:`PyCMSError` will be raised. + + This function builds and returns an ICC transform from the ``inputProfile`` + to the ``outputProfile``, but tries to simulate the result that would be + obtained on the ``proofProfile`` device using ``renderingIntent`` and + ``proofRenderingIntent`` to determine what to do with out-of-gamut + colors. This is known as "soft-proofing". It will ONLY work for + converting images that are in ``inMode`` to images that are in outMode + color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.). 
+ + Usage of the resulting transform object is exactly the same as with + ImageCms.buildTransform(). + + Proof profiling is generally used when using an output device to get a + good idea of what the final printed/displayed image would look like on + the ``proofProfile`` device when it's quicker and easier to use the + output device for judging color. Generally, this means that the + output device is a monitor, or a dye-sub printer (etc.), and the simulated + device is something more expensive, complicated, or time consuming + (making it difficult to make a real print for color judgement purposes). + + Soft-proofing basically functions by adjusting the colors on the + output device to match the colors of the device being simulated. However, + when the simulated device has a much wider gamut than the output + device, you may obtain marginal results. + + :param inputProfile: String, as a valid filename path to the ICC input + profile you wish to use for this transform, or a profile object + :param outputProfile: String, as a valid filename path to the ICC output + (monitor, usually) profile you wish to use for this transform, or a + profile object + :param proofProfile: String, as a valid filename path to the ICC proof + profile you wish to use for this transform, or a profile object + :param inMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param outMode: String, as a valid PIL mode that the appropriate profile + also supports (i.e. "RGB", "RGBA", "CMYK", etc.) + :param renderingIntent: Integer (0-3) specifying the rendering intent you + wish to use for the input->proof (simulated) transform + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. 
+ :param proofRenderingIntent: Integer (0-3) specifying the rendering intent + you wish to use for proof->output transform + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param flags: Integer (0-...) specifying additional flags + :returns: A CmsTransform class object. + :exception PyCMSError: + """ + + if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3): + raise PyCMSError("renderingIntent must be an integer between 0 and 3") + + if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG): + raise PyCMSError("flags must be an integer between 0 and %s" + _MAX_FLAG) + + try: + if not isinstance(inputProfile, ImageCmsProfile): + inputProfile = ImageCmsProfile(inputProfile) + if not isinstance(outputProfile, ImageCmsProfile): + outputProfile = ImageCmsProfile(outputProfile) + if not isinstance(proofProfile, ImageCmsProfile): + proofProfile = ImageCmsProfile(proofProfile) + return ImageCmsTransform( + inputProfile, + outputProfile, + inMode, + outMode, + renderingIntent, + proofProfile, + proofRenderingIntent, + flags, + ) + except (OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +buildTransformFromOpenProfiles = buildTransform +buildProofTransformFromOpenProfiles = buildProofTransform + + +def applyTransform(im, transform, inPlace=False): + """ + (pyCMS) Applies a transform to a given image. + + If ``im.mode != transform.inMode``, a :exc:`PyCMSError` is raised. + + If ``inPlace`` is ``True`` and ``transform.inMode != transform.outMode``, a + :exc:`PyCMSError` is raised. + + If ``im.mode``, ``transform.inMode`` or ``transform.outMode`` is not + supported by pyCMSdll or the profiles you used for the transform, a + :exc:`PyCMSError` is raised. 
+ + If an error occurs while the transform is being applied, + a :exc:`PyCMSError` is raised. + + This function applies a pre-calculated transform (from + ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles()) + to an image. The transform can be used for multiple images, saving + considerable calculation time if doing the same conversion multiple times. + + If you want to modify im in-place instead of receiving a new image as + the return value, set ``inPlace`` to ``True``. This can only be done if + ``transform.inMode`` and ``transform.outMode`` are the same, because we can't + change the mode in-place (the buffer sizes for some modes are + different). The default behavior is to return a new :py:class:`~PIL.Image.Image` + object of the same dimensions in mode ``transform.outMode``. + + :param im: An :py:class:`~PIL.Image.Image` object, and im.mode must be the same + as the ``inMode`` supported by the transform. + :param transform: A valid CmsTransform class object + :param inPlace: Bool. If ``True``, ``im` is modified in place and ``None`` is + returned, if ``False``, a new :py:class:`~PIL.Image.Image` object with the + transform applied is returned (and ``im`` is not changed). The default is + ``False``. + :returns: Either ``None``, or a new :py:class:`~PIL.Image.Image` object, + depending on the value of ``inPlace``. The profile will be returned in + the image's ``info['icc_profile']``. + :exception PyCMSError: + """ + + try: + if inPlace: + transform.apply_in_place(im) + imOut = None + else: + imOut = transform.apply(im) + except (TypeError, ValueError) as v: + raise PyCMSError(v) from v + + return imOut + + +def createProfile(colorSpace, colorTemp=-1): + """ + (pyCMS) Creates a profile. + + If colorSpace not in ``["LAB", "XYZ", "sRGB"]``, + a :exc:`PyCMSError` is raised. + + If using LAB and ``colorTemp`` is not a positive integer, + a :exc:`PyCMSError` is raised. 
+ + If an error occurs while creating the profile, + a :exc:`PyCMSError` is raised. + + Use this function to create common profiles on-the-fly instead of + having to supply a profile on disk and knowing the path to it. It + returns a normal CmsProfile object that can be passed to + ImageCms.buildTransformFromOpenProfiles() to create a transform to apply + to images. + + :param colorSpace: String, the color space of the profile you wish to + create. + Currently only "LAB", "XYZ", and "sRGB" are supported. + :param colorTemp: Positive integer for the white point for the profile, in + degrees Kelvin (i.e. 5000, 6500, 9600, etc.). The default is for D50 + illuminant if omitted (5000k). colorTemp is ONLY applied to LAB + profiles, and is ignored for XYZ and sRGB. + :returns: A CmsProfile class object + :exception PyCMSError: + """ + + if colorSpace not in ["LAB", "XYZ", "sRGB"]: + raise PyCMSError( + "Color space not supported for on-the-fly profile creation (%s)" + % colorSpace + ) + + if colorSpace == "LAB": + try: + colorTemp = float(colorTemp) + except (TypeError, ValueError) as e: + raise PyCMSError( + 'Color temperature must be numeric, "%s" not valid' % colorTemp + ) from e + + try: + return core.createProfile(colorSpace, colorTemp) + except (TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileName(profile): + """ + + (pyCMS) Gets the internal product name for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, + a :exc:`PyCMSError` is raised If an error occurs while trying + to obtain the name tag, a :exc:`PyCMSError` is raised. + + Use this function to obtain the INTERNAL name of the profile (stored + in an ICC tag in the profile itself), usually the one used when the + profile was originally created. Sometimes this tag also contains + additional information supplied by the creator. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. 
+ :returns: A string containing the internal name of the profile as stored + in an ICC tag. + :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # do it in python, not c. + # // name was "%s - %s" (model, manufacturer) || Description , + # // but if the Model and Manufacturer were the same or the model + # // was long, Just the model, in 1.x + model = profile.profile.model + manufacturer = profile.profile.manufacturer + + if not (model or manufacturer): + return (profile.profile.profile_description or "") + "\n" + if not manufacturer or len(model) > 30: + return model + "\n" + return "{} - {}\n".format(model, manufacturer) + + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileInfo(profile): + """ + (pyCMS) Gets the internal product information for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, + a :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the info tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + info tag. This often contains details about the profile, and how it + was created, as supplied by the creator. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # add an extra newline to preserve pyCMS compatibility + # Python, not C. the white point bits weren't working well, + # so skipping. 
+ # info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint + description = profile.profile.profile_description + cpright = profile.profile.copyright + arr = [] + for elt in (description, cpright): + if elt: + arr.append(elt) + return "\r\n\r\n".join(arr) + "\r\n\r\n" + + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileCopyright(profile): + """ + (pyCMS) Gets the copyright for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the copyright tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + copyright tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.copyright or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileManufacturer(profile): + """ + (pyCMS) Gets the manufacturer for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the manufacturer tag, a + :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + manufacturer tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. 
+ :exception PyCMSError: + """ + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.manufacturer or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileModel(profile): + """ + (pyCMS) Gets the model for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the model tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + model tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in + an ICC tag. + :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.model or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getProfileDescription(profile): + """ + (pyCMS) Gets the description for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the description tag, + a :exc:`PyCMSError` is raised. + + Use this function to obtain the information stored in the profile's + description tag. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: A string containing the internal profile information stored in an + ICC tag. 
+ :exception PyCMSError: + """ + + try: + # add an extra newline to preserve pyCMS compatibility + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return (profile.profile.profile_description or "") + "\n" + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def getDefaultIntent(profile): + """ + (pyCMS) Gets the default intent name for the given profile. + + If ``profile`` isn't a valid CmsProfile object or filename to a profile, a + :exc:`PyCMSError` is raised. + + If an error occurs while trying to obtain the default intent, a + :exc:`PyCMSError` is raised. + + Use this function to determine the default (and usually best optimized) + rendering intent for this profile. Most profiles support multiple + rendering intents, but are intended mostly for one type of conversion. + If you wish to use a different intent than returned, use + ImageCms.isIntentSupported() to verify it will work first. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :returns: Integer 0-3 specifying the default rendering intent for this + profile. + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + return profile.profile.rendering_intent + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def isIntentSupported(profile, intent, direction): + """ + (pyCMS) Checks if a given intent is supported. + + Use this function to verify that you can use your desired + ``intent`` with ``profile``, and that ``profile`` can be used for the + input/output/proof profile as you desire. 
+ + Some profiles are created specifically for one "direction", can cannot + be used for others. Some profiles can only be used for certain + rendering intents, so it's best to either verify this before trying + to create a transform with them (using this function), or catch the + potential :exc:`PyCMSError` that will occur if they don't + support the modes you select. + + :param profile: EITHER a valid CmsProfile object, OR a string of the + filename of an ICC profile. + :param intent: Integer (0-3) specifying the rendering intent you wish to + use with this profile + + ImageCms.INTENT_PERCEPTUAL = 0 (DEFAULT) + ImageCms.INTENT_RELATIVE_COLORIMETRIC = 1 + ImageCms.INTENT_SATURATION = 2 + ImageCms.INTENT_ABSOLUTE_COLORIMETRIC = 3 + + see the pyCMS documentation for details on rendering intents and what + they do. + :param direction: Integer specifying if the profile is to be used for + input, output, or proof + + INPUT = 0 (or use ImageCms.DIRECTION_INPUT) + OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT) + PROOF = 2 (or use ImageCms.DIRECTION_PROOF) + + :returns: 1 if the intent/direction are supported, -1 if they are not. + :exception PyCMSError: + """ + + try: + if not isinstance(profile, ImageCmsProfile): + profile = ImageCmsProfile(profile) + # FIXME: I get different results for the same data w. different + # compilers. Bug in LittleCMS or in the binding? + if profile.profile.is_intent_supported(intent, direction): + return 1 + else: + return -1 + except (AttributeError, OSError, TypeError, ValueError) as v: + raise PyCMSError(v) from v + + +def versions(): + """ + (pyCMS) Fetches versions. 
+ """ + + return (VERSION, core.littlecms_version, sys.version.split()[0], Image.__version__) diff --git a/venv/Lib/site-packages/PIL/ImageColor.py b/venv/Lib/site-packages/PIL/ImageColor.py new file mode 100644 index 000000000..9cf7a9912 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageColor.py @@ -0,0 +1,300 @@ +# +# The Python Imaging Library +# $Id$ +# +# map CSS3-style colour description strings to RGB +# +# History: +# 2002-10-24 fl Added support for CSS-style color strings +# 2002-12-15 fl Added RGBA support +# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2 +# 2004-07-19 fl Fixed gray/grey spelling issues +# 2009-03-05 fl Fixed rounding error in grayscale calculation +# +# Copyright (c) 2002-2004 by Secret Labs AB +# Copyright (c) 2002-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import re + +from . import Image + + +def getrgb(color): + """ + Convert a color string to an RGB tuple. If the string cannot be parsed, + this function raises a :py:exc:`ValueError` exception. + + .. 
versionadded:: 1.1.4 + + :param color: A color string + :return: ``(red, green, blue[, alpha])`` + """ + color = color.lower() + + rgb = colormap.get(color, None) + if rgb: + if isinstance(rgb, tuple): + return rgb + colormap[color] = rgb = getrgb(rgb) + return rgb + + # check for known string formats + if re.match("#[a-f0-9]{3}$", color): + return (int(color[1] * 2, 16), int(color[2] * 2, 16), int(color[3] * 2, 16)) + + if re.match("#[a-f0-9]{4}$", color): + return ( + int(color[1] * 2, 16), + int(color[2] * 2, 16), + int(color[3] * 2, 16), + int(color[4] * 2, 16), + ) + + if re.match("#[a-f0-9]{6}$", color): + return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16)) + + if re.match("#[a-f0-9]{8}$", color): + return ( + int(color[1:3], 16), + int(color[3:5], 16), + int(color[5:7], 16), + int(color[7:9], 16), + ) + + m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color) + if m: + return (int(m.group(1)), int(m.group(2)), int(m.group(3))) + + m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color) + if m: + return ( + int((int(m.group(1)) * 255) / 100.0 + 0.5), + int((int(m.group(2)) * 255) / 100.0 + 0.5), + int((int(m.group(3)) * 255) / 100.0 + 0.5), + ) + + m = re.match( + r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color + ) + if m: + from colorsys import hls_to_rgb + + rgb = hls_to_rgb( + float(m.group(1)) / 360.0, + float(m.group(3)) / 100.0, + float(m.group(2)) / 100.0, + ) + return ( + int(rgb[0] * 255 + 0.5), + int(rgb[1] * 255 + 0.5), + int(rgb[2] * 255 + 0.5), + ) + + m = re.match( + r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color + ) + if m: + from colorsys import hsv_to_rgb + + rgb = hsv_to_rgb( + float(m.group(1)) / 360.0, + float(m.group(2)) / 100.0, + float(m.group(3)) / 100.0, + ) + return ( + int(rgb[0] * 255 + 0.5), + int(rgb[1] * 255 + 0.5), + int(rgb[2] * 255 + 0.5), + ) + + m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", 
color) + if m: + return (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))) + raise ValueError("unknown color specifier: %r" % color) + + +def getcolor(color, mode): + """ + Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a + greyscale value if the mode is not color or a palette image. If the string + cannot be parsed, this function raises a :py:exc:`ValueError` exception. + + .. versionadded:: 1.1.4 + + :param color: A color string + :return: ``(graylevel [, alpha]) or (red, green, blue[, alpha])`` + """ + # same as getrgb, but converts the result to the given mode + color, alpha = getrgb(color), 255 + if len(color) == 4: + color, alpha = color[0:3], color[3] + + if Image.getmodebase(mode) == "L": + r, g, b = color + # ITU-R Recommendation 601-2 for nonlinear RGB + # scaled to 24 bits to match the convert's implementation. + color = (r * 19595 + g * 38470 + b * 7471 + 0x8000) >> 16 + if mode[-1] == "A": + return (color, alpha) + else: + if mode[-1] == "A": + return color + (alpha,) + return color + + +colormap = { + # X11 colour table from https://drafts.csswg.org/css-color-4/, with + # gray/grey spelling issues fixed. This is a superset of HTML 4.0 + # colour names used in CSS 1. 
+ "aliceblue": "#f0f8ff", + "antiquewhite": "#faebd7", + "aqua": "#00ffff", + "aquamarine": "#7fffd4", + "azure": "#f0ffff", + "beige": "#f5f5dc", + "bisque": "#ffe4c4", + "black": "#000000", + "blanchedalmond": "#ffebcd", + "blue": "#0000ff", + "blueviolet": "#8a2be2", + "brown": "#a52a2a", + "burlywood": "#deb887", + "cadetblue": "#5f9ea0", + "chartreuse": "#7fff00", + "chocolate": "#d2691e", + "coral": "#ff7f50", + "cornflowerblue": "#6495ed", + "cornsilk": "#fff8dc", + "crimson": "#dc143c", + "cyan": "#00ffff", + "darkblue": "#00008b", + "darkcyan": "#008b8b", + "darkgoldenrod": "#b8860b", + "darkgray": "#a9a9a9", + "darkgrey": "#a9a9a9", + "darkgreen": "#006400", + "darkkhaki": "#bdb76b", + "darkmagenta": "#8b008b", + "darkolivegreen": "#556b2f", + "darkorange": "#ff8c00", + "darkorchid": "#9932cc", + "darkred": "#8b0000", + "darksalmon": "#e9967a", + "darkseagreen": "#8fbc8f", + "darkslateblue": "#483d8b", + "darkslategray": "#2f4f4f", + "darkslategrey": "#2f4f4f", + "darkturquoise": "#00ced1", + "darkviolet": "#9400d3", + "deeppink": "#ff1493", + "deepskyblue": "#00bfff", + "dimgray": "#696969", + "dimgrey": "#696969", + "dodgerblue": "#1e90ff", + "firebrick": "#b22222", + "floralwhite": "#fffaf0", + "forestgreen": "#228b22", + "fuchsia": "#ff00ff", + "gainsboro": "#dcdcdc", + "ghostwhite": "#f8f8ff", + "gold": "#ffd700", + "goldenrod": "#daa520", + "gray": "#808080", + "grey": "#808080", + "green": "#008000", + "greenyellow": "#adff2f", + "honeydew": "#f0fff0", + "hotpink": "#ff69b4", + "indianred": "#cd5c5c", + "indigo": "#4b0082", + "ivory": "#fffff0", + "khaki": "#f0e68c", + "lavender": "#e6e6fa", + "lavenderblush": "#fff0f5", + "lawngreen": "#7cfc00", + "lemonchiffon": "#fffacd", + "lightblue": "#add8e6", + "lightcoral": "#f08080", + "lightcyan": "#e0ffff", + "lightgoldenrodyellow": "#fafad2", + "lightgreen": "#90ee90", + "lightgray": "#d3d3d3", + "lightgrey": "#d3d3d3", + "lightpink": "#ffb6c1", + "lightsalmon": "#ffa07a", + "lightseagreen": "#20b2aa", 
+ "lightskyblue": "#87cefa", + "lightslategray": "#778899", + "lightslategrey": "#778899", + "lightsteelblue": "#b0c4de", + "lightyellow": "#ffffe0", + "lime": "#00ff00", + "limegreen": "#32cd32", + "linen": "#faf0e6", + "magenta": "#ff00ff", + "maroon": "#800000", + "mediumaquamarine": "#66cdaa", + "mediumblue": "#0000cd", + "mediumorchid": "#ba55d3", + "mediumpurple": "#9370db", + "mediumseagreen": "#3cb371", + "mediumslateblue": "#7b68ee", + "mediumspringgreen": "#00fa9a", + "mediumturquoise": "#48d1cc", + "mediumvioletred": "#c71585", + "midnightblue": "#191970", + "mintcream": "#f5fffa", + "mistyrose": "#ffe4e1", + "moccasin": "#ffe4b5", + "navajowhite": "#ffdead", + "navy": "#000080", + "oldlace": "#fdf5e6", + "olive": "#808000", + "olivedrab": "#6b8e23", + "orange": "#ffa500", + "orangered": "#ff4500", + "orchid": "#da70d6", + "palegoldenrod": "#eee8aa", + "palegreen": "#98fb98", + "paleturquoise": "#afeeee", + "palevioletred": "#db7093", + "papayawhip": "#ffefd5", + "peachpuff": "#ffdab9", + "peru": "#cd853f", + "pink": "#ffc0cb", + "plum": "#dda0dd", + "powderblue": "#b0e0e6", + "purple": "#800080", + "rebeccapurple": "#663399", + "red": "#ff0000", + "rosybrown": "#bc8f8f", + "royalblue": "#4169e1", + "saddlebrown": "#8b4513", + "salmon": "#fa8072", + "sandybrown": "#f4a460", + "seagreen": "#2e8b57", + "seashell": "#fff5ee", + "sienna": "#a0522d", + "silver": "#c0c0c0", + "skyblue": "#87ceeb", + "slateblue": "#6a5acd", + "slategray": "#708090", + "slategrey": "#708090", + "snow": "#fffafa", + "springgreen": "#00ff7f", + "steelblue": "#4682b4", + "tan": "#d2b48c", + "teal": "#008080", + "thistle": "#d8bfd8", + "tomato": "#ff6347", + "turquoise": "#40e0d0", + "violet": "#ee82ee", + "wheat": "#f5deb3", + "white": "#ffffff", + "whitesmoke": "#f5f5f5", + "yellow": "#ffff00", + "yellowgreen": "#9acd32", +} diff --git a/venv/Lib/site-packages/PIL/ImageDraw.py b/venv/Lib/site-packages/PIL/ImageDraw.py new file mode 100644 index 000000000..cbecf652d --- /dev/null 
+++ b/venv/Lib/site-packages/PIL/ImageDraw.py @@ -0,0 +1,566 @@ +# +# The Python Imaging Library +# $Id$ +# +# drawing interface operations +# +# History: +# 1996-04-13 fl Created (experimental) +# 1996-08-07 fl Filled polygons, ellipses. +# 1996-08-13 fl Added text support +# 1998-06-28 fl Handle I and F images +# 1998-12-29 fl Added arc; use arc primitive to draw ellipses +# 1999-01-10 fl Added shape stuff (experimental) +# 1999-02-06 fl Added bitmap support +# 1999-02-11 fl Changed all primitives to take options +# 1999-02-20 fl Fixed backwards compatibility +# 2000-10-12 fl Copy on write, when necessary +# 2001-02-18 fl Use default ink for bitmap/text also in fill mode +# 2002-10-24 fl Added support for CSS-style color strings +# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing +# 2002-12-11 fl Refactored low-level drawing API (work in progress) +# 2004-08-26 fl Made Draw() a factory function, added getdraw() support +# 2004-09-04 fl Added width support to line primitive +# 2004-09-10 fl Added font mode handling +# 2006-06-19 fl Added font bearing support (getmask2) +# +# Copyright (c) 1997-2006 by Secret Labs AB +# Copyright (c) 1996-2006 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import math +import numbers + +from . import Image, ImageColor + + +""" +A simple 2D drawing interface for PIL images. +

+Application code should use the Draw factory, instead of +directly. +""" + + +class ImageDraw: + def __init__(self, im, mode=None): + """ + Create a drawing instance. + + :param im: The image to draw in. + :param mode: Optional mode to use for color values. For RGB + images, this argument can be RGB or RGBA (to blend the + drawing into the image). For all other modes, this argument + must be the same as the image mode. If omitted, the mode + defaults to the mode of the image. + """ + im.load() + if im.readonly: + im._copy() # make it writeable + blend = 0 + if mode is None: + mode = im.mode + if mode != im.mode: + if mode == "RGBA" and im.mode == "RGB": + blend = 1 + else: + raise ValueError("mode mismatch") + if mode == "P": + self.palette = im.palette + else: + self.palette = None + self.im = im.im + self.draw = Image.core.draw(self.im, blend) + self.mode = mode + if mode in ("I", "F"): + self.ink = self.draw.draw_ink(1) + else: + self.ink = self.draw.draw_ink(-1) + if mode in ("1", "P", "I", "F"): + # FIXME: fix Fill2 to properly support matte for I+F images + self.fontmode = "1" + else: + self.fontmode = "L" # aliasing is okay for other modes + self.fill = 0 + self.font = None + + def getfont(self): + """ + Get the current default font. + + :returns: An image font.""" + if not self.font: + # FIXME: should add a font repository + from . 
import ImageFont + + self.font = ImageFont.load_default() + return self.font + + def _getink(self, ink, fill=None): + if ink is None and fill is None: + if self.fill: + fill = self.ink + else: + ink = self.ink + else: + if ink is not None: + if isinstance(ink, str): + ink = ImageColor.getcolor(ink, self.mode) + if self.palette and not isinstance(ink, numbers.Number): + ink = self.palette.getcolor(ink) + ink = self.draw.draw_ink(ink) + if fill is not None: + if isinstance(fill, str): + fill = ImageColor.getcolor(fill, self.mode) + if self.palette and not isinstance(fill, numbers.Number): + fill = self.palette.getcolor(fill) + fill = self.draw.draw_ink(fill) + return ink, fill + + def arc(self, xy, start, end, fill=None, width=0): + """Draw an arc.""" + ink, fill = self._getink(fill) + if ink is not None: + self.draw.draw_arc(xy, start, end, ink, width) + + def bitmap(self, xy, bitmap, fill=None): + """Draw a bitmap.""" + bitmap.load() + ink, fill = self._getink(fill) + if ink is None: + ink = fill + if ink is not None: + self.draw.draw_bitmap(xy, bitmap.im, ink) + + def chord(self, xy, start, end, fill=None, outline=None, width=1): + """Draw a chord.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_chord(xy, start, end, fill, 1) + if ink is not None and ink != fill and width != 0: + self.draw.draw_chord(xy, start, end, ink, 0, width) + + def ellipse(self, xy, fill=None, outline=None, width=1): + """Draw an ellipse.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_ellipse(xy, fill, 1) + if ink is not None and ink != fill and width != 0: + self.draw.draw_ellipse(xy, ink, 0, width) + + def line(self, xy, fill=None, width=0, joint=None): + """Draw a line, or a connected sequence of line segments.""" + ink = self._getink(fill)[0] + if ink is not None: + self.draw.draw_lines(xy, ink, width) + if joint == "curve" and width > 4: + if not isinstance(xy[0], (list, tuple)): + xy = [tuple(xy[i : i + 2]) for 
i in range(0, len(xy), 2)] + for i in range(1, len(xy) - 1): + point = xy[i] + angles = [ + math.degrees(math.atan2(end[0] - start[0], start[1] - end[1])) + % 360 + for start, end in ((xy[i - 1], point), (point, xy[i + 1])) + ] + if angles[0] == angles[1]: + # This is a straight line, so no joint is required + continue + + def coord_at_angle(coord, angle): + x, y = coord + angle -= 90 + distance = width / 2 - 1 + return tuple( + [ + p + (math.floor(p_d) if p_d > 0 else math.ceil(p_d)) + for p, p_d in ( + (x, distance * math.cos(math.radians(angle))), + (y, distance * math.sin(math.radians(angle))), + ) + ] + ) + + flipped = ( + angles[1] > angles[0] and angles[1] - 180 > angles[0] + ) or (angles[1] < angles[0] and angles[1] + 180 > angles[0]) + coords = [ + (point[0] - width / 2 + 1, point[1] - width / 2 + 1), + (point[0] + width / 2 - 1, point[1] + width / 2 - 1), + ] + if flipped: + start, end = (angles[1] + 90, angles[0] + 90) + else: + start, end = (angles[0] - 90, angles[1] - 90) + self.pieslice(coords, start - 90, end - 90, fill) + + if width > 8: + # Cover potential gaps between the line and the joint + if flipped: + gapCoords = [ + coord_at_angle(point, angles[0] + 90), + point, + coord_at_angle(point, angles[1] + 90), + ] + else: + gapCoords = [ + coord_at_angle(point, angles[0] - 90), + point, + coord_at_angle(point, angles[1] - 90), + ] + self.line(gapCoords, fill, width=3) + + def shape(self, shape, fill=None, outline=None): + """(Experimental) Draw a shape.""" + shape.close() + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_outline(shape, fill, 1) + if ink is not None and ink != fill: + self.draw.draw_outline(shape, ink, 0) + + def pieslice(self, xy, start, end, fill=None, outline=None, width=1): + """Draw a pieslice.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_pieslice(xy, start, end, fill, 1) + if ink is not None and ink != fill and width != 0: + self.draw.draw_pieslice(xy, 
start, end, ink, 0, width) + + def point(self, xy, fill=None): + """Draw one or more individual pixels.""" + ink, fill = self._getink(fill) + if ink is not None: + self.draw.draw_points(xy, ink) + + def polygon(self, xy, fill=None, outline=None): + """Draw a polygon.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_polygon(xy, fill, 1) + if ink is not None and ink != fill: + self.draw.draw_polygon(xy, ink, 0) + + def rectangle(self, xy, fill=None, outline=None, width=1): + """Draw a rectangle.""" + ink, fill = self._getink(outline, fill) + if fill is not None: + self.draw.draw_rectangle(xy, fill, 1) + if ink is not None and ink != fill and width != 0: + self.draw.draw_rectangle(xy, ink, 0, width) + + def _multiline_check(self, text): + """Draw text.""" + split_character = "\n" if isinstance(text, str) else b"\n" + + return split_character in text + + def _multiline_split(self, text): + split_character = "\n" if isinstance(text, str) else b"\n" + + return text.split(split_character) + + def text( + self, + xy, + text, + fill=None, + font=None, + anchor=None, + spacing=4, + align="left", + direction=None, + features=None, + language=None, + stroke_width=0, + stroke_fill=None, + *args, + **kwargs + ): + if self._multiline_check(text): + return self.multiline_text( + xy, + text, + fill, + font, + anchor, + spacing, + align, + direction, + features, + language, + stroke_width, + stroke_fill, + ) + + if font is None: + font = self.getfont() + + def getink(fill): + ink, fill = self._getink(fill) + if ink is None: + return fill + return ink + + def draw_text(ink, stroke_width=0, stroke_offset=None): + coord = xy + try: + mask, offset = font.getmask2( + text, + self.fontmode, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + *args, + **kwargs, + ) + coord = coord[0] + offset[0], coord[1] + offset[1] + except AttributeError: + try: + mask = font.getmask( + text, + self.fontmode, + direction, + 
features, + language, + stroke_width, + *args, + **kwargs, + ) + except TypeError: + mask = font.getmask(text) + if stroke_offset: + coord = coord[0] + stroke_offset[0], coord[1] + stroke_offset[1] + self.draw.draw_bitmap(coord, mask, ink) + + ink = getink(fill) + if ink is not None: + stroke_ink = None + if stroke_width: + stroke_ink = getink(stroke_fill) if stroke_fill is not None else ink + + if stroke_ink is not None: + # Draw stroked text + draw_text(stroke_ink, stroke_width) + + # Draw normal text + draw_text(ink, 0, (stroke_width, stroke_width)) + else: + # Only draw normal text + draw_text(ink) + + def multiline_text( + self, + xy, + text, + fill=None, + font=None, + anchor=None, + spacing=4, + align="left", + direction=None, + features=None, + language=None, + stroke_width=0, + stroke_fill=None, + ): + widths = [] + max_width = 0 + lines = self._multiline_split(text) + line_spacing = ( + self.textsize("A", font=font, stroke_width=stroke_width)[1] + spacing + ) + for line in lines: + line_width, line_height = self.textsize( + line, + font, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + ) + widths.append(line_width) + max_width = max(max_width, line_width) + left, top = xy + for idx, line in enumerate(lines): + if align == "left": + pass # left = x + elif align == "center": + left += (max_width - widths[idx]) / 2.0 + elif align == "right": + left += max_width - widths[idx] + else: + raise ValueError('align must be "left", "center" or "right"') + self.text( + (left, top), + line, + fill, + font, + anchor, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + stroke_fill=stroke_fill, + ) + top += line_spacing + left = xy[0] + + def textsize( + self, + text, + font=None, + spacing=4, + direction=None, + features=None, + language=None, + stroke_width=0, + ): + """Get the size of a given string, in pixels.""" + if self._multiline_check(text): + return 
self.multiline_textsize( + text, font, spacing, direction, features, language, stroke_width + ) + + if font is None: + font = self.getfont() + return font.getsize(text, direction, features, language, stroke_width) + + def multiline_textsize( + self, + text, + font=None, + spacing=4, + direction=None, + features=None, + language=None, + stroke_width=0, + ): + max_width = 0 + lines = self._multiline_split(text) + line_spacing = ( + self.textsize("A", font=font, stroke_width=stroke_width)[1] + spacing + ) + for line in lines: + line_width, line_height = self.textsize( + line, font, spacing, direction, features, language, stroke_width + ) + max_width = max(max_width, line_width) + return max_width, len(lines) * line_spacing - spacing + + +def Draw(im, mode=None): + """ + A simple 2D drawing interface for PIL images. + + :param im: The image to draw in. + :param mode: Optional mode to use for color values. For RGB + images, this argument can be RGB or RGBA (to blend the + drawing into the image). For all other modes, this argument + must be the same as the image mode. If omitted, the mode + defaults to the mode of the image. + """ + try: + return im.getdraw(mode) + except AttributeError: + return ImageDraw(im, mode) + + +# experimental access to the outline API +try: + Outline = Image.core.outline +except AttributeError: + Outline = None + + +def getdraw(im=None, hints=None): + """ + (Experimental) A more advanced 2D drawing interface for PIL images, + based on the WCK interface. + + :param im: The image to draw in. + :param hints: An optional list of hints. + :returns: A (drawing context, drawing resource factory) tuple. + """ + # FIXME: this needs more work! + # FIXME: come up with a better 'hints' scheme. + handler = None + if not hints or "nicest" in hints: + try: + from . import _imagingagg as handler + except ImportError: + pass + if handler is None: + from . 
import ImageDraw2 as handler + if im: + im = handler.Draw(im) + return im, handler + + +def floodfill(image, xy, value, border=None, thresh=0): + """ + (experimental) Fills a bounded region with a given color. + + :param image: Target image. + :param xy: Seed position (a 2-item coordinate tuple). See + :ref:`coordinate-system`. + :param value: Fill color. + :param border: Optional border value. If given, the region consists of + pixels with a color different from the border color. If not given, + the region consists of pixels having the same color as the seed + pixel. + :param thresh: Optional threshold value which specifies a maximum + tolerable difference of a pixel value from the 'background' in + order for it to be replaced. Useful for filling regions of + non-homogeneous, but similar, colors. + """ + # based on an implementation by Eric S. Raymond + # amended by yo1995 @20180806 + pixel = image.load() + x, y = xy + try: + background = pixel[x, y] + if _color_diff(value, background) <= thresh: + return # seed point already has fill color + pixel[x, y] = value + except (ValueError, IndexError): + return # seed point outside image + edge = {(x, y)} + # use a set to keep record of current and previous edge pixels + # to reduce memory consumption + full_edge = set() + while edge: + new_edge = set() + for (x, y) in edge: # 4 adjacent method + for (s, t) in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)): + # If already processed, or if a coordinate is negative, skip + if (s, t) in full_edge or s < 0 or t < 0: + continue + try: + p = pixel[s, t] + except (ValueError, IndexError): + pass + else: + full_edge.add((s, t)) + if border is None: + fill = _color_diff(p, background) <= thresh + else: + fill = p != value and p != border + if fill: + pixel[s, t] = value + new_edge.add((s, t)) + full_edge = edge # discard pixels processed + edge = new_edge + + +def _color_diff(color1, color2): + """ + Uses 1-norm distance to calculate difference between two values. 
+ """ + if isinstance(color2, tuple): + return sum([abs(color1[i] - color2[i]) for i in range(0, len(color2))]) + else: + return abs(color1 - color2) diff --git a/venv/Lib/site-packages/PIL/ImageDraw2.py b/venv/Lib/site-packages/PIL/ImageDraw2.py new file mode 100644 index 000000000..1f63110fd --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageDraw2.py @@ -0,0 +1,179 @@ +# +# The Python Imaging Library +# $Id$ +# +# WCK-style drawing interface operations +# +# History: +# 2003-12-07 fl created +# 2005-05-15 fl updated; added to PIL as ImageDraw2 +# 2005-05-15 fl added text support +# 2005-05-20 fl added arc/chord/pieslice support +# +# Copyright (c) 2003-2005 by Secret Labs AB +# Copyright (c) 2003-2005 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + + +""" +(Experimental) WCK-style drawing interface operations + +.. seealso:: :py:mod:`PIL.ImageDraw` +""" + + +from . import Image, ImageColor, ImageDraw, ImageFont, ImagePath + + +class Pen: + """Stores an outline color and width.""" + + def __init__(self, color, width=1, opacity=255): + self.color = ImageColor.getrgb(color) + self.width = width + + +class Brush: + """Stores a fill color""" + + def __init__(self, color, opacity=255): + self.color = ImageColor.getrgb(color) + + +class Font: + """Stores a TrueType font and color""" + + def __init__(self, color, file, size=12): + # FIXME: add support for bitmap fonts + self.color = ImageColor.getrgb(color) + self.font = ImageFont.truetype(file, size) + + +class Draw: + """ + (Experimental) WCK-style drawing interface + """ + + def __init__(self, image, size=None, color=None): + if not hasattr(image, "im"): + image = Image.new(image, size, color) + self.draw = ImageDraw.Draw(image) + self.image = image + self.transform = None + + def flush(self): + return self.image + + def render(self, op, xy, pen, brush=None): + # handle color arguments + outline = fill = None + width = 1 + if isinstance(pen, Pen): + outline = pen.color + 
width = pen.width + elif isinstance(brush, Pen): + outline = brush.color + width = brush.width + if isinstance(brush, Brush): + fill = brush.color + elif isinstance(pen, Brush): + fill = pen.color + # handle transformation + if self.transform: + xy = ImagePath.Path(xy) + xy.transform(self.transform) + # render the item + if op == "line": + self.draw.line(xy, fill=outline, width=width) + else: + getattr(self.draw, op)(xy, fill=fill, outline=outline) + + def settransform(self, offset): + """Sets a transformation offset.""" + (xoffset, yoffset) = offset + self.transform = (1, 0, xoffset, 0, 1, yoffset) + + def arc(self, xy, start, end, *options): + """ + Draws an arc (a portion of a circle outline) between the start and end + angles, inside the given bounding box. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.arc` + """ + self.render("arc", xy, start, end, *options) + + def chord(self, xy, start, end, *options): + """ + Same as :py:meth:`~PIL.ImageDraw2.Draw.arc`, but connects the end points + with a straight line. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.chord` + """ + self.render("chord", xy, start, end, *options) + + def ellipse(self, xy, *options): + """ + Draws an ellipse inside the given bounding box. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.ellipse` + """ + self.render("ellipse", xy, *options) + + def line(self, xy, *options): + """ + Draws a line between the coordinates in the ``xy`` list. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.line` + """ + self.render("line", xy, *options) + + def pieslice(self, xy, start, end, *options): + """ + Same as arc, but also draws straight lines between the end points and the + center of the bounding box. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.pieslice` + """ + self.render("pieslice", xy, start, end, *options) + + def polygon(self, xy, *options): + """ + Draws a polygon. 
+ + The polygon outline consists of straight lines between the given + coordinates, plus a straight line between the last and the first + coordinate. + + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.polygon` + """ + self.render("polygon", xy, *options) + + def rectangle(self, xy, *options): + """ + Draws a rectangle. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.rectangle` + """ + self.render("rectangle", xy, *options) + + def text(self, xy, text, font): + """ + Draws the string at the given position. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.text` + """ + if self.transform: + xy = ImagePath.Path(xy) + xy.transform(self.transform) + self.draw.text(xy, text, font=font.font, fill=font.color) + + def textsize(self, text, font): + """ + Return the size of the given string, in pixels. + + .. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textsize` + """ + return self.draw.textsize(text, font=font.font) diff --git a/venv/Lib/site-packages/PIL/ImageEnhance.py b/venv/Lib/site-packages/PIL/ImageEnhance.py new file mode 100644 index 000000000..3b79d5c46 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageEnhance.py @@ -0,0 +1,103 @@ +# +# The Python Imaging Library. +# $Id$ +# +# image enhancement classes +# +# For a background, see "Image Processing By Interpolation and +# Extrapolation", Paul Haeberli and Douglas Voorhies. Available +# at http://www.graficaobscura.com/interp/index.html +# +# History: +# 1996-03-23 fl Created +# 2009-06-16 fl Fixed mean calculation +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFilter, ImageStat + + +class _Enhance: + def enhance(self, factor): + """ + Returns an enhanced image. + + :param factor: A floating point value controlling the enhancement. + Factor 1.0 always returns a copy of the original image, + lower factors mean less color (brightness, contrast, + etc), and higher values more. 
There are no restrictions + on this value. + :rtype: :py:class:`~PIL.Image.Image` + """ + return Image.blend(self.degenerate, self.image, factor) + + +class Color(_Enhance): + """Adjust image color balance. + + This class can be used to adjust the colour balance of an image, in + a manner similar to the controls on a colour TV set. An enhancement + factor of 0.0 gives a black and white image. A factor of 1.0 gives + the original image. + """ + + def __init__(self, image): + self.image = image + self.intermediate_mode = "L" + if "A" in image.getbands(): + self.intermediate_mode = "LA" + + self.degenerate = image.convert(self.intermediate_mode).convert(image.mode) + + +class Contrast(_Enhance): + """Adjust image contrast. + + This class can be used to control the contrast of an image, similar + to the contrast control on a TV set. An enhancement factor of 0.0 + gives a solid grey image. A factor of 1.0 gives the original image. + """ + + def __init__(self, image): + self.image = image + mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5) + self.degenerate = Image.new("L", image.size, mean).convert(image.mode) + + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) + + +class Brightness(_Enhance): + """Adjust image brightness. + + This class can be used to control the brightness of an image. An + enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the + original image. + """ + + def __init__(self, image): + self.image = image + self.degenerate = Image.new(image.mode, image.size, 0) + + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) + + +class Sharpness(_Enhance): + """Adjust image sharpness. + + This class can be used to adjust the sharpness of an image. An + enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the + original image, and a factor of 2.0 gives a sharpened image. 
+ """ + + def __init__(self, image): + self.image = image + self.degenerate = image.filter(ImageFilter.SMOOTH) + + if "A" in image.getbands(): + self.degenerate.putalpha(image.getchannel("A")) diff --git a/venv/Lib/site-packages/PIL/ImageFile.py b/venv/Lib/site-packages/PIL/ImageFile.py new file mode 100644 index 000000000..fd2e1bbde --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageFile.py @@ -0,0 +1,693 @@ +# +# The Python Imaging Library. +# $Id$ +# +# base class for image file handlers +# +# history: +# 1995-09-09 fl Created +# 1996-03-11 fl Fixed load mechanism. +# 1996-04-15 fl Added pcx/xbm decoders. +# 1996-04-30 fl Added encoders. +# 1996-12-14 fl Added load helpers +# 1997-01-11 fl Use encode_to_file where possible +# 1997-08-27 fl Flush output in _save +# 1998-03-05 fl Use memory mapping for some modes +# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B" +# 1999-05-31 fl Added image parser +# 2000-10-12 fl Set readonly flag on memory-mapped images +# 2002-03-20 fl Use better messages for common decoder errors +# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available +# 2003-10-30 fl Added StubImageFile class +# 2004-02-25 fl Made incremental parser more robust +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1995-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import io +import struct +import sys +import warnings + +from . 
import Image +from ._util import isPath + +MAXBLOCK = 65536 + +SAFEBLOCK = 1024 * 1024 + +LOAD_TRUNCATED_IMAGES = False + +ERRORS = { + -1: "image buffer overrun error", + -2: "decoding error", + -3: "unknown error", + -8: "bad configuration", + -9: "out of memory error", +} + + +# +# -------------------------------------------------------------------- +# Helpers + + +def raise_oserror(error): + try: + message = Image.core.getcodecstatus(error) + except AttributeError: + message = ERRORS.get(error) + if not message: + message = "decoder error %d" % error + raise OSError(message + " when reading image file") + + +def raise_ioerror(error): + warnings.warn( + "raise_ioerror is deprecated and will be removed in a future release. " + "Use raise_oserror instead.", + DeprecationWarning, + ) + return raise_oserror(error) + + +def _tilesort(t): + # sort on offset + return t[2] + + +# +# -------------------------------------------------------------------- +# ImageFile base class + + +class ImageFile(Image.Image): + """Base class for image file format handlers.""" + + def __init__(self, fp=None, filename=None): + super().__init__() + + self._min_frame = 0 + + self.custom_mimetype = None + + self.tile = None + self.readonly = 1 # until we know better + + self.decoderconfig = () + self.decodermaxblock = MAXBLOCK + + if isPath(fp): + # filename + self.fp = open(fp, "rb") + self.filename = fp + self._exclusive_fp = True + else: + # stream + self.fp = fp + self.filename = filename + # can be overridden + self._exclusive_fp = None + + try: + try: + self._open() + except ( + IndexError, # end of data + TypeError, # end of data (ord) + KeyError, # unsupported mode + EOFError, # got header but not the first frame + struct.error, + ) as v: + raise SyntaxError(v) from v + + if not self.mode or self.size[0] <= 0: + raise SyntaxError("not identified by this driver") + except BaseException: + # close the file only if we have opened it this constructor + if self._exclusive_fp: + 
self.fp.close() + raise + + def get_format_mimetype(self): + if self.custom_mimetype: + return self.custom_mimetype + if self.format is not None: + return Image.MIME.get(self.format.upper()) + + def verify(self): + """Check file integrity""" + + # raise exception if something's wrong. must be called + # directly after open, and closes file when finished. + if self._exclusive_fp: + self.fp.close() + self.fp = None + + def load(self): + """Load image data based on tile list""" + + if self.tile is None: + raise OSError("cannot load this image") + + pixel = Image.Image.load(self) + if not self.tile: + return pixel + + self.map = None + use_mmap = self.filename and len(self.tile) == 1 + # As of pypy 2.1.0, memory mapping was failing here. + use_mmap = use_mmap and not hasattr(sys, "pypy_version_info") + + readonly = 0 + + # look for read/seek overrides + try: + read = self.load_read + # don't use mmap if there are custom read/seek functions + use_mmap = False + except AttributeError: + read = self.fp.read + + try: + seek = self.load_seek + use_mmap = False + except AttributeError: + seek = self.fp.seek + + if use_mmap: + # try memory mapping + decoder_name, extents, offset, args = self.tile[0] + if ( + decoder_name == "raw" + and len(args) >= 3 + and args[0] == self.mode + and args[0] in Image._MAPMODES + ): + try: + if hasattr(Image.core, "map"): + # use built-in mapper WIN32 only + self.map = Image.core.map(self.filename) + self.map.seek(offset) + self.im = self.map.readimage( + self.mode, self.size, args[1], args[2] + ) + else: + # use mmap, if possible + import mmap + + with open(self.filename, "r") as fp: + self.map = mmap.mmap( + fp.fileno(), 0, access=mmap.ACCESS_READ + ) + self.im = Image.core.map_buffer( + self.map, self.size, decoder_name, offset, args + ) + readonly = 1 + # After trashing self.im, + # we might need to reload the palette data. 
+ if self.palette: + self.palette.dirty = 1 + except (AttributeError, OSError, ImportError): + self.map = None + + self.load_prepare() + err_code = -3 # initialize to unknown error + if not self.map: + # sort tiles in file order + self.tile.sort(key=_tilesort) + + try: + # FIXME: This is a hack to handle TIFF's JpegTables tag. + prefix = self.tile_prefix + except AttributeError: + prefix = b"" + + for decoder_name, extents, offset, args in self.tile: + decoder = Image._getdecoder( + self.mode, decoder_name, args, self.decoderconfig + ) + try: + seek(offset) + decoder.setimage(self.im, extents) + if decoder.pulls_fd: + decoder.setfd(self.fp) + status, err_code = decoder.decode(b"") + else: + b = prefix + while True: + try: + s = read(self.decodermaxblock) + except (IndexError, struct.error) as e: + # truncated png/gif + if LOAD_TRUNCATED_IMAGES: + break + else: + raise OSError("image file is truncated") from e + + if not s: # truncated jpeg + if LOAD_TRUNCATED_IMAGES: + break + else: + raise OSError( + "image file is truncated " + "(%d bytes not processed)" % len(b) + ) + + b = b + s + n, err_code = decoder.decode(b) + if n < 0: + break + b = b[n:] + finally: + # Need to cleanup here to prevent leaks + decoder.cleanup() + + self.tile = [] + self.readonly = readonly + + self.load_end() + + if self._exclusive_fp and self._close_exclusive_fp_after_loading: + self.fp.close() + self.fp = None + + if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0: + # still raised if decoder fails to return anything + raise_oserror(err_code) + + return Image.Image.load(self) + + def load_prepare(self): + # create image memory if necessary + if not self.im or self.im.mode != self.mode or self.im.size != self.size: + self.im = Image.core.new(self.mode, self.size) + # create palette (optional) + if self.mode == "P": + Image.Image.load(self) + + def load_end(self): + # may be overridden + pass + + # may be defined for contained formats + # def load_seek(self, pos): + # pass + + # 
may be defined for blocked formats (e.g. PNG) + # def load_read(self, bytes): + # pass + + def _seek_check(self, frame): + if ( + frame < self._min_frame + # Only check upper limit on frames if additional seek operations + # are not required to do so + or ( + not (hasattr(self, "_n_frames") and self._n_frames is None) + and frame >= self.n_frames + self._min_frame + ) + ): + raise EOFError("attempt to seek outside sequence") + + return self.tell() != frame + + +class StubImageFile(ImageFile): + """ + Base class for stub image loaders. + + A stub loader is an image loader that can identify files of a + certain format, but relies on external code to load the file. + """ + + def _open(self): + raise NotImplementedError("StubImageFile subclass must implement _open") + + def load(self): + loader = self._load() + if loader is None: + raise OSError("cannot find loader for this %s file" % self.format) + image = loader.load(self) + assert image is not None + # become the other object (!) + self.__class__ = image.__class__ + self.__dict__ = image.__dict__ + + def _load(self): + """(Hook) Find actual image loader.""" + raise NotImplementedError("StubImageFile subclass must implement _load") + + +class Parser: + """ + Incremental image parser. This class implements the standard + feed/close consumer interface. + """ + + incremental = None + image = None + data = None + decoder = None + offset = 0 + finished = 0 + + def reset(self): + """ + (Consumer) Reset the parser. Note that you can only call this + method immediately after you've created a parser; parser + instances cannot be reused. + """ + assert self.data is None, "cannot reuse parsers" + + def feed(self, data): + """ + (Consumer) Feed data to the parser. + + :param data: A string buffer. + :exception OSError: If the parser failed to parse the image file. 
+ """ + # collect data + + if self.finished: + return + + if self.data is None: + self.data = data + else: + self.data = self.data + data + + # parse what we have + if self.decoder: + + if self.offset > 0: + # skip header + skip = min(len(self.data), self.offset) + self.data = self.data[skip:] + self.offset = self.offset - skip + if self.offset > 0 or not self.data: + return + + n, e = self.decoder.decode(self.data) + + if n < 0: + # end of stream + self.data = None + self.finished = 1 + if e < 0: + # decoding error + self.image = None + raise_oserror(e) + else: + # end of image + return + self.data = self.data[n:] + + elif self.image: + + # if we end up here with no decoder, this file cannot + # be incrementally parsed. wait until we've gotten all + # available data + pass + + else: + + # attempt to open this file + try: + with io.BytesIO(self.data) as fp: + im = Image.open(fp) + except OSError: + # traceback.print_exc() + pass # not enough data + else: + flag = hasattr(im, "load_seek") or hasattr(im, "load_read") + if flag or len(im.tile) != 1: + # custom load code, or multiple tiles + self.decode = None + else: + # initialize decoder + im.load_prepare() + d, e, o, a = im.tile[0] + im.tile = [] + self.decoder = Image._getdecoder(im.mode, d, a, im.decoderconfig) + self.decoder.setimage(im.im, e) + + # calculate decoder offset + self.offset = o + if self.offset <= len(self.data): + self.data = self.data[self.offset :] + self.offset = 0 + + self.image = im + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + """ + (Consumer) Close the stream. + + :returns: An image object. + :exception OSError: If the parser failed to parse the image file either + because it cannot be identified or cannot be + decoded. 
+ """ + # finish decoding + if self.decoder: + # get rid of what's left in the buffers + self.feed(b"") + self.data = self.decoder = None + if not self.finished: + raise OSError("image was incomplete") + if not self.image: + raise OSError("cannot parse this image") + if self.data: + # incremental parsing not possible; reopen the file + # not that we have all data + with io.BytesIO(self.data) as fp: + try: + self.image = Image.open(fp) + finally: + self.image.load() + return self.image + + +# -------------------------------------------------------------------- + + +def _save(im, fp, tile, bufsize=0): + """Helper to save image based on tile list + + :param im: Image object. + :param fp: File object. + :param tile: Tile list. + :param bufsize: Optional buffer size + """ + + im.load() + if not hasattr(im, "encoderconfig"): + im.encoderconfig = () + tile.sort(key=_tilesort) + # FIXME: make MAXBLOCK a configuration parameter + # It would be great if we could have the encoder specify what it needs + # But, it would need at least the image size in most cases. RawEncode is + # a tricky case. 
+ bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c + if fp == sys.stdout: + fp.flush() + return + try: + fh = fp.fileno() + fp.flush() + except (AttributeError, io.UnsupportedOperation) as e: + # compress to Python file-compatible object + for e, b, o, a in tile: + e = Image._getencoder(im.mode, e, a, im.encoderconfig) + if o > 0: + fp.seek(o) + e.setimage(im.im, b) + if e.pushes_fd: + e.setfd(fp) + l, s = e.encode_to_pyfd() + else: + while True: + l, s, d = e.encode(bufsize) + fp.write(d) + if s: + break + if s < 0: + raise OSError("encoder error %d when writing image file" % s) from e + e.cleanup() + else: + # slight speedup: compress to real file object + for e, b, o, a in tile: + e = Image._getencoder(im.mode, e, a, im.encoderconfig) + if o > 0: + fp.seek(o) + e.setimage(im.im, b) + if e.pushes_fd: + e.setfd(fp) + l, s = e.encode_to_pyfd() + else: + s = e.encode_to_file(fh, bufsize) + if s < 0: + raise OSError("encoder error %d when writing image file" % s) + e.cleanup() + if hasattr(fp, "flush"): + fp.flush() + + +def _safe_read(fp, size): + """ + Reads large blocks in a safe way. Unlike fp.read(n), this function + doesn't trust the user. If the requested size is larger than + SAFEBLOCK, the file is read block by block. + + :param fp: File handle. Must implement a read method. + :param size: Number of bytes to read. + :returns: A string containing up to size bytes of data. + """ + if size <= 0: + return b"" + if size <= SAFEBLOCK: + return fp.read(size) + data = [] + while size > 0: + block = fp.read(min(size, SAFEBLOCK)) + if not block: + break + data.append(block) + size -= len(block) + return b"".join(data) + + +class PyCodecState: + def __init__(self): + self.xsize = 0 + self.ysize = 0 + self.xoff = 0 + self.yoff = 0 + + def extents(self): + return (self.xoff, self.yoff, self.xoff + self.xsize, self.yoff + self.ysize) + + +class PyDecoder: + """ + Python implementation of a format decoder. 
Override this class and + add the decoding logic in the `decode` method. + + See :ref:`Writing Your Own File Decoder in Python` + """ + + _pulls_fd = False + + def __init__(self, mode, *args): + self.im = None + self.state = PyCodecState() + self.fd = None + self.mode = mode + self.init(args) + + def init(self, args): + """ + Override to perform decoder specific initialization + + :param args: Array of args items from the tile entry + :returns: None + """ + self.args = args + + @property + def pulls_fd(self): + return self._pulls_fd + + def decode(self, buffer): + """ + Override to perform the decoding process. + + :param buffer: A bytes object with the data to be decoded. + :returns: A tuple of (bytes consumed, errcode). + If finished with decoding return <0 for the bytes consumed. + Err codes are from `ERRORS` + """ + raise NotImplementedError() + + def cleanup(self): + """ + Override to perform decoder specific cleanup + + :returns: None + """ + pass + + def setfd(self, fd): + """ + Called from ImageFile to set the python file-like object + + :param fd: A python file-like object + :returns: None + """ + self.fd = fd + + def setimage(self, im, extents=None): + """ + Called from ImageFile to set the core output image for the decoder + + :param im: A core image object + :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle + for this tile + :returns: None + """ + + # following c code + self.im = im + + if extents: + (x0, y0, x1, y1) = extents + else: + (x0, y0, x1, y1) = (0, 0, 0, 0) + + if x0 == 0 and x1 == 0: + self.state.xsize, self.state.ysize = self.im.size + else: + self.state.xoff = x0 + self.state.yoff = y0 + self.state.xsize = x1 - x0 + self.state.ysize = y1 - y0 + + if self.state.xsize <= 0 or self.state.ysize <= 0: + raise ValueError("Size cannot be negative") + + if ( + self.state.xsize + self.state.xoff > self.im.size[0] + or self.state.ysize + self.state.yoff > self.im.size[1] + ): + raise ValueError("Tile cannot extend outside image") 
+ + def set_as_raw(self, data, rawmode=None): + """ + Convenience method to set the internal image from a stream of raw data + + :param data: Bytes to be set + :param rawmode: The rawmode to be used for the decoder. + If not specified, it will default to the mode of the image + :returns: None + """ + + if not rawmode: + rawmode = self.mode + d = Image._getdecoder(self.mode, "raw", (rawmode)) + d.setimage(self.im, self.state.extents()) + s = d.decode(data) + + if s[0] >= 0: + raise ValueError("not enough image data") + if s[1] != 0: + raise ValueError("cannot decode image data") diff --git a/venv/Lib/site-packages/PIL/ImageFilter.py b/venv/Lib/site-packages/PIL/ImageFilter.py new file mode 100644 index 000000000..3e61a6ca1 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageFilter.py @@ -0,0 +1,535 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard filters +# +# History: +# 1995-11-27 fl Created +# 2002-06-08 fl Added rank and mode filters +# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2002 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +import functools + +try: + import numpy +except ImportError: # pragma: no cover + numpy = None + + +class Filter: + pass + + +class MultibandFilter(Filter): + pass + + +class BuiltinFilter(MultibandFilter): + def filter(self, image): + if image.mode == "P": + raise ValueError("cannot filter palette images") + return image.filter(*self.filterargs) + + +class Kernel(BuiltinFilter): + """ + Create a convolution kernel. The current version only + supports 3x3 and 5x5 integer and floating point kernels. + + In the current version, kernels can only be applied to + "L" and "RGB" images. + + :param size: Kernel size, given as (width, height). In the current + version, this must be (3,3) or (5,5). + :param kernel: A sequence containing kernel weights. + :param scale: Scale factor. 
If given, the result for each pixel is + divided by this value. the default is the sum of the + kernel weights. + :param offset: Offset. If given, this value is added to the result, + after it has been divided by the scale factor. + """ + + name = "Kernel" + + def __init__(self, size, kernel, scale=None, offset=0): + if scale is None: + # default scale is sum of kernel + scale = functools.reduce(lambda a, b: a + b, kernel) + if size[0] * size[1] != len(kernel): + raise ValueError("not enough coefficients in kernel") + self.filterargs = size, scale, offset, kernel + + +class RankFilter(Filter): + """ + Create a rank filter. The rank filter sorts all pixels in + a window of the given size, and returns the **rank**'th value. + + :param size: The kernel size, in pixels. + :param rank: What pixel value to pick. Use 0 for a min filter, + ``size * size / 2`` for a median filter, ``size * size - 1`` + for a max filter, etc. + """ + + name = "Rank" + + def __init__(self, size, rank): + self.size = size + self.rank = rank + + def filter(self, image): + if image.mode == "P": + raise ValueError("cannot filter palette images") + image = image.expand(self.size // 2, self.size // 2) + return image.rankfilter(self.size, self.rank) + + +class MedianFilter(RankFilter): + """ + Create a median filter. Picks the median pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + + name = "Median" + + def __init__(self, size=3): + self.size = size + self.rank = size * size // 2 + + +class MinFilter(RankFilter): + """ + Create a min filter. Picks the lowest pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. + """ + + name = "Min" + + def __init__(self, size=3): + self.size = size + self.rank = 0 + + +class MaxFilter(RankFilter): + """ + Create a max filter. Picks the largest pixel value in a window with the + given size. + + :param size: The kernel size, in pixels. 
+ """ + + name = "Max" + + def __init__(self, size=3): + self.size = size + self.rank = size * size - 1 + + +class ModeFilter(Filter): + """ + Create a mode filter. Picks the most frequent pixel value in a box with the + given size. Pixel values that occur only once or twice are ignored; if no + pixel value occurs more than twice, the original pixel value is preserved. + + :param size: The kernel size, in pixels. + """ + + name = "Mode" + + def __init__(self, size=3): + self.size = size + + def filter(self, image): + return image.modefilter(self.size) + + +class GaussianBlur(MultibandFilter): + """Gaussian blur filter. + + :param radius: Blur radius. + """ + + name = "GaussianBlur" + + def __init__(self, radius=2): + self.radius = radius + + def filter(self, image): + return image.gaussian_blur(self.radius) + + +class BoxBlur(MultibandFilter): + """Blurs the image by setting each pixel to the average value of the pixels + in a square box extending radius pixels in each direction. + Supports float radius of arbitrary size. Uses an optimized implementation + which runs in linear time relative to the size of the image + for any radius value. + + :param radius: Size of the box in one direction. Radius 0 does not blur, + returns an identical image. Radius 1 takes 1 pixel + in each direction, i.e. 9 pixels in total. + """ + + name = "BoxBlur" + + def __init__(self, radius): + self.radius = radius + + def filter(self, image): + return image.box_blur(self.radius) + + +class UnsharpMask(MultibandFilter): + """Unsharp mask filter. + + See Wikipedia's entry on `digital unsharp masking`_ for an explanation of + the parameters. + + :param radius: Blur Radius + :param percent: Unsharp strength, in percent + :param threshold: Threshold controls the minimum brightness change that + will be sharpened + + .. 
_digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking + + """ # noqa: E501 + + name = "UnsharpMask" + + def __init__(self, radius=2, percent=150, threshold=3): + self.radius = radius + self.percent = percent + self.threshold = threshold + + def filter(self, image): + return image.unsharp_mask(self.radius, self.percent, self.threshold) + + +class BLUR(BuiltinFilter): + name = "Blur" + # fmt: off + filterargs = (5, 5), 16, 0, ( + 1, 1, 1, 1, 1, + 1, 0, 0, 0, 1, + 1, 0, 0, 0, 1, + 1, 0, 0, 0, 1, + 1, 1, 1, 1, 1, + ) + # fmt: on + + +class CONTOUR(BuiltinFilter): + name = "Contour" + # fmt: off + filterargs = (3, 3), 1, 255, ( + -1, -1, -1, + -1, 8, -1, + -1, -1, -1, + ) + # fmt: on + + +class DETAIL(BuiltinFilter): + name = "Detail" + # fmt: off + filterargs = (3, 3), 6, 0, ( + 0, -1, 0, + -1, 10, -1, + 0, -1, 0, + ) + # fmt: on + + +class EDGE_ENHANCE(BuiltinFilter): + name = "Edge-enhance" + # fmt: off + filterargs = (3, 3), 2, 0, ( + -1, -1, -1, + -1, 10, -1, + -1, -1, -1, + ) + # fmt: on + + +class EDGE_ENHANCE_MORE(BuiltinFilter): + name = "Edge-enhance More" + # fmt: off + filterargs = (3, 3), 1, 0, ( + -1, -1, -1, + -1, 9, -1, + -1, -1, -1, + ) + # fmt: on + + +class EMBOSS(BuiltinFilter): + name = "Emboss" + # fmt: off + filterargs = (3, 3), 1, 128, ( + -1, 0, 0, + 0, 1, 0, + 0, 0, 0, + ) + # fmt: on + + +class FIND_EDGES(BuiltinFilter): + name = "Find Edges" + # fmt: off + filterargs = (3, 3), 1, 0, ( + -1, -1, -1, + -1, 8, -1, + -1, -1, -1, + ) + # fmt: on + + +class SHARPEN(BuiltinFilter): + name = "Sharpen" + # fmt: off + filterargs = (3, 3), 16, 0, ( + -2, -2, -2, + -2, 32, -2, + -2, -2, -2, + ) + # fmt: on + + +class SMOOTH(BuiltinFilter): + name = "Smooth" + # fmt: off + filterargs = (3, 3), 13, 0, ( + 1, 1, 1, + 1, 5, 1, + 1, 1, 1, + ) + # fmt: on + + +class SMOOTH_MORE(BuiltinFilter): + name = "Smooth More" + # fmt: off + filterargs = (5, 5), 100, 0, ( + 1, 1, 1, 1, 1, + 1, 5, 5, 5, 1, + 1, 5, 44, 5, 1, + 1, 
5, 5, 5, 1, + 1, 1, 1, 1, 1, + ) + # fmt: on + + +class Color3DLUT(MultibandFilter): + """Three-dimensional color lookup table. + + Transforms 3-channel pixels using the values of the channels as coordinates + in the 3D lookup table and interpolating the nearest elements. + + This method allows you to apply almost any color transformation + in constant time by using pre-calculated decimated tables. + + .. versionadded:: 5.2.0 + + :param size: Size of the table. One int or tuple of (int, int, int). + Minimal size in any dimension is 2, maximum is 65. + :param table: Flat lookup table. A list of ``channels * size**3`` + float elements or a list of ``size**3`` channels-sized + tuples with floats. Channels are changed first, + then first dimension, then second, then third. + Value 0.0 corresponds lowest value of output, 1.0 highest. + :param channels: Number of channels in the table. Could be 3 or 4. + Default is 3. + :param target_mode: A mode for the result image. Should have not less + than ``channels`` channels. Default is ``None``, + which means that mode wouldn't be changed. + """ + + name = "Color 3D LUT" + + def __init__(self, size, table, channels=3, target_mode=None, **kwargs): + if channels not in (3, 4): + raise ValueError("Only 3 or 4 output channels are supported") + self.size = size = self._check_size(size) + self.channels = channels + self.mode = target_mode + + # Hidden flag `_copy_table=False` could be used to avoid extra copying + # of the table if the table is specially made for the constructor. 
+ copy_table = kwargs.get("_copy_table", True) + items = size[0] * size[1] * size[2] + wrong_size = False + + if numpy and isinstance(table, numpy.ndarray): + if copy_table: + table = table.copy() + + if table.shape in [ + (items * channels,), + (items, channels), + (size[2], size[1], size[0], channels), + ]: + table = table.reshape(items * channels) + else: + wrong_size = True + + else: + if copy_table: + table = list(table) + + # Convert to a flat list + if table and isinstance(table[0], (list, tuple)): + table, raw_table = [], table + for pixel in raw_table: + if len(pixel) != channels: + raise ValueError( + "The elements of the table should " + "have a length of {}.".format(channels) + ) + table.extend(pixel) + + if wrong_size or len(table) != items * channels: + raise ValueError( + "The table should have either channels * size**3 float items " + "or size**3 items of channels-sized tuples with floats. " + "Table should be: {}x{}x{}x{}. Actual length: {}".format( + channels, size[0], size[1], size[2], len(table) + ) + ) + self.table = table + + @staticmethod + def _check_size(size): + try: + _, _, _ = size + except ValueError as e: + raise ValueError( + "Size should be either an integer or a tuple of three integers." + ) from e + except TypeError: + size = (size, size, size) + size = [int(x) for x in size] + for size1D in size: + if not 2 <= size1D <= 65: + raise ValueError("Size should be in [2, 65] range.") + return size + + @classmethod + def generate(cls, size, callback, channels=3, target_mode=None): + """Generates new LUT using provided callback. + + :param size: Size of the table. Passed to the constructor. + :param callback: Function with three parameters which correspond + three color channels. Will be called ``size**3`` + times with values from 0.0 to 1.0 and should return + a tuple with ``channels`` elements. + :param channels: The number of channels which should return callback. 
+ :param target_mode: Passed to the constructor of the resulting + lookup table. + """ + size1D, size2D, size3D = cls._check_size(size) + if channels not in (3, 4): + raise ValueError("Only 3 or 4 output channels are supported") + + table = [0] * (size1D * size2D * size3D * channels) + idx_out = 0 + for b in range(size3D): + for g in range(size2D): + for r in range(size1D): + table[idx_out : idx_out + channels] = callback( + r / (size1D - 1), g / (size2D - 1), b / (size3D - 1) + ) + idx_out += channels + + return cls( + (size1D, size2D, size3D), + table, + channels=channels, + target_mode=target_mode, + _copy_table=False, + ) + + def transform(self, callback, with_normals=False, channels=None, target_mode=None): + """Transforms the table values using provided callback and returns + a new LUT with altered values. + + :param callback: A function which takes old lookup table values + and returns a new set of values. The number + of arguments which function should take is + ``self.channels`` or ``3 + self.channels`` + if ``with_normals`` flag is set. + Should return a tuple of ``self.channels`` or + ``channels`` elements if it is set. + :param with_normals: If true, ``callback`` will be called with + coordinates in the color cube as the first + three arguments. Otherwise, ``callback`` + will be called only with actual color values. + :param channels: The number of channels in the resulting lookup table. + :param target_mode: Passed to the constructor of the resulting + lookup table. 
+ """ + if channels not in (None, 3, 4): + raise ValueError("Only 3 or 4 output channels are supported") + ch_in = self.channels + ch_out = channels or ch_in + size1D, size2D, size3D = self.size + + table = [0] * (size1D * size2D * size3D * ch_out) + idx_in = 0 + idx_out = 0 + for b in range(size3D): + for g in range(size2D): + for r in range(size1D): + values = self.table[idx_in : idx_in + ch_in] + if with_normals: + values = callback( + r / (size1D - 1), + g / (size2D - 1), + b / (size3D - 1), + *values, + ) + else: + values = callback(*values) + table[idx_out : idx_out + ch_out] = values + idx_in += ch_in + idx_out += ch_out + + return type(self)( + self.size, + table, + channels=ch_out, + target_mode=target_mode or self.mode, + _copy_table=False, + ) + + def __repr__(self): + r = [ + "{} from {}".format(self.__class__.__name__, self.table.__class__.__name__), + "size={:d}x{:d}x{:d}".format(*self.size), + "channels={:d}".format(self.channels), + ] + if self.mode: + r.append("target_mode={}".format(self.mode)) + return "<{}>".format(" ".join(r)) + + def filter(self, image): + from . import Image + + return image.color_lut_3d( + self.mode or image.mode, + Image.LINEAR, + self.channels, + self.size[0], + self.size[1], + self.size[2], + self.table, + ) diff --git a/venv/Lib/site-packages/PIL/ImageFont.py b/venv/Lib/site-packages/PIL/ImageFont.py new file mode 100644 index 000000000..8f792d55b --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageFont.py @@ -0,0 +1,860 @@ +# +# The Python Imaging Library. 
+# $Id$ +# +# PIL raster font management +# +# History: +# 1996-08-07 fl created (experimental) +# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3 +# 1999-02-06 fl rewrote most font management stuff in C +# 1999-03-17 fl take pth files into account in load_path (from Richard Jones) +# 2001-02-17 fl added freetype support +# 2001-05-09 fl added TransposedFont wrapper class +# 2002-03-04 fl make sure we have a "L" or "1" font +# 2002-12-04 fl skip non-directory entries in the system path +# 2003-04-29 fl add embedded default font +# 2003-09-27 fl added support for truetype charmap encodings +# +# Todo: +# Adapt to PILFONT2 format (16-bit fonts, compressed, single file) +# +# Copyright (c) 1997-2003 by Secret Labs AB +# Copyright (c) 1996-2003 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import base64 +import os +import sys +from io import BytesIO + +from . import Image +from ._util import isDirectory, isPath + +LAYOUT_BASIC = 0 +LAYOUT_RAQM = 1 + + +class _imagingft_not_installed: + # module placeholder + def __getattr__(self, id): + raise ImportError("The _imagingft C module is not installed") + + +try: + from . import _imagingft as core +except ImportError: + core = _imagingft_not_installed() + + +# FIXME: add support for pilfont2 format (see FontFile.py) + +# -------------------------------------------------------------------- +# Font metrics format: +# "PILfont" LF +# fontdescriptor LF +# (optional) key=value... LF +# "DATA" LF +# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox) +# +# To place a character, cut out srcbox and paste at dstbox, +# relative to the character position. Then move the character +# position according to dx, dy. 
+# -------------------------------------------------------------------- + + +class ImageFont: + "PIL font wrapper" + + def _load_pilfont(self, filename): + + with open(filename, "rb") as fp: + image = None + for ext in (".png", ".gif", ".pbm"): + if image: + image.close() + try: + fullname = os.path.splitext(filename)[0] + ext + image = Image.open(fullname) + except Exception: + pass + else: + if image and image.mode in ("1", "L"): + break + else: + if image: + image.close() + raise OSError("cannot find glyph data file") + + self.file = fullname + + self._load_pilfont_data(fp, image) + image.close() + + def _load_pilfont_data(self, file, image): + + # read PILfont header + if file.readline() != b"PILfont\n": + raise SyntaxError("Not a PILfont file") + file.readline().split(b";") + self.info = [] # FIXME: should be a dictionary + while True: + s = file.readline() + if not s or s == b"DATA\n": + break + self.info.append(s) + + # read PILfont metrics + data = file.read(256 * 20) + + # check image + if image.mode not in ("1", "L"): + raise TypeError("invalid font image mode") + + image.load() + + self.font = Image.core.font(image.im, data) + + def getsize(self, text, *args, **kwargs): + """ + Returns width and height (in pixels) of given text. + + :param text: Text to measure. + + :return: (width, height) + """ + return self.font.getsize(text) + + def getmask(self, text, mode="", *args, **kwargs): + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :return: An internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module. 
+ """ + return self.font.getmask(text, mode) + + +## +# Wrapper for FreeType fonts. Application code should use the +# truetype factory function to create font objects. + + +class FreeTypeFont: + "FreeType font wrapper (requires _imagingft service)" + + def __init__(self, font=None, size=10, index=0, encoding="", layout_engine=None): + # FIXME: use service provider instead + + self.path = font + self.size = size + self.index = index + self.encoding = encoding + + if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM): + layout_engine = LAYOUT_BASIC + if core.HAVE_RAQM: + layout_engine = LAYOUT_RAQM + elif layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM: + layout_engine = LAYOUT_BASIC + + self.layout_engine = layout_engine + + def load_from_bytes(f): + self.font_bytes = f.read() + self.font = core.getfont( + "", size, index, encoding, self.font_bytes, layout_engine + ) + + if isPath(font): + if sys.platform == "win32": + font_bytes_path = font if isinstance(font, bytes) else font.encode() + try: + font_bytes_path.decode("ascii") + except UnicodeDecodeError: + # FreeType cannot load fonts with non-ASCII characters on Windows + # So load it into memory first + with open(font, "rb") as f: + load_from_bytes(f) + return + self.font = core.getfont( + font, size, index, encoding, layout_engine=layout_engine + ) + else: + load_from_bytes(font) + + def _multiline_split(self, text): + split_character = "\n" if isinstance(text, str) else b"\n" + return text.split(split_character) + + def getname(self): + """ + :return: A tuple of the font family (e.g. Helvetica) and the font style + (e.g. 
Bold) + """ + return self.font.family, self.font.style + + def getmetrics(self): + """ + :return: A tuple of the font ascent (the distance from the baseline to + the highest outline point) and descent (the distance from the + baseline to the lowest outline point, a negative value) + """ + return self.font.ascent, self.font.descent + + def getsize( + self, text, direction=None, features=None, language=None, stroke_width=0 + ): + """ + Returns width and height (in pixels) of given text if rendered in font with + provided direction, features, and language. + + :param text: Text to measure. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + ` + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. 
versionadded:: 6.2.0 + + :return: (width, height) + """ + size, offset = self.font.getsize(text, False, direction, features, language) + return ( + size[0] + stroke_width * 2 + offset[0], + size[1] + stroke_width * 2 + offset[1], + ) + + def getsize_multiline( + self, + text, + direction=None, + spacing=4, + features=None, + language=None, + stroke_width=0, + ): + """ + Returns width and height (in pixels) of given text if rendered in font + with provided direction, features, and language, while respecting + newline characters. + + :param text: Text to measure. + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + :param spacing: The vertical gap between lines, defaulting to 4 pixels. + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + ` + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. 
versionadded:: 6.2.0 + + :return: (width, height) + """ + max_width = 0 + lines = self._multiline_split(text) + line_spacing = self.getsize("A", stroke_width=stroke_width)[1] + spacing + for line in lines: + line_width, line_height = self.getsize( + line, direction, features, language, stroke_width + ) + max_width = max(max_width, line_width) + + return max_width, len(lines) * line_spacing - spacing + + def getoffset(self, text): + """ + Returns the offset of given text. This is the gap between the + starting coordinate and the first marking. Note that this gap is + included in the result of :py:func:`~PIL.ImageFont.FreeTypeFont.getsize`. + + :param text: Text to measure. + + :return: A tuple of the x and y offset + """ + return self.font.getsize(text)[1] + + def getmask( + self, + text, + mode="", + direction=None, + features=None, + language=None, + stroke_width=0, + ): + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. 
versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + ` + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. versionadded:: 6.2.0 + + :return: An internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module. + """ + return self.getmask2( + text, + mode, + direction=direction, + features=features, + language=language, + stroke_width=stroke_width, + )[0] + + def getmask2( + self, + text, + mode="", + fill=Image.core.fill, + direction=None, + features=None, + language=None, + stroke_width=0, + *args, + **kwargs + ): + """ + Create a bitmap for the text. + + If the font uses antialiasing, the bitmap should have mode ``L`` and use a + maximum value of 255. Otherwise, it should have mode ``1``. + + :param text: Text to render. + :param mode: Used by some graphics drivers to indicate what mode the + driver prefers; if empty, the renderer may return either + mode. Note that the mode is always a string, to simplify + C-level implementations. + + .. versionadded:: 1.1.5 + + :param direction: Direction of the text. It can be 'rtl' (right to + left), 'ltr' (left to right) or 'ttb' (top to bottom). + Requires libraqm. + + .. versionadded:: 4.2.0 + + :param features: A list of OpenType font features to be used during text + layout. This is usually used to turn on optional + font features that are not enabled by default, + for example 'dlig' or 'ss01', but can be also + used to turn off default font features for + example '-liga' to disable ligatures or '-kern' + to disable kerning. To get all supported + features, see + https://docs.microsoft.com/en-us/typography/opentype/spec/featurelist + Requires libraqm. + + .. 
versionadded:: 4.2.0 + + :param language: Language of the text. Different languages may use + different glyph shapes or ligatures. This parameter tells + the font which language the text is in, and to apply the + correct substitutions as appropriate, if available. + It should be a `BCP 47 language code + ` + Requires libraqm. + + .. versionadded:: 6.0.0 + + :param stroke_width: The width of the text stroke. + + .. versionadded:: 6.2.0 + + :return: A tuple of an internal PIL storage memory instance as defined by the + :py:mod:`PIL.Image.core` interface module, and the text offset, the + gap between the starting coordinate and the first marking + """ + size, offset = self.font.getsize( + text, mode == "1", direction, features, language + ) + size = size[0] + stroke_width * 2, size[1] + stroke_width * 2 + im = fill("L", size, 0) + self.font.render( + text, im.id, mode == "1", direction, features, language, stroke_width + ) + return im, offset + + def font_variant( + self, font=None, size=None, index=None, encoding=None, layout_engine=None + ): + """ + Create a copy of this FreeTypeFont object, + using any specified arguments to override the settings. + + Parameters are identical to the parameters used to initialize this + object. + + :return: A FreeTypeFont object. + """ + return FreeTypeFont( + font=self.path if font is None else font, + size=self.size if size is None else size, + index=self.index if index is None else index, + encoding=self.encoding if encoding is None else encoding, + layout_engine=layout_engine or self.layout_engine, + ) + + def get_variation_names(self): + """ + :returns: A list of the named styles in a variation font. + :exception OSError: If the font is not a variation font. 
+ """ + try: + names = self.font.getvarnames() + except AttributeError as e: + raise NotImplementedError("FreeType 2.9.1 or greater is required") from e + return [name.replace(b"\x00", b"") for name in names] + + def set_variation_by_name(self, name): + """ + :param name: The name of the style. + :exception OSError: If the font is not a variation font. + """ + names = self.get_variation_names() + if not isinstance(name, bytes): + name = name.encode() + index = names.index(name) + + if index == getattr(self, "_last_variation_index", None): + # When the same name is set twice in a row, + # there is an 'unknown freetype error' + # https://savannah.nongnu.org/bugs/?56186 + return + self._last_variation_index = index + + self.font.setvarname(index) + + def get_variation_axes(self): + """ + :returns: A list of the axes in a variation font. + :exception OSError: If the font is not a variation font. + """ + try: + axes = self.font.getvaraxes() + except AttributeError as e: + raise NotImplementedError("FreeType 2.9.1 or greater is required") from e + for axis in axes: + axis["name"] = axis["name"].replace(b"\x00", b"") + return axes + + def set_variation_by_axes(self, axes): + """ + :param axes: A list of values for each axis. + :exception OSError: If the font is not a variation font. + """ + try: + self.font.setvaraxes(axes) + except AttributeError as e: + raise NotImplementedError("FreeType 2.9.1 or greater is required") from e + + +class TransposedFont: + "Wrapper for writing rotated or mirrored text" + + def __init__(self, font, orientation=None): + """ + Wrapper that creates a transposed font from any existing font + object. + + :param font: A font object. + :param orientation: An optional orientation. If given, this should + be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM, + Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270. 
+ """ + self.font = font + self.orientation = orientation # any 'transpose' argument, or None + + def getsize(self, text, *args, **kwargs): + w, h = self.font.getsize(text) + if self.orientation in (Image.ROTATE_90, Image.ROTATE_270): + return h, w + return w, h + + def getmask(self, text, mode="", *args, **kwargs): + im = self.font.getmask(text, mode, *args, **kwargs) + if self.orientation is not None: + return im.transpose(self.orientation) + return im + + +def load(filename): + """ + Load a font file. This function loads a font object from the given + bitmap font file, and returns the corresponding font object. + + :param filename: Name of font file. + :return: A font object. + :exception OSError: If the file could not be read. + """ + f = ImageFont() + f._load_pilfont(filename) + return f + + +def truetype(font=None, size=10, index=0, encoding="", layout_engine=None): + """ + Load a TrueType or OpenType font from a file or file-like object, + and create a font object. + This function loads a font object from the given file or file-like + object, and creates a font object for a font of the given size. + + Pillow uses FreeType to open font files. If you are opening many fonts + simultaneously on Windows, be aware that Windows limits the number of files + that can be open in C at once to 512. If you approach that limit, an + ``OSError`` may be thrown, reporting that FreeType "cannot open resource". + + This function requires the _imagingft service. + + :param font: A filename or file-like object containing a TrueType font. + If the file is not found in this filename, the loader may also + search in other directories, such as the :file:`fonts/` + directory on Windows or :file:`/Library/Fonts/`, + :file:`/System/Library/Fonts/` and :file:`~/Library/Fonts/` on + macOS. + + :param size: The requested size, in points. + :param index: Which font face to load (default is first available face). + :param encoding: Which font encoding to use (default is Unicode). 
Possible + encodings include (see the FreeType documentation for more + information): + + * "unic" (Unicode) + * "symb" (Microsoft Symbol) + * "ADOB" (Adobe Standard) + * "ADBE" (Adobe Expert) + * "ADBC" (Adobe Custom) + * "armn" (Apple Roman) + * "sjis" (Shift JIS) + * "gb " (PRC) + * "big5" + * "wans" (Extended Wansung) + * "joha" (Johab) + * "lat1" (Latin-1) + + This specifies the character set to use. It does not alter the + encoding of any text provided in subsequent operations. + :param layout_engine: Which layout engine to use, if available: + `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`. + + You can check support for Raqm layout using + :py:func:`PIL.features.check_feature` with ``feature="raqm"``. + + .. versionadded:: 4.2.0 + :return: A font object. + :exception OSError: If the file could not be read. + """ + + def freetype(font): + return FreeTypeFont(font, size, index, encoding, layout_engine) + + try: + return freetype(font) + except OSError: + if not isPath(font): + raise + ttf_filename = os.path.basename(font) + + dirs = [] + if sys.platform == "win32": + # check the windows font repository + # NOTE: must use uppercase WINDIR, to work around bugs in + # 1.5.2's os.environ.get() + windir = os.environ.get("WINDIR") + if windir: + dirs.append(os.path.join(windir, "fonts")) + elif sys.platform in ("linux", "linux2"): + lindirs = os.environ.get("XDG_DATA_DIRS", "") + if not lindirs: + # According to the freedesktop spec, XDG_DATA_DIRS should + # default to /usr/share + lindirs = "/usr/share" + dirs += [os.path.join(lindir, "fonts") for lindir in lindirs.split(":")] + elif sys.platform == "darwin": + dirs += [ + "/Library/Fonts", + "/System/Library/Fonts", + os.path.expanduser("~/Library/Fonts"), + ] + + ext = os.path.splitext(ttf_filename)[1] + first_font_with_a_different_extension = None + for directory in dirs: + for walkroot, walkdir, walkfilenames in os.walk(directory): + for walkfilename in walkfilenames: + if ext and walkfilename == 
ttf_filename: + return freetype(os.path.join(walkroot, walkfilename)) + elif not ext and os.path.splitext(walkfilename)[0] == ttf_filename: + fontpath = os.path.join(walkroot, walkfilename) + if os.path.splitext(fontpath)[1] == ".ttf": + return freetype(fontpath) + if not ext and first_font_with_a_different_extension is None: + first_font_with_a_different_extension = fontpath + if first_font_with_a_different_extension: + return freetype(first_font_with_a_different_extension) + raise + + +def load_path(filename): + """ + Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a + bitmap font along the Python path. + + :param filename: Name of font file. + :return: A font object. + :exception OSError: If the file could not be read. + """ + for directory in sys.path: + if isDirectory(directory): + if not isinstance(filename, str): + filename = filename.decode("utf-8") + try: + return load(os.path.join(directory, filename)) + except OSError: + pass + raise OSError("cannot find font file") + + +def load_default(): + """Load a "better than nothing" default font. + + .. versionadded:: 1.1.4 + + :return: A font object. 
+ """ + f = ImageFont() + f._load_pilfont_data( + # courB08 + BytesIO( + base64.b64decode( + b""" +UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA +BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL +AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA +AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB +ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A +BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB +//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA +AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH +AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA +ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv +AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/ +/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5 +AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA 
+AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG +AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA +BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA +AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA +2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF +AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA//// ++gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA +////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA +BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv +AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA +AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA +AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA +BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP// +//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA +AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF +AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB +mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn +AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA +AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7 +AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA +Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB +//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA +AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ +AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC +DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ +AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/ ++wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5 +AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/ +///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG +AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA +BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA +Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC +eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG +AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA//// ++gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA +////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA +BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT +AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A 
+AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA +Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA +Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP// +//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA +AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ +AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA +LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5 +AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA +AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5 +AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA +AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG +AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA +EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK +AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA +pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG +AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA//// ++QAGAAIAzgAKANUAEw== +""" + ) + ), + Image.open( + BytesIO( + base64.b64decode( + b""" +iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u +Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9 +M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g +LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F +IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA +Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791 +NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx +in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9 
+SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY +AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt +y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG +ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY +lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H +/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3 +AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47 +c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/ +/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw +pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv +oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR +evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA +AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v// +Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR +w7IkEbzhVQAAAABJRU5ErkJggg== +""" + ) + ) + ), + ) + return f diff --git a/venv/Lib/site-packages/PIL/ImageGrab.py b/venv/Lib/site-packages/PIL/ImageGrab.py new file mode 100644 index 000000000..3fa338b0a --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageGrab.py @@ -0,0 +1,120 @@ +# +# The Python Imaging Library +# $Id$ +# +# screen grabber +# +# History: +# 2001-04-26 fl created +# 2001-09-17 fl use builtin driver, if present +# 2002-11-19 fl added grabclipboard support +# +# Copyright (c) 2001-2002 by Secret Labs AB +# Copyright (c) 2001-2002 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import sys + +from . 
import Image + +if sys.platform == "darwin": + import os + import tempfile + import subprocess + + +def grab(bbox=None, include_layered_windows=False, all_screens=False, xdisplay=None): + if xdisplay is None: + if sys.platform == "darwin": + fh, filepath = tempfile.mkstemp(".png") + os.close(fh) + subprocess.call(["screencapture", "-x", filepath]) + im = Image.open(filepath) + im.load() + os.unlink(filepath) + if bbox: + im_cropped = im.crop(bbox) + im.close() + return im_cropped + return im + elif sys.platform == "win32": + offset, size, data = Image.core.grabscreen_win32( + include_layered_windows, all_screens + ) + im = Image.frombytes( + "RGB", + size, + data, + # RGB, 32-bit line padding, origin lower left corner + "raw", + "BGR", + (size[0] * 3 + 3) & -4, + -1, + ) + if bbox: + x0, y0 = offset + left, top, right, bottom = bbox + im = im.crop((left - x0, top - y0, right - x0, bottom - y0)) + return im + # use xdisplay=None for default display on non-win32/macOS systems + if not Image.core.HAVE_XCB: + raise OSError("Pillow was built without XCB support") + size, data = Image.core.grabscreen_x11(xdisplay) + im = Image.frombytes("RGB", size, data, "raw", "BGRX", size[0] * 4, 1) + if bbox: + im = im.crop(bbox) + return im + + +def grabclipboard(): + if sys.platform == "darwin": + fh, filepath = tempfile.mkstemp(".jpg") + os.close(fh) + commands = [ + 'set theFile to (open for access POSIX file "' + + filepath + + '" with write permission)', + "try", + " write (the clipboard as JPEG picture) to theFile", + "end try", + "close access theFile", + ] + script = ["osascript"] + for command in commands: + script += ["-e", command] + subprocess.call(script) + + im = None + if os.stat(filepath).st_size != 0: + im = Image.open(filepath) + im.load() + os.unlink(filepath) + return im + elif sys.platform == "win32": + fmt, data = Image.core.grabclipboard_win32() + if fmt == "file": # CF_HDROP + import struct + + o = struct.unpack_from("I", data)[0] + if data[16] != 0: + files 
= data[o:].decode("utf-16le").split("\0") + else: + files = data[o:].decode("mbcs").split("\0") + return files[: files.index("")] + if isinstance(data, bytes): + import io + + data = io.BytesIO(data) + if fmt == "png": + from . import PngImagePlugin + + return PngImagePlugin.PngImageFile(data) + elif fmt == "DIB": + from . import BmpImagePlugin + + return BmpImagePlugin.DibImageFile(data) + return None + else: + raise NotImplementedError("ImageGrab.grabclipboard() is macOS and Windows only") diff --git a/venv/Lib/site-packages/PIL/ImageMath.py b/venv/Lib/site-packages/PIL/ImageMath.py new file mode 100644 index 000000000..9a2d0b78e --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageMath.py @@ -0,0 +1,253 @@ +# +# The Python Imaging Library +# $Id$ +# +# a simple math add-on for the Python Imaging Library +# +# History: +# 1999-02-15 fl Original PIL Plus release +# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6 +# 2005-09-12 fl Fixed int() and float() for Python 2.4.1 +# +# Copyright (c) 1999-2005 by Secret Labs AB +# Copyright (c) 2005 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import builtins + +from . import Image, _imagingmath + +VERBOSE = 0 + + +def _isconstant(v): + return isinstance(v, (int, float)) + + +class _Operand: + """Wraps an image operand, providing standard operators""" + + def __init__(self, im): + self.im = im + + def __fixup(self, im1): + # convert image to suitable mode + if isinstance(im1, _Operand): + # argument was an image. 
+ if im1.im.mode in ("1", "L"): + return im1.im.convert("I") + elif im1.im.mode in ("I", "F"): + return im1.im + else: + raise ValueError("unsupported mode: %s" % im1.im.mode) + else: + # argument was a constant + if _isconstant(im1) and self.im.mode in ("1", "L", "I"): + return Image.new("I", self.im.size, im1) + else: + return Image.new("F", self.im.size, im1) + + def apply(self, op, im1, im2=None, mode=None): + im1 = self.__fixup(im1) + if im2 is None: + # unary operation + out = Image.new(mode or im1.mode, im1.size, None) + im1.load() + try: + op = getattr(_imagingmath, op + "_" + im1.mode) + except AttributeError as e: + raise TypeError("bad operand type for '%s'" % op) from e + _imagingmath.unop(op, out.im.id, im1.im.id) + else: + # binary operation + im2 = self.__fixup(im2) + if im1.mode != im2.mode: + # convert both arguments to floating point + if im1.mode != "F": + im1 = im1.convert("F") + if im2.mode != "F": + im2 = im2.convert("F") + if im1.mode != im2.mode: + raise ValueError("mode mismatch") + if im1.size != im2.size: + # crop both arguments to a common size + size = (min(im1.size[0], im2.size[0]), min(im1.size[1], im2.size[1])) + if im1.size != size: + im1 = im1.crop((0, 0) + size) + if im2.size != size: + im2 = im2.crop((0, 0) + size) + out = Image.new(mode or im1.mode, size, None) + else: + out = Image.new(mode or im1.mode, im1.size, None) + im1.load() + im2.load() + try: + op = getattr(_imagingmath, op + "_" + im1.mode) + except AttributeError as e: + raise TypeError("bad operand type for '%s'" % op) from e + _imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id) + return _Operand(out) + + # unary operators + def __bool__(self): + # an image is "true" if it contains at least one non-zero pixel + return self.im.getbbox() is not None + + def __abs__(self): + return self.apply("abs", self) + + def __pos__(self): + return self + + def __neg__(self): + return self.apply("neg", self) + + # binary operators + def __add__(self, other): + return 
self.apply("add", self, other) + + def __radd__(self, other): + return self.apply("add", other, self) + + def __sub__(self, other): + return self.apply("sub", self, other) + + def __rsub__(self, other): + return self.apply("sub", other, self) + + def __mul__(self, other): + return self.apply("mul", self, other) + + def __rmul__(self, other): + return self.apply("mul", other, self) + + def __truediv__(self, other): + return self.apply("div", self, other) + + def __rtruediv__(self, other): + return self.apply("div", other, self) + + def __mod__(self, other): + return self.apply("mod", self, other) + + def __rmod__(self, other): + return self.apply("mod", other, self) + + def __pow__(self, other): + return self.apply("pow", self, other) + + def __rpow__(self, other): + return self.apply("pow", other, self) + + # bitwise + def __invert__(self): + return self.apply("invert", self) + + def __and__(self, other): + return self.apply("and", self, other) + + def __rand__(self, other): + return self.apply("and", other, self) + + def __or__(self, other): + return self.apply("or", self, other) + + def __ror__(self, other): + return self.apply("or", other, self) + + def __xor__(self, other): + return self.apply("xor", self, other) + + def __rxor__(self, other): + return self.apply("xor", other, self) + + def __lshift__(self, other): + return self.apply("lshift", self, other) + + def __rshift__(self, other): + return self.apply("rshift", self, other) + + # logical + def __eq__(self, other): + return self.apply("eq", self, other) + + def __ne__(self, other): + return self.apply("ne", self, other) + + def __lt__(self, other): + return self.apply("lt", self, other) + + def __le__(self, other): + return self.apply("le", self, other) + + def __gt__(self, other): + return self.apply("gt", self, other) + + def __ge__(self, other): + return self.apply("ge", self, other) + + +# conversions +def imagemath_int(self): + return _Operand(self.im.convert("I")) + + +def imagemath_float(self): + 
return _Operand(self.im.convert("F")) + + +# logical +def imagemath_equal(self, other): + return self.apply("eq", self, other, mode="I") + + +def imagemath_notequal(self, other): + return self.apply("ne", self, other, mode="I") + + +def imagemath_min(self, other): + return self.apply("min", self, other) + + +def imagemath_max(self, other): + return self.apply("max", self, other) + + +def imagemath_convert(self, mode): + return _Operand(self.im.convert(mode)) + + +ops = {} +for k, v in list(globals().items()): + if k[:10] == "imagemath_": + ops[k[10:]] = v + + +def eval(expression, _dict={}, **kw): + """ + Evaluates an image expression. + + :param expression: A string containing a Python-style expression. + :param options: Values to add to the evaluation context. You + can either use a dictionary, or one or more keyword + arguments. + :return: The evaluated expression. This is usually an image object, but can + also be an integer, a floating point value, or a pixel tuple, + depending on the expression. + """ + + # build execution namespace + args = ops.copy() + args.update(_dict) + args.update(kw) + for k, v in list(args.items()): + if hasattr(v, "im"): + args[k] = _Operand(v) + + out = builtins.eval(expression, args) + try: + return out.im + except AttributeError: + return out diff --git a/venv/Lib/site-packages/PIL/ImageMode.py b/venv/Lib/site-packages/PIL/ImageMode.py new file mode 100644 index 000000000..988288329 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageMode.py @@ -0,0 +1,64 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard mode descriptors +# +# History: +# 2006-03-20 fl Added +# +# Copyright (c) 2006 by Secret Labs AB. +# Copyright (c) 2006 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. 
+# + +# mode descriptor cache +_modes = None + + +class ModeDescriptor: + """Wrapper for mode strings.""" + + def __init__(self, mode, bands, basemode, basetype): + self.mode = mode + self.bands = bands + self.basemode = basemode + self.basetype = basetype + + def __str__(self): + return self.mode + + +def getmode(mode): + """Gets a mode descriptor for the given mode.""" + global _modes + if not _modes: + # initialize mode cache + + from . import Image + + modes = {} + # core modes + for m, (basemode, basetype, bands) in Image._MODEINFO.items(): + modes[m] = ModeDescriptor(m, bands, basemode, basetype) + # extra experimental modes + modes["RGBa"] = ModeDescriptor("RGBa", ("R", "G", "B", "a"), "RGB", "L") + modes["LA"] = ModeDescriptor("LA", ("L", "A"), "L", "L") + modes["La"] = ModeDescriptor("La", ("L", "a"), "L", "L") + modes["PA"] = ModeDescriptor("PA", ("P", "A"), "RGB", "L") + # mapping modes + for i16mode in ( + "I;16", + "I;16S", + "I;16L", + "I;16LS", + "I;16B", + "I;16BS", + "I;16N", + "I;16NS", + ): + modes[i16mode] = ModeDescriptor(i16mode, ("I",), "L", "L") + # set global mode cache atomically + _modes = modes + return _modes[mode] diff --git a/venv/Lib/site-packages/PIL/ImageMorph.py b/venv/Lib/site-packages/PIL/ImageMorph.py new file mode 100644 index 000000000..d1ec09eac --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageMorph.py @@ -0,0 +1,245 @@ +# A binary morphology add-on for the Python Imaging Library +# +# History: +# 2014-06-04 Initial version. +# +# Copyright (c) 2014 Dov Grobgeld + +import re + +from . import Image, _imagingmorph + +LUT_SIZE = 1 << 9 + +# fmt: off +ROTATION_MATRIX = [ + 6, 3, 0, + 7, 4, 1, + 8, 5, 2, +] +MIRROR_MATRIX = [ + 2, 1, 0, + 5, 4, 3, + 8, 7, 6, +] +# fmt: on + + +class LutBuilder: + """A class for building a MorphLut from a descriptive language + + The input patterns is a list of a strings sequences like these:: + + 4:(... + .1. + 111)->1 + + (whitespaces including linebreaks are ignored). 
The option 4 + describes a series of symmetry operations (in this case a + 4-rotation), the pattern is described by: + + - . or X - Ignore + - 1 - Pixel is on + - 0 - Pixel is off + + The result of the operation is described after "->" string. + + The default is to return the current pixel value, which is + returned if no other match is found. + + Operations: + + - 4 - 4 way rotation + - N - Negate + - 1 - Dummy op for no other operation (an op must always be given) + - M - Mirroring + + Example:: + + lb = LutBuilder(patterns = ["4:(... .1. 111)->1"]) + lut = lb.build_lut() + + """ + + def __init__(self, patterns=None, op_name=None): + if patterns is not None: + self.patterns = patterns + else: + self.patterns = [] + self.lut = None + if op_name is not None: + known_patterns = { + "corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"], + "dilation4": ["4:(... .0. .1.)->1"], + "dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"], + "erosion4": ["4:(... .1. .0.)->0"], + "erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"], + "edge": [ + "1:(... ... ...)->0", + "4:(.0. .1. ...)->1", + "4:(01. .1. ...)->1", + ], + } + if op_name not in known_patterns: + raise Exception("Unknown pattern " + op_name + "!") + + self.patterns = known_patterns[op_name] + + def add_patterns(self, patterns): + self.patterns += patterns + + def build_default_lut(self): + symbols = [0, 1] + m = 1 << 4 # pos of current pixel + self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE)) + + def get_lut(self): + return self.lut + + def _string_permute(self, pattern, permutation): + """string_permute takes a pattern and a permutation and returns the + string permuted according to the permutation list. 
+ """ + assert len(permutation) == 9 + return "".join(pattern[p] for p in permutation) + + def _pattern_permute(self, basic_pattern, options, basic_result): + """pattern_permute takes a basic pattern and its result and clones + the pattern according to the modifications described in the $options + parameter. It returns a list of all cloned patterns.""" + patterns = [(basic_pattern, basic_result)] + + # rotations + if "4" in options: + res = patterns[-1][1] + for i in range(4): + patterns.append( + (self._string_permute(patterns[-1][0], ROTATION_MATRIX), res) + ) + # mirror + if "M" in options: + n = len(patterns) + for pattern, res in patterns[0:n]: + patterns.append((self._string_permute(pattern, MIRROR_MATRIX), res)) + + # negate + if "N" in options: + n = len(patterns) + for pattern, res in patterns[0:n]: + # Swap 0 and 1 + pattern = pattern.replace("0", "Z").replace("1", "0").replace("Z", "1") + res = 1 - int(res) + patterns.append((pattern, res)) + + return patterns + + def build_lut(self): + """Compile all patterns into a morphology lut. + + TBD :Build based on (file) morphlut:modify_lut + """ + self.build_default_lut() + patterns = [] + + # Parse and create symmetries of the patterns strings + for p in self.patterns: + m = re.search(r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", p.replace("\n", "")) + if not m: + raise Exception('Syntax error in pattern "' + p + '"') + options = m.group(1) + pattern = m.group(2) + result = int(m.group(3)) + + # Get rid of spaces + pattern = pattern.replace(" ", "").replace("\n", "") + + patterns += self._pattern_permute(pattern, options, result) + + # compile the patterns into regular expressions for speed + for i, pattern in enumerate(patterns): + p = pattern[0].replace(".", "X").replace("X", "[01]") + p = re.compile(p) + patterns[i] = (p, pattern[1]) + + # Step through table and find patterns that match. + # Note that all the patterns are searched. 
The last one + # caught overrides + for i in range(LUT_SIZE): + # Build the bit pattern + bitpattern = bin(i)[2:] + bitpattern = ("0" * (9 - len(bitpattern)) + bitpattern)[::-1] + + for p, r in patterns: + if p.match(bitpattern): + self.lut[i] = [0, 1][r] + + return self.lut + + +class MorphOp: + """A class for binary morphological operators""" + + def __init__(self, lut=None, op_name=None, patterns=None): + """Create a binary morphological operator""" + self.lut = lut + if op_name is not None: + self.lut = LutBuilder(op_name=op_name).build_lut() + elif patterns is not None: + self.lut = LutBuilder(patterns=patterns).build_lut() + + def apply(self, image): + """Run a single morphological operation on an image + + Returns a tuple of the number of changed pixels and the + morphed image""" + if self.lut is None: + raise Exception("No operator loaded") + + if image.mode != "L": + raise Exception("Image must be binary, meaning it must use mode L") + outimage = Image.new(image.mode, image.size, None) + count = _imagingmorph.apply(bytes(self.lut), image.im.id, outimage.im.id) + return count, outimage + + def match(self, image): + """Get a list of coordinates matching the morphological operation on + an image. + + Returns a list of tuples of (x,y) coordinates + of all matching pixels. See :ref:`coordinate-system`.""" + if self.lut is None: + raise Exception("No operator loaded") + + if image.mode != "L": + raise Exception("Image must be binary, meaning it must use mode L") + return _imagingmorph.match(bytes(self.lut), image.im.id) + + def get_on_pixels(self, image): + """Get a list of all turned on pixels in a binary image + + Returns a list of tuples of (x,y) coordinates + of all matching pixels. 
See :ref:`coordinate-system`.""" + + if image.mode != "L": + raise Exception("Image must be binary, meaning it must use mode L") + return _imagingmorph.get_on_pixels(image.im.id) + + def load_lut(self, filename): + """Load an operator from an mrl file""" + with open(filename, "rb") as f: + self.lut = bytearray(f.read()) + + if len(self.lut) != LUT_SIZE: + self.lut = None + raise Exception("Wrong size operator file!") + + def save_lut(self, filename): + """Save an operator to an mrl file""" + if self.lut is None: + raise Exception("No operator loaded") + with open(filename, "wb") as f: + f.write(self.lut) + + def set_lut(self, lut): + """Set the lut from an external source""" + self.lut = lut diff --git a/venv/Lib/site-packages/PIL/ImageOps.py b/venv/Lib/site-packages/PIL/ImageOps.py new file mode 100644 index 000000000..e4e0840b8 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageOps.py @@ -0,0 +1,551 @@ +# +# The Python Imaging Library. +# $Id$ +# +# standard image operations +# +# History: +# 2001-10-20 fl Created +# 2001-10-23 fl Added autocontrast operator +# 2001-12-18 fl Added Kevin's fit operator +# 2004-03-14 fl Fixed potential division by zero in equalize +# 2005-05-05 fl Fixed equalize for low number of values +# +# Copyright (c) 2001-2004 by Secret Labs AB +# Copyright (c) 2001-2004 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import functools +import operator + +from . import Image + +# +# helpers + + +def _border(border): + if isinstance(border, tuple): + if len(border) == 2: + left, top = right, bottom = border + elif len(border) == 4: + left, top, right, bottom = border + else: + left = top = right = bottom = border + return left, top, right, bottom + + +def _color(color, mode): + if isinstance(color, str): + from . 
import ImageColor + + color = ImageColor.getcolor(color, mode) + return color + + +def _lut(image, lut): + if image.mode == "P": + # FIXME: apply to lookup table, not image data + raise NotImplementedError("mode P support coming soon") + elif image.mode in ("L", "RGB"): + if image.mode == "RGB" and len(lut) == 256: + lut = lut + lut + lut + return image.point(lut) + else: + raise OSError("not supported for this image mode") + + +# +# actions + + +def autocontrast(image, cutoff=0, ignore=None): + """ + Maximize (normalize) image contrast. This function calculates a + histogram of the input image, removes **cutoff** percent of the + lightest and darkest pixels from the histogram, and remaps the image + so that the darkest pixel becomes black (0), and the lightest + becomes white (255). + + :param image: The image to process. + :param cutoff: How many percent to cut off from the histogram. + :param ignore: The background pixel value (use None for no background). + :return: An image. + """ + histogram = image.histogram() + lut = [] + for layer in range(0, len(histogram), 256): + h = histogram[layer : layer + 256] + if ignore is not None: + # get rid of outliers + try: + h[ignore] = 0 + except TypeError: + # assume sequence + for ix in ignore: + h[ix] = 0 + if cutoff: + # cut off pixels from both ends of the histogram + # get number of pixels + n = 0 + for ix in range(256): + n = n + h[ix] + # remove cutoff% pixels from the low end + cut = n * cutoff // 100 + for lo in range(256): + if cut > h[lo]: + cut = cut - h[lo] + h[lo] = 0 + else: + h[lo] -= cut + cut = 0 + if cut <= 0: + break + # remove cutoff% samples from the hi end + cut = n * cutoff // 100 + for hi in range(255, -1, -1): + if cut > h[hi]: + cut = cut - h[hi] + h[hi] = 0 + else: + h[hi] -= cut + cut = 0 + if cut <= 0: + break + # find lowest/highest samples after preprocessing + for lo in range(256): + if h[lo]: + break + for hi in range(255, -1, -1): + if h[hi]: + break + if hi <= lo: + # don't bother + 
lut.extend(list(range(256))) + else: + scale = 255.0 / (hi - lo) + offset = -lo * scale + for ix in range(256): + ix = int(ix * scale + offset) + if ix < 0: + ix = 0 + elif ix > 255: + ix = 255 + lut.append(ix) + return _lut(image, lut) + + +def colorize(image, black, white, mid=None, blackpoint=0, whitepoint=255, midpoint=127): + """ + Colorize grayscale image. + This function calculates a color wedge which maps all black pixels in + the source image to the first color and all white pixels to the + second color. If **mid** is specified, it uses three-color mapping. + The **black** and **white** arguments should be RGB tuples or color names; + optionally you can use three-color mapping by also specifying **mid**. + Mapping positions for any of the colors can be specified + (e.g. **blackpoint**), where these parameters are the integer + value corresponding to where the corresponding color should be mapped. + These parameters must have logical order, such that + **blackpoint** <= **midpoint** <= **whitepoint** (if **mid** is specified). + + :param image: The image to colorize. + :param black: The color to use for black input pixels. + :param white: The color to use for white input pixels. + :param mid: The color to use for midtone input pixels. + :param blackpoint: an int value [0, 255] for the black mapping. + :param whitepoint: an int value [0, 255] for the white mapping. + :param midpoint: an int value [0, 255] for the midtone mapping. + :return: An image. 
+ """ + + # Initial asserts + assert image.mode == "L" + if mid is None: + assert 0 <= blackpoint <= whitepoint <= 255 + else: + assert 0 <= blackpoint <= midpoint <= whitepoint <= 255 + + # Define colors from arguments + black = _color(black, "RGB") + white = _color(white, "RGB") + if mid is not None: + mid = _color(mid, "RGB") + + # Empty lists for the mapping + red = [] + green = [] + blue = [] + + # Create the low-end values + for i in range(0, blackpoint): + red.append(black[0]) + green.append(black[1]) + blue.append(black[2]) + + # Create the mapping (2-color) + if mid is None: + + range_map = range(0, whitepoint - blackpoint) + + for i in range_map: + red.append(black[0] + i * (white[0] - black[0]) // len(range_map)) + green.append(black[1] + i * (white[1] - black[1]) // len(range_map)) + blue.append(black[2] + i * (white[2] - black[2]) // len(range_map)) + + # Create the mapping (3-color) + else: + + range_map1 = range(0, midpoint - blackpoint) + range_map2 = range(0, whitepoint - midpoint) + + for i in range_map1: + red.append(black[0] + i * (mid[0] - black[0]) // len(range_map1)) + green.append(black[1] + i * (mid[1] - black[1]) // len(range_map1)) + blue.append(black[2] + i * (mid[2] - black[2]) // len(range_map1)) + for i in range_map2: + red.append(mid[0] + i * (white[0] - mid[0]) // len(range_map2)) + green.append(mid[1] + i * (white[1] - mid[1]) // len(range_map2)) + blue.append(mid[2] + i * (white[2] - mid[2]) // len(range_map2)) + + # Create the high-end values + for i in range(0, 256 - whitepoint): + red.append(white[0]) + green.append(white[1]) + blue.append(white[2]) + + # Return converted image + image = image.convert("RGB") + return _lut(image, red + green + blue) + + +def pad(image, size, method=Image.BICUBIC, color=None, centering=(0.5, 0.5)): + """ + Returns a sized and padded version of the image, expanded to fill the + requested aspect ratio and size. + + :param image: The image to size and crop. 
+ :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: What resampling method to use. Default is + :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`. + :param color: The background color of the padded image. + :param centering: Control the position of the original image within the + padded version. + + (0.5, 0.5) will keep the image centered + (0, 0) will keep the image aligned to the top left + (1, 1) will keep the image aligned to the bottom + right + :return: An image. + """ + + im_ratio = image.width / image.height + dest_ratio = size[0] / size[1] + + if im_ratio == dest_ratio: + out = image.resize(size, resample=method) + else: + out = Image.new(image.mode, size, color) + if im_ratio > dest_ratio: + new_height = int(image.height / image.width * size[0]) + if new_height != size[1]: + image = image.resize((size[0], new_height), resample=method) + + y = int((size[1] - new_height) * max(0, min(centering[1], 1))) + out.paste(image, (0, y)) + else: + new_width = int(image.width / image.height * size[1]) + if new_width != size[0]: + image = image.resize((new_width, size[1]), resample=method) + + x = int((size[0] - new_width) * max(0, min(centering[0], 1))) + out.paste(image, (x, 0)) + return out + + +def crop(image, border=0): + """ + Remove border from image. The same amount of pixels are removed + from all four sides. This function works on all image modes. + + .. seealso:: :py:meth:`~PIL.Image.Image.crop` + + :param image: The image to crop. + :param border: The number of pixels to remove. + :return: An image. + """ + left, top, right, bottom = _border(border) + return image.crop((left, top, image.size[0] - right, image.size[1] - bottom)) + + +def scale(image, factor, resample=Image.BICUBIC): + """ + Returns a rescaled image by a specific factor given in parameter. + A factor greater than 1 expands the image, between 0 and 1 contracts the + image. + + :param image: The image to rescale. 
+ :param factor: The expansion factor, as a float. + :param resample: What resampling method to use. Default is + :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`. + :returns: An :py:class:`~PIL.Image.Image` object. + """ + if factor == 1: + return image.copy() + elif factor <= 0: + raise ValueError("the factor must be greater than 0") + else: + size = (round(factor * image.width), round(factor * image.height)) + return image.resize(size, resample) + + +def deform(image, deformer, resample=Image.BILINEAR): + """ + Deform the image. + + :param image: The image to deform. + :param deformer: A deformer object. Any object that implements a + **getmesh** method can be used. + :param resample: An optional resampling filter. Same values possible as + in the PIL.Image.transform function. + :return: An image. + """ + return image.transform(image.size, Image.MESH, deformer.getmesh(image), resample) + + +def equalize(image, mask=None): + """ + Equalize the image histogram. This function applies a non-linear + mapping to the input image, in order to create a uniform + distribution of grayscale values in the output image. + + :param image: The image to equalize. + :param mask: An optional mask. If given, only the pixels selected by + the mask are included in the analysis. + :return: An image. + """ + if image.mode == "P": + image = image.convert("RGB") + h = image.histogram(mask) + lut = [] + for b in range(0, len(h), 256): + histo = [_f for _f in h[b : b + 256] if _f] + if len(histo) <= 1: + lut.extend(list(range(256))) + else: + step = (functools.reduce(operator.add, histo) - histo[-1]) // 255 + if not step: + lut.extend(list(range(256))) + else: + n = step // 2 + for i in range(256): + lut.append(n // step) + n = n + h[i + b] + return _lut(image, lut) + + +def expand(image, border=0, fill=0): + """ + Add border to the image + + :param image: The image to expand. + :param border: Border width, in pixels. + :param fill: Pixel fill value (a color value). 
Default is 0 (black). + :return: An image. + """ + left, top, right, bottom = _border(border) + width = left + image.size[0] + right + height = top + image.size[1] + bottom + out = Image.new(image.mode, (width, height), _color(fill, image.mode)) + out.paste(image, (left, top)) + return out + + +def fit(image, size, method=Image.BICUBIC, bleed=0.0, centering=(0.5, 0.5)): + """ + Returns a sized and cropped version of the image, cropped to the + requested aspect ratio and size. + + This function was contributed by Kevin Cazabon. + + :param image: The image to size and crop. + :param size: The requested output size in pixels, given as a + (width, height) tuple. + :param method: What resampling method to use. Default is + :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`. + :param bleed: Remove a border around the outside of the image from all + four edges. The value is a decimal percentage (use 0.01 for + one percent). The default value is 0 (no border). + Cannot be greater than or equal to 0.5. + :param centering: Control the cropping position. Use (0.5, 0.5) for + center cropping (e.g. if cropping the width, take 50% off + of the left side, and therefore 50% off the right side). + (0.0, 0.0) will crop from the top left corner (i.e. if + cropping the width, take all of the crop off of the right + side, and if cropping the height, take all of it off the + bottom). (1.0, 0.0) will crop from the bottom left + corner, etc. (i.e. if cropping the width, take all of the + crop off the left side, and if cropping the height take + none from the top, and therefore all off the bottom). + :return: An image. 
+ """ + + # by Kevin Cazabon, Feb 17/2000 + # kevin@cazabon.com + # http://www.cazabon.com + + # ensure centering is mutable + centering = list(centering) + + if not 0.0 <= centering[0] <= 1.0: + centering[0] = 0.5 + if not 0.0 <= centering[1] <= 1.0: + centering[1] = 0.5 + + if not 0.0 <= bleed < 0.5: + bleed = 0.0 + + # calculate the area to use for resizing and cropping, subtracting + # the 'bleed' around the edges + + # number of pixels to trim off on Top and Bottom, Left and Right + bleed_pixels = (bleed * image.size[0], bleed * image.size[1]) + + live_size = ( + image.size[0] - bleed_pixels[0] * 2, + image.size[1] - bleed_pixels[1] * 2, + ) + + # calculate the aspect ratio of the live_size + live_size_ratio = live_size[0] / live_size[1] + + # calculate the aspect ratio of the output image + output_ratio = size[0] / size[1] + + # figure out if the sides or top/bottom will be cropped off + if live_size_ratio == output_ratio: + # live_size is already the needed ratio + crop_width = live_size[0] + crop_height = live_size[1] + elif live_size_ratio >= output_ratio: + # live_size is wider than what's needed, crop the sides + crop_width = output_ratio * live_size[1] + crop_height = live_size[1] + else: + # live_size is taller than what's needed, crop the top and bottom + crop_width = live_size[0] + crop_height = live_size[0] / output_ratio + + # make the crop + crop_left = bleed_pixels[0] + (live_size[0] - crop_width) * centering[0] + crop_top = bleed_pixels[1] + (live_size[1] - crop_height) * centering[1] + + crop = (crop_left, crop_top, crop_left + crop_width, crop_top + crop_height) + + # resize the image and return it + return image.resize(size, method, box=crop) + + +def flip(image): + """ + Flip the image vertically (top to bottom). + + :param image: The image to flip. + :return: An image. + """ + return image.transpose(Image.FLIP_TOP_BOTTOM) + + +def grayscale(image): + """ + Convert the image to grayscale. + + :param image: The image to convert. 
+ :return: An image. + """ + return image.convert("L") + + +def invert(image): + """ + Invert (negate) the image. + + :param image: The image to invert. + :return: An image. + """ + lut = [] + for i in range(256): + lut.append(255 - i) + return _lut(image, lut) + + +def mirror(image): + """ + Flip image horizontally (left to right). + + :param image: The image to mirror. + :return: An image. + """ + return image.transpose(Image.FLIP_LEFT_RIGHT) + + +def posterize(image, bits): + """ + Reduce the number of bits for each color channel. + + :param image: The image to posterize. + :param bits: The number of bits to keep for each channel (1-8). + :return: An image. + """ + lut = [] + mask = ~(2 ** (8 - bits) - 1) + for i in range(256): + lut.append(i & mask) + return _lut(image, lut) + + +def solarize(image, threshold=128): + """ + Invert all pixel values above a threshold. + + :param image: The image to solarize. + :param threshold: All pixels above this greyscale level are inverted. + :return: An image. + """ + lut = [] + for i in range(256): + if i < threshold: + lut.append(i) + else: + lut.append(255 - i) + return _lut(image, lut) + + +def exif_transpose(image): + """ + If an image has an EXIF Orientation tag, return a new image that is + transposed accordingly. Otherwise, return a copy of the image. + + :param image: The image to transpose. + :return: An image. 
+ """ + exif = image.getexif() + orientation = exif.get(0x0112) + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + if method is not None: + transposed_image = image.transpose(method) + del exif[0x0112] + transposed_image.info["exif"] = exif.tobytes() + return transposed_image + return image.copy() diff --git a/venv/Lib/site-packages/PIL/ImagePalette.py b/venv/Lib/site-packages/PIL/ImagePalette.py new file mode 100644 index 000000000..5dba6176f --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImagePalette.py @@ -0,0 +1,221 @@ +# +# The Python Imaging Library. +# $Id$ +# +# image palette object +# +# History: +# 1996-03-11 fl Rewritten. +# 1997-01-03 fl Up and running. +# 1997-08-23 fl Added load hack +# 2001-04-16 fl Fixed randint shadow bug in random() +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import array + +from . import GimpGradientFile, GimpPaletteFile, ImageColor, PaletteFile + + +class ImagePalette: + """ + Color palette for palette mapped images + + :param mode: The mode to use for the Palette. See: + :ref:`concept-modes`. Defaults to "RGB" + :param palette: An optional palette. If given, it must be a bytearray, + an array or a list of ints between 0-255 and of length ``size`` + times the number of colors in ``mode``. The list must be aligned + by channel (All R values must be contiguous in the list before G + and B values.) Defaults to 0 through 255 per channel. + :param size: An optional palette size. If given, it cannot be equal to + or greater than 256. Defaults to 0. 
+ """ + + def __init__(self, mode="RGB", palette=None, size=0): + self.mode = mode + self.rawmode = None # if set, palette contains raw data + self.palette = palette or bytearray(range(256)) * len(self.mode) + self.colors = {} + self.dirty = None + if (size == 0 and len(self.mode) * 256 != len(self.palette)) or ( + size != 0 and size != len(self.palette) + ): + raise ValueError("wrong palette size") + + def copy(self): + new = ImagePalette() + + new.mode = self.mode + new.rawmode = self.rawmode + if self.palette is not None: + new.palette = self.palette[:] + new.colors = self.colors.copy() + new.dirty = self.dirty + + return new + + def getdata(self): + """ + Get palette contents in format suitable for the low-level + ``im.putpalette`` primitive. + + .. warning:: This method is experimental. + """ + if self.rawmode: + return self.rawmode, self.palette + return self.mode + ";L", self.tobytes() + + def tobytes(self): + """Convert palette to bytes. + + .. warning:: This method is experimental. + """ + if self.rawmode: + raise ValueError("palette contains raw palette data") + if isinstance(self.palette, bytes): + return self.palette + arr = array.array("B", self.palette) + if hasattr(arr, "tobytes"): + return arr.tobytes() + return arr.tostring() + + # Declare tostring as an alias for tobytes + tostring = tobytes + + def getcolor(self, color): + """Given an rgb tuple, allocate palette entry. + + .. warning:: This method is experimental. 
+ """ + if self.rawmode: + raise ValueError("palette contains raw palette data") + if isinstance(color, tuple): + try: + return self.colors[color] + except KeyError as e: + # allocate new color slot + if isinstance(self.palette, bytes): + self.palette = bytearray(self.palette) + index = len(self.colors) + if index >= 256: + raise ValueError("cannot allocate more than 256 colors") from e + self.colors[color] = index + self.palette[index] = color[0] + self.palette[index + 256] = color[1] + self.palette[index + 512] = color[2] + self.dirty = 1 + return index + else: + raise ValueError("unknown color specifier: %r" % color) + + def save(self, fp): + """Save palette to text file. + + .. warning:: This method is experimental. + """ + if self.rawmode: + raise ValueError("palette contains raw palette data") + if isinstance(fp, str): + fp = open(fp, "w") + fp.write("# Palette\n") + fp.write("# Mode: %s\n" % self.mode) + for i in range(256): + fp.write("%d" % i) + for j in range(i * len(self.mode), (i + 1) * len(self.mode)): + try: + fp.write(" %d" % self.palette[j]) + except IndexError: + fp.write(" 0") + fp.write("\n") + fp.close() + + +# -------------------------------------------------------------------- +# Internal + + +def raw(rawmode, data): + palette = ImagePalette() + palette.rawmode = rawmode + palette.palette = data + palette.dirty = 1 + return palette + + +# -------------------------------------------------------------------- +# Factories + + +def make_linear_lut(black, white): + lut = [] + if black == 0: + for i in range(256): + lut.append(white * i // 255) + else: + raise NotImplementedError # FIXME + return lut + + +def make_gamma_lut(exp): + lut = [] + for i in range(256): + lut.append(int(((i / 255.0) ** exp) * 255.0 + 0.5)) + return lut + + +def negative(mode="RGB"): + palette = list(range(256)) + palette.reverse() + return ImagePalette(mode, palette * len(mode)) + + +def random(mode="RGB"): + from random import randint + + palette = [] + for i in range(256 
* len(mode)): + palette.append(randint(0, 255)) + return ImagePalette(mode, palette) + + +def sepia(white="#fff0c0"): + r, g, b = ImageColor.getrgb(white) + r = make_linear_lut(0, r) + g = make_linear_lut(0, g) + b = make_linear_lut(0, b) + return ImagePalette("RGB", r + g + b) + + +def wedge(mode="RGB"): + return ImagePalette(mode, list(range(256)) * len(mode)) + + +def load(filename): + + # FIXME: supports GIMP gradients only + + with open(filename, "rb") as fp: + + for paletteHandler in [ + GimpPaletteFile.GimpPaletteFile, + GimpGradientFile.GimpGradientFile, + PaletteFile.PaletteFile, + ]: + try: + fp.seek(0) + lut = paletteHandler(fp).getpalette() + if lut: + break + except (SyntaxError, ValueError): + # import traceback + # traceback.print_exc() + pass + else: + raise OSError("cannot load palette") + + return lut # data, rawmode diff --git a/venv/Lib/site-packages/PIL/ImagePath.py b/venv/Lib/site-packages/PIL/ImagePath.py new file mode 100644 index 000000000..3d3538c97 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImagePath.py @@ -0,0 +1,19 @@ +# +# The Python Imaging Library +# $Id$ +# +# path interface +# +# History: +# 1996-11-04 fl Created +# 2002-04-14 fl Added documentation stub class +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + +from . import Image + +Path = Image.core.path diff --git a/venv/Lib/site-packages/PIL/ImageQt.py b/venv/Lib/site-packages/PIL/ImageQt.py new file mode 100644 index 000000000..a15f4ab5e --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageQt.py @@ -0,0 +1,195 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a simple Qt image interface. 
+# +# history: +# 2006-06-03 fl: created +# 2006-06-04 fl: inherit from QImage instead of wrapping it +# 2006-06-05 fl: removed toimage helper; move string support to ImageQt +# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com) +# +# Copyright (c) 2006 by Secret Labs AB +# Copyright (c) 2006 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import sys +from io import BytesIO + +from . import Image +from ._util import isPath + +qt_versions = [["5", "PyQt5"], ["side2", "PySide2"]] + +# If a version has already been imported, attempt it first +qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules, reverse=True) +for qt_version, qt_module in qt_versions: + try: + if qt_module == "PyQt5": + from PyQt5.QtGui import QImage, qRgba, QPixmap + from PyQt5.QtCore import QBuffer, QIODevice + elif qt_module == "PySide2": + from PySide2.QtGui import QImage, qRgba, QPixmap + from PySide2.QtCore import QBuffer, QIODevice + except (ImportError, RuntimeError): + continue + qt_is_installed = True + break +else: + qt_is_installed = False + qt_version = None + + +def rgb(r, g, b, a=255): + """(Internal) Turns an RGB color into a Qt compatible color integer.""" + # use qRgb to pack the colors, and then turn the resulting long + # into a negative integer with the same bitpattern. 
+ return qRgba(r, g, b, a) & 0xFFFFFFFF + + +def fromqimage(im): + """ + :param im: A PIL Image object, or a file name + (given either as Python string or a PyQt string object) + """ + buffer = QBuffer() + buffer.open(QIODevice.ReadWrite) + # preserve alpha channel with png + # otherwise ppm is more friendly with Image.open + if im.hasAlphaChannel(): + im.save(buffer, "png") + else: + im.save(buffer, "ppm") + + b = BytesIO() + b.write(buffer.data()) + buffer.close() + b.seek(0) + + return Image.open(b) + + +def fromqpixmap(im): + return fromqimage(im) + # buffer = QBuffer() + # buffer.open(QIODevice.ReadWrite) + # # im.save(buffer) + # # What if png doesn't support some image features like animation? + # im.save(buffer, 'ppm') + # bytes_io = BytesIO() + # bytes_io.write(buffer.data()) + # buffer.close() + # bytes_io.seek(0) + # return Image.open(bytes_io) + + +def align8to32(bytes, width, mode): + """ + converts each scanline of data from 8 bit to 32 bit aligned + """ + + bits_per_pixel = {"1": 1, "L": 8, "P": 8}[mode] + + # calculate bytes per line and the extra padding if needed + bits_per_line = bits_per_pixel * width + full_bytes_per_line, remaining_bits_per_line = divmod(bits_per_line, 8) + bytes_per_line = full_bytes_per_line + (1 if remaining_bits_per_line else 0) + + extra_padding = -bytes_per_line % 4 + + # already 32 bit aligned by luck + if not extra_padding: + return bytes + + new_data = [] + for i in range(len(bytes) // bytes_per_line): + new_data.append( + bytes[i * bytes_per_line : (i + 1) * bytes_per_line] + + b"\x00" * extra_padding + ) + + return b"".join(new_data) + + +def _toqclass_helper(im): + data = None + colortable = None + + # handle filename, if given instead of image name + if hasattr(im, "toUtf8"): + # FIXME - is this really the best way to do this? 
+ im = str(im.toUtf8(), "utf-8") + if isPath(im): + im = Image.open(im) + + if im.mode == "1": + format = QImage.Format_Mono + elif im.mode == "L": + format = QImage.Format_Indexed8 + colortable = [] + for i in range(256): + colortable.append(rgb(i, i, i)) + elif im.mode == "P": + format = QImage.Format_Indexed8 + colortable = [] + palette = im.getpalette() + for i in range(0, len(palette), 3): + colortable.append(rgb(*palette[i : i + 3])) + elif im.mode == "RGB": + data = im.tobytes("raw", "BGRX") + format = QImage.Format_RGB32 + elif im.mode == "RGBA": + data = im.tobytes("raw", "BGRA") + format = QImage.Format_ARGB32 + else: + raise ValueError("unsupported image mode %r" % im.mode) + + __data = data or align8to32(im.tobytes(), im.size[0], im.mode) + return {"data": __data, "im": im, "format": format, "colortable": colortable} + + +if qt_is_installed: + + class ImageQt(QImage): + def __init__(self, im): + """ + An PIL image wrapper for Qt. This is a subclass of PyQt's QImage + class. + + :param im: A PIL Image object, or a file name (given either as + Python string or a PyQt string object). + """ + im_data = _toqclass_helper(im) + # must keep a reference, or Qt will crash! + # All QImage constructors that take data operate on an existing + # buffer, so this buffer has to hang on for the life of the image. + # Fixes https://github.com/python-pillow/Pillow/issues/1370 + self.__data = im_data["data"] + super().__init__( + self.__data, + im_data["im"].size[0], + im_data["im"].size[1], + im_data["format"], + ) + if im_data["colortable"]: + self.setColorTable(im_data["colortable"]) + + +def toqimage(im): + return ImageQt(im) + + +def toqpixmap(im): + # # This doesn't work. For now using a dumb approach. 
+ # im_data = _toqclass_helper(im) + # result = QPixmap(im_data['im'].size[0], im_data['im'].size[1]) + # result.loadFromData(im_data['data']) + # Fix some strange bug that causes + if im.mode == "RGB": + im = im.convert("RGBA") + + qimage = toqimage(im) + return QPixmap.fromImage(qimage) diff --git a/venv/Lib/site-packages/PIL/ImageSequence.py b/venv/Lib/site-packages/PIL/ImageSequence.py new file mode 100644 index 000000000..9df910a43 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageSequence.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library. +# $Id$ +# +# sequence support classes +# +# history: +# 1997-02-20 fl Created +# +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1997 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +## + + +class Iterator: + """ + This class implements an iterator object that can be used to loop + over an image sequence. + + You can use the ``[]`` operator to access elements by index. This operator + will raise an :py:exc:`IndexError` if you try to access a nonexistent + frame. + + :param im: An image object. + """ + + def __init__(self, im): + if not hasattr(im, "seek"): + raise AttributeError("im must have seek method") + self.im = im + self.position = getattr(self.im, "_min_frame", 0) + + def __getitem__(self, ix): + try: + self.im.seek(ix) + return self.im + except EOFError as e: + raise IndexError from e # end of sequence + + def __iter__(self): + return self + + def __next__(self): + try: + self.im.seek(self.position) + self.position += 1 + return self.im + except EOFError as e: + raise StopIteration from e + + +def all_frames(im, func=None): + """ + Applies a given function to all frames in an image or a list of images. + The frames are returned as a list of separate images. + + :param im: An image, or a list of images. + :param func: The function to apply to all of the image frames. + :returns: A list of images. 
+ """ + if not isinstance(im, list): + im = [im] + + ims = [] + for imSequence in im: + current = imSequence.tell() + + ims += [im_frame.copy() for im_frame in Iterator(imSequence)] + + imSequence.seek(current) + return [func(im) for im in ims] if func else ims diff --git a/venv/Lib/site-packages/PIL/ImageShow.py b/venv/Lib/site-packages/PIL/ImageShow.py new file mode 100644 index 000000000..3ffb4d632 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageShow.py @@ -0,0 +1,238 @@ +# +# The Python Imaging Library. +# $Id$ +# +# im.show() drivers +# +# History: +# 2008-04-06 fl Created +# +# Copyright (c) Secret Labs AB 2008. +# +# See the README file for information on usage and redistribution. +# +import os +import shutil +import subprocess +import sys +import tempfile +from shlex import quote + +from PIL import Image + +_viewers = [] + + +def register(viewer, order=1): + """ + The :py:func:`register` function is used to register additional viewers. + + :param viewer: The viewer to be registered. + :param order: + Zero or a negative integer to prepend this viewer to the list, + a positive integer to append it. + """ + try: + if issubclass(viewer, Viewer): + viewer = viewer() + except TypeError: + pass # raised if viewer wasn't a class + if order > 0: + _viewers.append(viewer) + else: + _viewers.insert(0, viewer) + + +def show(image, title=None, **options): + r""" + Display a given image. + + :param image: An image object. + :param title: Optional title. Not all viewers can display the title. + :param \**options: Additional viewer options. + :returns: ``True`` if a suitable viewer was found, ``False`` otherwise. + """ + for viewer in _viewers: + if viewer.show(image, title=title, **options): + return 1 + return 0 + + +class Viewer: + """Base class for viewers.""" + + # main api + + def show(self, image, **options): + """ + The main function for displaying an image. + Converts the given image to the target format and displays it. 
+ """ + + # save temporary image to disk + if not ( + image.mode in ("1", "RGBA") + or (self.format == "PNG" and image.mode in ("I;16", "LA")) + ): + base = Image.getmodebase(image.mode) + if image.mode != base: + image = image.convert(base) + + return self.show_image(image, **options) + + # hook methods + + format = None + """The format to convert the image into.""" + options = {} + """Additional options used to convert the image.""" + + def get_format(self, image): + """Return format name, or ``None`` to save as PGM/PPM.""" + return self.format + + def get_command(self, file, **options): + """ + Returns the command used to display the file. + Not implemented in the base class. + """ + raise NotImplementedError + + def save_image(self, image): + """Save to temporary file and return filename.""" + return image._dump(format=self.get_format(image), **self.options) + + def show_image(self, image, **options): + """Display the given image.""" + return self.show_file(self.save_image(image), **options) + + def show_file(self, file, **options): + """Display the given file.""" + os.system(self.get_command(file, **options)) + return 1 + + +# -------------------------------------------------------------------- + + +class WindowsViewer(Viewer): + """The default viewer on Windows is the default system application for PNG files.""" + + format = "PNG" + options = {"compress_level": 1} + + def get_command(self, file, **options): + return ( + 'start "Pillow" /WAIT "%s" ' + "&& ping -n 2 127.0.0.1 >NUL " + '&& del /f "%s"' % (file, file) + ) + + +if sys.platform == "win32": + register(WindowsViewer) + + +class MacViewer(Viewer): + """The default viewer on MacOS using ``Preview.app``.""" + + format = "PNG" + options = {"compress_level": 1} + + def get_command(self, file, **options): + # on darwin open returns immediately resulting in the temp + # file removal while app is opening + command = "open -a Preview.app" + command = "({} {}; sleep 20; rm -f {})&".format( + command, 
quote(file), quote(file) + ) + return command + + def show_file(self, file, **options): + """Display given file""" + fd, path = tempfile.mkstemp() + with os.fdopen(fd, "w") as f: + f.write(file) + with open(path, "r") as f: + subprocess.Popen( + ["im=$(cat); open -a Preview.app $im; sleep 20; rm -f $im"], + shell=True, + stdin=f, + ) + os.remove(path) + return 1 + + +if sys.platform == "darwin": + register(MacViewer) + + +class UnixViewer(Viewer): + format = "PNG" + options = {"compress_level": 1} + + def get_command(self, file, **options): + command = self.get_command_ex(file, **options)[0] + return "({} {}; rm -f {})&".format(command, quote(file), quote(file)) + + def show_file(self, file, **options): + """Display given file""" + fd, path = tempfile.mkstemp() + with os.fdopen(fd, "w") as f: + f.write(file) + with open(path, "r") as f: + command = self.get_command_ex(file, **options)[0] + subprocess.Popen( + ["im=$(cat);" + command + " $im; rm -f $im"], shell=True, stdin=f + ) + os.remove(path) + return 1 + + +class DisplayViewer(UnixViewer): + """The ImageMagick ``display`` command.""" + + def get_command_ex(self, file, **options): + command = executable = "display" + return command, executable + + +class EogViewer(UnixViewer): + """The GNOME Image Viewer ``eog`` command.""" + + def get_command_ex(self, file, **options): + command = executable = "eog" + return command, executable + + +class XVViewer(UnixViewer): + """ + The X Viewer ``xv`` command. + This viewer supports the ``title`` parameter. + """ + + def get_command_ex(self, file, title=None, **options): + # note: xv is pretty outdated. most modern systems have + # imagemagick's display command instead. 
+ command = executable = "xv" + if title: + command += " -name %s" % quote(title) + return command, executable + + +if sys.platform not in ("win32", "darwin"): # unixoids + if shutil.which("display"): + register(DisplayViewer) + if shutil.which("eog"): + register(EogViewer) + if shutil.which("xv"): + register(XVViewer) + +if __name__ == "__main__": + + if len(sys.argv) < 2: + print("Syntax: python ImageShow.py imagefile [title]") + sys.exit() + + with Image.open(sys.argv[1]) as im: + print(show(im, *sys.argv[2:])) diff --git a/venv/Lib/site-packages/PIL/ImageStat.py b/venv/Lib/site-packages/PIL/ImageStat.py new file mode 100644 index 000000000..50bafc972 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageStat.py @@ -0,0 +1,147 @@ +# +# The Python Imaging Library. +# $Id$ +# +# global image statistics +# +# History: +# 1996-04-05 fl Created +# 1997-05-21 fl Added mask; added rms, var, stddev attributes +# 1997-08-05 fl Added median +# 1998-07-05 hk Fixed integer overflow error +# +# Notes: +# This class shows how to implement delayed evaluation of attributes. +# To get a certain value, simply access the corresponding attribute. +# The __getattr__ dispatcher takes care of the rest. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996-97. +# +# See the README file for information on usage and redistribution. 
+# + +import functools +import math +import operator + + +class Stat: + def __init__(self, image_or_list, mask=None): + try: + if mask: + self.h = image_or_list.histogram(mask) + else: + self.h = image_or_list.histogram() + except AttributeError: + self.h = image_or_list # assume it to be a histogram list + if not isinstance(self.h, list): + raise TypeError("first argument must be image or list") + self.bands = list(range(len(self.h) // 256)) + + def __getattr__(self, id): + """Calculate missing attribute""" + if id[:4] == "_get": + raise AttributeError(id) + # calculate missing attribute + v = getattr(self, "_get" + id)() + setattr(self, id, v) + return v + + def _getextrema(self): + """Get min/max values for each band in the image""" + + def minmax(histogram): + n = 255 + x = 0 + for i in range(256): + if histogram[i]: + n = min(n, i) + x = max(x, i) + return n, x # returns (255, 0) if there's no data in the histogram + + v = [] + for i in range(0, len(self.h), 256): + v.append(minmax(self.h[i:])) + return v + + def _getcount(self): + """Get total number of pixels in each layer""" + + v = [] + for i in range(0, len(self.h), 256): + v.append(functools.reduce(operator.add, self.h[i : i + 256])) + return v + + def _getsum(self): + """Get sum of all pixels in each layer""" + + v = [] + for i in range(0, len(self.h), 256): + layerSum = 0.0 + for j in range(256): + layerSum += j * self.h[i + j] + v.append(layerSum) + return v + + def _getsum2(self): + """Get squared sum of all pixels in each layer""" + + v = [] + for i in range(0, len(self.h), 256): + sum2 = 0.0 + for j in range(256): + sum2 += (j ** 2) * float(self.h[i + j]) + v.append(sum2) + return v + + def _getmean(self): + """Get average pixel level for each layer""" + + v = [] + for i in self.bands: + v.append(self.sum[i] / self.count[i]) + return v + + def _getmedian(self): + """Get median pixel level for each layer""" + + v = [] + for i in self.bands: + s = 0 + half = self.count[i] // 2 + b = i * 256 + for j 
in range(256): + s = s + self.h[b + j] + if s > half: + break + v.append(j) + return v + + def _getrms(self): + """Get RMS for each layer""" + + v = [] + for i in self.bands: + v.append(math.sqrt(self.sum2[i] / self.count[i])) + return v + + def _getvar(self): + """Get variance for each layer""" + + v = [] + for i in self.bands: + n = self.count[i] + v.append((self.sum2[i] - (self.sum[i] ** 2.0) / n) / n) + return v + + def _getstddev(self): + """Get standard deviation for each layer""" + + v = [] + for i in self.bands: + v.append(math.sqrt(self.var[i])) + return v + + +Global = Stat # compatibility diff --git a/venv/Lib/site-packages/PIL/ImageTk.py b/venv/Lib/site-packages/PIL/ImageTk.py new file mode 100644 index 000000000..ee707cffb --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageTk.py @@ -0,0 +1,300 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a Tk display interface +# +# History: +# 96-04-08 fl Created +# 96-09-06 fl Added getimage method +# 96-11-01 fl Rewritten, removed image attribute and crop method +# 97-05-09 fl Use PyImagingPaste method instead of image type +# 97-05-12 fl Minor tweaks to match the IFUNC95 interface +# 97-05-17 fl Support the "pilbitmap" booster patch +# 97-06-05 fl Added file= and data= argument to image constructors +# 98-03-09 fl Added width and height methods to Image classes +# 98-07-02 fl Use default mode for "P" images without palette attribute +# 98-07-02 fl Explicitly destroy Tkinter image objects +# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch) +# 99-07-26 fl Automatically hook into Tkinter (if possible) +# 99-08-15 fl Hook uses _imagingtk instead of _imaging +# +# Copyright (c) 1997-1999 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import tkinter +from io import BytesIO + +from . 
import Image + +# -------------------------------------------------------------------- +# Check for Tkinter interface hooks + +_pilbitmap_ok = None + + +def _pilbitmap_check(): + global _pilbitmap_ok + if _pilbitmap_ok is None: + try: + im = Image.new("1", (1, 1)) + tkinter.BitmapImage(data="PIL:%d" % im.im.id) + _pilbitmap_ok = 1 + except tkinter.TclError: + _pilbitmap_ok = 0 + return _pilbitmap_ok + + +def _get_image_from_kw(kw): + source = None + if "file" in kw: + source = kw.pop("file") + elif "data" in kw: + source = BytesIO(kw.pop("data")) + if source: + return Image.open(source) + + +# -------------------------------------------------------------------- +# PhotoImage + + +class PhotoImage: + """ + A Tkinter-compatible photo image. This can be used + everywhere Tkinter expects an image object. If the image is an RGBA + image, pixels having alpha 0 are treated as transparent. + + The constructor takes either a PIL image, or a mode and a size. + Alternatively, you can use the **file** or **data** options to initialize + the photo image object. + + :param image: Either a PIL image, or a mode string. If a mode string is + used, a size must also be given. + :param size: If the first argument is a mode string, this defines the size + of the image. + :keyword file: A filename to load the image from (using + ``Image.open(file)``). + :keyword data: An 8-bit string containing image data (as loaded from an + image file). 
+ """ + + def __init__(self, image=None, size=None, **kw): + + # Tk compatibility: file or data + if image is None: + image = _get_image_from_kw(kw) + + if hasattr(image, "mode") and hasattr(image, "size"): + # got an image instead of a mode + mode = image.mode + if mode == "P": + # palette mapped data + image.load() + try: + mode = image.palette.mode + except AttributeError: + mode = "RGB" # default + size = image.size + kw["width"], kw["height"] = size + else: + mode = image + image = None + + if mode not in ["1", "L", "RGB", "RGBA"]: + mode = Image.getmodebase(mode) + + self.__mode = mode + self.__size = size + self.__photo = tkinter.PhotoImage(**kw) + self.tk = self.__photo.tk + if image: + self.paste(image) + + def __del__(self): + name = self.__photo.name + self.__photo.name = None + try: + self.__photo.tk.call("image", "delete", name) + except Exception: + pass # ignore internal errors + + def __str__(self): + """ + Get the Tkinter photo image identifier. This method is automatically + called by Tkinter whenever a PhotoImage object is passed to a Tkinter + method. + + :return: A Tkinter photo image identifier (a string). + """ + return str(self.__photo) + + def width(self): + """ + Get the width of the image. + + :return: The width, in pixels. + """ + return self.__size[0] + + def height(self): + """ + Get the height of the image. + + :return: The height, in pixels. + """ + return self.__size[1] + + def paste(self, im, box=None): + """ + Paste a PIL image into the photo image. Note that this can + be very slow if the photo image is displayed. + + :param im: A PIL image. The size must match the target region. If the + mode does not match, the image is converted to the mode of + the bitmap image. + :param box: A 4-tuple defining the left, upper, right, and lower pixel + coordinate. See :ref:`coordinate-system`. If None is given + instead of a tuple, all of the image is assumed. 
+ """ + + # convert to blittable + im.load() + image = im.im + if image.isblock() and im.mode == self.__mode: + block = image + else: + block = image.new_block(self.__mode, im.size) + image.convert2(block, image) # convert directly between buffers + + tk = self.__photo.tk + + try: + tk.call("PyImagingPhoto", self.__photo, block.id) + except tkinter.TclError: + # activate Tkinter hook + try: + from . import _imagingtk + + try: + if hasattr(tk, "interp"): + # Required for PyPy, which always has CFFI installed + from cffi import FFI + + ffi = FFI() + + # PyPy is using an FFI CDATA element + # (Pdb) self.tk.interp + # + _imagingtk.tkinit(int(ffi.cast("uintptr_t", tk.interp)), 1) + else: + _imagingtk.tkinit(tk.interpaddr(), 1) + except AttributeError: + _imagingtk.tkinit(id(tk), 0) + tk.call("PyImagingPhoto", self.__photo, block.id) + except (ImportError, AttributeError, tkinter.TclError): + raise # configuration problem; cannot attach to Tkinter + + +# -------------------------------------------------------------------- +# BitmapImage + + +class BitmapImage: + """ + A Tkinter-compatible bitmap image. This can be used everywhere Tkinter + expects an image object. + + The given image must have mode "1". Pixels having value 0 are treated as + transparent. Options, if any, are passed on to Tkinter. The most commonly + used option is **foreground**, which is used to specify the color for the + non-transparent parts. See the Tkinter documentation for information on + how to specify colours. + + :param image: A PIL image. 
+ """ + + def __init__(self, image=None, **kw): + + # Tk compatibility: file or data + if image is None: + image = _get_image_from_kw(kw) + + self.__mode = image.mode + self.__size = image.size + + if _pilbitmap_check(): + # fast way (requires the pilbitmap booster patch) + image.load() + kw["data"] = "PIL:%d" % image.im.id + self.__im = image # must keep a reference + else: + # slow but safe way + kw["data"] = image.tobitmap() + self.__photo = tkinter.BitmapImage(**kw) + + def __del__(self): + name = self.__photo.name + self.__photo.name = None + try: + self.__photo.tk.call("image", "delete", name) + except Exception: + pass # ignore internal errors + + def width(self): + """ + Get the width of the image. + + :return: The width, in pixels. + """ + return self.__size[0] + + def height(self): + """ + Get the height of the image. + + :return: The height, in pixels. + """ + return self.__size[1] + + def __str__(self): + """ + Get the Tkinter bitmap image identifier. This method is automatically + called by Tkinter whenever a BitmapImage object is passed to a Tkinter + method. + + :return: A Tkinter bitmap image identifier (a string). 
+ """ + return str(self.__photo) + + +def getimage(photo): + """Copies the contents of a PhotoImage to a PIL image memory.""" + im = Image.new("RGBA", (photo.width(), photo.height())) + block = im.im + + photo.tk.call("PyImagingPhotoGet", photo, block.id) + + return im + + +def _show(image, title): + """Helper for the Image.show method.""" + + class UI(tkinter.Label): + def __init__(self, master, im): + if im.mode == "1": + self.image = BitmapImage(im, foreground="white", master=master) + else: + self.image = PhotoImage(im, master=master) + super().__init__(master, image=self.image, bg="black", bd=0) + + if not tkinter._default_root: + raise OSError("tkinter not initialized") + top = tkinter.Toplevel() + if title: + top.title(title) + UI(top, image).pack() diff --git a/venv/Lib/site-packages/PIL/ImageTransform.py b/venv/Lib/site-packages/PIL/ImageTransform.py new file mode 100644 index 000000000..77791ab72 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageTransform.py @@ -0,0 +1,102 @@ +# +# The Python Imaging Library. +# $Id$ +# +# transform wrappers +# +# History: +# 2002-04-08 fl Created +# +# Copyright (c) 2002 by Secret Labs AB +# Copyright (c) 2002 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +class Transform(Image.ImageTransformHandler): + def __init__(self, data): + self.data = data + + def getdata(self): + return self.method, self.data + + def transform(self, size, image, **options): + # can be overridden + method, data = self.getdata() + return image.transform(size, method, data, **options) + + +class AffineTransform(Transform): + """ + Define an affine image transform. + + This function takes a 6-tuple (a, b, c, d, e, f) which contain the first + two rows from an affine transform matrix. For each pixel (x, y) in the + output image, the new value is taken from a position (a x + b y + c, + d x + e y + f) in the input image, rounded to nearest pixel. 
+ + This function can be used to scale, translate, rotate, and shear the + original image. + + See :py:meth:`~PIL.Image.Image.transform` + + :param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows + from an affine transform matrix. + """ + + method = Image.AFFINE + + +class ExtentTransform(Transform): + """ + Define a transform to extract a subregion from an image. + + Maps a rectangle (defined by two corners) from the image to a rectangle of + the given size. The resulting image will contain data sampled from between + the corners, such that (x0, y0) in the input image will end up at (0,0) in + the output image, and (x1, y1) at size. + + This method can be used to crop, stretch, shrink, or mirror an arbitrary + rectangle in the current image. It is slightly slower than crop, but about + as fast as a corresponding resize operation. + + See :py:meth:`~PIL.Image.Image.transform` + + :param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the + input image's coordinate system. See :ref:`coordinate-system`. + """ + + method = Image.EXTENT + + +class QuadTransform(Transform): + """ + Define a quad image transform. + + Maps a quadrilateral (a region defined by four corners) from the image to a + rectangle of the given size. + + See :py:meth:`~PIL.Image.Image.transform` + + :param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contain the + upper left, lower left, lower right, and upper right corner of the + source quadrilateral. + """ + + method = Image.QUAD + + +class MeshTransform(Transform): + """ + Define a mesh image transform. A mesh transform consists of one or more + individual quad transforms. + + See :py:meth:`~PIL.Image.Image.transform` + + :param data: A list of (bbox, quad) tuples. 
+ """ + + method = Image.MESH diff --git a/venv/Lib/site-packages/PIL/ImageWin.py b/venv/Lib/site-packages/PIL/ImageWin.py new file mode 100644 index 000000000..afba61c32 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImageWin.py @@ -0,0 +1,230 @@ +# +# The Python Imaging Library. +# $Id$ +# +# a Windows DIB display interface +# +# History: +# 1996-05-20 fl Created +# 1996-09-20 fl Fixed subregion exposure +# 1997-09-21 fl Added draw primitive (for tzPrint) +# 2003-05-21 fl Added experimental Window/ImageWindow classes +# 2003-09-05 fl Added fromstring/tostring methods +# +# Copyright (c) Secret Labs AB 1997-2003. +# Copyright (c) Fredrik Lundh 1996-2003. +# +# See the README file for information on usage and redistribution. +# + +from . import Image + + +class HDC: + """ + Wraps an HDC integer. The resulting object can be passed to the + :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` + methods. + """ + + def __init__(self, dc): + self.dc = dc + + def __int__(self): + return self.dc + + +class HWND: + """ + Wraps an HWND integer. The resulting object can be passed to the + :py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose` + methods, instead of a DC. + """ + + def __init__(self, wnd): + self.wnd = wnd + + def __int__(self): + return self.wnd + + +class Dib: + """ + A Windows bitmap with the given mode and size. The mode can be one of "1", + "L", "P", or "RGB". + + If the display requires a palette, this constructor creates a suitable + palette and associates it with the image. For an "L" image, 128 greylevels + are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together + with 20 greylevels. + + To make sure that palettes work properly under Windows, you must call the + **palette** method upon certain events from Windows. + + :param image: Either a PIL image, or a mode string. If a mode string is + used, a size must also be given. The mode can be one of "1", + "L", "P", or "RGB". 
+ :param size: If the first argument is a mode string, this + defines the size of the image. + """ + + def __init__(self, image, size=None): + if hasattr(image, "mode") and hasattr(image, "size"): + mode = image.mode + size = image.size + else: + mode = image + image = None + if mode not in ["1", "L", "P", "RGB"]: + mode = Image.getmodebase(mode) + self.image = Image.core.display(mode, size) + self.mode = mode + self.size = size + if image: + self.paste(image) + + def expose(self, handle): + """ + Copy the bitmap contents to a device context. + + :param handle: Device context (HDC), cast to a Python integer, or an + HDC or HWND instance. In PythonWin, you can use the + :py:meth:`CDC.GetHandleAttrib` to get a suitable handle. + """ + if isinstance(handle, HWND): + dc = self.image.getdc(handle) + try: + result = self.image.expose(dc) + finally: + self.image.releasedc(handle, dc) + else: + result = self.image.expose(handle) + return result + + def draw(self, handle, dst, src=None): + """ + Same as expose, but allows you to specify where to draw the image, and + what part of it to draw. + + The destination and source areas are given as 4-tuple rectangles. If + the source is omitted, the entire image is copied. If the source and + the destination have different sizes, the image is resized as + necessary. + """ + if not src: + src = (0, 0) + self.size + if isinstance(handle, HWND): + dc = self.image.getdc(handle) + try: + result = self.image.draw(dc, dst, src) + finally: + self.image.releasedc(handle, dc) + else: + result = self.image.draw(handle, dst, src) + return result + + def query_palette(self, handle): + """ + Installs the palette associated with the image in the given device + context. + + This method should be called upon **QUERYNEWPALETTE** and + **PALETTECHANGED** events from Windows. If this method returns a + non-zero value, one or more display palette entries were changed, and + the image should be redrawn. 
+ + :param handle: Device context (HDC), cast to a Python integer, or an + HDC or HWND instance. + :return: A true value if one or more entries were changed (this + indicates that the image should be redrawn). + """ + if isinstance(handle, HWND): + handle = self.image.getdc(handle) + try: + result = self.image.query_palette(handle) + finally: + self.image.releasedc(handle, handle) + else: + result = self.image.query_palette(handle) + return result + + def paste(self, im, box=None): + """ + Paste a PIL image into the bitmap image. + + :param im: A PIL image. The size must match the target region. + If the mode does not match, the image is converted to the + mode of the bitmap image. + :param box: A 4-tuple defining the left, upper, right, and + lower pixel coordinate. See :ref:`coordinate-system`. If + None is given instead of a tuple, all of the image is + assumed. + """ + im.load() + if self.mode != im.mode: + im = im.convert(self.mode) + if box: + self.image.paste(im.im, box) + else: + self.image.paste(im.im) + + def frombytes(self, buffer): + """ + Load display memory contents from byte data. + + :param buffer: A buffer containing display data (usually + data returned from :py:func:`~PIL.ImageWin.Dib.tobytes`) + """ + return self.image.frombytes(buffer) + + def tobytes(self): + """ + Copy display memory contents to bytes object. + + :return: A bytes object containing display data. 
+ """ + return self.image.tobytes() + + +class Window: + """Create a Window with the given title size.""" + + def __init__(self, title="PIL", width=None, height=None): + self.hwnd = Image.core.createwindow( + title, self.__dispatcher, width or 0, height or 0 + ) + + def __dispatcher(self, action, *args): + return getattr(self, "ui_handle_" + action)(*args) + + def ui_handle_clear(self, dc, x0, y0, x1, y1): + pass + + def ui_handle_damage(self, x0, y0, x1, y1): + pass + + def ui_handle_destroy(self): + pass + + def ui_handle_repair(self, dc, x0, y0, x1, y1): + pass + + def ui_handle_resize(self, width, height): + pass + + def mainloop(self): + Image.core.eventloop() + + +class ImageWindow(Window): + """Create an image window which displays the given image.""" + + def __init__(self, image, title="PIL"): + if not isinstance(image, Dib): + image = Dib(image) + self.image = image + width, height = image.size + super().__init__(title, width=width, height=height) + + def ui_handle_repair(self, dc, x0, y0, x1, y1): + self.image.draw(dc, (x0, y0, x1, y1)) diff --git a/venv/Lib/site-packages/PIL/ImtImagePlugin.py b/venv/Lib/site-packages/PIL/ImtImagePlugin.py new file mode 100644 index 000000000..21ffd7475 --- /dev/null +++ b/venv/Lib/site-packages/PIL/ImtImagePlugin.py @@ -0,0 +1,93 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IM Tools support for PIL +# +# history: +# 1996-05-27 fl Created (read 8-bit images only) +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2) +# +# Copyright (c) Secret Labs AB 1997-2001. +# Copyright (c) Fredrik Lundh 1996-2001. +# +# See the README file for information on usage and redistribution. +# + + +import re + +from . import Image, ImageFile + +# +# -------------------------------------------------------------------- + +field = re.compile(br"([a-z]*) ([^ \r\n]*)") + + +## +# Image plugin for IM Tools images. 
+ + +class ImtImageFile(ImageFile.ImageFile): + + format = "IMT" + format_description = "IM Tools" + + def _open(self): + + # Quick rejection: if there's not a LF among the first + # 100 bytes, this is (probably) not a text header. + + if b"\n" not in self.fp.read(100): + raise SyntaxError("not an IM file") + self.fp.seek(0) + + xsize = ysize = 0 + + while True: + + s = self.fp.read(1) + if not s: + break + + if s == b"\x0C": + + # image data begins + self.tile = [ + ("raw", (0, 0) + self.size, self.fp.tell(), (self.mode, 0, 1)) + ] + + break + + else: + + # read key/value pair + # FIXME: dangerous, may read whole file + s = s + self.fp.readline() + if len(s) == 1 or len(s) > 100: + break + if s[0] == ord(b"*"): + continue # comment + + m = field.match(s) + if not m: + break + k, v = m.group(1, 2) + if k == "width": + xsize = int(v) + self._size = xsize, ysize + elif k == "height": + ysize = int(v) + self._size = xsize, ysize + elif k == "pixel" and v == "n8": + self.mode = "L" + + +# +# -------------------------------------------------------------------- + +Image.register_open(ImtImageFile.format, ImtImageFile) + +# +# no extension registered (".im" is simply too common) diff --git a/venv/Lib/site-packages/PIL/IptcImagePlugin.py b/venv/Lib/site-packages/PIL/IptcImagePlugin.py new file mode 100644 index 000000000..75e7b5a2a --- /dev/null +++ b/venv/Lib/site-packages/PIL/IptcImagePlugin.py @@ -0,0 +1,226 @@ +# +# The Python Imaging Library. +# $Id$ +# +# IPTC/NAA file handling +# +# history: +# 1995-10-01 fl Created +# 1998-03-09 fl Cleaned up and added to PIL +# 2002-06-18 fl Added getiptcinfo helper +# +# Copyright (c) Secret Labs AB 1997-2002. +# Copyright (c) Fredrik Lundh 1995. +# +# See the README file for information on usage and redistribution. +# +import os +import tempfile + +from . 
import Image, ImageFile +from ._binary import i8, i16be as i16, i32be as i32, o8 + +COMPRESSION = {1: "raw", 5: "jpeg"} + +PAD = o8(0) * 4 + + +# +# Helpers + + +def i(c): + return i32((PAD + c)[-4:]) + + +def dump(c): + for i in c: + print("%02x" % i8(i), end=" ") + print() + + +## +# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields +# from TIFF and JPEG files, use the getiptcinfo function. + + +class IptcImageFile(ImageFile.ImageFile): + + format = "IPTC" + format_description = "IPTC/NAA" + + def getint(self, key): + return i(self.info[key]) + + def field(self): + # + # get a IPTC field header + s = self.fp.read(5) + if not len(s): + return None, 0 + + tag = i8(s[1]), i8(s[2]) + + # syntax + if i8(s[0]) != 0x1C or tag[0] < 1 or tag[0] > 9: + raise SyntaxError("invalid IPTC/NAA file") + + # field size + size = i8(s[3]) + if size > 132: + raise OSError("illegal field length in IPTC/NAA file") + elif size == 128: + size = 0 + elif size > 128: + size = i(self.fp.read(size - 128)) + else: + size = i16(s[3:]) + + return tag, size + + def _open(self): + + # load descriptive fields + while True: + offset = self.fp.tell() + tag, size = self.field() + if not tag or tag == (8, 10): + break + if size: + tagdata = self.fp.read(size) + else: + tagdata = None + if tag in self.info: + if isinstance(self.info[tag], list): + self.info[tag].append(tagdata) + else: + self.info[tag] = [self.info[tag], tagdata] + else: + self.info[tag] = tagdata + + # mode + layers = i8(self.info[(3, 60)][0]) + component = i8(self.info[(3, 60)][1]) + if (3, 65) in self.info: + id = i8(self.info[(3, 65)][0]) - 1 + else: + id = 0 + if layers == 1 and not component: + self.mode = "L" + elif layers == 3 and component: + self.mode = "RGB"[id] + elif layers == 4 and component: + self.mode = "CMYK"[id] + + # size + self._size = self.getint((3, 20)), self.getint((3, 30)) + + # compression + try: + compression = COMPRESSION[self.getint((3, 120))] + except KeyError as e: + raise OSError("Unknown 
IPTC image compression") from e + + # tile + if tag == (8, 10): + self.tile = [ + ("iptc", (compression, offset), (0, 0, self.size[0], self.size[1])) + ] + + def load(self): + + if len(self.tile) != 1 or self.tile[0][0] != "iptc": + return ImageFile.ImageFile.load(self) + + type, tile, box = self.tile[0] + + encoding, offset = tile + + self.fp.seek(offset) + + # Copy image data to temporary file + o_fd, outfile = tempfile.mkstemp(text=False) + o = os.fdopen(o_fd) + if encoding == "raw": + # To simplify access to the extracted file, + # prepend a PPM header + o.write("P5\n%d %d\n255\n" % self.size) + while True: + type, size = self.field() + if type != (8, 10): + break + while size > 0: + s = self.fp.read(min(size, 8192)) + if not s: + break + o.write(s) + size -= len(s) + o.close() + + try: + with Image.open(outfile) as _im: + _im.load() + self.im = _im.im + finally: + try: + os.unlink(outfile) + except OSError: + pass + + +Image.register_open(IptcImageFile.format, IptcImageFile) + +Image.register_extension(IptcImageFile.format, ".iim") + + +def getiptcinfo(im): + """ + Get IPTC information from TIFF, JPEG, or IPTC file. + + :param im: An image containing IPTC data. + :returns: A dictionary containing IPTC information, or None if + no IPTC information block was found. + """ + from . import TiffImagePlugin, JpegImagePlugin + import io + + data = None + + if isinstance(im, IptcImageFile): + # return info dictionary right away + return im.info + + elif isinstance(im, JpegImagePlugin.JpegImageFile): + # extract the IPTC/NAA resource + photoshop = im.info.get("photoshop") + if photoshop: + data = photoshop.get(0x0404) + + elif isinstance(im, TiffImagePlugin.TiffImageFile): + # get raw data from the IPTC/NAA tag (PhotoShop tags the data + # as 4-byte integers, so we cannot use the get method...) 
+ try: + data = im.tag.tagdata[TiffImagePlugin.IPTC_NAA_CHUNK] + except (AttributeError, KeyError): + pass + + if data is None: + return None # no properties + + # create an IptcImagePlugin object without initializing it + class FakeImage: + pass + + im = FakeImage() + im.__class__ = IptcImageFile + + # parse the IPTC information chunk + im.info = {} + im.fp = io.BytesIO(data) + + try: + im._open() + except (IndexError, KeyError): + pass # expected failure + + return im.info diff --git a/venv/Lib/site-packages/PIL/Jpeg2KImagePlugin.py b/venv/Lib/site-packages/PIL/Jpeg2KImagePlugin.py new file mode 100644 index 000000000..0b0d433db --- /dev/null +++ b/venv/Lib/site-packages/PIL/Jpeg2KImagePlugin.py @@ -0,0 +1,314 @@ +# +# The Python Imaging Library +# $Id$ +# +# JPEG2000 file handling +# +# History: +# 2014-03-12 ajh Created +# +# Copyright (c) 2014 Coriolis Systems Limited +# Copyright (c) 2014 Alastair Houghton +# +# See the README file for information on usage and redistribution. +# +import io +import os +import struct + +from . 
import Image, ImageFile + + +def _parse_codestream(fp): + """Parse the JPEG 2000 codestream to extract the size and component + count from the SIZ marker segment, returning a PIL (size, mode) tuple.""" + + hdr = fp.read(2) + lsiz = struct.unpack(">H", hdr)[0] + siz = hdr + fp.read(lsiz - 2) + lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, _, _, _, _, csiz = struct.unpack_from( + ">HHIIIIIIIIH", siz + ) + ssiz = [None] * csiz + xrsiz = [None] * csiz + yrsiz = [None] * csiz + for i in range(csiz): + ssiz[i], xrsiz[i], yrsiz[i] = struct.unpack_from(">BBB", siz, 36 + 3 * i) + + size = (xsiz - xosiz, ysiz - yosiz) + if csiz == 1: + if (yrsiz[0] & 0x7F) > 8: + mode = "I;16" + else: + mode = "L" + elif csiz == 2: + mode = "LA" + elif csiz == 3: + mode = "RGB" + elif csiz == 4: + mode = "RGBA" + else: + mode = None + + return (size, mode) + + +def _parse_jp2_header(fp): + """Parse the JP2 header box to extract size, component count and + color space information, returning a (size, mode, mimetype) tuple.""" + + # Find the JP2 header box + header = None + mimetype = None + while True: + lbox, tbox = struct.unpack(">I4s", fp.read(8)) + if lbox == 1: + lbox = struct.unpack(">Q", fp.read(8))[0] + hlen = 16 + else: + hlen = 8 + + if lbox < hlen: + raise SyntaxError("Invalid JP2 header length") + + if tbox == b"jp2h": + header = fp.read(lbox - hlen) + break + elif tbox == b"ftyp": + if fp.read(4) == b"jpx ": + mimetype = "image/jpx" + fp.seek(lbox - hlen - 4, os.SEEK_CUR) + else: + fp.seek(lbox - hlen, os.SEEK_CUR) + + if header is None: + raise SyntaxError("could not find JP2 header") + + size = None + mode = None + bpc = None + nc = None + + hio = io.BytesIO(header) + while True: + lbox, tbox = struct.unpack(">I4s", hio.read(8)) + if lbox == 1: + lbox = struct.unpack(">Q", hio.read(8))[0] + hlen = 16 + else: + hlen = 8 + + content = hio.read(lbox - hlen) + + if tbox == b"ihdr": + height, width, nc, bpc, c, unkc, ipr = struct.unpack(">IIHBBBB", content) + size = (width, height) + if 
unkc: + if nc == 1 and (bpc & 0x7F) > 8: + mode = "I;16" + elif nc == 1: + mode = "L" + elif nc == 2: + mode = "LA" + elif nc == 3: + mode = "RGB" + elif nc == 4: + mode = "RGBA" + break + elif tbox == b"colr": + meth, prec, approx = struct.unpack_from(">BBB", content) + if meth == 1: + cs = struct.unpack_from(">I", content, 3)[0] + if cs == 16: # sRGB + if nc == 1 and (bpc & 0x7F) > 8: + mode = "I;16" + elif nc == 1: + mode = "L" + elif nc == 3: + mode = "RGB" + elif nc == 4: + mode = "RGBA" + break + elif cs == 17: # grayscale + if nc == 1 and (bpc & 0x7F) > 8: + mode = "I;16" + elif nc == 1: + mode = "L" + elif nc == 2: + mode = "LA" + break + elif cs == 18: # sYCC + if nc == 3: + mode = "RGB" + elif nc == 4: + mode = "RGBA" + break + + if size is None or mode is None: + raise SyntaxError("Malformed jp2 header") + + return (size, mode, mimetype) + + +## +# Image plugin for JPEG2000 images. + + +class Jpeg2KImageFile(ImageFile.ImageFile): + format = "JPEG2000" + format_description = "JPEG 2000 (ISO 15444)" + + def _open(self): + sig = self.fp.read(4) + if sig == b"\xff\x4f\xff\x51": + self.codec = "j2k" + self._size, self.mode = _parse_codestream(self.fp) + else: + sig = sig + self.fp.read(8) + + if sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a": + self.codec = "jp2" + header = _parse_jp2_header(self.fp) + self._size, self.mode, self.custom_mimetype = header + else: + raise SyntaxError("not a JPEG 2000 file") + + if self.size is None or self.mode is None: + raise SyntaxError("unable to determine size/mode") + + self._reduce = 0 + self.layers = 0 + + fd = -1 + length = -1 + + try: + fd = self.fp.fileno() + length = os.fstat(fd).st_size + except Exception: + fd = -1 + try: + pos = self.fp.tell() + self.fp.seek(0, io.SEEK_END) + length = self.fp.tell() + self.fp.seek(pos) + except Exception: + length = -1 + + self.tile = [ + ( + "jpeg2k", + (0, 0) + self.size, + 0, + (self.codec, self._reduce, self.layers, fd, length), + ) + ] + + @property + def reduce(self): + # 
https://github.com/python-pillow/Pillow/issues/4343 found that the + # new Image 'reduce' method was shadowed by this plugin's 'reduce' + # property. This attempts to allow for both scenarios + return self._reduce or super().reduce + + @reduce.setter + def reduce(self, value): + self._reduce = value + + def load(self): + if self.tile and self._reduce: + power = 1 << self._reduce + adjust = power >> 1 + self._size = ( + int((self.size[0] + adjust) / power), + int((self.size[1] + adjust) / power), + ) + + # Update the reduce and layers settings + t = self.tile[0] + t3 = (t[3][0], self._reduce, self.layers, t[3][3], t[3][4]) + self.tile = [(t[0], (0, 0) + self.size, t[2], t3)] + + return ImageFile.ImageFile.load(self) + + +def _accept(prefix): + return ( + prefix[:4] == b"\xff\x4f\xff\x51" + or prefix[:12] == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a" + ) + + +# ------------------------------------------------------------ +# Save support + + +def _save(im, fp, filename): + if filename.endswith(".j2k"): + kind = "j2k" + else: + kind = "jp2" + + # Get the keyword arguments + info = im.encoderinfo + + offset = info.get("offset", None) + tile_offset = info.get("tile_offset", None) + tile_size = info.get("tile_size", None) + quality_mode = info.get("quality_mode", "rates") + quality_layers = info.get("quality_layers", None) + if quality_layers is not None and not ( + isinstance(quality_layers, (list, tuple)) + and all( + [ + isinstance(quality_layer, (int, float)) + for quality_layer in quality_layers + ] + ) + ): + raise ValueError("quality_layers must be a sequence of numbers") + + num_resolutions = info.get("num_resolutions", 0) + cblk_size = info.get("codeblock_size", None) + precinct_size = info.get("precinct_size", None) + irreversible = info.get("irreversible", False) + progression = info.get("progression", "LRCP") + cinema_mode = info.get("cinema_mode", "no") + fd = -1 + + if hasattr(fp, "fileno"): + try: + fd = fp.fileno() + except Exception: + fd = -1 + + 
im.encoderconfig = ( + offset, + tile_offset, + tile_size, + quality_mode, + quality_layers, + num_resolutions, + cblk_size, + precinct_size, + irreversible, + progression, + cinema_mode, + fd, + ) + + ImageFile._save(im, fp, [("jpeg2k", (0, 0) + im.size, 0, kind)]) + + +# ------------------------------------------------------------ +# Registry stuff + + +Image.register_open(Jpeg2KImageFile.format, Jpeg2KImageFile, _accept) +Image.register_save(Jpeg2KImageFile.format, _save) + +Image.register_extensions( + Jpeg2KImageFile.format, [".jp2", ".j2k", ".jpc", ".jpf", ".jpx", ".j2c"] +) + +Image.register_mime(Jpeg2KImageFile.format, "image/jp2") diff --git a/venv/Lib/site-packages/PIL/JpegImagePlugin.py b/venv/Lib/site-packages/PIL/JpegImagePlugin.py new file mode 100644 index 000000000..b4795c302 --- /dev/null +++ b/venv/Lib/site-packages/PIL/JpegImagePlugin.py @@ -0,0 +1,809 @@ +# +# The Python Imaging Library. +# $Id$ +# +# JPEG (JFIF) file handling +# +# See "Digital Compression and Coding of Continuous-Tone Still Images, +# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1) +# +# History: +# 1995-09-09 fl Created +# 1995-09-13 fl Added full parser +# 1996-03-25 fl Added hack to use the IJG command line utilities +# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug +# 1996-05-28 fl Added draft support, JFIF version (0.1) +# 1996-12-30 fl Added encoder options, added progression property (0.2) +# 1997-08-27 fl Save mode 1 images as BW (0.3) +# 1998-07-12 fl Added YCbCr to draft and save methods (0.4) +# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1) +# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2) +# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3) +# 2003-04-25 fl Added experimental EXIF decoder (0.5) +# 2003-06-06 fl Added experimental EXIF GPSinfo decoder +# 2003-09-13 fl Extract COM markers +# 2009-09-06 fl Added icc_profile support (from Florian Hoech) +# 2009-03-06 fl Changed CMYK handling; 
always use Adobe polarity (0.6) +# 2009-03-08 fl Added subsampling support (from Justin Huff). +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# +import array +import io +import os +import struct +import subprocess +import tempfile +import warnings + +from . import Image, ImageFile, TiffImagePlugin +from ._binary import i8, i16be as i16, i32be as i32, o8 +from .JpegPresets import presets + +# +# Parser + + +def Skip(self, marker): + n = i16(self.fp.read(2)) - 2 + ImageFile._safe_read(self.fp, n) + + +def APP(self, marker): + # + # Application marker. Store these in the APP dictionary. + # Also look for well-known application markers. + + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + + app = "APP%d" % (marker & 15) + + self.app[app] = s # compatibility + self.applist.append((app, s)) + + if marker == 0xFFE0 and s[:4] == b"JFIF": + # extract JFIF information + self.info["jfif"] = version = i16(s, 5) # version + self.info["jfif_version"] = divmod(version, 256) + # extract JFIF properties + try: + jfif_unit = i8(s[7]) + jfif_density = i16(s, 8), i16(s, 10) + except Exception: + pass + else: + if jfif_unit == 1: + self.info["dpi"] = jfif_density + self.info["jfif_unit"] = jfif_unit + self.info["jfif_density"] = jfif_density + elif marker == 0xFFE1 and s[:5] == b"Exif\0": + if "exif" not in self.info: + # extract EXIF information (incomplete) + self.info["exif"] = s # FIXME: value will change + elif marker == 0xFFE2 and s[:5] == b"FPXR\0": + # extract FlashPix information (incomplete) + self.info["flashpix"] = s # FIXME: value will change + elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0": + # Since an ICC profile can be larger than the maximum size of + # a JPEG marker (64K), we need provisions to split it into + # multiple markers. 
The format defined by the ICC specifies + # one or more APP2 markers containing the following data: + # Identifying string ASCII "ICC_PROFILE\0" (12 bytes) + # Marker sequence number 1, 2, etc (1 byte) + # Number of markers Total of APP2's used (1 byte) + # Profile data (remainder of APP2 data) + # Decoders should use the marker sequence numbers to + # reassemble the profile, rather than assuming that the APP2 + # markers appear in the correct sequence. + self.icclist.append(s) + elif marker == 0xFFED and s[:14] == b"Photoshop 3.0\x00": + # parse the image resource block + offset = 14 + photoshop = self.info.setdefault("photoshop", {}) + while s[offset : offset + 4] == b"8BIM": + try: + offset += 4 + # resource code + code = i16(s, offset) + offset += 2 + # resource name (usually empty) + name_len = i8(s[offset]) + # name = s[offset+1:offset+1+name_len] + offset += 1 + name_len + offset += offset & 1 # align + # resource data block + size = i32(s, offset) + offset += 4 + data = s[offset : offset + size] + if code == 0x03ED: # ResolutionInfo + data = { + "XResolution": i32(data[:4]) / 65536, + "DisplayedUnitsX": i16(data[4:8]), + "YResolution": i32(data[8:12]) / 65536, + "DisplayedUnitsY": i16(data[12:]), + } + photoshop[code] = data + offset += size + offset += offset & 1 # align + except struct.error: + break # insufficient data + + elif marker == 0xFFEE and s[:5] == b"Adobe": + self.info["adobe"] = i16(s, 5) + # extract Adobe custom properties + try: + adobe_transform = i8(s[1]) + except Exception: + pass + else: + self.info["adobe_transform"] = adobe_transform + elif marker == 0xFFE2 and s[:4] == b"MPF\0": + # extract MPO information + self.info["mp"] = s[4:] + # offset is current location minus buffer size + # plus constant header size + self.info["mpoffset"] = self.fp.tell() - n + 4 + + # If DPI isn't in JPEG header, fetch from EXIF + if "dpi" not in self.info and "exif" in self.info: + try: + exif = self.getexif() + resolution_unit = exif[0x0128] + 
x_resolution = exif[0x011A] + try: + dpi = float(x_resolution[0]) / x_resolution[1] + except TypeError: + dpi = x_resolution + if resolution_unit == 3: # cm + # 1 dpcm = 2.54 dpi + dpi *= 2.54 + self.info["dpi"] = int(dpi + 0.5), int(dpi + 0.5) + except (KeyError, SyntaxError, ValueError, ZeroDivisionError): + # SyntaxError for invalid/unreadable EXIF + # KeyError for dpi not included + # ZeroDivisionError for invalid dpi rational value + # ValueError for x_resolution[0] being an invalid float + self.info["dpi"] = 72, 72 + + +def COM(self, marker): + # + # Comment marker. Store these in the APP dictionary. + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + + self.info["comment"] = s + self.app["COM"] = s # compatibility + self.applist.append(("COM", s)) + + +def SOF(self, marker): + # + # Start of frame marker. Defines the size and mode of the + # image. JPEG is colour blind, so we use some simple + # heuristics to map the number of layers to an appropriate + # mode. Note that this could be made a bit brighter, by + # looking for JFIF and Adobe APP markers. 
+ + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + self._size = i16(s[3:]), i16(s[1:]) + + self.bits = i8(s[0]) + if self.bits != 8: + raise SyntaxError("cannot handle %d-bit layers" % self.bits) + + self.layers = i8(s[5]) + if self.layers == 1: + self.mode = "L" + elif self.layers == 3: + self.mode = "RGB" + elif self.layers == 4: + self.mode = "CMYK" + else: + raise SyntaxError("cannot handle %d-layer images" % self.layers) + + if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]: + self.info["progressive"] = self.info["progression"] = 1 + + if self.icclist: + # fixup icc profile + self.icclist.sort() # sort by sequence number + if i8(self.icclist[0][13]) == len(self.icclist): + profile = [] + for p in self.icclist: + profile.append(p[14:]) + icc_profile = b"".join(profile) + else: + icc_profile = None # wrong number of fragments + self.info["icc_profile"] = icc_profile + self.icclist = [] + + for i in range(6, len(s), 3): + t = s[i : i + 3] + # 4-tuples: id, vsamp, hsamp, qtable + self.layer.append((t[0], i8(t[1]) // 16, i8(t[1]) & 15, i8(t[2]))) + + +def DQT(self, marker): + # + # Define quantization table. Support baseline 8-bit tables + # only. Note that there might be more than one table in + # each marker. + + # FIXME: The quantization tables can be used to estimate the + # compression quality. + + n = i16(self.fp.read(2)) - 2 + s = ImageFile._safe_read(self.fp, n) + while len(s): + if len(s) < 65: + raise SyntaxError("bad quantization table marker") + v = i8(s[0]) + if v // 16 == 0: + self.quantization[v & 15] = array.array("B", s[1:65]) + s = s[65:] + else: + return # FIXME: add code to read 16-bit tables! 
+ # raise SyntaxError, "bad quantization table element size" + + +# +# JPEG marker table + +MARKER = { + 0xFFC0: ("SOF0", "Baseline DCT", SOF), + 0xFFC1: ("SOF1", "Extended Sequential DCT", SOF), + 0xFFC2: ("SOF2", "Progressive DCT", SOF), + 0xFFC3: ("SOF3", "Spatial lossless", SOF), + 0xFFC4: ("DHT", "Define Huffman table", Skip), + 0xFFC5: ("SOF5", "Differential sequential DCT", SOF), + 0xFFC6: ("SOF6", "Differential progressive DCT", SOF), + 0xFFC7: ("SOF7", "Differential spatial", SOF), + 0xFFC8: ("JPG", "Extension", None), + 0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF), + 0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF), + 0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF), + 0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip), + 0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF), + 0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF), + 0xFFCF: ("SOF15", "Differential spatial (AC)", SOF), + 0xFFD0: ("RST0", "Restart 0", None), + 0xFFD1: ("RST1", "Restart 1", None), + 0xFFD2: ("RST2", "Restart 2", None), + 0xFFD3: ("RST3", "Restart 3", None), + 0xFFD4: ("RST4", "Restart 4", None), + 0xFFD5: ("RST5", "Restart 5", None), + 0xFFD6: ("RST6", "Restart 6", None), + 0xFFD7: ("RST7", "Restart 7", None), + 0xFFD8: ("SOI", "Start of image", None), + 0xFFD9: ("EOI", "End of image", None), + 0xFFDA: ("SOS", "Start of scan", Skip), + 0xFFDB: ("DQT", "Define quantization table", DQT), + 0xFFDC: ("DNL", "Define number of lines", Skip), + 0xFFDD: ("DRI", "Define restart interval", Skip), + 0xFFDE: ("DHP", "Define hierarchical progression", SOF), + 0xFFDF: ("EXP", "Expand reference component", Skip), + 0xFFE0: ("APP0", "Application segment 0", APP), + 0xFFE1: ("APP1", "Application segment 1", APP), + 0xFFE2: ("APP2", "Application segment 2", APP), + 0xFFE3: ("APP3", "Application segment 3", APP), + 0xFFE4: ("APP4", "Application segment 4", APP), + 0xFFE5: ("APP5", "Application segment 5", APP), + 0xFFE6: ("APP6", "Application segment 6", APP), 
+ 0xFFE7: ("APP7", "Application segment 7", APP), + 0xFFE8: ("APP8", "Application segment 8", APP), + 0xFFE9: ("APP9", "Application segment 9", APP), + 0xFFEA: ("APP10", "Application segment 10", APP), + 0xFFEB: ("APP11", "Application segment 11", APP), + 0xFFEC: ("APP12", "Application segment 12", APP), + 0xFFED: ("APP13", "Application segment 13", APP), + 0xFFEE: ("APP14", "Application segment 14", APP), + 0xFFEF: ("APP15", "Application segment 15", APP), + 0xFFF0: ("JPG0", "Extension 0", None), + 0xFFF1: ("JPG1", "Extension 1", None), + 0xFFF2: ("JPG2", "Extension 2", None), + 0xFFF3: ("JPG3", "Extension 3", None), + 0xFFF4: ("JPG4", "Extension 4", None), + 0xFFF5: ("JPG5", "Extension 5", None), + 0xFFF6: ("JPG6", "Extension 6", None), + 0xFFF7: ("JPG7", "Extension 7", None), + 0xFFF8: ("JPG8", "Extension 8", None), + 0xFFF9: ("JPG9", "Extension 9", None), + 0xFFFA: ("JPG10", "Extension 10", None), + 0xFFFB: ("JPG11", "Extension 11", None), + 0xFFFC: ("JPG12", "Extension 12", None), + 0xFFFD: ("JPG13", "Extension 13", None), + 0xFFFE: ("COM", "Comment", COM), +} + + +def _accept(prefix): + # Magic number was taken from https://en.wikipedia.org/wiki/JPEG + return prefix[0:3] == b"\xFF\xD8\xFF" + + +## +# Image plugin for JPEG and JFIF images. 
+ + +class JpegImageFile(ImageFile.ImageFile): + + format = "JPEG" + format_description = "JPEG (ISO 10918)" + + def _open(self): + + s = self.fp.read(3) + + if not _accept(s): + raise SyntaxError("not a JPEG file") + s = b"\xFF" + + # Create attributes + self.bits = self.layers = 0 + + # JPEG specifics (internal) + self.layer = [] + self.huffman_dc = {} + self.huffman_ac = {} + self.quantization = {} + self.app = {} # compatibility + self.applist = [] + self.icclist = [] + + while True: + + i = i8(s) + if i == 0xFF: + s = s + self.fp.read(1) + i = i16(s) + else: + # Skip non-0xFF junk + s = self.fp.read(1) + continue + + if i in MARKER: + name, description, handler = MARKER[i] + if handler is not None: + handler(self, i) + if i == 0xFFDA: # start of scan + rawmode = self.mode + if self.mode == "CMYK": + rawmode = "CMYK;I" # assume adobe conventions + self.tile = [("jpeg", (0, 0) + self.size, 0, (rawmode, ""))] + # self.__offset = self.fp.tell() + break + s = self.fp.read(1) + elif i == 0 or i == 0xFFFF: + # padded marker or junk; move on + s = b"\xff" + elif i == 0xFF00: # Skip extraneous data (escaped 0xFF) + s = self.fp.read(1) + else: + raise SyntaxError("no marker found") + + def load_read(self, read_bytes): + """ + internal: read more image data + For premature EOF and LOAD_TRUNCATED_IMAGES adds EOI marker + so libjpeg can finish decoding + """ + s = self.fp.read(read_bytes) + + if not s and ImageFile.LOAD_TRUNCATED_IMAGES: + # Premature EOF. 
+ # Pretend file is finished adding EOI marker + return b"\xFF\xD9" + + return s + + def draft(self, mode, size): + + if len(self.tile) != 1: + return + + # Protect from second call + if self.decoderconfig: + return + + d, e, o, a = self.tile[0] + scale = 1 + original_size = self.size + + if a[0] == "RGB" and mode in ["L", "YCbCr"]: + self.mode = mode + a = mode, "" + + if size: + scale = min(self.size[0] // size[0], self.size[1] // size[1]) + for s in [8, 4, 2, 1]: + if scale >= s: + break + e = ( + e[0], + e[1], + (e[2] - e[0] + s - 1) // s + e[0], + (e[3] - e[1] + s - 1) // s + e[1], + ) + self._size = ((self.size[0] + s - 1) // s, (self.size[1] + s - 1) // s) + scale = s + + self.tile = [(d, e, o, a)] + self.decoderconfig = (scale, 0) + + box = (0, 0, original_size[0] / scale, original_size[1] / scale) + return (self.mode, box) + + def load_djpeg(self): + + # ALTERNATIVE: handle JPEGs via the IJG command line utilities + + f, path = tempfile.mkstemp() + os.close(f) + if os.path.exists(self.filename): + subprocess.check_call(["djpeg", "-outfile", path, self.filename]) + else: + raise ValueError("Invalid Filename") + + try: + with Image.open(path) as _im: + _im.load() + self.im = _im.im + finally: + try: + os.unlink(path) + except OSError: + pass + + self.mode = self.im.mode + self._size = self.im.size + + self.tile = [] + + def _getexif(self): + return _getexif(self) + + def _getmp(self): + return _getmp(self) + + +def _fixup_dict(src_dict): + # Helper function for _getexif() + # returns a dict with any single item tuples/lists as individual values + exif = Image.Exif() + return exif._fixup_dict(src_dict) + + +def _getexif(self): + if "exif" not in self.info: + return None + return dict(self.getexif()) + + +def _getmp(self): + # Extract MP information. This method was inspired by the "highly + # experimental" _getexif version that's been in use for years now, + # itself based on the ImageFileDirectory class in the TIFF plug-in. 
+ + # The MP record essentially consists of a TIFF file embedded in a JPEG + # application marker. + try: + data = self.info["mp"] + except KeyError: + return None + file_contents = io.BytesIO(data) + head = file_contents.read(8) + endianness = ">" if head[:4] == b"\x4d\x4d\x00\x2a" else "<" + # process dictionary + try: + info = TiffImagePlugin.ImageFileDirectory_v2(head) + file_contents.seek(info.next) + info.load(file_contents) + mp = dict(info) + except Exception as e: + raise SyntaxError("malformed MP Index (unreadable directory)") from e + # it's an error not to have a number of images + try: + quant = mp[0xB001] + except KeyError as e: + raise SyntaxError("malformed MP Index (no number of images)") from e + # get MP entries + mpentries = [] + try: + rawmpentries = mp[0xB002] + for entrynum in range(0, quant): + unpackedentry = struct.unpack_from( + "{}LLLHH".format(endianness), rawmpentries, entrynum * 16 + ) + labels = ("Attribute", "Size", "DataOffset", "EntryNo1", "EntryNo2") + mpentry = dict(zip(labels, unpackedentry)) + mpentryattr = { + "DependentParentImageFlag": bool(mpentry["Attribute"] & (1 << 31)), + "DependentChildImageFlag": bool(mpentry["Attribute"] & (1 << 30)), + "RepresentativeImageFlag": bool(mpentry["Attribute"] & (1 << 29)), + "Reserved": (mpentry["Attribute"] & (3 << 27)) >> 27, + "ImageDataFormat": (mpentry["Attribute"] & (7 << 24)) >> 24, + "MPType": mpentry["Attribute"] & 0x00FFFFFF, + } + if mpentryattr["ImageDataFormat"] == 0: + mpentryattr["ImageDataFormat"] = "JPEG" + else: + raise SyntaxError("unsupported picture format in MPO") + mptypemap = { + 0x000000: "Undefined", + 0x010001: "Large Thumbnail (VGA Equivalent)", + 0x010002: "Large Thumbnail (Full HD Equivalent)", + 0x020001: "Multi-Frame Image (Panorama)", + 0x020002: "Multi-Frame Image: (Disparity)", + 0x020003: "Multi-Frame Image: (Multi-Angle)", + 0x030000: "Baseline MP Primary Image", + } + mpentryattr["MPType"] = mptypemap.get(mpentryattr["MPType"], "Unknown") + 
mpentry["Attribute"] = mpentryattr + mpentries.append(mpentry) + mp[0xB002] = mpentries + except KeyError as e: + raise SyntaxError("malformed MP Index (bad MP Entry)") from e + # Next we should try and parse the individual image unique ID list; + # we don't because I've never seen this actually used in a real MPO + # file and so can't test it. + return mp + + +# -------------------------------------------------------------------- +# stuff to save JPEG files + +RAWMODE = { + "1": "L", + "L": "L", + "RGB": "RGB", + "RGBX": "RGB", + "CMYK": "CMYK;I", # assume adobe conventions + "YCbCr": "YCbCr", +} + +# fmt: off +zigzag_index = ( + 0, 1, 5, 6, 14, 15, 27, 28, + 2, 4, 7, 13, 16, 26, 29, 42, + 3, 8, 12, 17, 25, 30, 41, 43, + 9, 11, 18, 24, 31, 40, 44, 53, + 10, 19, 23, 32, 39, 45, 52, 54, + 20, 22, 33, 38, 46, 51, 55, 60, + 21, 34, 37, 47, 50, 56, 59, 61, + 35, 36, 48, 49, 57, 58, 62, 63, +) + +samplings = { + (1, 1, 1, 1, 1, 1): 0, + (2, 1, 1, 1, 1, 1): 1, + (2, 2, 1, 1, 1, 1): 2, +} +# fmt: on + + +def convert_dict_qtables(qtables): + qtables = [qtables[key] for key in range(len(qtables)) if key in qtables] + for idx, table in enumerate(qtables): + qtables[idx] = [table[i] for i in zigzag_index] + return qtables + + +def get_sampling(im): + # There's no subsampling when images have only 1 layer + # (grayscale images) or when they are CMYK (4 layers), + # so set subsampling to the default value. + # + # NOTE: currently Pillow can't encode JPEG to YCCK format. + # If YCCK support is added in the future, subsampling code will have + # to be updated (here and in JpegEncode.c) to deal with 4 layers. 
+ if not hasattr(im, "layers") or im.layers in (1, 4): + return -1 + sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3] + return samplings.get(sampling, -1) + + +def _save(im, fp, filename): + + try: + rawmode = RAWMODE[im.mode] + except KeyError as e: + raise OSError("cannot write mode %s as JPEG" % im.mode) from e + + info = im.encoderinfo + + dpi = [round(x) for x in info.get("dpi", (0, 0))] + + quality = info.get("quality", -1) + subsampling = info.get("subsampling", -1) + qtables = info.get("qtables") + + if quality == "keep": + quality = -1 + subsampling = "keep" + qtables = "keep" + elif quality in presets: + preset = presets[quality] + quality = -1 + subsampling = preset.get("subsampling", -1) + qtables = preset.get("quantization") + elif not isinstance(quality, int): + raise ValueError("Invalid quality setting") + else: + if subsampling in presets: + subsampling = presets[subsampling].get("subsampling", -1) + if isinstance(qtables, str) and qtables in presets: + qtables = presets[qtables].get("quantization") + + if subsampling == "4:4:4": + subsampling = 0 + elif subsampling == "4:2:2": + subsampling = 1 + elif subsampling == "4:2:0": + subsampling = 2 + elif subsampling == "4:1:1": + # For compatibility. Before Pillow 4.3, 4:1:1 actually meant 4:2:0. + # Set 4:2:0 if someone is still using that value. 
+ subsampling = 2 + elif subsampling == "keep": + if im.format != "JPEG": + raise ValueError("Cannot use 'keep' when original image is not a JPEG") + subsampling = get_sampling(im) + + def validate_qtables(qtables): + if qtables is None: + return qtables + if isinstance(qtables, str): + try: + lines = [ + int(num) + for line in qtables.splitlines() + for num in line.split("#", 1)[0].split() + ] + except ValueError as e: + raise ValueError("Invalid quantization table") from e + else: + qtables = [lines[s : s + 64] for s in range(0, len(lines), 64)] + if isinstance(qtables, (tuple, list, dict)): + if isinstance(qtables, dict): + qtables = convert_dict_qtables(qtables) + elif isinstance(qtables, tuple): + qtables = list(qtables) + if not (0 < len(qtables) < 5): + raise ValueError("None or too many quantization tables") + for idx, table in enumerate(qtables): + try: + if len(table) != 64: + raise TypeError + table = array.array("B", table) + except TypeError as e: + raise ValueError("Invalid quantization table") from e + else: + qtables[idx] = list(table) + return qtables + + if qtables == "keep": + if im.format != "JPEG": + raise ValueError("Cannot use 'keep' when original image is not a JPEG") + qtables = getattr(im, "quantization", None) + qtables = validate_qtables(qtables) + + extra = b"" + + icc_profile = info.get("icc_profile") + if icc_profile: + ICC_OVERHEAD_LEN = 14 + MAX_BYTES_IN_MARKER = 65533 + MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN + markers = [] + while icc_profile: + markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER]) + icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:] + i = 1 + for marker in markers: + size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker)) + extra += ( + b"\xFF\xE2" + + size + + b"ICC_PROFILE\0" + + o8(i) + + o8(len(markers)) + + marker + ) + i += 1 + + # "progressive" is the official name, but older documentation + # says "progression" + # FIXME: issue a warning if the wrong form is used 
(post-1.1.7) + progressive = info.get("progressive", False) or info.get("progression", False) + + optimize = info.get("optimize", False) + + exif = info.get("exif", b"") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + + # get keyword arguments + im.encoderconfig = ( + quality, + progressive, + info.get("smooth", 0), + optimize, + info.get("streamtype", 0), + dpi[0], + dpi[1], + subsampling, + qtables, + extra, + exif, + ) + + # if we optimize, libjpeg needs a buffer big enough to hold the whole image + # in a shot. Guessing on the size, at im.size bytes. (raw pixel size is + # channels*size, this is a value that's been used in a django patch. + # https://github.com/matthewwithanm/django-imagekit/issues/50 + bufsize = 0 + if optimize or progressive: + # CMYK can be bigger + if im.mode == "CMYK": + bufsize = 4 * im.size[0] * im.size[1] + # keep sets quality to -1, but the actual value may be high. + elif quality >= 95 or quality == -1: + bufsize = 2 * im.size[0] * im.size[1] + else: + bufsize = im.size[0] * im.size[1] + + # The EXIF info needs to be written as one block, + APP1, + one spare byte. + # Ensure that our buffer is big enough. Same with the icc_profile block. + bufsize = max(ImageFile.MAXBLOCK, bufsize, len(exif) + 5, len(extra) + 1) + + ImageFile._save(im, fp, [("jpeg", (0, 0) + im.size, 0, rawmode)], bufsize) + + +def _save_cjpeg(im, fp, filename): + # ALTERNATIVE: handle JPEGs via the IJG command line utilities. + tempfile = im._dump() + subprocess.check_call(["cjpeg", "-outfile", filename, tempfile]) + try: + os.unlink(tempfile) + except OSError: + pass + + +## +# Factory for making JPEG and MPO instances +def jpeg_factory(fp=None, filename=None): + im = JpegImageFile(fp, filename) + try: + mpheader = im._getmp() + if mpheader[45057] > 1: + # It's actually an MPO + from .MpoImagePlugin import MpoImageFile + + # Don't reload everything, just convert it. 
+ im = MpoImageFile.adopt(im, mpheader) + except (TypeError, IndexError): + # It is really a JPEG + pass + except SyntaxError: + warnings.warn( + "Image appears to be a malformed MPO file, it will be " + "interpreted as a base JPEG file" + ) + return im + + +# --------------------------------------------------------------------- +# Registry stuff + +Image.register_open(JpegImageFile.format, jpeg_factory, _accept) +Image.register_save(JpegImageFile.format, _save) + +Image.register_extensions(JpegImageFile.format, [".jfif", ".jpe", ".jpg", ".jpeg"]) + +Image.register_mime(JpegImageFile.format, "image/jpeg") diff --git a/venv/Lib/site-packages/PIL/JpegPresets.py b/venv/Lib/site-packages/PIL/JpegPresets.py new file mode 100644 index 000000000..09691d79d --- /dev/null +++ b/venv/Lib/site-packages/PIL/JpegPresets.py @@ -0,0 +1,248 @@ +""" +JPEG quality settings equivalent to the Photoshop settings. +Can be used when saving JPEG files. + +The following presets are available by default: +``web_low``, ``web_medium``, ``web_high``, ``web_very_high``, ``web_maximum``, +``low``, ``medium``, ``high``, ``maximum``. +More presets can be added to the :py:data:`presets` dict if needed. + +To apply the preset, specify:: + + quality="preset_name" + +To apply only the quantization table:: + + qtables="preset_name" + +To apply only the subsampling setting:: + + subsampling="preset_name" + +Example:: + + im.save("image_name.jpg", quality="web_high") + +Subsampling +----------- + +Subsampling is the practice of encoding images by implementing less resolution +for chroma information than for luma information. +(ref.: https://en.wikipedia.org/wiki/Chroma_subsampling) + +Possible subsampling values are 0, 1 and 2 that correspond to 4:4:4, 4:2:2 and +4:2:0. + +You can get the subsampling of a JPEG with the +`JpegImagePlugin.get_sampling(im)` function. + +In JPEG compressed data a JPEG marker is used instead of an EXIF tag. 
+(ref.: https://www.exiv2.org/tags.html) + + +Quantization tables +------------------- + +They are values use by the DCT (Discrete cosine transform) to remove +*unnecessary* information from the image (the lossy part of the compression). +(ref.: https://en.wikipedia.org/wiki/Quantization_matrix#Quantization_matrices, +https://en.wikipedia.org/wiki/JPEG#Quantization) + +You can get the quantization tables of a JPEG with:: + + im.quantization + +This will return a dict with a number of arrays. You can pass this dict +directly as the qtables argument when saving a JPEG. + +The tables format between im.quantization and quantization in presets differ in +3 ways: + +1. The base container of the preset is a list with sublists instead of dict. + dict[0] -> list[0], dict[1] -> list[1], ... +2. Each table in a preset is a list instead of an array. +3. The zigzag order is remove in the preset (needed by libjpeg >= 6a). + +You can convert the dict format to the preset format with the +`JpegImagePlugin.convert_dict_qtables(dict_qtables)` function. 
+ +Libjpeg ref.: +https://web.archive.org/web/20120328125543/http://www.jpegcameras.com/libjpeg/libjpeg-3.html + +""" + +# fmt: off +presets = { + 'web_low': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [20, 16, 25, 39, 50, 46, 62, 68, + 16, 18, 23, 38, 38, 53, 65, 68, + 25, 23, 31, 38, 53, 65, 68, 68, + 39, 38, 38, 53, 65, 68, 68, 68, + 50, 38, 53, 65, 68, 68, 68, 68, + 46, 53, 65, 68, 68, 68, 68, 68, + 62, 65, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68], + [21, 25, 32, 38, 54, 68, 68, 68, + 25, 28, 24, 38, 54, 68, 68, 68, + 32, 24, 32, 43, 66, 68, 68, 68, + 38, 38, 43, 53, 68, 68, 68, 68, + 54, 54, 66, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68, 68, 68, 68, 68] + ]}, + 'web_medium': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [16, 11, 11, 16, 23, 27, 31, 30, + 11, 12, 12, 15, 20, 23, 23, 30, + 11, 12, 13, 16, 23, 26, 35, 47, + 16, 15, 16, 23, 26, 37, 47, 64, + 23, 20, 23, 26, 39, 51, 64, 64, + 27, 23, 26, 37, 51, 64, 64, 64, + 31, 23, 35, 47, 64, 64, 64, 64, + 30, 30, 47, 64, 64, 64, 64, 64], + [17, 15, 17, 21, 20, 26, 38, 48, + 15, 19, 18, 17, 20, 26, 35, 43, + 17, 18, 20, 22, 26, 30, 46, 53, + 21, 17, 22, 28, 30, 39, 53, 64, + 20, 20, 26, 30, 39, 48, 64, 64, + 26, 26, 30, 39, 48, 63, 64, 64, + 38, 35, 46, 53, 64, 64, 64, 64, + 48, 43, 53, 64, 64, 64, 64, 64] + ]}, + 'web_high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [6, 4, 4, 6, 9, 11, 12, 16, + 4, 5, 5, 6, 8, 10, 12, 12, + 4, 5, 5, 6, 10, 12, 14, 19, + 6, 6, 6, 11, 12, 15, 19, 28, + 9, 8, 10, 12, 16, 20, 27, 31, + 11, 10, 12, 15, 20, 27, 31, 31, + 12, 12, 14, 19, 27, 31, 31, 31, + 16, 12, 19, 28, 31, 31, 31, 31], + [7, 7, 13, 24, 26, 31, 31, 31, + 7, 12, 16, 21, 31, 31, 31, 31, + 13, 16, 17, 31, 31, 31, 31, 31, + 24, 21, 31, 31, 31, 31, 31, 31, + 26, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31] + ]}, + 'web_very_high': 
{'subsampling': 0, # "4:4:4" + 'quantization': [ + [2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 4, 5, 7, 9, + 2, 2, 2, 4, 5, 7, 9, 12, + 3, 3, 4, 5, 8, 10, 12, 12, + 4, 4, 5, 7, 10, 12, 12, 12, + 5, 5, 7, 9, 12, 12, 12, 12, + 6, 6, 9, 12, 12, 12, 12, 12], + [3, 3, 5, 9, 13, 15, 15, 15, + 3, 4, 6, 11, 14, 12, 12, 12, + 5, 6, 9, 14, 12, 12, 12, 12, + 9, 11, 14, 12, 12, 12, 12, 12, + 13, 14, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'web_maximum': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 1, 1, 2, 2, 3, + 1, 1, 1, 1, 2, 2, 3, 3, + 1, 1, 1, 2, 2, 3, 3, 3, + 1, 1, 2, 2, 3, 3, 3, 3], + [1, 1, 1, 2, 2, 3, 3, 3, + 1, 1, 1, 2, 3, 3, 3, 3, + 1, 1, 1, 3, 3, 3, 3, 3, + 2, 2, 3, 3, 3, 3, 3, 3, + 2, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3] + ]}, + 'low': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [18, 14, 14, 21, 30, 35, 34, 17, + 14, 16, 16, 19, 26, 23, 12, 12, + 14, 16, 17, 21, 23, 12, 12, 12, + 21, 19, 21, 23, 12, 12, 12, 12, + 30, 26, 23, 12, 12, 12, 12, 12, + 35, 23, 12, 12, 12, 12, 12, 12, + 34, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12], + [20, 19, 22, 27, 20, 20, 17, 17, + 19, 25, 23, 14, 14, 12, 12, 12, + 22, 23, 14, 14, 12, 12, 12, 12, + 27, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'medium': {'subsampling': 2, # "4:2:0" + 'quantization': [ + [12, 8, 8, 12, 17, 21, 24, 17, + 8, 9, 9, 11, 15, 19, 12, 12, + 8, 9, 10, 12, 19, 12, 12, 12, + 12, 11, 12, 21, 12, 12, 12, 12, + 17, 15, 19, 12, 12, 12, 12, 12, + 21, 19, 12, 12, 12, 12, 12, 12, + 24, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12], + [13, 11, 13, 16, 20, 
20, 17, 17, + 11, 14, 14, 14, 14, 12, 12, 12, + 13, 14, 14, 14, 12, 12, 12, 12, + 16, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'high': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [6, 4, 4, 6, 9, 11, 12, 16, + 4, 5, 5, 6, 8, 10, 12, 12, + 4, 5, 5, 6, 10, 12, 12, 12, + 6, 6, 6, 11, 12, 12, 12, 12, + 9, 8, 10, 12, 12, 12, 12, 12, + 11, 10, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 12, 12, 12, 12, 12, + 16, 12, 12, 12, 12, 12, 12, 12], + [7, 7, 13, 24, 20, 20, 17, 17, + 7, 12, 16, 14, 14, 12, 12, 12, + 13, 16, 14, 14, 12, 12, 12, 12, + 24, 14, 14, 12, 12, 12, 12, 12, + 20, 14, 12, 12, 12, 12, 12, 12, + 20, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12, + 17, 12, 12, 12, 12, 12, 12, 12] + ]}, + 'maximum': {'subsampling': 0, # "4:4:4" + 'quantization': [ + [2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 3, 4, 5, 6, + 2, 2, 2, 2, 4, 5, 7, 9, + 2, 2, 2, 4, 5, 7, 9, 12, + 3, 3, 4, 5, 8, 10, 12, 12, + 4, 4, 5, 7, 10, 12, 12, 12, + 5, 5, 7, 9, 12, 12, 12, 12, + 6, 6, 9, 12, 12, 12, 12, 12], + [3, 3, 5, 9, 13, 15, 15, 15, + 3, 4, 6, 10, 14, 12, 12, 12, + 5, 6, 9, 14, 12, 12, 12, 12, + 9, 10, 14, 12, 12, 12, 12, 12, + 13, 14, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12, + 15, 12, 12, 12, 12, 12, 12, 12] + ]}, +} +# fmt: on diff --git a/venv/Lib/site-packages/PIL/McIdasImagePlugin.py b/venv/Lib/site-packages/PIL/McIdasImagePlugin.py new file mode 100644 index 000000000..cd047fe9d --- /dev/null +++ b/venv/Lib/site-packages/PIL/McIdasImagePlugin.py @@ -0,0 +1,75 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Basic McIdas support for PIL +# +# History: +# 1997-05-05 fl Created (8-bit images only) +# 2009-03-08 fl Added 16/32-bit support. +# +# Thanks to Richard Jones and Craig Swank for specs and samples. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. 
+# +# See the README file for information on usage and redistribution. +# + +import struct + +from . import Image, ImageFile + + +def _accept(s): + return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04" + + +## +# Image plugin for McIdas area images. + + +class McIdasImageFile(ImageFile.ImageFile): + + format = "MCIDAS" + format_description = "McIdas area file" + + def _open(self): + + # parse area file directory + s = self.fp.read(256) + if not _accept(s) or len(s) != 256: + raise SyntaxError("not an McIdas area file") + + self.area_descriptor_raw = s + self.area_descriptor = w = [0] + list(struct.unpack("!64i", s)) + + # get mode + if w[11] == 1: + mode = rawmode = "L" + elif w[11] == 2: + # FIXME: add memory map support + mode = "I" + rawmode = "I;16B" + elif w[11] == 4: + # FIXME: add memory map support + mode = "I" + rawmode = "I;32B" + else: + raise SyntaxError("unsupported McIdas format") + + self.mode = mode + self._size = w[10], w[9] + + offset = w[34] + w[15] + stride = w[15] + w[10] * w[11] * w[14] + + self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))] + + +# -------------------------------------------------------------------- +# registry + +Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept) + +# no default extension diff --git a/venv/Lib/site-packages/PIL/MicImagePlugin.py b/venv/Lib/site-packages/PIL/MicImagePlugin.py new file mode 100644 index 000000000..2aed26030 --- /dev/null +++ b/venv/Lib/site-packages/PIL/MicImagePlugin.py @@ -0,0 +1,107 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Microsoft Image Composer support for PIL +# +# Notes: +# uses TiffImagePlugin.py to read the actual image streams +# +# History: +# 97-01-20 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + + +import olefile + +from . 
import Image, TiffImagePlugin + +# +# -------------------------------------------------------------------- + + +def _accept(prefix): + return prefix[:8] == olefile.MAGIC + + +## +# Image plugin for Microsoft's Image Composer file format. + + +class MicImageFile(TiffImagePlugin.TiffImageFile): + + format = "MIC" + format_description = "Microsoft Image Composer" + _close_exclusive_fp_after_loading = False + + def _open(self): + + # read the OLE directory and see if this is a likely + # to be a Microsoft Image Composer file + + try: + self.ole = olefile.OleFileIO(self.fp) + except OSError as e: + raise SyntaxError("not an MIC file; invalid OLE file") from e + + # find ACI subfiles with Image members (maybe not the + # best way to identify MIC files, but what the... ;-) + + self.images = [] + for path in self.ole.listdir(): + if path[1:] and path[0][-4:] == ".ACI" and path[1] == "Image": + self.images.append(path) + + # if we didn't find any images, this is probably not + # an MIC file. + if not self.images: + raise SyntaxError("not an MIC file; no image entries") + + self.__fp = self.fp + self.frame = None + self._n_frames = len(self.images) + self.is_animated = self._n_frames > 1 + + if len(self.images) > 1: + self.category = Image.CONTAINER + + self.seek(0) + + def seek(self, frame): + if not self._seek_check(frame): + return + try: + filename = self.images[frame] + except IndexError as e: + raise EOFError("no such frame") from e + + self.fp = self.ole.openstream(filename) + + TiffImagePlugin.TiffImageFile._open(self) + + self.frame = frame + + def tell(self): + return self.frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# +# -------------------------------------------------------------------- + +Image.register_open(MicImageFile.format, MicImageFile, _accept) + +Image.register_extension(MicImageFile.format, ".mic") diff --git 
a/venv/Lib/site-packages/PIL/MpegImagePlugin.py b/venv/Lib/site-packages/PIL/MpegImagePlugin.py new file mode 100644 index 000000000..a358dfdce --- /dev/null +++ b/venv/Lib/site-packages/PIL/MpegImagePlugin.py @@ -0,0 +1,83 @@ +# +# The Python Imaging Library. +# $Id$ +# +# MPEG file handling +# +# History: +# 95-09-09 fl Created +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile +from ._binary import i8 + +# +# Bitstream parser + + +class BitStream: + def __init__(self, fp): + self.fp = fp + self.bits = 0 + self.bitbuffer = 0 + + def next(self): + return i8(self.fp.read(1)) + + def peek(self, bits): + while self.bits < bits: + c = self.next() + if c < 0: + self.bits = 0 + continue + self.bitbuffer = (self.bitbuffer << 8) + c + self.bits += 8 + return self.bitbuffer >> (self.bits - bits) & (1 << bits) - 1 + + def skip(self, bits): + while self.bits < bits: + self.bitbuffer = (self.bitbuffer << 8) + i8(self.fp.read(1)) + self.bits += 8 + self.bits = self.bits - bits + + def read(self, bits): + v = self.peek(bits) + self.bits = self.bits - bits + return v + + +## +# Image plugin for MPEG streams. This plugin can identify a stream, +# but it cannot read it. 
+ + +class MpegImageFile(ImageFile.ImageFile): + + format = "MPEG" + format_description = "MPEG" + + def _open(self): + + s = BitStream(self.fp) + + if s.read(32) != 0x1B3: + raise SyntaxError("not an MPEG file") + + self.mode = "RGB" + self._size = s.read(12), s.read(12) + + +# -------------------------------------------------------------------- +# Registry stuff + +Image.register_open(MpegImageFile.format, MpegImageFile) + +Image.register_extensions(MpegImageFile.format, [".mpg", ".mpeg"]) + +Image.register_mime(MpegImageFile.format, "video/mpeg") diff --git a/venv/Lib/site-packages/PIL/MpoImagePlugin.py b/venv/Lib/site-packages/PIL/MpoImagePlugin.py new file mode 100644 index 000000000..575cc9c8e --- /dev/null +++ b/venv/Lib/site-packages/PIL/MpoImagePlugin.py @@ -0,0 +1,134 @@ +# +# The Python Imaging Library. +# $Id$ +# +# MPO file handling +# +# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the +# Camera & Imaging Products Association) +# +# The multi-picture object combines multiple JPEG images (with a modified EXIF +# data format) into a single file. While it can theoretically be used much like +# a GIF animation, it is commonly used to represent 3D photographs and is (as +# of this writing) the most commonly used format by 3D cameras. +# +# History: +# 2014-03-13 Feneric Created +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile, JpegImagePlugin +from ._binary import i16be as i16 + + +def _accept(prefix): + return JpegImagePlugin._accept(prefix) + + +def _save(im, fp, filename): + # Note that we can only save the current frame at present + return JpegImagePlugin._save(im, fp, filename) + + +## +# Image plugin for MPO images. 
+ + +class MpoImageFile(JpegImagePlugin.JpegImageFile): + + format = "MPO" + format_description = "MPO (CIPA DC-007)" + _close_exclusive_fp_after_loading = False + + def _open(self): + self.fp.seek(0) # prep the fp in order to pass the JPEG test + JpegImagePlugin.JpegImageFile._open(self) + self._after_jpeg_open() + + def _after_jpeg_open(self, mpheader=None): + self.mpinfo = mpheader if mpheader is not None else self._getmp() + self.n_frames = self.mpinfo[0xB001] + self.__mpoffsets = [ + mpent["DataOffset"] + self.info["mpoffset"] for mpent in self.mpinfo[0xB002] + ] + self.__mpoffsets[0] = 0 + # Note that the following assertion will only be invalid if something + # gets broken within JpegImagePlugin. + assert self.n_frames == len(self.__mpoffsets) + del self.info["mpoffset"] # no longer needed + self.is_animated = self.n_frames > 1 + self.__fp = self.fp # FIXME: hack + self.__fp.seek(self.__mpoffsets[0]) # get ready to read first frame + self.__frame = 0 + self.offset = 0 + # for now we can only handle reading and individual frame extraction + self.readonly = 1 + + def load_seek(self, pos): + self.__fp.seek(pos) + + def seek(self, frame): + if not self._seek_check(frame): + return + self.fp = self.__fp + self.offset = self.__mpoffsets[frame] + + self.fp.seek(self.offset + 2) # skip SOI marker + segment = self.fp.read(2) + if not segment: + raise ValueError("No data found for frame") + if i16(segment) == 0xFFE1: # APP1 + n = i16(self.fp.read(2)) - 2 + self.info["exif"] = ImageFile._safe_read(self.fp, n) + + exif = self.getexif() + if 40962 in exif and 40963 in exif: + self._size = (exif[40962], exif[40963]) + elif "exif" in self.info: + del self.info["exif"] + + self.tile = [("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))] + self.__frame = frame + + def tell(self): + return self.__frame + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + @staticmethod + def 
adopt(jpeg_instance, mpheader=None): + """ + Transform the instance of JpegImageFile into + an instance of MpoImageFile. + After the call, the JpegImageFile is extended + to be an MpoImageFile. + + This is essentially useful when opening a JPEG + file that reveals itself as an MPO, to avoid + double call to _open. + """ + jpeg_instance.__class__ = MpoImageFile + jpeg_instance._after_jpeg_open(mpheader) + return jpeg_instance + + +# --------------------------------------------------------------------- +# Registry stuff + +# Note that since MPO shares a factory with JPEG, we do not need to do a +# separate registration for it here. +# Image.register_open(MpoImageFile.format, +# JpegImagePlugin.jpeg_factory, _accept) +Image.register_save(MpoImageFile.format, _save) + +Image.register_extension(MpoImageFile.format, ".mpo") + +Image.register_mime(MpoImageFile.format, "image/mpo") diff --git a/venv/Lib/site-packages/PIL/MspImagePlugin.py b/venv/Lib/site-packages/PIL/MspImagePlugin.py new file mode 100644 index 000000000..ca9572187 --- /dev/null +++ b/venv/Lib/site-packages/PIL/MspImagePlugin.py @@ -0,0 +1,193 @@ +# +# The Python Imaging Library. +# +# MSP file handling +# +# This is the format used by the Paint program in Windows 1 and 2. +# +# History: +# 95-09-05 fl Created +# 97-01-03 fl Read/write MSP images +# 17-02-21 es Fixed RLE interpretation +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995-97. +# Copyright (c) Eric Soroos 2017. +# +# See the README file for information on usage and redistribution. +# +# More info on this format: https://archive.org/details/gg243631 +# Page 313: +# Figure 205. Windows Paint Version 1: "DanM" Format +# Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03 +# +# See also: http://www.fileformat.info/format/mspaint/egff.htm + +import io +import struct + +from . 
import Image, ImageFile +from ._binary import i8, i16le as i16, o16le as o16 + +# +# read MSP files + + +def _accept(prefix): + return prefix[:4] in [b"DanM", b"LinS"] + + +## +# Image plugin for Windows MSP images. This plugin supports both +# uncompressed (Windows 1.0). + + +class MspImageFile(ImageFile.ImageFile): + + format = "MSP" + format_description = "Windows Paint" + + def _open(self): + + # Header + s = self.fp.read(32) + if not _accept(s): + raise SyntaxError("not an MSP file") + + # Header checksum + checksum = 0 + for i in range(0, 32, 2): + checksum = checksum ^ i16(s[i : i + 2]) + if checksum != 0: + raise SyntaxError("bad MSP checksum") + + self.mode = "1" + self._size = i16(s[4:]), i16(s[6:]) + + if s[:4] == b"DanM": + self.tile = [("raw", (0, 0) + self.size, 32, ("1", 0, 1))] + else: + self.tile = [("MSP", (0, 0) + self.size, 32, None)] + + +class MspDecoder(ImageFile.PyDecoder): + # The algo for the MSP decoder is from + # http://www.fileformat.info/format/mspaint/egff.htm + # cc-by-attribution -- That page references is taken from the + # Encyclopedia of Graphics File Formats and is licensed by + # O'Reilly under the Creative Common/Attribution license + # + # For RLE encoded files, the 32byte header is followed by a scan + # line map, encoded as one 16bit word of encoded byte length per + # line. + # + # NOTE: the encoded length of the line can be 0. This was not + # handled in the previous version of this encoder, and there's no + # mention of how to handle it in the documentation. From the few + # examples I've seen, I've assumed that it is a fill of the + # background color, in this case, white. 
+ # + # + # Pseudocode of the decoder: + # Read a BYTE value as the RunType + # If the RunType value is zero + # Read next byte as the RunCount + # Read the next byte as the RunValue + # Write the RunValue byte RunCount times + # If the RunType value is non-zero + # Use this value as the RunCount + # Read and write the next RunCount bytes literally + # + # e.g.: + # 0x00 03 ff 05 00 01 02 03 04 + # would yield the bytes: + # 0xff ff ff 00 01 02 03 04 + # + # which are then interpreted as a bit packed mode '1' image + + _pulls_fd = True + + def decode(self, buffer): + + img = io.BytesIO() + blank_line = bytearray((0xFF,) * ((self.state.xsize + 7) // 8)) + try: + self.fd.seek(32) + rowmap = struct.unpack_from( + "<%dH" % (self.state.ysize), self.fd.read(self.state.ysize * 2) + ) + except struct.error as e: + raise OSError("Truncated MSP file in row map") from e + + for x, rowlen in enumerate(rowmap): + try: + if rowlen == 0: + img.write(blank_line) + continue + row = self.fd.read(rowlen) + if len(row) != rowlen: + raise OSError( + "Truncated MSP file, expected %d bytes on row %s", (rowlen, x) + ) + idx = 0 + while idx < rowlen: + runtype = i8(row[idx]) + idx += 1 + if runtype == 0: + (runcount, runval) = struct.unpack_from("Bc", row, idx) + img.write(runval * runcount) + idx += 2 + else: + runcount = runtype + img.write(row[idx : idx + runcount]) + idx += runcount + + except struct.error as e: + raise OSError("Corrupted MSP file in row %d" % x) from e + + self.set_as_raw(img.getvalue(), ("1", 0, 1)) + + return 0, 0 + + +Image.register_decoder("MSP", MspDecoder) + + +# +# write MSP files (uncompressed only) + + +def _save(im, fp, filename): + + if im.mode != "1": + raise OSError("cannot write mode %s as MSP" % im.mode) + + # create MSP header + header = [0] * 16 + + header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1 + header[2], header[3] = im.size + header[4], header[5] = 1, 1 + header[6], header[7] = 1, 1 + header[8], header[9] = im.size + + checksum = 0 + 
for h in header: + checksum = checksum ^ h + header[12] = checksum # FIXME: is this the right field? + + # header + for h in header: + fp.write(o16(h)) + + # image body + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 32, ("1", 0, 1))]) + + +# +# registry + +Image.register_open(MspImageFile.format, MspImageFile, _accept) +Image.register_save(MspImageFile.format, _save) + +Image.register_extension(MspImageFile.format, ".msp") diff --git a/venv/Lib/site-packages/PIL/PSDraw.py b/venv/Lib/site-packages/PIL/PSDraw.py new file mode 100644 index 000000000..762d31e88 --- /dev/null +++ b/venv/Lib/site-packages/PIL/PSDraw.py @@ -0,0 +1,237 @@ +# +# The Python Imaging Library +# $Id$ +# +# simple postscript graphics interface +# +# History: +# 1996-04-20 fl Created +# 1999-01-10 fl Added gsave/grestore to image method +# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge) +# +# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved. +# Copyright (c) 1996 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +import sys + +from . import EpsImagePlugin + +## +# Simple Postscript graphics interface. + + +class PSDraw: + """ + Sets up printing to the given file. If **fp** is omitted, + :py:attr:`sys.stdout` is assumed. + """ + + def __init__(self, fp=None): + if not fp: + fp = sys.stdout + self.fp = fp + + def _fp_write(self, to_write): + if self.fp == sys.stdout: + self.fp.write(to_write) + else: + self.fp.write(bytes(to_write, "UTF-8")) + + def begin_document(self, id=None): + """Set up printing of a document. (Write Postscript DSC header.)""" + # FIXME: incomplete + self._fp_write( + "%!PS-Adobe-3.0\n" + "save\n" + "/showpage { } def\n" + "%%EndComments\n" + "%%BeginDocument\n" + ) + # self._fp_write(ERROR_PS) # debugging! + self._fp_write(EDROFF_PS) + self._fp_write(VDI_PS) + self._fp_write("%%EndProlog\n") + self.isofont = {} + + def end_document(self): + """Ends printing. 
(Write Postscript DSC footer.)""" + self._fp_write("%%EndDocument\nrestore showpage\n%%End\n") + if hasattr(self.fp, "flush"): + self.fp.flush() + + def setfont(self, font, size): + """ + Selects which font to use. + + :param font: A Postscript font name + :param size: Size in points. + """ + if font not in self.isofont: + # reencode font + self._fp_write("/PSDraw-{} ISOLatin1Encoding /{} E\n".format(font, font)) + self.isofont[font] = 1 + # rough + self._fp_write("/F0 %d /PSDraw-%s F\n" % (size, font)) + + def line(self, xy0, xy1): + """ + Draws a line between the two points. Coordinates are given in + Postscript point coordinates (72 points per inch, (0, 0) is the lower + left corner of the page). + """ + xy = xy0 + xy1 + self._fp_write("%d %d %d %d Vl\n" % xy) + + def rectangle(self, box): + """ + Draws a rectangle. + + :param box: A 4-tuple of integers whose order and function is currently + undocumented. + + Hint: the tuple is passed into this format string: + + .. code-block:: python + + %d %d M %d %d 0 Vr\n + """ + self._fp_write("%d %d M %d %d 0 Vr\n" % box) + + def text(self, xy, text): + """ + Draws text at the given position. You must use + :py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method. 
+ """ + text = "\\(".join(text.split("(")) + text = "\\)".join(text.split(")")) + xy = xy + (text,) + self._fp_write("%d %d M (%s) S\n" % xy) + + def image(self, box, im, dpi=None): + """Draw a PIL image, centered in the given box.""" + # default resolution depends on mode + if not dpi: + if im.mode == "1": + dpi = 200 # fax + else: + dpi = 100 # greyscale + # image size (on paper) + x = im.size[0] * 72 / dpi + y = im.size[1] * 72 / dpi + # max allowed size + xmax = float(box[2] - box[0]) + ymax = float(box[3] - box[1]) + if x > xmax: + y = y * xmax / x + x = xmax + if y > ymax: + x = x * ymax / y + y = ymax + dx = (xmax - x) / 2 + box[0] + dy = (ymax - y) / 2 + box[1] + self._fp_write("gsave\n{:f} {:f} translate\n".format(dx, dy)) + if (x, y) != im.size: + # EpsImagePlugin._save prints the image at (0,0,xsize,ysize) + sx = x / im.size[0] + sy = y / im.size[1] + self._fp_write("{:f} {:f} scale\n".format(sx, sy)) + EpsImagePlugin._save(im, self.fp, None, 0) + self._fp_write("\ngrestore\n") + + +# -------------------------------------------------------------------- +# Postscript driver + +# +# EDROFF.PS -- Postscript driver for Edroff 2 +# +# History: +# 94-01-25 fl: created (edroff 2.04) +# +# Copyright (c) Fredrik Lundh 1994. +# + + +EDROFF_PS = """\ +/S { show } bind def +/P { moveto show } bind def +/M { moveto } bind def +/X { 0 rmoveto } bind def +/Y { 0 exch rmoveto } bind def +/E { findfont + dup maxlength dict begin + { + 1 index /FID ne { def } { pop pop } ifelse + } forall + /Encoding exch def + dup /FontName exch def + currentdict end definefont pop +} bind def +/F { findfont exch scalefont dup setfont + [ exch /setfont cvx ] cvx bind def +} bind def +""" + +# +# VDI.PS -- Postscript driver for VDI meta commands +# +# History: +# 94-01-25 fl: created (edroff 2.04) +# +# Copyright (c) Fredrik Lundh 1994. 
+#
+
+VDI_PS = """\
+/Vm { moveto } bind def
+/Va { newpath arcn stroke } bind def
+/Vl { moveto lineto stroke } bind def
+/Vc { newpath 0 360 arc closepath } bind def
+/Vr {   exch dup 0 rlineto
+        exch dup neg 0 exch rlineto
+        exch neg 0 rlineto
+        0 exch rlineto
+        100 div setgray fill 0 setgray } bind def
+/Tm matrix def
+/Ve {   Tm currentmatrix pop
+        translate scale newpath 0 0 .5 0 360 arc closepath
+        Tm setmatrix
+} bind def
+/Vf { currentgray exch setgray fill setgray } bind def
+"""
+
+#
+# ERROR.PS -- Error handler
+#
+# History:
+# 89-11-21 fl: created (pslist 1.10)
+#
+
+ERROR_PS = """\
+/landscape false def
+/errorBUF 200 string def
+/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def
+errordict begin /handleerror {
+    initmatrix /Courier findfont 10 scalefont setfont
+    newpath 72 720 moveto $error begin /newerror false def
+    (PostScript Error) show errorNL errorNL
+    (Error: ) show
+    /errorname load errorBUF cvs show errorNL errorNL
+    (Command: ) show
+    /command load dup type /stringtype ne { errorBUF cvs } if show
+    errorNL errorNL
+    (VMstatus: ) show
+    vmstatus errorBUF cvs show ( bytes available, ) show
+    errorBUF cvs show ( bytes used at level ) show
+    errorBUF cvs show errorNL errorNL
+    (Operand stack: ) show errorNL /ostack load {
+        dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
+    } forall errorNL
+    (Execution stack: ) show errorNL /estack load {
+        dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
+    } forall
+    end showpage
+} def end
+"""
diff --git a/venv/Lib/site-packages/PIL/PaletteFile.py b/venv/Lib/site-packages/PIL/PaletteFile.py
new file mode 100644
index 000000000..6ccaa1f53
--- /dev/null
+++ b/venv/Lib/site-packages/PIL/PaletteFile.py
@@ -0,0 +1,53 @@
+#
+# Python Imaging Library
+# $Id$
+#
+# stuff to read simple, teragon-style palette files
+#
+# History:
+# 97-08-23 fl Created
+#
+# Copyright (c) Secret Labs AB 1997.
+# Copyright (c) Fredrik Lundh 1997.
+# +# See the README file for information on usage and redistribution. +# + +from ._binary import o8 + + +class PaletteFile: + """File handler for Teragon-style palette files.""" + + rawmode = "RGB" + + def __init__(self, fp): + + self.palette = [(i, i, i) for i in range(256)] + + while True: + + s = fp.readline() + + if not s: + break + if s[0:1] == b"#": + continue + if len(s) > 100: + raise SyntaxError("bad palette file") + + v = [int(x) for x in s.split()] + try: + [i, r, g, b] = v + except ValueError: + [i, r] = v + g = b = r + + if 0 <= i <= 255: + self.palette[i] = o8(r) + o8(g) + o8(b) + + self.palette = b"".join(self.palette) + + def getpalette(self): + + return self.palette, self.rawmode diff --git a/venv/Lib/site-packages/PIL/PalmImagePlugin.py b/venv/Lib/site-packages/PIL/PalmImagePlugin.py new file mode 100644 index 000000000..9fc55d795 --- /dev/null +++ b/venv/Lib/site-packages/PIL/PalmImagePlugin.py @@ -0,0 +1,226 @@ +# +# The Python Imaging Library. +# $Id$ +# + +## +# Image plugin for Palm pixmap images (output only). +## + +from . 
import Image, ImageFile +from ._binary import o8, o16be as o16b + +# fmt: off +_Palm8BitColormapValues = ( + (255, 255, 255), (255, 204, 255), (255, 153, 255), (255, 102, 255), + (255, 51, 255), (255, 0, 255), (255, 255, 204), (255, 204, 204), + (255, 153, 204), (255, 102, 204), (255, 51, 204), (255, 0, 204), + (255, 255, 153), (255, 204, 153), (255, 153, 153), (255, 102, 153), + (255, 51, 153), (255, 0, 153), (204, 255, 255), (204, 204, 255), + (204, 153, 255), (204, 102, 255), (204, 51, 255), (204, 0, 255), + (204, 255, 204), (204, 204, 204), (204, 153, 204), (204, 102, 204), + (204, 51, 204), (204, 0, 204), (204, 255, 153), (204, 204, 153), + (204, 153, 153), (204, 102, 153), (204, 51, 153), (204, 0, 153), + (153, 255, 255), (153, 204, 255), (153, 153, 255), (153, 102, 255), + (153, 51, 255), (153, 0, 255), (153, 255, 204), (153, 204, 204), + (153, 153, 204), (153, 102, 204), (153, 51, 204), (153, 0, 204), + (153, 255, 153), (153, 204, 153), (153, 153, 153), (153, 102, 153), + (153, 51, 153), (153, 0, 153), (102, 255, 255), (102, 204, 255), + (102, 153, 255), (102, 102, 255), (102, 51, 255), (102, 0, 255), + (102, 255, 204), (102, 204, 204), (102, 153, 204), (102, 102, 204), + (102, 51, 204), (102, 0, 204), (102, 255, 153), (102, 204, 153), + (102, 153, 153), (102, 102, 153), (102, 51, 153), (102, 0, 153), + (51, 255, 255), (51, 204, 255), (51, 153, 255), (51, 102, 255), + (51, 51, 255), (51, 0, 255), (51, 255, 204), (51, 204, 204), + (51, 153, 204), (51, 102, 204), (51, 51, 204), (51, 0, 204), + (51, 255, 153), (51, 204, 153), (51, 153, 153), (51, 102, 153), + (51, 51, 153), (51, 0, 153), (0, 255, 255), (0, 204, 255), + (0, 153, 255), (0, 102, 255), (0, 51, 255), (0, 0, 255), + (0, 255, 204), (0, 204, 204), (0, 153, 204), (0, 102, 204), + (0, 51, 204), (0, 0, 204), (0, 255, 153), (0, 204, 153), + (0, 153, 153), (0, 102, 153), (0, 51, 153), (0, 0, 153), + (255, 255, 102), (255, 204, 102), (255, 153, 102), (255, 102, 102), + (255, 51, 102), (255, 0, 102), (255, 
255, 51), (255, 204, 51), + (255, 153, 51), (255, 102, 51), (255, 51, 51), (255, 0, 51), + (255, 255, 0), (255, 204, 0), (255, 153, 0), (255, 102, 0), + (255, 51, 0), (255, 0, 0), (204, 255, 102), (204, 204, 102), + (204, 153, 102), (204, 102, 102), (204, 51, 102), (204, 0, 102), + (204, 255, 51), (204, 204, 51), (204, 153, 51), (204, 102, 51), + (204, 51, 51), (204, 0, 51), (204, 255, 0), (204, 204, 0), + (204, 153, 0), (204, 102, 0), (204, 51, 0), (204, 0, 0), + (153, 255, 102), (153, 204, 102), (153, 153, 102), (153, 102, 102), + (153, 51, 102), (153, 0, 102), (153, 255, 51), (153, 204, 51), + (153, 153, 51), (153, 102, 51), (153, 51, 51), (153, 0, 51), + (153, 255, 0), (153, 204, 0), (153, 153, 0), (153, 102, 0), + (153, 51, 0), (153, 0, 0), (102, 255, 102), (102, 204, 102), + (102, 153, 102), (102, 102, 102), (102, 51, 102), (102, 0, 102), + (102, 255, 51), (102, 204, 51), (102, 153, 51), (102, 102, 51), + (102, 51, 51), (102, 0, 51), (102, 255, 0), (102, 204, 0), + (102, 153, 0), (102, 102, 0), (102, 51, 0), (102, 0, 0), + (51, 255, 102), (51, 204, 102), (51, 153, 102), (51, 102, 102), + (51, 51, 102), (51, 0, 102), (51, 255, 51), (51, 204, 51), + (51, 153, 51), (51, 102, 51), (51, 51, 51), (51, 0, 51), + (51, 255, 0), (51, 204, 0), (51, 153, 0), (51, 102, 0), + (51, 51, 0), (51, 0, 0), (0, 255, 102), (0, 204, 102), + (0, 153, 102), (0, 102, 102), (0, 51, 102), (0, 0, 102), + (0, 255, 51), (0, 204, 51), (0, 153, 51), (0, 102, 51), + (0, 51, 51), (0, 0, 51), (0, 255, 0), (0, 204, 0), + (0, 153, 0), (0, 102, 0), (0, 51, 0), (17, 17, 17), + (34, 34, 34), (68, 68, 68), (85, 85, 85), (119, 119, 119), + (136, 136, 136), (170, 170, 170), (187, 187, 187), (221, 221, 221), + (238, 238, 238), (192, 192, 192), (128, 0, 0), (128, 0, 128), + (0, 128, 0), (0, 128, 128), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), 
+ (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), + (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)) +# fmt: on + + +# so build a prototype image to be used for palette resampling +def build_prototype_image(): + image = Image.new("L", (1, len(_Palm8BitColormapValues))) + image.putdata(list(range(len(_Palm8BitColormapValues)))) + palettedata = () + for colormapValue in _Palm8BitColormapValues: + palettedata += colormapValue + palettedata += (0, 0, 0) * (256 - len(_Palm8BitColormapValues)) + image.putpalette(palettedata) + return image + + +Palm8BitColormapImage = build_prototype_image() + +# OK, we now have in Palm8BitColormapImage, +# a "P"-mode image with the right palette +# +# -------------------------------------------------------------------- + +_FLAGS = {"custom-colormap": 0x4000, "is-compressed": 0x8000, "has-transparent": 0x2000} + +_COMPRESSION_TYPES = {"none": 0xFF, "rle": 0x01, "scanline": 0x00} + + +# +# -------------------------------------------------------------------- + +## +# (Internal) Image save plugin for the Palm format. + + +def _save(im, fp, filename): + + if im.mode == "P": + + # we assume this is a color Palm image with the standard colormap, + # unless the "info" dict has a "custom-colormap" field + + rawmode = "P" + bpp = 8 + version = 1 + + elif im.mode == "L": + if im.encoderinfo.get("bpp") in (1, 2, 4): + # this is 8-bit grayscale, so we shift it to get the high-order bits, + # and invert it because + # Palm does greyscale from white (0) to black (1) + bpp = im.encoderinfo["bpp"] + im = im.point( + lambda x, shift=8 - bpp, maxval=(1 << bpp) - 1: maxval - (x >> shift) + ) + elif im.info.get("bpp") in (1, 2, 4): + # here we assume that even though the inherent mode is 8-bit grayscale, + # only the lower bpp bits are significant. + # We invert them to match the Palm. 
+ bpp = im.info["bpp"] + im = im.point(lambda x, maxval=(1 << bpp) - 1: maxval - (x & maxval)) + else: + raise OSError("cannot write mode %s as Palm" % im.mode) + + # we ignore the palette here + im.mode = "P" + rawmode = "P;" + str(bpp) + version = 1 + + elif im.mode == "1": + + # monochrome -- write it inverted, as is the Palm standard + rawmode = "1;I" + bpp = 1 + version = 0 + + else: + + raise OSError("cannot write mode %s as Palm" % im.mode) + + # + # make sure image data is available + im.load() + + # write header + + cols = im.size[0] + rows = im.size[1] + + rowbytes = int((cols + (16 // bpp - 1)) / (16 // bpp)) * 2 + transparent_index = 0 + compression_type = _COMPRESSION_TYPES["none"] + + flags = 0 + if im.mode == "P" and "custom-colormap" in im.info: + flags = flags & _FLAGS["custom-colormap"] + colormapsize = 4 * 256 + 2 + colormapmode = im.palette.mode + colormap = im.getdata().getpalette() + else: + colormapsize = 0 + + if "offset" in im.info: + offset = (rowbytes * rows + 16 + 3 + colormapsize) // 4 + else: + offset = 0 + + fp.write(o16b(cols) + o16b(rows) + o16b(rowbytes) + o16b(flags)) + fp.write(o8(bpp)) + fp.write(o8(version)) + fp.write(o16b(offset)) + fp.write(o8(transparent_index)) + fp.write(o8(compression_type)) + fp.write(o16b(0)) # reserved by Palm + + # now write colormap if necessary + + if colormapsize > 0: + fp.write(o16b(256)) + for i in range(256): + fp.write(o8(i)) + if colormapmode == "RGB": + fp.write( + o8(colormap[3 * i]) + + o8(colormap[3 * i + 1]) + + o8(colormap[3 * i + 2]) + ) + elif colormapmode == "RGBA": + fp.write( + o8(colormap[4 * i]) + + o8(colormap[4 * i + 1]) + + o8(colormap[4 * i + 2]) + ) + + # now convert data to raw form + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, rowbytes, 1))]) + + if hasattr(fp, "flush"): + fp.flush() + + +# +# -------------------------------------------------------------------- + +Image.register_save("Palm", _save) + +Image.register_extension("Palm", ".palm") + 
+Image.register_mime("Palm", "image/palm") diff --git a/venv/Lib/site-packages/PIL/PcdImagePlugin.py b/venv/Lib/site-packages/PIL/PcdImagePlugin.py new file mode 100644 index 000000000..625f55646 --- /dev/null +++ b/venv/Lib/site-packages/PIL/PcdImagePlugin.py @@ -0,0 +1,64 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PCD file handling +# +# History: +# 96-05-10 fl Created +# 96-05-27 fl Added draft mode (128x192, 256x384) +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile +from ._binary import i8 + +## +# Image plugin for PhotoCD images. This plugin only reads the 768x512 +# image from the file; higher resolutions are encoded in a proprietary +# encoding. + + +class PcdImageFile(ImageFile.ImageFile): + + format = "PCD" + format_description = "Kodak PhotoCD" + + def _open(self): + + # rough + self.fp.seek(2048) + s = self.fp.read(2048) + + if s[:4] != b"PCD_": + raise SyntaxError("not a PCD file") + + orientation = i8(s[1538]) & 3 + self.tile_post_rotate = None + if orientation == 1: + self.tile_post_rotate = 90 + elif orientation == 3: + self.tile_post_rotate = -90 + + self.mode = "RGB" + self._size = 768, 512 # FIXME: not correct for rotated images! 
+ self.tile = [("pcd", (0, 0) + self.size, 96 * 2048, None)] + + def load_end(self): + if self.tile_post_rotate: + # Handle rotated PCDs + self.im = self.im.rotate(self.tile_post_rotate) + self._size = self.im.size + + +# +# registry + +Image.register_open(PcdImageFile.format, PcdImageFile) + +Image.register_extension(PcdImageFile.format, ".pcd") diff --git a/venv/Lib/site-packages/PIL/PcfFontFile.py b/venv/Lib/site-packages/PIL/PcfFontFile.py new file mode 100644 index 000000000..f8836ad88 --- /dev/null +++ b/venv/Lib/site-packages/PIL/PcfFontFile.py @@ -0,0 +1,244 @@ +# +# THIS IS WORK IN PROGRESS +# +# The Python Imaging Library +# $Id$ +# +# portable compiled font file parser +# +# history: +# 1997-08-19 fl created +# 2003-09-13 fl fixed loading of unicode fonts +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1997-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +import io + +from . import FontFile, Image +from ._binary import i8, i16be as b16, i16le as l16, i32be as b32, i32le as l32 + +# -------------------------------------------------------------------- +# declarations + +PCF_MAGIC = 0x70636601 # "\x01fcp" + +PCF_PROPERTIES = 1 << 0 +PCF_ACCELERATORS = 1 << 1 +PCF_METRICS = 1 << 2 +PCF_BITMAPS = 1 << 3 +PCF_INK_METRICS = 1 << 4 +PCF_BDF_ENCODINGS = 1 << 5 +PCF_SWIDTHS = 1 << 6 +PCF_GLYPH_NAMES = 1 << 7 +PCF_BDF_ACCELERATORS = 1 << 8 + +BYTES_PER_ROW = [ + lambda bits: ((bits + 7) >> 3), + lambda bits: ((bits + 15) >> 3) & ~1, + lambda bits: ((bits + 31) >> 3) & ~3, + lambda bits: ((bits + 63) >> 3) & ~7, +] + + +def sz(s, o): + return s[o : s.index(b"\0", o)] + + +class PcfFontFile(FontFile.FontFile): + """Font file plugin for the X11 PCF format.""" + + name = "name" + + def __init__(self, fp, charset_encoding="iso8859-1"): + + self.charset_encoding = charset_encoding + + magic = l32(fp.read(4)) + if magic != PCF_MAGIC: + raise SyntaxError("not a PCF file") + + super().__init__() + + count 
= l32(fp.read(4)) + self.toc = {} + for i in range(count): + type = l32(fp.read(4)) + self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4)) + + self.fp = fp + + self.info = self._load_properties() + + metrics = self._load_metrics() + bitmaps = self._load_bitmaps(metrics) + encoding = self._load_encoding() + + # + # create glyph structure + + for ch in range(256): + ix = encoding[ch] + if ix is not None: + x, y, l, r, w, a, d, f = metrics[ix] + glyph = (w, 0), (l, d - y, x + l, d), (0, 0, x, y), bitmaps[ix] + self.glyph[ch] = glyph + + def _getformat(self, tag): + + format, size, offset = self.toc[tag] + + fp = self.fp + fp.seek(offset) + + format = l32(fp.read(4)) + + if format & 4: + i16, i32 = b16, b32 + else: + i16, i32 = l16, l32 + + return fp, format, i16, i32 + + def _load_properties(self): + + # + # font properties + + properties = {} + + fp, format, i16, i32 = self._getformat(PCF_PROPERTIES) + + nprops = i32(fp.read(4)) + + # read property description + p = [] + for i in range(nprops): + p.append((i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4)))) + if nprops & 3: + fp.seek(4 - (nprops & 3), io.SEEK_CUR) # pad + + data = fp.read(i32(fp.read(4))) + + for k, s, v in p: + k = sz(data, k) + if s: + v = sz(data, v) + properties[k] = v + + return properties + + def _load_metrics(self): + + # + # font metrics + + metrics = [] + + fp, format, i16, i32 = self._getformat(PCF_METRICS) + + append = metrics.append + + if (format & 0xFF00) == 0x100: + + # "compressed" metrics + for i in range(i16(fp.read(2))): + left = i8(fp.read(1)) - 128 + right = i8(fp.read(1)) - 128 + width = i8(fp.read(1)) - 128 + ascent = i8(fp.read(1)) - 128 + descent = i8(fp.read(1)) - 128 + xsize = right - left + ysize = ascent + descent + append((xsize, ysize, left, right, width, ascent, descent, 0)) + + else: + + # "jumbo" metrics + for i in range(i32(fp.read(4))): + left = i16(fp.read(2)) + right = i16(fp.read(2)) + width = i16(fp.read(2)) + ascent = i16(fp.read(2)) + descent = 
i16(fp.read(2)) + attributes = i16(fp.read(2)) + xsize = right - left + ysize = ascent + descent + append((xsize, ysize, left, right, width, ascent, descent, attributes)) + + return metrics + + def _load_bitmaps(self, metrics): + + # + # bitmap data + + bitmaps = [] + + fp, format, i16, i32 = self._getformat(PCF_BITMAPS) + + nbitmaps = i32(fp.read(4)) + + if nbitmaps != len(metrics): + raise OSError("Wrong number of bitmaps") + + offsets = [] + for i in range(nbitmaps): + offsets.append(i32(fp.read(4))) + + bitmapSizes = [] + for i in range(4): + bitmapSizes.append(i32(fp.read(4))) + + # byteorder = format & 4 # non-zero => MSB + bitorder = format & 8 # non-zero => MSB + padindex = format & 3 + + bitmapsize = bitmapSizes[padindex] + offsets.append(bitmapsize) + + data = fp.read(bitmapsize) + + pad = BYTES_PER_ROW[padindex] + mode = "1;R" + if bitorder: + mode = "1" + + for i in range(nbitmaps): + x, y, l, r, w, a, d, f = metrics[i] + b, e = offsets[i], offsets[i + 1] + bitmaps.append(Image.frombytes("1", (x, y), data[b:e], "raw", mode, pad(x))) + + return bitmaps + + def _load_encoding(self): + + # map character code to bitmap index + encoding = [None] * 256 + + fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS) + + firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2)) + firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2)) + + i16(fp.read(2)) # default + + nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1) + + encodingOffsets = [i16(fp.read(2)) for _ in range(nencoding)] + + for i in range(firstCol, len(encoding)): + try: + encodingOffset = encodingOffsets[ + ord(bytearray([i]).decode(self.charset_encoding)) + ] + if encodingOffset != 0xFFFF: + encoding[i] = encodingOffset + except UnicodeDecodeError: + # character is not supported in selected encoding + pass + + return encoding diff --git a/venv/Lib/site-packages/PIL/PcxImagePlugin.py b/venv/Lib/site-packages/PIL/PcxImagePlugin.py new file mode 100644 index 000000000..f7ae3bf70 --- /dev/null +++ 
b/venv/Lib/site-packages/PIL/PcxImagePlugin.py @@ -0,0 +1,206 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PCX file handling +# +# This format was originally used by ZSoft's popular PaintBrush +# program for the IBM PC. It is also supported by many MS-DOS and +# Windows applications, including the Windows PaintBrush program in +# Windows 3. +# +# history: +# 1995-09-01 fl Created +# 1996-05-20 fl Fixed RGB support +# 1997-01-03 fl Fixed 2-bit and 4-bit support +# 1999-02-03 fl Fixed 8-bit support (broken in 1.0b1) +# 1999-02-07 fl Added write support +# 2002-06-09 fl Made 2-bit and 4-bit support a bit more robust +# 2002-07-30 fl Seek from to current position, not beginning of file +# 2003-06-03 fl Extract DPI settings (info["dpi"]) +# +# Copyright (c) 1997-2003 by Secret Labs AB. +# Copyright (c) 1995-2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +import io +import logging + +from . import Image, ImageFile, ImagePalette +from ._binary import i8, i16le as i16, o8, o16le as o16 + +logger = logging.getLogger(__name__) + + +def _accept(prefix): + return i8(prefix[0]) == 10 and i8(prefix[1]) in [0, 2, 3, 5] + + +## +# Image plugin for Paintbrush images. 
+ + +class PcxImageFile(ImageFile.ImageFile): + + format = "PCX" + format_description = "Paintbrush" + + def _open(self): + + # header + s = self.fp.read(128) + if not _accept(s): + raise SyntaxError("not a PCX file") + + # image + bbox = i16(s, 4), i16(s, 6), i16(s, 8) + 1, i16(s, 10) + 1 + if bbox[2] <= bbox[0] or bbox[3] <= bbox[1]: + raise SyntaxError("bad PCX image size") + logger.debug("BBox: %s %s %s %s", *bbox) + + # format + version = i8(s[1]) + bits = i8(s[3]) + planes = i8(s[65]) + stride = i16(s, 66) + logger.debug( + "PCX version %s, bits %s, planes %s, stride %s", + version, + bits, + planes, + stride, + ) + + self.info["dpi"] = i16(s, 12), i16(s, 14) + + if bits == 1 and planes == 1: + mode = rawmode = "1" + + elif bits == 1 and planes in (2, 4): + mode = "P" + rawmode = "P;%dL" % planes + self.palette = ImagePalette.raw("RGB", s[16:64]) + + elif version == 5 and bits == 8 and planes == 1: + mode = rawmode = "L" + # FIXME: hey, this doesn't work with the incremental loader !!! 
+ self.fp.seek(-769, io.SEEK_END) + s = self.fp.read(769) + if len(s) == 769 and i8(s[0]) == 12: + # check if the palette is linear greyscale + for i in range(256): + if s[i * 3 + 1 : i * 3 + 4] != o8(i) * 3: + mode = rawmode = "P" + break + if mode == "P": + self.palette = ImagePalette.raw("RGB", s[1:]) + self.fp.seek(128) + + elif version == 5 and bits == 8 and planes == 3: + mode = "RGB" + rawmode = "RGB;L" + + else: + raise OSError("unknown PCX mode") + + self.mode = mode + self._size = bbox[2] - bbox[0], bbox[3] - bbox[1] + + bbox = (0, 0) + self.size + logger.debug("size: %sx%s", *self.size) + + self.tile = [("pcx", bbox, self.fp.tell(), (rawmode, planes * stride))] + + +# -------------------------------------------------------------------- +# save PCX files + + +SAVE = { + # mode: (version, bits, planes, raw mode) + "1": (2, 1, 1, "1"), + "L": (5, 8, 1, "L"), + "P": (5, 8, 1, "P"), + "RGB": (5, 8, 3, "RGB;L"), +} + + +def _save(im, fp, filename): + + try: + version, bits, planes, rawmode = SAVE[im.mode] + except KeyError as e: + raise ValueError("Cannot save %s images as PCX" % im.mode) from e + + # bytes per plane + stride = (im.size[0] * bits + 7) // 8 + # stride should be even + stride += stride % 2 + # Stride needs to be kept in sync with the PcxEncode.c version. + # Ideally it should be passed in in the state, but the bytes value + # gets overwritten. + + logger.debug( + "PcxImagePlugin._save: xwidth: %d, bits: %d, stride: %d", + im.size[0], + bits, + stride, + ) + + # under windows, we could determine the current screen size with + # "Image.core.display_mode()[1]", but I think that's overkill... 
+ + screen = im.size + + dpi = 100, 100 + + # PCX header + fp.write( + o8(10) + + o8(version) + + o8(1) + + o8(bits) + + o16(0) + + o16(0) + + o16(im.size[0] - 1) + + o16(im.size[1] - 1) + + o16(dpi[0]) + + o16(dpi[1]) + + b"\0" * 24 + + b"\xFF" * 24 + + b"\0" + + o8(planes) + + o16(stride) + + o16(1) + + o16(screen[0]) + + o16(screen[1]) + + b"\0" * 54 + ) + + assert fp.tell() == 128 + + ImageFile._save(im, fp, [("pcx", (0, 0) + im.size, 0, (rawmode, bits * planes))]) + + if im.mode == "P": + # colour palette + fp.write(o8(12)) + fp.write(im.im.getpalette("RGB", "RGB")) # 768 bytes + elif im.mode == "L": + # greyscale palette + fp.write(o8(12)) + for i in range(256): + fp.write(o8(i) * 3) + + +# -------------------------------------------------------------------- +# registry + + +Image.register_open(PcxImageFile.format, PcxImageFile, _accept) +Image.register_save(PcxImageFile.format, _save) + +Image.register_extension(PcxImageFile.format, ".pcx") + +Image.register_mime(PcxImageFile.format, "image/x-pcx") diff --git a/venv/Lib/site-packages/PIL/PdfImagePlugin.py b/venv/Lib/site-packages/PIL/PdfImagePlugin.py new file mode 100644 index 000000000..47500baf7 --- /dev/null +++ b/venv/Lib/site-packages/PIL/PdfImagePlugin.py @@ -0,0 +1,243 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PDF (Acrobat) file handling +# +# History: +# 1996-07-16 fl Created +# 1997-01-18 fl Fixed header +# 2004-02-21 fl Fixes for 1/L/CMYK images, etc. +# 2004-02-24 fl Fixes for 1 and P images. +# +# Copyright (c) 1997-2004 by Secret Labs AB. All rights reserved. +# Copyright (c) 1996-1997 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +## +# Image plugin for PDF images (output only). +## + +import io +import os +import time + +from . import Image, ImageFile, ImageSequence, PdfParser, __version__ + +# +# -------------------------------------------------------------------- + +# object ids: +# 1. catalogue +# 2. pages +# 3. image +# 4. 
page +# 5. page contents + + +def _save_all(im, fp, filename): + _save(im, fp, filename, save_all=True) + + +## +# (Internal) Image save plugin for the PDF format. + + +def _save(im, fp, filename, save_all=False): + is_appending = im.encoderinfo.get("append", False) + if is_appending: + existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="r+b") + else: + existing_pdf = PdfParser.PdfParser(f=fp, filename=filename, mode="w+b") + + resolution = im.encoderinfo.get("resolution", 72.0) + + info = { + "title": None + if is_appending + else os.path.splitext(os.path.basename(filename))[0], + "author": None, + "subject": None, + "keywords": None, + "creator": None, + "producer": None, + "creationDate": None if is_appending else time.gmtime(), + "modDate": None if is_appending else time.gmtime(), + } + for k, default in info.items(): + v = im.encoderinfo.get(k) if k in im.encoderinfo else default + if v: + existing_pdf.info[k[0].upper() + k[1:]] = v + + # + # make sure image data is available + im.load() + + existing_pdf.start_writing() + existing_pdf.write_header() + existing_pdf.write_comment("created by Pillow {} PDF driver".format(__version__)) + + # + # pages + ims = [im] + if save_all: + append_images = im.encoderinfo.get("append_images", []) + for append_im in append_images: + append_im.encoderinfo = im.encoderinfo.copy() + ims.append(append_im) + numberOfPages = 0 + image_refs = [] + page_refs = [] + contents_refs = [] + for im in ims: + im_numberOfPages = 1 + if save_all: + try: + im_numberOfPages = im.n_frames + except AttributeError: + # Image format does not have n_frames. 
+ # It is a single frame image + pass + numberOfPages += im_numberOfPages + for i in range(im_numberOfPages): + image_refs.append(existing_pdf.next_object_id(0)) + page_refs.append(existing_pdf.next_object_id(0)) + contents_refs.append(existing_pdf.next_object_id(0)) + existing_pdf.pages.append(page_refs[-1]) + + # + # catalog and list of pages + existing_pdf.write_catalog() + + pageNumber = 0 + for imSequence in ims: + im_pages = ImageSequence.Iterator(imSequence) if save_all else [imSequence] + for im in im_pages: + # FIXME: Should replace ASCIIHexDecode with RunLengthDecode + # (packbits) or LZWDecode (tiff/lzw compression). Note that + # PDF 1.2 also supports Flatedecode (zip compression). + + bits = 8 + params = None + + if im.mode == "1": + filter = "ASCIIHexDecode" + colorspace = PdfParser.PdfName("DeviceGray") + procset = "ImageB" # grayscale + bits = 1 + elif im.mode == "L": + filter = "DCTDecode" + # params = "<< /Predictor 15 /Columns %d >>" % (width-2) + colorspace = PdfParser.PdfName("DeviceGray") + procset = "ImageB" # grayscale + elif im.mode == "P": + filter = "ASCIIHexDecode" + palette = im.im.getpalette("RGB") + colorspace = [ + PdfParser.PdfName("Indexed"), + PdfParser.PdfName("DeviceRGB"), + 255, + PdfParser.PdfBinary(palette), + ] + procset = "ImageI" # indexed color + elif im.mode == "RGB": + filter = "DCTDecode" + colorspace = PdfParser.PdfName("DeviceRGB") + procset = "ImageC" # color images + elif im.mode == "CMYK": + filter = "DCTDecode" + colorspace = PdfParser.PdfName("DeviceCMYK") + procset = "ImageC" # color images + else: + raise ValueError("cannot save mode %s" % im.mode) + + # + # image + + op = io.BytesIO() + + if filter == "ASCIIHexDecode": + if bits == 1: + # FIXME: the hex encoder doesn't support packed 1-bit + # images; do things the hard way... 
+ data = im.tobytes("raw", "1") + im = Image.new("L", im.size) + im.putdata(data) + ImageFile._save(im, op, [("hex", (0, 0) + im.size, 0, im.mode)]) + elif filter == "DCTDecode": + Image.SAVE["JPEG"](im, op, filename) + elif filter == "FlateDecode": + ImageFile._save(im, op, [("zip", (0, 0) + im.size, 0, im.mode)]) + elif filter == "RunLengthDecode": + ImageFile._save(im, op, [("packbits", (0, 0) + im.size, 0, im.mode)]) + else: + raise ValueError("unsupported PDF filter (%s)" % filter) + + # + # Get image characteristics + + width, height = im.size + + existing_pdf.write_obj( + image_refs[pageNumber], + stream=op.getvalue(), + Type=PdfParser.PdfName("XObject"), + Subtype=PdfParser.PdfName("Image"), + Width=width, # * 72.0 / resolution, + Height=height, # * 72.0 / resolution, + Filter=PdfParser.PdfName(filter), + BitsPerComponent=bits, + DecodeParams=params, + ColorSpace=colorspace, + ) + + # + # page + + existing_pdf.write_page( + page_refs[pageNumber], + Resources=PdfParser.PdfDict( + ProcSet=[PdfParser.PdfName("PDF"), PdfParser.PdfName(procset)], + XObject=PdfParser.PdfDict(image=image_refs[pageNumber]), + ), + MediaBox=[ + 0, + 0, + int(width * 72.0 / resolution), + int(height * 72.0 / resolution), + ], + Contents=contents_refs[pageNumber], + ) + + # + # page contents + + page_contents = b"q %d 0 0 %d 0 0 cm /image Do Q\n" % ( + int(width * 72.0 / resolution), + int(height * 72.0 / resolution), + ) + + existing_pdf.write_obj(contents_refs[pageNumber], stream=page_contents) + + pageNumber += 1 + + # + # trailer + existing_pdf.write_xref_and_trailer() + if hasattr(fp, "flush"): + fp.flush() + existing_pdf.close() + + +# +# -------------------------------------------------------------------- + + +Image.register_save("PDF", _save) +Image.register_save_all("PDF", _save_all) + +Image.register_extension("PDF", ".pdf") + +Image.register_mime("PDF", "application/pdf") diff --git a/venv/Lib/site-packages/PIL/PdfParser.py b/venv/Lib/site-packages/PIL/PdfParser.py new file 
mode 100644 index 000000000..3c343c5e8 --- /dev/null +++ b/venv/Lib/site-packages/PIL/PdfParser.py @@ -0,0 +1,995 @@ +import calendar +import codecs +import collections +import mmap +import os +import re +import time +import zlib + + +# see 7.9.2.2 Text String Type on page 86 and D.3 PDFDocEncoding Character Set +# on page 656 +def encode_text(s): + return codecs.BOM_UTF16_BE + s.encode("utf_16_be") + + +PDFDocEncoding = { + 0x16: "\u0017", + 0x18: "\u02D8", + 0x19: "\u02C7", + 0x1A: "\u02C6", + 0x1B: "\u02D9", + 0x1C: "\u02DD", + 0x1D: "\u02DB", + 0x1E: "\u02DA", + 0x1F: "\u02DC", + 0x80: "\u2022", + 0x81: "\u2020", + 0x82: "\u2021", + 0x83: "\u2026", + 0x84: "\u2014", + 0x85: "\u2013", + 0x86: "\u0192", + 0x87: "\u2044", + 0x88: "\u2039", + 0x89: "\u203A", + 0x8A: "\u2212", + 0x8B: "\u2030", + 0x8C: "\u201E", + 0x8D: "\u201C", + 0x8E: "\u201D", + 0x8F: "\u2018", + 0x90: "\u2019", + 0x91: "\u201A", + 0x92: "\u2122", + 0x93: "\uFB01", + 0x94: "\uFB02", + 0x95: "\u0141", + 0x96: "\u0152", + 0x97: "\u0160", + 0x98: "\u0178", + 0x99: "\u017D", + 0x9A: "\u0131", + 0x9B: "\u0142", + 0x9C: "\u0153", + 0x9D: "\u0161", + 0x9E: "\u017E", + 0xA0: "\u20AC", +} + + +def decode_text(b): + if b[: len(codecs.BOM_UTF16_BE)] == codecs.BOM_UTF16_BE: + return b[len(codecs.BOM_UTF16_BE) :].decode("utf_16_be") + else: + return "".join(PDFDocEncoding.get(byte, chr(byte)) for byte in b) + + +class PdfFormatError(RuntimeError): + """An error that probably indicates a syntactic or semantic error in the + PDF file structure""" + + pass + + +def check_format_condition(condition, error_message): + if not condition: + raise PdfFormatError(error_message) + + +class IndirectReference( + collections.namedtuple("IndirectReferenceTuple", ["object_id", "generation"]) +): + def __str__(self): + return "%s %s R" % self + + def __bytes__(self): + return self.__str__().encode("us-ascii") + + def __eq__(self, other): + return ( + other.__class__ is self.__class__ + and other.object_id == self.object_id + 
and other.generation == self.generation + ) + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash((self.object_id, self.generation)) + + +class IndirectObjectDef(IndirectReference): + def __str__(self): + return "%s %s obj" % self + + +class XrefTable: + def __init__(self): + self.existing_entries = {} # object ID => (offset, generation) + self.new_entries = {} # object ID => (offset, generation) + self.deleted_entries = {0: 65536} # object ID => generation + self.reading_finished = False + + def __setitem__(self, key, value): + if self.reading_finished: + self.new_entries[key] = value + else: + self.existing_entries[key] = value + if key in self.deleted_entries: + del self.deleted_entries[key] + + def __getitem__(self, key): + try: + return self.new_entries[key] + except KeyError: + return self.existing_entries[key] + + def __delitem__(self, key): + if key in self.new_entries: + generation = self.new_entries[key][1] + 1 + del self.new_entries[key] + self.deleted_entries[key] = generation + elif key in self.existing_entries: + generation = self.existing_entries[key][1] + 1 + self.deleted_entries[key] = generation + elif key in self.deleted_entries: + generation = self.deleted_entries[key] + else: + raise IndexError( + "object ID " + str(key) + " cannot be deleted because it doesn't exist" + ) + + def __contains__(self, key): + return key in self.existing_entries or key in self.new_entries + + def __len__(self): + return len( + set(self.existing_entries.keys()) + | set(self.new_entries.keys()) + | set(self.deleted_entries.keys()) + ) + + def keys(self): + return ( + set(self.existing_entries.keys()) - set(self.deleted_entries.keys()) + ) | set(self.new_entries.keys()) + + def write(self, f): + keys = sorted(set(self.new_entries.keys()) | set(self.deleted_entries.keys())) + deleted_keys = sorted(set(self.deleted_entries.keys())) + startxref = f.tell() + f.write(b"xref\n") + while keys: + # find a contiguous sequence of object 
IDs + prev = None + for index, key in enumerate(keys): + if prev is None or prev + 1 == key: + prev = key + else: + contiguous_keys = keys[:index] + keys = keys[index:] + break + else: + contiguous_keys = keys + keys = None + f.write(b"%d %d\n" % (contiguous_keys[0], len(contiguous_keys))) + for object_id in contiguous_keys: + if object_id in self.new_entries: + f.write(b"%010d %05d n \n" % self.new_entries[object_id]) + else: + this_deleted_object_id = deleted_keys.pop(0) + check_format_condition( + object_id == this_deleted_object_id, + "expected the next deleted object ID to be %s, instead found %s" + % (object_id, this_deleted_object_id), + ) + try: + next_in_linked_list = deleted_keys[0] + except IndexError: + next_in_linked_list = 0 + f.write( + b"%010d %05d f \n" + % (next_in_linked_list, self.deleted_entries[object_id]) + ) + return startxref + + +class PdfName: + def __init__(self, name): + if isinstance(name, PdfName): + self.name = name.name + elif isinstance(name, bytes): + self.name = name + else: + self.name = name.encode("us-ascii") + + def name_as_str(self): + return self.name.decode("us-ascii") + + def __eq__(self, other): + return ( + isinstance(other, PdfName) and other.name == self.name + ) or other == self.name + + def __hash__(self): + return hash(self.name) + + def __repr__(self): + return "PdfName(%s)" % repr(self.name) + + @classmethod + def from_pdf_stream(cls, data): + return cls(PdfParser.interpret_name(data)) + + allowed_chars = set(range(33, 127)) - {ord(c) for c in "#%/()<>[]{}"} + + def __bytes__(self): + result = bytearray(b"/") + for b in self.name: + if b in self.allowed_chars: + result.append(b) + else: + result.extend(b"#%02X" % b) + return bytes(result) + + +class PdfArray(list): + def __bytes__(self): + return b"[ " + b" ".join(pdf_repr(x) for x in self) + b" ]" + + +class PdfDict(collections.UserDict): + def __setattr__(self, key, value): + if key == "data": + collections.UserDict.__setattr__(self, key, value) + else: + 
self[key.encode("us-ascii")] = value + + def __getattr__(self, key): + try: + value = self[key.encode("us-ascii")] + except KeyError as e: + raise AttributeError(key) from e + if isinstance(value, bytes): + value = decode_text(value) + if key.endswith("Date"): + if value.startswith("D:"): + value = value[2:] + + relationship = "Z" + if len(value) > 17: + relationship = value[14] + offset = int(value[15:17]) * 60 + if len(value) > 20: + offset += int(value[18:20]) + + format = "%Y%m%d%H%M%S"[: len(value) - 2] + value = time.strptime(value[: len(format) + 2], format) + if relationship in ["+", "-"]: + offset *= 60 + if relationship == "+": + offset *= -1 + value = time.gmtime(calendar.timegm(value) + offset) + return value + + def __bytes__(self): + out = bytearray(b"<<") + for key, value in self.items(): + if value is None: + continue + value = pdf_repr(value) + out.extend(b"\n") + out.extend(bytes(PdfName(key))) + out.extend(b" ") + out.extend(value) + out.extend(b"\n>>") + return bytes(out) + + +class PdfBinary: + def __init__(self, data): + self.data = data + + def __bytes__(self): + return b"<%s>" % b"".join(b"%02X" % b for b in self.data) + + +class PdfStream: + def __init__(self, dictionary, buf): + self.dictionary = dictionary + self.buf = buf + + def decode(self): + try: + filter = self.dictionary.Filter + except AttributeError: + return self.buf + if filter == b"FlateDecode": + try: + expected_length = self.dictionary.DL + except AttributeError: + expected_length = self.dictionary.Length + return zlib.decompress(self.buf, bufsize=int(expected_length)) + else: + raise NotImplementedError( + "stream filter %s unknown/unsupported" % repr(self.dictionary.Filter) + ) + + +def pdf_repr(x): + if x is True: + return b"true" + elif x is False: + return b"false" + elif x is None: + return b"null" + elif isinstance(x, (PdfName, PdfDict, PdfArray, PdfBinary)): + return bytes(x) + elif isinstance(x, int): + return str(x).encode("us-ascii") + elif isinstance(x, 
time.struct_time): + return b"(D:" + time.strftime("%Y%m%d%H%M%SZ", x).encode("us-ascii") + b")" + elif isinstance(x, dict): + return bytes(PdfDict(x)) + elif isinstance(x, list): + return bytes(PdfArray(x)) + elif isinstance(x, str): + return pdf_repr(encode_text(x)) + elif isinstance(x, bytes): + # XXX escape more chars? handle binary garbage + x = x.replace(b"\\", b"\\\\") + x = x.replace(b"(", b"\\(") + x = x.replace(b")", b"\\)") + return b"(" + x + b")" + else: + return bytes(x) + + +class PdfParser: + """Based on + https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/PDF32000_2008.pdf + Supports PDF up to 1.4 + """ + + def __init__(self, filename=None, f=None, buf=None, start_offset=0, mode="rb"): + if buf and f: + raise RuntimeError("specify buf or f or filename, but not both buf and f") + self.filename = filename + self.buf = buf + self.f = f + self.start_offset = start_offset + self.should_close_buf = False + self.should_close_file = False + if filename is not None and f is None: + self.f = f = open(filename, mode) + self.should_close_file = True + if f is not None: + self.buf = buf = self.get_buf_from_file(f) + self.should_close_buf = True + if not filename and hasattr(f, "name"): + self.filename = f.name + self.cached_objects = {} + if buf: + self.read_pdf_info() + else: + self.file_size_total = self.file_size_this = 0 + self.root = PdfDict() + self.root_ref = None + self.info = PdfDict() + self.info_ref = None + self.page_tree_root = {} + self.pages = [] + self.orig_pages = [] + self.pages_ref = None + self.last_xref_section_offset = None + self.trailer_dict = {} + self.xref_table = XrefTable() + self.xref_table.reading_finished = True + if f: + self.seek_end() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + return False # do not suppress exceptions + + def start_writing(self): + self.close_buf() + self.seek_end() + + def close_buf(self): + try: + self.buf.close() + except 
AttributeError: + pass + self.buf = None + + def close(self): + if self.should_close_buf: + self.close_buf() + if self.f is not None and self.should_close_file: + self.f.close() + self.f = None + + def seek_end(self): + self.f.seek(0, os.SEEK_END) + + def write_header(self): + self.f.write(b"%PDF-1.4\n") + + def write_comment(self, s): + self.f.write(("% {}\n".format(s)).encode("utf-8")) + + def write_catalog(self): + self.del_root() + self.root_ref = self.next_object_id(self.f.tell()) + self.pages_ref = self.next_object_id(0) + self.rewrite_pages() + self.write_obj(self.root_ref, Type=PdfName(b"Catalog"), Pages=self.pages_ref) + self.write_obj( + self.pages_ref, + Type=PdfName(b"Pages"), + Count=len(self.pages), + Kids=self.pages, + ) + return self.root_ref + + def rewrite_pages(self): + pages_tree_nodes_to_delete = [] + for i, page_ref in enumerate(self.orig_pages): + page_info = self.cached_objects[page_ref] + del self.xref_table[page_ref.object_id] + pages_tree_nodes_to_delete.append(page_info[PdfName(b"Parent")]) + if page_ref not in self.pages: + # the page has been deleted + continue + # make dict keys into strings for passing to write_page + stringified_page_info = {} + for key, value in page_info.items(): + # key should be a PdfName + stringified_page_info[key.name_as_str()] = value + stringified_page_info["Parent"] = self.pages_ref + new_page_ref = self.write_page(None, **stringified_page_info) + for j, cur_page_ref in enumerate(self.pages): + if cur_page_ref == page_ref: + # replace the page reference with the new one + self.pages[j] = new_page_ref + # delete redundant Pages tree nodes from xref table + for pages_tree_node_ref in pages_tree_nodes_to_delete: + while pages_tree_node_ref: + pages_tree_node = self.cached_objects[pages_tree_node_ref] + if pages_tree_node_ref.object_id in self.xref_table: + del self.xref_table[pages_tree_node_ref.object_id] + pages_tree_node_ref = pages_tree_node.get(b"Parent", None) + self.orig_pages = [] + + def 
write_xref_and_trailer(self, new_root_ref=None): + if new_root_ref: + self.del_root() + self.root_ref = new_root_ref + if self.info: + self.info_ref = self.write_obj(None, self.info) + start_xref = self.xref_table.write(self.f) + num_entries = len(self.xref_table) + trailer_dict = {b"Root": self.root_ref, b"Size": num_entries} + if self.last_xref_section_offset is not None: + trailer_dict[b"Prev"] = self.last_xref_section_offset + if self.info: + trailer_dict[b"Info"] = self.info_ref + self.last_xref_section_offset = start_xref + self.f.write( + b"trailer\n" + + bytes(PdfDict(trailer_dict)) + + b"\nstartxref\n%d\n%%%%EOF" % start_xref + ) + + def write_page(self, ref, *objs, **dict_obj): + if isinstance(ref, int): + ref = self.pages[ref] + if "Type" not in dict_obj: + dict_obj["Type"] = PdfName(b"Page") + if "Parent" not in dict_obj: + dict_obj["Parent"] = self.pages_ref + return self.write_obj(ref, *objs, **dict_obj) + + def write_obj(self, ref, *objs, **dict_obj): + f = self.f + if ref is None: + ref = self.next_object_id(f.tell()) + else: + self.xref_table[ref.object_id] = (f.tell(), ref.generation) + f.write(bytes(IndirectObjectDef(*ref))) + stream = dict_obj.pop("stream", None) + if stream is not None: + dict_obj["Length"] = len(stream) + if dict_obj: + f.write(pdf_repr(dict_obj)) + for obj in objs: + f.write(pdf_repr(obj)) + if stream is not None: + f.write(b"stream\n") + f.write(stream) + f.write(b"\nendstream\n") + f.write(b"endobj\n") + return ref + + def del_root(self): + if self.root_ref is None: + return + del self.xref_table[self.root_ref.object_id] + del self.xref_table[self.root[b"Pages"].object_id] + + @staticmethod + def get_buf_from_file(f): + if hasattr(f, "getbuffer"): + return f.getbuffer() + elif hasattr(f, "getvalue"): + return f.getvalue() + else: + try: + return mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + except ValueError: # cannot mmap an empty file + return b"" + + def read_pdf_info(self): + self.file_size_total = len(self.buf) + 
self.file_size_this = self.file_size_total - self.start_offset + self.read_trailer() + self.root_ref = self.trailer_dict[b"Root"] + self.info_ref = self.trailer_dict.get(b"Info", None) + self.root = PdfDict(self.read_indirect(self.root_ref)) + if self.info_ref is None: + self.info = PdfDict() + else: + self.info = PdfDict(self.read_indirect(self.info_ref)) + check_format_condition(b"Type" in self.root, "/Type missing in Root") + check_format_condition( + self.root[b"Type"] == b"Catalog", "/Type in Root is not /Catalog" + ) + check_format_condition(b"Pages" in self.root, "/Pages missing in Root") + check_format_condition( + isinstance(self.root[b"Pages"], IndirectReference), + "/Pages in Root is not an indirect reference", + ) + self.pages_ref = self.root[b"Pages"] + self.page_tree_root = self.read_indirect(self.pages_ref) + self.pages = self.linearize_page_tree(self.page_tree_root) + # save the original list of page references + # in case the user modifies, adds or deletes some pages + # and we need to rewrite the pages and their list + self.orig_pages = self.pages[:] + + def next_object_id(self, offset=None): + try: + # TODO: support reuse of deleted objects + reference = IndirectReference(max(self.xref_table.keys()) + 1, 0) + except ValueError: + reference = IndirectReference(1, 0) + if offset is not None: + self.xref_table[reference.object_id] = (offset, 0) + return reference + + delimiter = br"[][()<>{}/%]" + delimiter_or_ws = br"[][()<>{}/%\000\011\012\014\015\040]" + whitespace = br"[\000\011\012\014\015\040]" + whitespace_or_hex = br"[\000\011\012\014\015\0400-9a-fA-F]" + whitespace_optional = whitespace + b"*" + whitespace_mandatory = whitespace + b"+" + newline_only = br"[\r\n]+" + newline = whitespace_optional + newline_only + whitespace_optional + re_trailer_end = re.compile( + whitespace_mandatory + + br"trailer" + + whitespace_optional + + br"\<\<(.*\>\>)" + + newline + + br"startxref" + + newline + + br"([0-9]+)" + + newline + + br"%%EOF" + + 
whitespace_optional + + br"$", + re.DOTALL, + ) + re_trailer_prev = re.compile( + whitespace_optional + + br"trailer" + + whitespace_optional + + br"\<\<(.*?\>\>)" + + newline + + br"startxref" + + newline + + br"([0-9]+)" + + newline + + br"%%EOF" + + whitespace_optional, + re.DOTALL, + ) + + def read_trailer(self): + search_start_offset = len(self.buf) - 16384 + if search_start_offset < self.start_offset: + search_start_offset = self.start_offset + m = self.re_trailer_end.search(self.buf, search_start_offset) + check_format_condition(m, "trailer end not found") + # make sure we found the LAST trailer + last_match = m + while m: + last_match = m + m = self.re_trailer_end.search(self.buf, m.start() + 16) + if not m: + m = last_match + trailer_data = m.group(1) + self.last_xref_section_offset = int(m.group(2)) + self.trailer_dict = self.interpret_trailer(trailer_data) + self.xref_table = XrefTable() + self.read_xref_table(xref_section_offset=self.last_xref_section_offset) + if b"Prev" in self.trailer_dict: + self.read_prev_trailer(self.trailer_dict[b"Prev"]) + + def read_prev_trailer(self, xref_section_offset): + trailer_offset = self.read_xref_table(xref_section_offset=xref_section_offset) + m = self.re_trailer_prev.search( + self.buf[trailer_offset : trailer_offset + 16384] + ) + check_format_condition(m, "previous trailer not found") + trailer_data = m.group(1) + check_format_condition( + int(m.group(2)) == xref_section_offset, + "xref section offset in previous trailer doesn't match what was expected", + ) + trailer_dict = self.interpret_trailer(trailer_data) + if b"Prev" in trailer_dict: + self.read_prev_trailer(trailer_dict[b"Prev"]) + + re_whitespace_optional = re.compile(whitespace_optional) + re_name = re.compile( + whitespace_optional + + br"/([!-$&'*-.0-;=?-Z\\^-z|~]+)(?=" + + delimiter_or_ws + + br")" + ) + re_dict_start = re.compile(whitespace_optional + br"\<\<") + re_dict_end = re.compile(whitespace_optional + br"\>\>" + whitespace_optional) + + 
@classmethod + def interpret_trailer(cls, trailer_data): + trailer = {} + offset = 0 + while True: + m = cls.re_name.match(trailer_data, offset) + if not m: + m = cls.re_dict_end.match(trailer_data, offset) + check_format_condition( + m and m.end() == len(trailer_data), + "name not found in trailer, remaining data: " + + repr(trailer_data[offset:]), + ) + break + key = cls.interpret_name(m.group(1)) + value, offset = cls.get_value(trailer_data, m.end()) + trailer[key] = value + check_format_condition( + b"Size" in trailer and isinstance(trailer[b"Size"], int), + "/Size not in trailer or not an integer", + ) + check_format_condition( + b"Root" in trailer and isinstance(trailer[b"Root"], IndirectReference), + "/Root not in trailer or not an indirect reference", + ) + return trailer + + re_hashes_in_name = re.compile(br"([^#]*)(#([0-9a-fA-F]{2}))?") + + @classmethod + def interpret_name(cls, raw, as_text=False): + name = b"" + for m in cls.re_hashes_in_name.finditer(raw): + if m.group(3): + name += m.group(1) + bytearray.fromhex(m.group(3).decode("us-ascii")) + else: + name += m.group(1) + if as_text: + return name.decode("utf-8") + else: + return bytes(name) + + re_null = re.compile(whitespace_optional + br"null(?=" + delimiter_or_ws + br")") + re_true = re.compile(whitespace_optional + br"true(?=" + delimiter_or_ws + br")") + re_false = re.compile(whitespace_optional + br"false(?=" + delimiter_or_ws + br")") + re_int = re.compile( + whitespace_optional + br"([-+]?[0-9]+)(?=" + delimiter_or_ws + br")" + ) + re_real = re.compile( + whitespace_optional + + br"([-+]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+))(?=" + + delimiter_or_ws + + br")" + ) + re_array_start = re.compile(whitespace_optional + br"\[") + re_array_end = re.compile(whitespace_optional + br"]") + re_string_hex = re.compile( + whitespace_optional + br"\<(" + whitespace_or_hex + br"*)\>" + ) + re_string_lit = re.compile(whitespace_optional + br"\(") + re_indirect_reference = re.compile( + whitespace_optional + + 
br"([-+]?[0-9]+)" + + whitespace_mandatory + + br"([-+]?[0-9]+)" + + whitespace_mandatory + + br"R(?=" + + delimiter_or_ws + + br")" + ) + re_indirect_def_start = re.compile( + whitespace_optional + + br"([-+]?[0-9]+)" + + whitespace_mandatory + + br"([-+]?[0-9]+)" + + whitespace_mandatory + + br"obj(?=" + + delimiter_or_ws + + br")" + ) + re_indirect_def_end = re.compile( + whitespace_optional + br"endobj(?=" + delimiter_or_ws + br")" + ) + re_comment = re.compile( + br"(" + whitespace_optional + br"%[^\r\n]*" + newline + br")*" + ) + re_stream_start = re.compile(whitespace_optional + br"stream\r?\n") + re_stream_end = re.compile( + whitespace_optional + br"endstream(?=" + delimiter_or_ws + br")" + ) + + @classmethod + def get_value(cls, data, offset, expect_indirect=None, max_nesting=-1): + if max_nesting == 0: + return None, None + m = cls.re_comment.match(data, offset) + if m: + offset = m.end() + m = cls.re_indirect_def_start.match(data, offset) + if m: + check_format_condition( + int(m.group(1)) > 0, + "indirect object definition: object ID must be greater than 0", + ) + check_format_condition( + int(m.group(2)) >= 0, + "indirect object definition: generation must be non-negative", + ) + check_format_condition( + expect_indirect is None + or expect_indirect + == IndirectReference(int(m.group(1)), int(m.group(2))), + "indirect object definition different than expected", + ) + object, offset = cls.get_value(data, m.end(), max_nesting=max_nesting - 1) + if offset is None: + return object, None + m = cls.re_indirect_def_end.match(data, offset) + check_format_condition(m, "indirect object definition end not found") + return object, m.end() + check_format_condition( + not expect_indirect, "indirect object definition not found" + ) + m = cls.re_indirect_reference.match(data, offset) + if m: + check_format_condition( + int(m.group(1)) > 0, + "indirect object reference: object ID must be greater than 0", + ) + check_format_condition( + int(m.group(2)) >= 0, + 
"indirect object reference: generation must be non-negative", + ) + return IndirectReference(int(m.group(1)), int(m.group(2))), m.end() + m = cls.re_dict_start.match(data, offset) + if m: + offset = m.end() + result = {} + m = cls.re_dict_end.match(data, offset) + while not m: + key, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) + if offset is None: + return result, None + value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) + result[key] = value + if offset is None: + return result, None + m = cls.re_dict_end.match(data, offset) + offset = m.end() + m = cls.re_stream_start.match(data, offset) + if m: + try: + stream_len = int(result[b"Length"]) + except (TypeError, KeyError, ValueError) as e: + raise PdfFormatError( + "bad or missing Length in stream dict (%r)" + % result.get(b"Length", None) + ) from e + stream_data = data[m.end() : m.end() + stream_len] + m = cls.re_stream_end.match(data, m.end() + stream_len) + check_format_condition(m, "stream end not found") + offset = m.end() + result = PdfStream(PdfDict(result), stream_data) + else: + result = PdfDict(result) + return result, offset + m = cls.re_array_start.match(data, offset) + if m: + offset = m.end() + result = [] + m = cls.re_array_end.match(data, offset) + while not m: + value, offset = cls.get_value(data, offset, max_nesting=max_nesting - 1) + result.append(value) + if offset is None: + return result, None + m = cls.re_array_end.match(data, offset) + return result, m.end() + m = cls.re_null.match(data, offset) + if m: + return None, m.end() + m = cls.re_true.match(data, offset) + if m: + return True, m.end() + m = cls.re_false.match(data, offset) + if m: + return False, m.end() + m = cls.re_name.match(data, offset) + if m: + return PdfName(cls.interpret_name(m.group(1))), m.end() + m = cls.re_int.match(data, offset) + if m: + return int(m.group(1)), m.end() + m = cls.re_real.match(data, offset) + if m: + # XXX Decimal instead of float??? 
+ return float(m.group(1)), m.end() + m = cls.re_string_hex.match(data, offset) + if m: + # filter out whitespace + hex_string = bytearray( + [b for b in m.group(1) if b in b"0123456789abcdefABCDEF"] + ) + if len(hex_string) % 2 == 1: + # append a 0 if the length is not even - yes, at the end + hex_string.append(ord(b"0")) + return bytearray.fromhex(hex_string.decode("us-ascii")), m.end() + m = cls.re_string_lit.match(data, offset) + if m: + return cls.get_literal_string(data, m.end()) + # return None, offset # fallback (only for debugging) + raise PdfFormatError("unrecognized object: " + repr(data[offset : offset + 32])) + + re_lit_str_token = re.compile( + br"(\\[nrtbf()\\])|(\\[0-9]{1,3})|(\\(\r\n|\r|\n))|(\r\n|\r|\n)|(\()|(\))" + ) + escaped_chars = { + b"n": b"\n", + b"r": b"\r", + b"t": b"\t", + b"b": b"\b", + b"f": b"\f", + b"(": b"(", + b")": b")", + b"\\": b"\\", + ord(b"n"): b"\n", + ord(b"r"): b"\r", + ord(b"t"): b"\t", + ord(b"b"): b"\b", + ord(b"f"): b"\f", + ord(b"("): b"(", + ord(b")"): b")", + ord(b"\\"): b"\\", + } + + @classmethod + def get_literal_string(cls, data, offset): + nesting_depth = 0 + result = bytearray() + for m in cls.re_lit_str_token.finditer(data, offset): + result.extend(data[offset : m.start()]) + if m.group(1): + result.extend(cls.escaped_chars[m.group(1)[1]]) + elif m.group(2): + result.append(int(m.group(2)[1:], 8)) + elif m.group(3): + pass + elif m.group(5): + result.extend(b"\n") + elif m.group(6): + result.extend(b"(") + nesting_depth += 1 + elif m.group(7): + if nesting_depth == 0: + return bytes(result), m.end() + result.extend(b")") + nesting_depth -= 1 + offset = m.end() + raise PdfFormatError("unfinished literal string") + + re_xref_section_start = re.compile(whitespace_optional + br"xref" + newline) + re_xref_subsection_start = re.compile( + whitespace_optional + + br"([0-9]+)" + + whitespace_mandatory + + br"([0-9]+)" + + whitespace_optional + + newline_only + ) + re_xref_entry = re.compile(br"([0-9]{10}) ([0-9]{5}) 
([fn])( \r| \n|\r\n)") + + def read_xref_table(self, xref_section_offset): + subsection_found = False + m = self.re_xref_section_start.match( + self.buf, xref_section_offset + self.start_offset + ) + check_format_condition(m, "xref section start not found") + offset = m.end() + while True: + m = self.re_xref_subsection_start.match(self.buf, offset) + if not m: + check_format_condition( + subsection_found, "xref subsection start not found" + ) + break + subsection_found = True + offset = m.end() + first_object = int(m.group(1)) + num_objects = int(m.group(2)) + for i in range(first_object, first_object + num_objects): + m = self.re_xref_entry.match(self.buf, offset) + check_format_condition(m, "xref entry not found") + offset = m.end() + is_free = m.group(3) == b"f" + generation = int(m.group(2)) + if not is_free: + new_entry = (int(m.group(1)), generation) + check_format_condition( + i not in self.xref_table or self.xref_table[i] == new_entry, + "xref entry duplicated (and not identical)", + ) + self.xref_table[i] = new_entry + return offset + + def read_indirect(self, ref, max_nesting=-1): + offset, generation = self.xref_table[ref[0]] + check_format_condition( + generation == ref[1], + "expected to find generation %s for object ID %s in xref table, " + "instead found generation %s at offset %s" + % (ref[1], ref[0], generation, offset), + ) + value = self.get_value( + self.buf, + offset + self.start_offset, + expect_indirect=IndirectReference(*ref), + max_nesting=max_nesting, + )[0] + self.cached_objects[ref] = value + return value + + def linearize_page_tree(self, node=None): + if node is None: + node = self.page_tree_root + check_format_condition( + node[b"Type"] == b"Pages", "/Type of page tree node is not /Pages" + ) + pages = [] + for kid in node[b"Kids"]: + kid_object = self.read_indirect(kid) + if kid_object[b"Type"] == b"Page": + pages.append(kid) + else: + pages.extend(self.linearize_page_tree(node=kid_object)) + return pages diff --git 
a/venv/Lib/site-packages/PIL/PixarImagePlugin.py b/venv/Lib/site-packages/PIL/PixarImagePlugin.py new file mode 100644 index 000000000..91f0314b5 --- /dev/null +++ b/venv/Lib/site-packages/PIL/PixarImagePlugin.py @@ -0,0 +1,70 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PIXAR raster support for PIL +# +# history: +# 97-01-29 fl Created +# +# notes: +# This is incomplete; it is based on a few samples created with +# Photoshop 2.5 and 3.0, and a summary description provided by +# Greg Coats . Hopefully, "L" and +# "RGBA" support will be added in future versions. +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1997. +# +# See the README file for information on usage and redistribution. +# + +from . import Image, ImageFile +from ._binary import i16le as i16 + +# +# helpers + + +def _accept(prefix): + return prefix[:4] == b"\200\350\000\000" + + +## +# Image plugin for PIXAR raster images. + + +class PixarImageFile(ImageFile.ImageFile): + + format = "PIXAR" + format_description = "PIXAR raster image" + + def _open(self): + + # assuming a 4-byte magic label + s = self.fp.read(4) + if not _accept(s): + raise SyntaxError("not a PIXAR file") + + # read rest of header + s = s + self.fp.read(508) + + self._size = i16(s[418:420]), i16(s[416:418]) + + # get channel/depth descriptions + mode = i16(s[424:426]), i16(s[426:428]) + + if mode == (14, 2): + self.mode = "RGB" + # FIXME: to be continued... 
+ + # create tile descriptor (assuming "dumped") + self.tile = [("raw", (0, 0) + self.size, 1024, (self.mode, 0, 1))] + + +# +# -------------------------------------------------------------------- + +Image.register_open(PixarImageFile.format, PixarImageFile, _accept) + +Image.register_extension(PixarImageFile.format, ".pxr") diff --git a/venv/Lib/site-packages/PIL/PngImagePlugin.py b/venv/Lib/site-packages/PIL/PngImagePlugin.py new file mode 100644 index 000000000..e027953dd --- /dev/null +++ b/venv/Lib/site-packages/PIL/PngImagePlugin.py @@ -0,0 +1,1333 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PNG support code +# +# See "PNG (Portable Network Graphics) Specification, version 1.0; +# W3C Recommendation", 1996-10-01, Thomas Boutell (ed.). +# +# history: +# 1996-05-06 fl Created (couldn't resist it) +# 1996-12-14 fl Upgraded, added read and verify support (0.2) +# 1996-12-15 fl Separate PNG stream parser +# 1996-12-29 fl Added write support, added getchunks +# 1996-12-30 fl Eliminated circular references in decoder (0.3) +# 1998-07-12 fl Read/write 16-bit images as mode I (0.4) +# 2001-02-08 fl Added transparency support (from Zircon) (0.5) +# 2001-04-16 fl Don't close data source in "open" method (0.6) +# 2004-02-24 fl Don't even pretend to support interlaced files (0.7) +# 2004-08-31 fl Do basic sanity check on chunk identifiers (0.8) +# 2004-09-20 fl Added PngInfo chunk container +# 2004-12-18 fl Added DPI read support (based on code by Niki Spahiev) +# 2008-08-13 fl Added tRNS support for RGB images +# 2009-03-06 fl Support for preserving ICC profiles (by Florian Hoech) +# 2009-03-08 fl Added zTXT support (from Lowell Alleman) +# 2009-03-29 fl Read interlaced PNG files (from Conrado Porto Lopes Gouvua) +# +# Copyright (c) 1997-2009 by Secret Labs AB +# Copyright (c) 1996 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. 
+# + +import itertools +import logging +import re +import struct +import warnings +import zlib + +from . import Image, ImageChops, ImageFile, ImagePalette, ImageSequence +from ._binary import i8, i16be as i16, i32be as i32, o8, o16be as o16, o32be as o32 + +logger = logging.getLogger(__name__) + +is_cid = re.compile(br"\w\w\w\w").match + + +_MAGIC = b"\211PNG\r\n\032\n" + + +_MODES = { + # supported bits/color combinations, and corresponding modes/rawmodes + # Greyscale + (1, 0): ("1", "1"), + (2, 0): ("L", "L;2"), + (4, 0): ("L", "L;4"), + (8, 0): ("L", "L"), + (16, 0): ("I", "I;16B"), + # Truecolour + (8, 2): ("RGB", "RGB"), + (16, 2): ("RGB", "RGB;16B"), + # Indexed-colour + (1, 3): ("P", "P;1"), + (2, 3): ("P", "P;2"), + (4, 3): ("P", "P;4"), + (8, 3): ("P", "P"), + # Greyscale with alpha + (8, 4): ("LA", "LA"), + (16, 4): ("RGBA", "LA;16B"), # LA;16B->LA not yet available + # Truecolour with alpha + (8, 6): ("RGBA", "RGBA"), + (16, 6): ("RGBA", "RGBA;16B"), +} + + +_simple_palette = re.compile(b"^\xff*\x00\xff*$") + +# Maximum decompressed size for a iTXt or zTXt chunk. +# Eliminates decompression bombs where compressed chunks can expand 1000x +MAX_TEXT_CHUNK = ImageFile.SAFEBLOCK +# Set the maximum total text chunk size. +MAX_TEXT_MEMORY = 64 * MAX_TEXT_CHUNK + + +# APNG frame disposal modes +APNG_DISPOSE_OP_NONE = 0 +APNG_DISPOSE_OP_BACKGROUND = 1 +APNG_DISPOSE_OP_PREVIOUS = 2 + +# APNG frame blend modes +APNG_BLEND_OP_SOURCE = 0 +APNG_BLEND_OP_OVER = 1 + + +def _safe_zlib_decompress(s): + dobj = zlib.decompressobj() + plaintext = dobj.decompress(s, MAX_TEXT_CHUNK) + if dobj.unconsumed_tail: + raise ValueError("Decompressed Data Too Large") + return plaintext + + +def _crc32(data, seed=0): + return zlib.crc32(data, seed) & 0xFFFFFFFF + + +# -------------------------------------------------------------------- +# Support classes. Suitable for PNG and related formats like MNG etc. 
+ + +class ChunkStream: + def __init__(self, fp): + + self.fp = fp + self.queue = [] + + def read(self): + """Fetch a new chunk. Returns header information.""" + cid = None + + if self.queue: + cid, pos, length = self.queue.pop() + self.fp.seek(pos) + else: + s = self.fp.read(8) + cid = s[4:] + pos = self.fp.tell() + length = i32(s) + + if not is_cid(cid): + if not ImageFile.LOAD_TRUNCATED_IMAGES: + raise SyntaxError("broken PNG file (chunk %s)" % repr(cid)) + + return cid, pos, length + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + self.queue = self.crc = self.fp = None + + def push(self, cid, pos, length): + + self.queue.append((cid, pos, length)) + + def call(self, cid, pos, length): + """Call the appropriate chunk handler""" + + logger.debug("STREAM %r %s %s", cid, pos, length) + return getattr(self, "chunk_" + cid.decode("ascii"))(pos, length) + + def crc(self, cid, data): + """Read and verify checksum""" + + # Skip CRC checks for ancillary chunks if allowed to load truncated + # images + # 5th byte of first char is 1 [specs, section 5.4] + if ImageFile.LOAD_TRUNCATED_IMAGES and (i8(cid[0]) >> 5 & 1): + self.crc_skip(cid, data) + return + + try: + crc1 = _crc32(data, _crc32(cid)) + crc2 = i32(self.fp.read(4)) + if crc1 != crc2: + raise SyntaxError("broken PNG file (bad header checksum in %r)" % cid) + except struct.error as e: + raise SyntaxError( + "broken PNG file (incomplete checksum in %r)" % cid + ) from e + + def crc_skip(self, cid, data): + """Read checksum. Used if the C module is not present""" + + self.fp.read(4) + + def verify(self, endchunk=b"IEND"): + + # Simple approach; just calculate checksum for all remaining + # blocks. Must be called directly after open. 
+ + cids = [] + + while True: + try: + cid, pos, length = self.read() + except struct.error as e: + raise OSError("truncated PNG file") from e + + if cid == endchunk: + break + self.crc(cid, ImageFile._safe_read(self.fp, length)) + cids.append(cid) + + return cids + + +class iTXt(str): + """ + Subclass of string to allow iTXt chunks to look like strings while + keeping their extra information + + """ + + @staticmethod + def __new__(cls, text, lang=None, tkey=None): + """ + :param cls: the class to use when creating the instance + :param text: value for this key + :param lang: language code + :param tkey: UTF-8 version of the key name + """ + + self = str.__new__(cls, text) + self.lang = lang + self.tkey = tkey + return self + + +class PngInfo: + """ + PNG chunk container (for use with save(pnginfo=)) + + """ + + def __init__(self): + self.chunks = [] + + def add(self, cid, data): + """Appends an arbitrary chunk. Use with caution. + + :param cid: a byte string, 4 bytes long. + :param data: a byte string of the encoded data + + """ + + self.chunks.append((cid, data)) + + def add_itxt(self, key, value, lang="", tkey="", zip=False): + """Appends an iTXt chunk. + + :param key: latin-1 encodable text key name + :param value: value for this key + :param lang: language code + :param tkey: UTF-8 version of the key name + :param zip: compression flag + + """ + + if not isinstance(key, bytes): + key = key.encode("latin-1", "strict") + if not isinstance(value, bytes): + value = value.encode("utf-8", "strict") + if not isinstance(lang, bytes): + lang = lang.encode("utf-8", "strict") + if not isinstance(tkey, bytes): + tkey = tkey.encode("utf-8", "strict") + + if zip: + self.add( + b"iTXt", + key + b"\0\x01\0" + lang + b"\0" + tkey + b"\0" + zlib.compress(value), + ) + else: + self.add(b"iTXt", key + b"\0\0\0" + lang + b"\0" + tkey + b"\0" + value) + + def add_text(self, key, value, zip=False): + """Appends a text chunk. 
+ + :param key: latin-1 encodable text key name + :param value: value for this key, text or an + :py:class:`PIL.PngImagePlugin.iTXt` instance + :param zip: compression flag + + """ + if isinstance(value, iTXt): + return self.add_itxt(key, value, value.lang, value.tkey, zip=zip) + + # The tEXt chunk stores latin-1 text + if not isinstance(value, bytes): + try: + value = value.encode("latin-1", "strict") + except UnicodeError: + return self.add_itxt(key, value, zip=zip) + + if not isinstance(key, bytes): + key = key.encode("latin-1", "strict") + + if zip: + self.add(b"zTXt", key + b"\0\0" + zlib.compress(value)) + else: + self.add(b"tEXt", key + b"\0" + value) + + +# -------------------------------------------------------------------- +# PNG image stream (IHDR/IEND) + + +class PngStream(ChunkStream): + def __init__(self, fp): + super().__init__(fp) + + # local copies of Image attributes + self.im_info = {} + self.im_text = {} + self.im_size = (0, 0) + self.im_mode = None + self.im_tile = None + self.im_palette = None + self.im_custom_mimetype = None + self.im_n_frames = None + self._seq_num = None + self.rewind_state = None + + self.text_memory = 0 + + def check_text_memory(self, chunklen): + self.text_memory += chunklen + if self.text_memory > MAX_TEXT_MEMORY: + raise ValueError( + "Too much memory used in text chunks: %s>MAX_TEXT_MEMORY" + % self.text_memory + ) + + def save_rewind(self): + self.rewind_state = { + "info": self.im_info.copy(), + "tile": self.im_tile, + "seq_num": self._seq_num, + } + + def rewind(self): + self.im_info = self.rewind_state["info"] + self.im_tile = self.rewind_state["tile"] + self._seq_num = self.rewind_state["seq_num"] + + def chunk_iCCP(self, pos, length): + + # ICC profile + s = ImageFile._safe_read(self.fp, length) + # according to PNG spec, the iCCP chunk contains: + # Profile name 1-79 bytes (character string) + # Null separator 1 byte (null character) + # Compression method 1 byte (0) + # Compressed profile n bytes (zlib with 
deflate compression) + i = s.find(b"\0") + logger.debug("iCCP profile name %r", s[:i]) + logger.debug("Compression method %s", i8(s[i])) + comp_method = i8(s[i]) + if comp_method != 0: + raise SyntaxError( + "Unknown compression method %s in iCCP chunk" % comp_method + ) + try: + icc_profile = _safe_zlib_decompress(s[i + 2 :]) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + icc_profile = None + else: + raise + except zlib.error: + icc_profile = None # FIXME + self.im_info["icc_profile"] = icc_profile + return s + + def chunk_IHDR(self, pos, length): + + # image header + s = ImageFile._safe_read(self.fp, length) + self.im_size = i32(s), i32(s[4:]) + try: + self.im_mode, self.im_rawmode = _MODES[(i8(s[8]), i8(s[9]))] + except Exception: + pass + if i8(s[12]): + self.im_info["interlace"] = 1 + if i8(s[11]): + raise SyntaxError("unknown filter category") + return s + + def chunk_IDAT(self, pos, length): + + # image data + if "bbox" in self.im_info: + tile = [("zip", self.im_info["bbox"], pos, self.im_rawmode)] + else: + if self.im_n_frames is not None: + self.im_info["default_image"] = True + tile = [("zip", (0, 0) + self.im_size, pos, self.im_rawmode)] + self.im_tile = tile + self.im_idat = length + raise EOFError + + def chunk_IEND(self, pos, length): + + # end of PNG image + raise EOFError + + def chunk_PLTE(self, pos, length): + + # palette + s = ImageFile._safe_read(self.fp, length) + if self.im_mode == "P": + self.im_palette = "RGB", s + return s + + def chunk_tRNS(self, pos, length): + + # transparency + s = ImageFile._safe_read(self.fp, length) + if self.im_mode == "P": + if _simple_palette.match(s): + # tRNS contains only one full-transparent entry, + # other entries are full opaque + i = s.find(b"\0") + if i >= 0: + self.im_info["transparency"] = i + else: + # otherwise, we have a byte string with one alpha value + # for each palette entry + self.im_info["transparency"] = s + elif self.im_mode in ("1", "L", "I"): + self.im_info["transparency"] = 
i16(s) + elif self.im_mode == "RGB": + self.im_info["transparency"] = i16(s), i16(s[2:]), i16(s[4:]) + return s + + def chunk_gAMA(self, pos, length): + # gamma setting + s = ImageFile._safe_read(self.fp, length) + self.im_info["gamma"] = i32(s) / 100000.0 + return s + + def chunk_cHRM(self, pos, length): + # chromaticity, 8 unsigned ints, actual value is scaled by 100,000 + # WP x,y, Red x,y, Green x,y Blue x,y + + s = ImageFile._safe_read(self.fp, length) + raw_vals = struct.unpack(">%dI" % (len(s) // 4), s) + self.im_info["chromaticity"] = tuple(elt / 100000.0 for elt in raw_vals) + return s + + def chunk_sRGB(self, pos, length): + # srgb rendering intent, 1 byte + # 0 perceptual + # 1 relative colorimetric + # 2 saturation + # 3 absolute colorimetric + + s = ImageFile._safe_read(self.fp, length) + self.im_info["srgb"] = i8(s) + return s + + def chunk_pHYs(self, pos, length): + + # pixels per unit + s = ImageFile._safe_read(self.fp, length) + px, py = i32(s), i32(s[4:]) + unit = i8(s[8]) + if unit == 1: # meter + dpi = int(px * 0.0254 + 0.5), int(py * 0.0254 + 0.5) + self.im_info["dpi"] = dpi + elif unit == 0: + self.im_info["aspect"] = px, py + return s + + def chunk_tEXt(self, pos, length): + + # text + s = ImageFile._safe_read(self.fp, length) + try: + k, v = s.split(b"\0", 1) + except ValueError: + # fallback for broken tEXt tags + k = s + v = b"" + if k: + k = k.decode("latin-1", "strict") + v = v.decode("latin-1", "replace") + + self.im_info[k] = self.im_text[k] = v + self.check_text_memory(len(v)) + + return s + + def chunk_zTXt(self, pos, length): + + # compressed text + s = ImageFile._safe_read(self.fp, length) + try: + k, v = s.split(b"\0", 1) + except ValueError: + k = s + v = b"" + if v: + comp_method = i8(v[0]) + else: + comp_method = 0 + if comp_method != 0: + raise SyntaxError( + "Unknown compression method %s in zTXt chunk" % comp_method + ) + try: + v = _safe_zlib_decompress(v[1:]) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + v = 
b"" + else: + raise + except zlib.error: + v = b"" + + if k: + k = k.decode("latin-1", "strict") + v = v.decode("latin-1", "replace") + + self.im_info[k] = self.im_text[k] = v + self.check_text_memory(len(v)) + + return s + + def chunk_iTXt(self, pos, length): + + # international text + r = s = ImageFile._safe_read(self.fp, length) + try: + k, r = r.split(b"\0", 1) + except ValueError: + return s + if len(r) < 2: + return s + cf, cm, r = i8(r[0]), i8(r[1]), r[2:] + try: + lang, tk, v = r.split(b"\0", 2) + except ValueError: + return s + if cf != 0: + if cm == 0: + try: + v = _safe_zlib_decompress(v) + except ValueError: + if ImageFile.LOAD_TRUNCATED_IMAGES: + return s + else: + raise + except zlib.error: + return s + else: + return s + try: + k = k.decode("latin-1", "strict") + lang = lang.decode("utf-8", "strict") + tk = tk.decode("utf-8", "strict") + v = v.decode("utf-8", "strict") + except UnicodeError: + return s + + self.im_info[k] = self.im_text[k] = iTXt(v, lang, tk) + self.check_text_memory(len(v)) + + return s + + def chunk_eXIf(self, pos, length): + s = ImageFile._safe_read(self.fp, length) + self.im_info["exif"] = b"Exif\x00\x00" + s + return s + + # APNG chunks + def chunk_acTL(self, pos, length): + s = ImageFile._safe_read(self.fp, length) + if self.im_n_frames is not None: + self.im_n_frames = None + warnings.warn("Invalid APNG, will use default PNG image if possible") + return s + n_frames = i32(s) + if n_frames == 0 or n_frames > 0x80000000: + warnings.warn("Invalid APNG, will use default PNG image if possible") + return s + self.im_n_frames = n_frames + self.im_info["loop"] = i32(s[4:]) + self.im_custom_mimetype = "image/apng" + return s + + def chunk_fcTL(self, pos, length): + s = ImageFile._safe_read(self.fp, length) + seq = i32(s) + if (self._seq_num is None and seq != 0) or ( + self._seq_num is not None and self._seq_num != seq - 1 + ): + raise SyntaxError("APNG contains frame sequence errors") + self._seq_num = seq + width, height = 
i32(s[4:]), i32(s[8:]) + px, py = i32(s[12:]), i32(s[16:]) + im_w, im_h = self.im_size + if px + width > im_w or py + height > im_h: + raise SyntaxError("APNG contains invalid frames") + self.im_info["bbox"] = (px, py, px + width, py + height) + delay_num, delay_den = i16(s[20:]), i16(s[22:]) + if delay_den == 0: + delay_den = 100 + self.im_info["duration"] = float(delay_num) / float(delay_den) * 1000 + self.im_info["disposal"] = i8(s[24]) + self.im_info["blend"] = i8(s[25]) + return s + + def chunk_fdAT(self, pos, length): + s = ImageFile._safe_read(self.fp, 4) + seq = i32(s) + if self._seq_num != seq - 1: + raise SyntaxError("APNG contains frame sequence errors") + self._seq_num = seq + return self.chunk_IDAT(pos + 4, length - 4) + + +# -------------------------------------------------------------------- +# PNG reader + + +def _accept(prefix): + return prefix[:8] == _MAGIC + + +## +# Image plugin for PNG images. + + +class PngImageFile(ImageFile.ImageFile): + + format = "PNG" + format_description = "Portable network graphics" + + def _open(self): + + if not _accept(self.fp.read(8)): + raise SyntaxError("not a PNG file") + self.__fp = self.fp + self.__frame = 0 + + # + # Parse headers up to the first IDAT or fDAT chunk + + self.png = PngStream(self.fp) + + while True: + + # + # get next chunk + + cid, pos, length = self.png.read() + + try: + s = self.png.call(cid, pos, length) + except EOFError: + break + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + s = ImageFile._safe_read(self.fp, length) + + self.png.crc(cid, s) + + # + # Copy relevant attributes from the PngStream. An alternative + # would be to let the PngStream class modify these attributes + # directly, but that introduces circular references which are + # difficult to break if things go wrong in the decoder... 
+ # (believe me, I've tried ;-) + + self.mode = self.png.im_mode + self._size = self.png.im_size + self.info = self.png.im_info + self._text = None + self.tile = self.png.im_tile + self.custom_mimetype = self.png.im_custom_mimetype + self.n_frames = self.png.im_n_frames or 1 + self.default_image = self.info.get("default_image", False) + + if self.png.im_palette: + rawmode, data = self.png.im_palette + self.palette = ImagePalette.raw(rawmode, data) + + if cid == b"fdAT": + self.__prepare_idat = length - 4 + else: + self.__prepare_idat = length # used by load_prepare() + + if self.png.im_n_frames is not None: + self._close_exclusive_fp_after_loading = False + self.png.save_rewind() + self.__rewind_idat = self.__prepare_idat + self.__rewind = self.__fp.tell() + if self.default_image: + # IDAT chunk contains default image and not first animation frame + self.n_frames += 1 + self._seek(0) + self.is_animated = self.n_frames > 1 + + @property + def text(self): + # experimental + if self._text is None: + # iTxt, tEXt and zTXt chunks may appear at the end of the file + # So load the file to ensure that they are read + if self.is_animated: + frame = self.__frame + # for APNG, seek to the final frame before loading + self.seek(self.n_frames - 1) + self.load() + if self.is_animated: + self.seek(frame) + return self._text + + def verify(self): + """Verify PNG file""" + + if self.fp is None: + raise RuntimeError("verify must be called directly after open") + + # back up to beginning of IDAT block + self.fp.seek(self.tile[0][2] - 8) + + self.png.verify() + self.png.close() + + if self._exclusive_fp: + self.fp.close() + self.fp = None + + def seek(self, frame): + if not self._seek_check(frame): + return + if frame < self.__frame: + self._seek(0, True) + + last_frame = self.__frame + for f in range(self.__frame + 1, frame + 1): + try: + self._seek(f) + except EOFError as e: + self.seek(last_frame) + raise EOFError("no more images in APNG file") from e + + def _seek(self, frame, 
rewind=False): + if frame == 0: + if rewind: + self.__fp.seek(self.__rewind) + self.png.rewind() + self.__prepare_idat = self.__rewind_idat + self.im = None + if self.pyaccess: + self.pyaccess = None + self.info = self.png.im_info + self.tile = self.png.im_tile + self.fp = self.__fp + self._prev_im = None + self.dispose = None + self.default_image = self.info.get("default_image", False) + self.dispose_op = self.info.get("disposal") + self.blend_op = self.info.get("blend") + self.dispose_extent = self.info.get("bbox") + self.__frame = 0 + return + else: + if frame != self.__frame + 1: + raise ValueError("cannot seek to frame %d" % frame) + + # ensure previous frame was loaded + self.load() + + self.fp = self.__fp + + # advance to the next frame + if self.__prepare_idat: + ImageFile._safe_read(self.fp, self.__prepare_idat) + self.__prepare_idat = 0 + frame_start = False + while True: + self.fp.read(4) # CRC + + try: + cid, pos, length = self.png.read() + except (struct.error, SyntaxError): + break + + if cid == b"IEND": + raise EOFError("No more images in APNG file") + if cid == b"fcTL": + if frame_start: + # there must be at least one fdAT chunk between fcTL chunks + raise SyntaxError("APNG missing frame data") + frame_start = True + + try: + self.png.call(cid, pos, length) + except UnicodeDecodeError: + break + except EOFError: + if cid == b"fdAT": + length -= 4 + if frame_start: + self.__prepare_idat = length + break + ImageFile._safe_read(self.fp, length) + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + ImageFile._safe_read(self.fp, length) + + self.__frame = frame + self.tile = self.png.im_tile + self.dispose_op = self.info.get("disposal") + self.blend_op = self.info.get("blend") + self.dispose_extent = self.info.get("bbox") + + if not self.tile: + raise EOFError + + def tell(self): + return self.__frame + + def load_prepare(self): + """internal: prepare to read PNG file""" + + if self.info.get("interlace"): + self.decoderconfig 
= self.decoderconfig + (1,) + + self.__idat = self.__prepare_idat # used by load_read() + ImageFile.ImageFile.load_prepare(self) + + def load_read(self, read_bytes): + """internal: read more image data""" + + while self.__idat == 0: + # end of chunk, skip forward to next one + + self.fp.read(4) # CRC + + cid, pos, length = self.png.read() + + if cid not in [b"IDAT", b"DDAT", b"fdAT"]: + self.png.push(cid, pos, length) + return b"" + + if cid == b"fdAT": + try: + self.png.call(cid, pos, length) + except EOFError: + pass + self.__idat = length - 4 # sequence_num has already been read + else: + self.__idat = length # empty chunks are allowed + + # read more data from this chunk + if read_bytes <= 0: + read_bytes = self.__idat + else: + read_bytes = min(read_bytes, self.__idat) + + self.__idat = self.__idat - read_bytes + + return self.fp.read(read_bytes) + + def load_end(self): + """internal: finished reading image data""" + while True: + self.fp.read(4) # CRC + + try: + cid, pos, length = self.png.read() + except (struct.error, SyntaxError): + break + + if cid == b"IEND": + break + elif cid == b"fcTL" and self.is_animated: + # start of the next frame, stop reading + self.__prepare_idat = 0 + self.png.push(cid, pos, length) + break + + try: + self.png.call(cid, pos, length) + except UnicodeDecodeError: + break + except EOFError: + if cid == b"fdAT": + length -= 4 + ImageFile._safe_read(self.fp, length) + except AttributeError: + logger.debug("%r %s %s (unknown)", cid, pos, length) + ImageFile._safe_read(self.fp, length) + self._text = self.png.im_text + if not self.is_animated: + self.png.close() + self.png = None + else: + # setup frame disposal (actual disposal done when needed in _seek()) + if self._prev_im is None and self.dispose_op == APNG_DISPOSE_OP_PREVIOUS: + self.dispose_op = APNG_DISPOSE_OP_BACKGROUND + + if self.dispose_op == APNG_DISPOSE_OP_PREVIOUS: + dispose = self._prev_im.copy() + dispose = self._crop(dispose, self.dispose_extent) + elif 
self.dispose_op == APNG_DISPOSE_OP_BACKGROUND: + dispose = Image.core.fill(self.im.mode, self.size) + dispose = self._crop(dispose, self.dispose_extent) + else: + dispose = None + + if self._prev_im and self.blend_op == APNG_BLEND_OP_OVER: + updated = self._crop(self.im, self.dispose_extent) + self._prev_im.paste( + updated, self.dispose_extent, updated.convert("RGBA") + ) + self.im = self._prev_im + if self.pyaccess: + self.pyaccess = None + self._prev_im = self.im.copy() + + if dispose: + self._prev_im.paste(dispose, self.dispose_extent) + + def _getexif(self): + if "exif" not in self.info: + self.load() + if "exif" not in self.info and "Raw profile type exif" not in self.info: + return None + return dict(self.getexif()) + + def getexif(self): + if "exif" not in self.info: + self.load() + + return super().getexif() + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# -------------------------------------------------------------------- +# PNG writer + +_OUTMODES = { + # supported PIL modes, and corresponding rawmodes/bits/color combinations + "1": ("1", b"\x01\x00"), + "L;1": ("L;1", b"\x01\x00"), + "L;2": ("L;2", b"\x02\x00"), + "L;4": ("L;4", b"\x04\x00"), + "L": ("L", b"\x08\x00"), + "LA": ("LA", b"\x08\x04"), + "I": ("I;16B", b"\x10\x00"), + "I;16": ("I;16B", b"\x10\x00"), + "P;1": ("P;1", b"\x01\x03"), + "P;2": ("P;2", b"\x02\x03"), + "P;4": ("P;4", b"\x04\x03"), + "P": ("P", b"\x08\x03"), + "RGB": ("RGB", b"\x08\x02"), + "RGBA": ("RGBA", b"\x08\x06"), +} + + +def putchunk(fp, cid, *data): + """Write a PNG chunk (including CRC field)""" + + data = b"".join(data) + + fp.write(o32(len(data)) + cid) + fp.write(data) + crc = _crc32(data, _crc32(cid)) + fp.write(o32(crc)) + + +class _idat: + # wrap output from the encoder in IDAT chunks + + def __init__(self, fp, chunk): + self.fp = fp + self.chunk = chunk + + def write(self, data): + self.chunk(self.fp, b"IDAT", data) 
+ + +class _fdat: + # wrap encoder output in fdAT chunks + + def __init__(self, fp, chunk, seq_num): + self.fp = fp + self.chunk = chunk + self.seq_num = seq_num + + def write(self, data): + self.chunk(self.fp, b"fdAT", o32(self.seq_num), data) + self.seq_num += 1 + + +def _write_multiple_frames(im, fp, chunk, rawmode): + default_image = im.encoderinfo.get("default_image", im.info.get("default_image")) + duration = im.encoderinfo.get("duration", im.info.get("duration", 0)) + loop = im.encoderinfo.get("loop", im.info.get("loop", 0)) + disposal = im.encoderinfo.get("disposal", im.info.get("disposal")) + blend = im.encoderinfo.get("blend", im.info.get("blend")) + + if default_image: + chain = itertools.chain(im.encoderinfo.get("append_images", [])) + else: + chain = itertools.chain([im], im.encoderinfo.get("append_images", [])) + + im_frames = [] + frame_count = 0 + for im_seq in chain: + for im_frame in ImageSequence.Iterator(im_seq): + im_frame = im_frame.copy() + if im_frame.mode != im.mode: + if im.mode == "P": + im_frame = im_frame.convert(im.mode, palette=im.palette) + else: + im_frame = im_frame.convert(im.mode) + encoderinfo = im.encoderinfo.copy() + if isinstance(duration, (list, tuple)): + encoderinfo["duration"] = duration[frame_count] + if isinstance(disposal, (list, tuple)): + encoderinfo["disposal"] = disposal[frame_count] + if isinstance(blend, (list, tuple)): + encoderinfo["blend"] = blend[frame_count] + frame_count += 1 + + if im_frames: + previous = im_frames[-1] + prev_disposal = previous["encoderinfo"].get("disposal") + prev_blend = previous["encoderinfo"].get("blend") + if prev_disposal == APNG_DISPOSE_OP_PREVIOUS and len(im_frames) < 2: + prev_disposal = APNG_DISPOSE_OP_BACKGROUND + + if prev_disposal == APNG_DISPOSE_OP_BACKGROUND: + base_im = previous["im"] + dispose = Image.core.fill("RGBA", im.size, (0, 0, 0, 0)) + bbox = previous["bbox"] + if bbox: + dispose = dispose.crop(bbox) + else: + bbox = (0, 0) + im.size + base_im.paste(dispose, bbox) 
+ elif prev_disposal == APNG_DISPOSE_OP_PREVIOUS: + base_im = im_frames[-2]["im"] + else: + base_im = previous["im"] + delta = ImageChops.subtract_modulo( + im_frame.convert("RGB"), base_im.convert("RGB") + ) + bbox = delta.getbbox() + if ( + not bbox + and prev_disposal == encoderinfo.get("disposal") + and prev_blend == encoderinfo.get("blend") + ): + duration = encoderinfo.get("duration", 0) + if duration: + if "duration" in previous["encoderinfo"]: + previous["encoderinfo"]["duration"] += duration + else: + previous["encoderinfo"]["duration"] = duration + continue + else: + bbox = None + im_frames.append({"im": im_frame, "bbox": bbox, "encoderinfo": encoderinfo}) + + # animation control + chunk( + fp, b"acTL", o32(len(im_frames)), o32(loop), # 0: num_frames # 4: num_plays + ) + + # default image IDAT (if it exists) + if default_image: + ImageFile._save(im, _idat(fp, chunk), [("zip", (0, 0) + im.size, 0, rawmode)]) + + seq_num = 0 + for frame, frame_data in enumerate(im_frames): + im_frame = frame_data["im"] + if not frame_data["bbox"]: + bbox = (0, 0) + im_frame.size + else: + bbox = frame_data["bbox"] + im_frame = im_frame.crop(bbox) + size = im_frame.size + duration = int(round(frame_data["encoderinfo"].get("duration", 0))) + disposal = frame_data["encoderinfo"].get("disposal", APNG_DISPOSE_OP_NONE) + blend = frame_data["encoderinfo"].get("blend", APNG_BLEND_OP_SOURCE) + # frame control + chunk( + fp, + b"fcTL", + o32(seq_num), # sequence_number + o32(size[0]), # width + o32(size[1]), # height + o32(bbox[0]), # x_offset + o32(bbox[1]), # y_offset + o16(duration), # delay_numerator + o16(1000), # delay_denominator + o8(disposal), # dispose_op + o8(blend), # blend_op + ) + seq_num += 1 + # frame data + if frame == 0 and not default_image: + # first frame must be in IDAT chunks for backwards compatibility + ImageFile._save( + im_frame, + _idat(fp, chunk), + [("zip", (0, 0) + im_frame.size, 0, rawmode)], + ) + else: + fdat_chunks = _fdat(fp, chunk, seq_num) + 
ImageFile._save( + im_frame, fdat_chunks, [("zip", (0, 0) + im_frame.size, 0, rawmode)], + ) + seq_num = fdat_chunks.seq_num + + +def _save_all(im, fp, filename): + _save(im, fp, filename, save_all=True) + + +def _save(im, fp, filename, chunk=putchunk, save_all=False): + # save an image to disk (called by the save method) + + mode = im.mode + + if mode == "P": + + # + # attempt to minimize storage requirements for palette images + if "bits" in im.encoderinfo: + # number of bits specified by user + colors = 1 << im.encoderinfo["bits"] + else: + # check palette contents + if im.palette: + colors = max(min(len(im.palette.getdata()[1]) // 3, 256), 2) + else: + colors = 256 + + if colors <= 2: + bits = 1 + elif colors <= 4: + bits = 2 + elif colors <= 16: + bits = 4 + else: + bits = 8 + if bits != 8: + mode = "%s;%d" % (mode, bits) + + # encoder options + im.encoderconfig = ( + im.encoderinfo.get("optimize", False), + im.encoderinfo.get("compress_level", -1), + im.encoderinfo.get("compress_type", -1), + im.encoderinfo.get("dictionary", b""), + ) + + # get the corresponding PNG mode + try: + rawmode, mode = _OUTMODES[mode] + except KeyError as e: + raise OSError("cannot write mode %s as PNG" % mode) from e + + # + # write minimal PNG file + + fp.write(_MAGIC) + + chunk( + fp, + b"IHDR", + o32(im.size[0]), # 0: size + o32(im.size[1]), + mode, # 8: depth/type + b"\0", # 10: compression + b"\0", # 11: filter category + b"\0", # 12: interlace flag + ) + + chunks = [b"cHRM", b"gAMA", b"sBIT", b"sRGB", b"tIME"] + + icc = im.encoderinfo.get("icc_profile", im.info.get("icc_profile")) + if icc: + # ICC profile + # according to PNG spec, the iCCP chunk contains: + # Profile name 1-79 bytes (character string) + # Null separator 1 byte (null character) + # Compression method 1 byte (0) + # Compressed profile n bytes (zlib with deflate compression) + name = b"ICC Profile" + data = name + b"\0\0" + zlib.compress(icc) + chunk(fp, b"iCCP", data) + + # You must either have sRGB or iCCP. 
+ # Disallow sRGB chunks when an iCCP-chunk has been emitted. + chunks.remove(b"sRGB") + + info = im.encoderinfo.get("pnginfo") + if info: + chunks_multiple_allowed = [b"sPLT", b"iTXt", b"tEXt", b"zTXt"] + for cid, data in info.chunks: + if cid in chunks: + chunks.remove(cid) + chunk(fp, cid, data) + elif cid in chunks_multiple_allowed: + chunk(fp, cid, data) + + if im.mode == "P": + palette_byte_number = (2 ** bits) * 3 + palette_bytes = im.im.getpalette("RGB")[:palette_byte_number] + while len(palette_bytes) < palette_byte_number: + palette_bytes += b"\0" + chunk(fp, b"PLTE", palette_bytes) + + transparency = im.encoderinfo.get("transparency", im.info.get("transparency", None)) + + if transparency or transparency == 0: + if im.mode == "P": + # limit to actual palette size + alpha_bytes = 2 ** bits + if isinstance(transparency, bytes): + chunk(fp, b"tRNS", transparency[:alpha_bytes]) + else: + transparency = max(0, min(255, transparency)) + alpha = b"\xFF" * transparency + b"\0" + chunk(fp, b"tRNS", alpha[:alpha_bytes]) + elif im.mode in ("1", "L", "I"): + transparency = max(0, min(65535, transparency)) + chunk(fp, b"tRNS", o16(transparency)) + elif im.mode == "RGB": + red, green, blue = transparency + chunk(fp, b"tRNS", o16(red) + o16(green) + o16(blue)) + else: + if "transparency" in im.encoderinfo: + # don't bother with transparency if it's an RGBA + # and it's in the info dict. It's probably just stale. 
+ raise OSError("cannot use transparency for this mode") + else: + if im.mode == "P" and im.im.getpalettemode() == "RGBA": + alpha = im.im.getpalette("RGBA", "A") + alpha_bytes = 2 ** bits + chunk(fp, b"tRNS", alpha[:alpha_bytes]) + + dpi = im.encoderinfo.get("dpi") + if dpi: + chunk( + fp, + b"pHYs", + o32(int(dpi[0] / 0.0254 + 0.5)), + o32(int(dpi[1] / 0.0254 + 0.5)), + b"\x01", + ) + + if info: + chunks = [b"bKGD", b"hIST"] + for cid, data in info.chunks: + if cid in chunks: + chunks.remove(cid) + chunk(fp, cid, data) + + exif = im.encoderinfo.get("exif", im.info.get("exif")) + if exif: + if isinstance(exif, Image.Exif): + exif = exif.tobytes(8) + if exif.startswith(b"Exif\x00\x00"): + exif = exif[6:] + chunk(fp, b"eXIf", exif) + + if save_all: + _write_multiple_frames(im, fp, chunk, rawmode) + else: + ImageFile._save(im, _idat(fp, chunk), [("zip", (0, 0) + im.size, 0, rawmode)]) + + chunk(fp, b"IEND", b"") + + if hasattr(fp, "flush"): + fp.flush() + + +# -------------------------------------------------------------------- +# PNG chunk converter + + +def getchunks(im, **params): + """Return a list of PNG chunks representing this image.""" + + class collector: + data = [] + + def write(self, data): + pass + + def append(self, chunk): + self.data.append(chunk) + + def append(fp, cid, *data): + data = b"".join(data) + crc = o32(_crc32(data, _crc32(cid))) + fp.append((cid, data, crc)) + + fp = collector() + + try: + im.encoderinfo = params + _save(im, fp, None, append) + finally: + del im.encoderinfo + + return fp.data + + +# -------------------------------------------------------------------- +# Registry + +Image.register_open(PngImageFile.format, PngImageFile, _accept) +Image.register_save(PngImageFile.format, _save) +Image.register_save_all(PngImageFile.format, _save_all) + +Image.register_extensions(PngImageFile.format, [".png", ".apng"]) + +Image.register_mime(PngImageFile.format, "image/png") diff --git a/venv/Lib/site-packages/PIL/PpmImagePlugin.py 
b/venv/Lib/site-packages/PIL/PpmImagePlugin.py new file mode 100644 index 000000000..35a77bafb --- /dev/null +++ b/venv/Lib/site-packages/PIL/PpmImagePlugin.py @@ -0,0 +1,164 @@ +# +# The Python Imaging Library. +# $Id$ +# +# PPM support for PIL +# +# History: +# 96-03-24 fl Created +# 98-03-06 fl Write RGBA images (as RGB, that is) +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# + + +from . import Image, ImageFile + +# +# -------------------------------------------------------------------- + +b_whitespace = b"\x20\x09\x0a\x0b\x0c\x0d" + +MODES = { + # standard + b"P4": "1", + b"P5": "L", + b"P6": "RGB", + # extensions + b"P0CMYK": "CMYK", + # PIL extensions (for test purposes only) + b"PyP": "P", + b"PyRGBA": "RGBA", + b"PyCMYK": "CMYK", +} + + +def _accept(prefix): + return prefix[0:1] == b"P" and prefix[1] in b"0456y" + + +## +# Image plugin for PBM, PGM, and PPM images. + + +class PpmImageFile(ImageFile.ImageFile): + + format = "PPM" + format_description = "Pbmplus image" + + def _token(self, s=b""): + while True: # read until next whitespace + c = self.fp.read(1) + if not c or c in b_whitespace: + break + if c > b"\x79": + raise ValueError("Expected ASCII value, found binary") + s = s + c + if len(s) > 9: + raise ValueError("Expected int, got > 9 digits") + return s + + def _open(self): + + # check magic + s = self.fp.read(1) + if s != b"P": + raise SyntaxError("not a PPM file") + magic_number = self._token(s) + mode = MODES[magic_number] + + self.custom_mimetype = { + b"P4": "image/x-portable-bitmap", + b"P5": "image/x-portable-graymap", + b"P6": "image/x-portable-pixmap", + }.get(magic_number) + + if mode == "1": + self.mode = "1" + rawmode = "1;I" + else: + self.mode = rawmode = mode + + for ix in range(3): + while True: + while True: + s = self.fp.read(1) + if s not in b_whitespace: + break + if s == b"": + raise ValueError("File does not extend 
beyond magic number") + if s != b"#": + break + s = self.fp.readline() + s = int(self._token(s)) + if ix == 0: + xsize = s + elif ix == 1: + ysize = s + if mode == "1": + break + elif ix == 2: + # maxgrey + if s > 255: + if not mode == "L": + raise ValueError("Too many colors for band: %s" % s) + if s < 2 ** 16: + self.mode = "I" + rawmode = "I;16B" + else: + self.mode = "I" + rawmode = "I;32B" + + self._size = xsize, ysize + self.tile = [("raw", (0, 0, xsize, ysize), self.fp.tell(), (rawmode, 0, 1))] + + +# +# -------------------------------------------------------------------- + + +def _save(im, fp, filename): + if im.mode == "1": + rawmode, head = "1;I", b"P4" + elif im.mode == "L": + rawmode, head = "L", b"P5" + elif im.mode == "I": + if im.getextrema()[1] < 2 ** 16: + rawmode, head = "I;16B", b"P5" + else: + rawmode, head = "I;32B", b"P5" + elif im.mode == "RGB": + rawmode, head = "RGB", b"P6" + elif im.mode == "RGBA": + rawmode, head = "RGB", b"P6" + else: + raise OSError("cannot write mode %s as PPM" % im.mode) + fp.write(head + ("\n%d %d\n" % im.size).encode("ascii")) + if head == b"P6": + fp.write(b"255\n") + if head == b"P5": + if rawmode == "L": + fp.write(b"255\n") + elif rawmode == "I;16B": + fp.write(b"65535\n") + elif rawmode == "I;32B": + fp.write(b"2147483648\n") + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]) + + # ALTERNATIVE: save via builtin debug function + # im._dump(filename) + + +# +# -------------------------------------------------------------------- + + +Image.register_open(PpmImageFile.format, PpmImageFile, _accept) +Image.register_save(PpmImageFile.format, _save) + +Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm"]) + +Image.register_mime(PpmImageFile.format, "image/x-portable-anymap") diff --git a/venv/Lib/site-packages/PIL/PsdImagePlugin.py b/venv/Lib/site-packages/PIL/PsdImagePlugin.py new file mode 100644 index 000000000..f019bb64e --- /dev/null +++ 
b/venv/Lib/site-packages/PIL/PsdImagePlugin.py @@ -0,0 +1,309 @@ +# +# The Python Imaging Library +# $Id$ +# +# Adobe PSD 2.5/3.0 file handling +# +# History: +# 1995-09-01 fl Created +# 1997-01-03 fl Read most PSD images +# 1997-01-18 fl Fixed P and CMYK support +# 2001-10-21 fl Added seek/tell support (for layers) +# +# Copyright (c) 1997-2001 by Secret Labs AB. +# Copyright (c) 1995-2001 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import io + +from . import Image, ImageFile, ImagePalette +from ._binary import i8, i16be as i16, i32be as i32 + +MODES = { + # (photoshop mode, bits) -> (pil mode, required channels) + (0, 1): ("1", 1), + (0, 8): ("L", 1), + (1, 8): ("L", 1), + (2, 8): ("P", 1), + (3, 8): ("RGB", 3), + (4, 8): ("CMYK", 4), + (7, 8): ("L", 1), # FIXME: multilayer + (8, 8): ("L", 1), # duotone + (9, 8): ("LAB", 3), +} + + +# --------------------------------------------------------------------. +# read PSD images + + +def _accept(prefix): + return prefix[:4] == b"8BPS" + + +## +# Image plugin for Photoshop images. 
+ + +class PsdImageFile(ImageFile.ImageFile): + + format = "PSD" + format_description = "Adobe Photoshop" + _close_exclusive_fp_after_loading = False + + def _open(self): + + read = self.fp.read + + # + # header + + s = read(26) + if not _accept(s) or i16(s[4:]) != 1: + raise SyntaxError("not a PSD file") + + psd_bits = i16(s[22:]) + psd_channels = i16(s[12:]) + psd_mode = i16(s[24:]) + + mode, channels = MODES[(psd_mode, psd_bits)] + + if channels > psd_channels: + raise OSError("not enough channels") + + self.mode = mode + self._size = i32(s[18:]), i32(s[14:]) + + # + # color mode data + + size = i32(read(4)) + if size: + data = read(size) + if mode == "P" and size == 768: + self.palette = ImagePalette.raw("RGB;L", data) + + # + # image resources + + self.resources = [] + + size = i32(read(4)) + if size: + # load resources + end = self.fp.tell() + size + while self.fp.tell() < end: + read(4) # signature + id = i16(read(2)) + name = read(i8(read(1))) + if not (len(name) & 1): + read(1) # padding + data = read(i32(read(4))) + if len(data) & 1: + read(1) # padding + self.resources.append((id, name, data)) + if id == 1039: # ICC profile + self.info["icc_profile"] = data + + # + # layer and mask information + + self.layers = [] + + size = i32(read(4)) + if size: + end = self.fp.tell() + size + size = i32(read(4)) + if size: + self.layers = _layerinfo(self.fp) + self.fp.seek(end) + self.n_frames = len(self.layers) + self.is_animated = self.n_frames > 1 + + # + # image descriptor + + self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels) + + # keep the file open + self.__fp = self.fp + self.frame = 1 + self._min_frame = 1 + + def seek(self, layer): + if not self._seek_check(layer): + return + + # seek to given layer (1..max) + try: + name, mode, bbox, tile = self.layers[layer - 1] + self.mode = mode + self.tile = tile + self.frame = layer + self.fp = self.__fp + return name, bbox + except IndexError as e: + raise EOFError("no such layer") from e + + def 
tell(self): + # return layer number (0=image, 1..max=layers) + return self.frame + + def load_prepare(self): + # create image memory if necessary + if not self.im or self.im.mode != self.mode or self.im.size != self.size: + self.im = Image.core.fill(self.mode, self.size, 0) + # create palette (optional) + if self.mode == "P": + Image.Image.load(self) + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +def _layerinfo(file): + # read layerinfo block + layers = [] + read = file.read + for i in range(abs(i16(read(2)))): + + # bounding box + y0 = i32(read(4)) + x0 = i32(read(4)) + y1 = i32(read(4)) + x1 = i32(read(4)) + + # image info + info = [] + mode = [] + types = list(range(i16(read(2)))) + if len(types) > 4: + continue + + for i in types: + type = i16(read(2)) + + if type == 65535: + m = "A" + else: + m = "RGBA"[type] + + mode.append(m) + size = i32(read(4)) + info.append((m, size)) + + # figure out the image mode + mode.sort() + if mode == ["R"]: + mode = "L" + elif mode == ["B", "G", "R"]: + mode = "RGB" + elif mode == ["A", "B", "G", "R"]: + mode = "RGBA" + else: + mode = None # unknown + + # skip over blend flags and extra information + read(12) # filler + name = "" + size = i32(read(4)) # length of the extra data field + combined = 0 + if size: + data_end = file.tell() + size + + length = i32(read(4)) + if length: + file.seek(length - 16, io.SEEK_CUR) + combined += length + 4 + + length = i32(read(4)) + if length: + file.seek(length, io.SEEK_CUR) + combined += length + 4 + + length = i8(read(1)) + if length: + # Don't know the proper encoding, + # Latin-1 should be a good guess + name = read(length).decode("latin-1", "replace") + combined += length + 1 + + file.seek(data_end) + layers.append((name, mode, (x0, y0, x1, y1))) + + # get tiles + i = 0 + for name, mode, bbox in layers: + tile = [] + for m in mode: + t = _maketile(file, m, bbox, 1) + if t: + tile.extend(t) 
+ layers[i] = name, mode, bbox, tile + i += 1 + + return layers + + +def _maketile(file, mode, bbox, channels): + + tile = None + read = file.read + + compression = i16(read(2)) + + xsize = bbox[2] - bbox[0] + ysize = bbox[3] - bbox[1] + + offset = file.tell() + + if compression == 0: + # + # raw compression + tile = [] + for channel in range(channels): + layer = mode[channel] + if mode == "CMYK": + layer += ";I" + tile.append(("raw", bbox, offset, layer)) + offset = offset + xsize * ysize + + elif compression == 1: + # + # packbits compression + i = 0 + tile = [] + bytecount = read(channels * ysize * 2) + offset = file.tell() + for channel in range(channels): + layer = mode[channel] + if mode == "CMYK": + layer += ";I" + tile.append(("packbits", bbox, offset, layer)) + for y in range(ysize): + offset = offset + i16(bytecount[i : i + 2]) + i += 2 + + file.seek(offset) + + if offset & 1: + read(1) # padding + + return tile + + +# -------------------------------------------------------------------- +# registry + + +Image.register_open(PsdImageFile.format, PsdImageFile, _accept) + +Image.register_extension(PsdImageFile.format, ".psd") diff --git a/venv/Lib/site-packages/PIL/PyAccess.py b/venv/Lib/site-packages/PIL/PyAccess.py new file mode 100644 index 000000000..494f5f9f4 --- /dev/null +++ b/venv/Lib/site-packages/PIL/PyAccess.py @@ -0,0 +1,352 @@ +# +# The Python Imaging Library +# Pillow fork +# +# Python implementation of the PixelAccess Object +# +# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-2009 by Fredrik Lundh. +# Copyright (c) 2013 Eric Soroos +# +# See the README file for information on usage and redistribution +# + +# Notes: +# +# * Implements the pixel access object following Access. +# * Does not implement the line functions, as they don't appear to be used +# * Taking only the tuple form, which is used from python. +# * Fill.c uses the integer form, but it's still going to use the old +# Access.c implementation. 
+# + +import logging +import sys + +try: + from cffi import FFI + + defs = """ + struct Pixel_RGBA { + unsigned char r,g,b,a; + }; + struct Pixel_I16 { + unsigned char l,r; + }; + """ + ffi = FFI() + ffi.cdef(defs) +except ImportError as ex: + # Allow error import for doc purposes, but error out when accessing + # anything in core. + from ._util import deferred_error + + FFI = ffi = deferred_error(ex) + +logger = logging.getLogger(__name__) + + +class PyAccess: + def __init__(self, img, readonly=False): + vals = dict(img.im.unsafe_ptrs) + self.readonly = readonly + self.image8 = ffi.cast("unsigned char **", vals["image8"]) + self.image32 = ffi.cast("int **", vals["image32"]) + self.image = ffi.cast("unsigned char **", vals["image"]) + self.xsize, self.ysize = img.im.size + + # Keep pointer to im object to prevent dereferencing. + self._im = img.im + if self._im.mode == "P": + self._palette = img.palette + + # Debugging is polluting test traces, only useful here + # when hacking on PyAccess + # logger.debug("%s", vals) + self._post_init() + + def _post_init(self): + pass + + def __setitem__(self, xy, color): + """ + Modifies the pixel at x,y. The color is given as a single + numerical value for single band images, and a tuple for + multi-band images + + :param xy: The pixel coordinate, given as (x, y). See + :ref:`coordinate-system`. + :param color: The pixel value. + """ + if self.readonly: + raise ValueError("Attempt to putpixel a read only image") + (x, y) = xy + if x < 0: + x = self.xsize + x + if y < 0: + y = self.ysize + y + (x, y) = self.check_xy((x, y)) + + if ( + self._im.mode == "P" + and isinstance(color, (list, tuple)) + and len(color) in [3, 4] + ): + # RGB or RGBA value for a P image + color = self._palette.getcolor(color) + + return self.set_pixel(x, y, color) + + def __getitem__(self, xy): + """ + Returns the pixel at x,y. 
The pixel is returned as a single + value for single band images or a tuple for multiple band + images + + :param xy: The pixel coordinate, given as (x, y). See + :ref:`coordinate-system`. + :returns: a pixel value for single band images, a tuple of + pixel values for multiband images. + """ + (x, y) = xy + if x < 0: + x = self.xsize + x + if y < 0: + y = self.ysize + y + (x, y) = self.check_xy((x, y)) + return self.get_pixel(x, y) + + putpixel = __setitem__ + getpixel = __getitem__ + + def check_xy(self, xy): + (x, y) = xy + if not (0 <= x < self.xsize and 0 <= y < self.ysize): + raise ValueError("pixel location out of range") + return xy + + +class _PyAccess32_2(PyAccess): + """ PA, LA, stored in first and last bytes of a 32 bit word """ + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return (pixel.r, pixel.a) + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + # tuple + pixel.r = min(color[0], 255) + pixel.a = min(color[1], 255) + + +class _PyAccess32_3(PyAccess): + """ RGB and friends, stored in the first three bytes of a 32 bit word """ + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return (pixel.r, pixel.g, pixel.b) + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + # tuple + pixel.r = min(color[0], 255) + pixel.g = min(color[1], 255) + pixel.b = min(color[2], 255) + pixel.a = 255 + + +class _PyAccess32_4(PyAccess): + """ RGBA etc, all 4 bytes of a 32 bit word """ + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_RGBA **", self.image32) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return (pixel.r, pixel.g, pixel.b, pixel.a) + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + # tuple + pixel.r = min(color[0], 255) + pixel.g = 
min(color[1], 255) + pixel.b = min(color[2], 255) + pixel.a = min(color[3], 255) + + +class _PyAccess8(PyAccess): + """ 1, L, P, 8 bit images stored as uint8 """ + + def _post_init(self, *args, **kwargs): + self.pixels = self.image8 + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + try: + # integer + self.pixels[y][x] = min(color, 255) + except TypeError: + # tuple + self.pixels[y][x] = min(color[0], 255) + + +class _PyAccessI16_N(PyAccess): + """ I;16 access, native bitendian without conversion """ + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("unsigned short **", self.image) + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + try: + # integer + self.pixels[y][x] = min(color, 65535) + except TypeError: + # tuple + self.pixels[y][x] = min(color[0], 65535) + + +class _PyAccessI16_L(PyAccess): + """ I;16L access, with conversion """ + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_I16 **", self.image) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return pixel.l + pixel.r * 256 + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + try: + color = min(color, 65535) + except TypeError: + color = min(color[0], 65535) + + pixel.l = color & 0xFF # noqa: E741 + pixel.r = color >> 8 + + +class _PyAccessI16_B(PyAccess): + """ I;16B access, with conversion """ + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("struct Pixel_I16 **", self.image) + + def get_pixel(self, x, y): + pixel = self.pixels[y][x] + return pixel.l * 256 + pixel.r + + def set_pixel(self, x, y, color): + pixel = self.pixels[y][x] + try: + color = min(color, 65535) + except Exception: + color = min(color[0], 65535) + + pixel.l = color >> 8 # noqa: E741 + pixel.r = color & 0xFF + + +class _PyAccessI32_N(PyAccess): + """ Signed Int32 access, native endian """ + + def _post_init(self, *args, **kwargs): + self.pixels = 
self.image32 + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + self.pixels[y][x] = color + + +class _PyAccessI32_Swap(PyAccess): + """ I;32L/B access, with byteswapping conversion """ + + def _post_init(self, *args, **kwargs): + self.pixels = self.image32 + + def reverse(self, i): + orig = ffi.new("int *", i) + chars = ffi.cast("unsigned char *", orig) + chars[0], chars[1], chars[2], chars[3] = chars[3], chars[2], chars[1], chars[0] + return ffi.cast("int *", chars)[0] + + def get_pixel(self, x, y): + return self.reverse(self.pixels[y][x]) + + def set_pixel(self, x, y, color): + self.pixels[y][x] = self.reverse(color) + + +class _PyAccessF(PyAccess): + """ 32 bit float access """ + + def _post_init(self, *args, **kwargs): + self.pixels = ffi.cast("float **", self.image32) + + def get_pixel(self, x, y): + return self.pixels[y][x] + + def set_pixel(self, x, y, color): + try: + # not a tuple + self.pixels[y][x] = color + except TypeError: + # tuple + self.pixels[y][x] = color[0] + + +mode_map = { + "1": _PyAccess8, + "L": _PyAccess8, + "P": _PyAccess8, + "LA": _PyAccess32_2, + "La": _PyAccess32_2, + "PA": _PyAccess32_2, + "RGB": _PyAccess32_3, + "LAB": _PyAccess32_3, + "HSV": _PyAccess32_3, + "YCbCr": _PyAccess32_3, + "RGBA": _PyAccess32_4, + "RGBa": _PyAccess32_4, + "RGBX": _PyAccess32_4, + "CMYK": _PyAccess32_4, + "F": _PyAccessF, + "I": _PyAccessI32_N, +} + +if sys.byteorder == "little": + mode_map["I;16"] = _PyAccessI16_N + mode_map["I;16L"] = _PyAccessI16_N + mode_map["I;16B"] = _PyAccessI16_B + + mode_map["I;32L"] = _PyAccessI32_N + mode_map["I;32B"] = _PyAccessI32_Swap +else: + mode_map["I;16"] = _PyAccessI16_L + mode_map["I;16L"] = _PyAccessI16_L + mode_map["I;16B"] = _PyAccessI16_N + + mode_map["I;32L"] = _PyAccessI32_Swap + mode_map["I;32B"] = _PyAccessI32_N + + +def new(img, readonly=False): + access_type = mode_map.get(img.mode, None) + if not access_type: + logger.debug("PyAccess Not Implemented: %s", 
img.mode) + return None + return access_type(img, readonly) diff --git a/venv/Lib/site-packages/PIL/SgiImagePlugin.py b/venv/Lib/site-packages/PIL/SgiImagePlugin.py new file mode 100644 index 000000000..ec9855e77 --- /dev/null +++ b/venv/Lib/site-packages/PIL/SgiImagePlugin.py @@ -0,0 +1,230 @@ +# +# The Python Imaging Library. +# $Id$ +# +# SGI image file handling +# +# See "The SGI Image File Format (Draft version 0.97)", Paul Haeberli. +# +# +# +# History: +# 2017-22-07 mb Add RLE decompression +# 2016-16-10 mb Add save method without compression +# 1995-09-10 fl Created +# +# Copyright (c) 2016 by Mickael Bonfill. +# Copyright (c) 2008 by Karsten Hiddemann. +# Copyright (c) 1997 by Secret Labs AB. +# Copyright (c) 1995 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + + +import os +import struct + +from . import Image, ImageFile +from ._binary import i8, i16be as i16, o8 + + +def _accept(prefix): + return len(prefix) >= 2 and i16(prefix) == 474 + + +MODES = { + (1, 1, 1): "L", + (1, 2, 1): "L", + (2, 1, 1): "L;16B", + (2, 2, 1): "L;16B", + (1, 3, 3): "RGB", + (2, 3, 3): "RGB;16B", + (1, 3, 4): "RGBA", + (2, 3, 4): "RGBA;16B", +} + + +## +# Image plugin for SGI images. 
+class SgiImageFile(ImageFile.ImageFile): + + format = "SGI" + format_description = "SGI Image File Format" + + def _open(self): + + # HEAD + headlen = 512 + s = self.fp.read(headlen) + + if not _accept(s): + raise ValueError("Not an SGI image file") + + # compression : verbatim or RLE + compression = i8(s[2]) + + # bpc : 1 or 2 bytes (8bits or 16bits) + bpc = i8(s[3]) + + # dimension : 1, 2 or 3 (depending on xsize, ysize and zsize) + dimension = i16(s[4:]) + + # xsize : width + xsize = i16(s[6:]) + + # ysize : height + ysize = i16(s[8:]) + + # zsize : channels count + zsize = i16(s[10:]) + + # layout + layout = bpc, dimension, zsize + + # determine mode from bits/zsize + rawmode = "" + try: + rawmode = MODES[layout] + except KeyError: + pass + + if rawmode == "": + raise ValueError("Unsupported SGI image mode") + + self._size = xsize, ysize + self.mode = rawmode.split(";")[0] + if self.mode == "RGB": + self.custom_mimetype = "image/rgb" + + # orientation -1 : scanlines begins at the bottom-left corner + orientation = -1 + + # decoder info + if compression == 0: + pagesize = xsize * ysize * bpc + if bpc == 2: + self.tile = [ + ("SGI16", (0, 0) + self.size, headlen, (self.mode, 0, orientation)) + ] + else: + self.tile = [] + offset = headlen + for layer in self.mode: + self.tile.append( + ("raw", (0, 0) + self.size, offset, (layer, 0, orientation)) + ) + offset += pagesize + elif compression == 1: + self.tile = [ + ("sgi_rle", (0, 0) + self.size, headlen, (rawmode, orientation, bpc)) + ] + + +def _save(im, fp, filename): + if im.mode != "RGB" and im.mode != "RGBA" and im.mode != "L": + raise ValueError("Unsupported SGI image mode") + + # Get the keyword arguments + info = im.encoderinfo + + # Byte-per-pixel precision, 1 = 8bits per pixel + bpc = info.get("bpc", 1) + + if bpc not in (1, 2): + raise ValueError("Unsupported number of bytes per pixel") + + # Flip the image, since the origin of SGI file is the bottom-left corner + orientation = -1 + # Define the file as 
SGI File Format + magicNumber = 474 + # Run-Length Encoding Compression - Unsupported at this time + rle = 0 + + # Number of dimensions (x,y,z) + dim = 3 + # X Dimension = width / Y Dimension = height + x, y = im.size + if im.mode == "L" and y == 1: + dim = 1 + elif im.mode == "L": + dim = 2 + # Z Dimension: Number of channels + z = len(im.mode) + + if dim == 1 or dim == 2: + z = 1 + + # assert we've got the right number of bands. + if len(im.getbands()) != z: + raise ValueError( + "incorrect number of bands in SGI write: {} vs {}".format( + z, len(im.getbands()) + ) + ) + + # Minimum Byte value + pinmin = 0 + # Maximum Byte value (255 = 8bits per pixel) + pinmax = 255 + # Image name (79 characters max, truncated below in write) + imgName = os.path.splitext(os.path.basename(filename))[0] + imgName = imgName.encode("ascii", "ignore") + # Standard representation of pixel in the file + colormap = 0 + fp.write(struct.pack(">h", magicNumber)) + fp.write(o8(rle)) + fp.write(o8(bpc)) + fp.write(struct.pack(">H", dim)) + fp.write(struct.pack(">H", x)) + fp.write(struct.pack(">H", y)) + fp.write(struct.pack(">H", z)) + fp.write(struct.pack(">l", pinmin)) + fp.write(struct.pack(">l", pinmax)) + fp.write(struct.pack("4s", b"")) # dummy + fp.write(struct.pack("79s", imgName)) # truncates to 79 chars + fp.write(struct.pack("s", b"")) # force null byte after imgname + fp.write(struct.pack(">l", colormap)) + fp.write(struct.pack("404s", b"")) # dummy + + rawmode = "L" + if bpc == 2: + rawmode = "L;16B" + + for channel in im.split(): + fp.write(channel.tobytes("raw", rawmode, 0, orientation)) + + fp.close() + + +class SGI16Decoder(ImageFile.PyDecoder): + _pulls_fd = True + + def decode(self, buffer): + rawmode, stride, orientation = self.args + pagesize = self.state.xsize * self.state.ysize + zsize = len(self.mode) + self.fd.seek(512) + + for band in range(zsize): + channel = Image.new("L", (self.state.xsize, self.state.ysize)) + channel.frombytes( + self.fd.read(2 * pagesize), 
"raw", "L;16B", stride, orientation + ) + self.im.putband(channel.im, band) + + return -1, 0 + + +# +# registry + + +Image.register_decoder("SGI16", SGI16Decoder) +Image.register_open(SgiImageFile.format, SgiImageFile, _accept) +Image.register_save(SgiImageFile.format, _save) +Image.register_mime(SgiImageFile.format, "image/sgi") + +Image.register_extensions(SgiImageFile.format, [".bw", ".rgb", ".rgba", ".sgi"]) + +# End of file diff --git a/venv/Lib/site-packages/PIL/SpiderImagePlugin.py b/venv/Lib/site-packages/PIL/SpiderImagePlugin.py new file mode 100644 index 000000000..56aac2987 --- /dev/null +++ b/venv/Lib/site-packages/PIL/SpiderImagePlugin.py @@ -0,0 +1,324 @@ +# +# The Python Imaging Library. +# +# SPIDER image file handling +# +# History: +# 2004-08-02 Created BB +# 2006-03-02 added save method +# 2006-03-13 added support for stack images +# +# Copyright (c) 2004 by Health Research Inc. (HRI) RENSSELAER, NY 12144. +# Copyright (c) 2004 by William Baxter. +# Copyright (c) 2004 by Secret Labs AB. +# Copyright (c) 2004 by Fredrik Lundh. +# + +## +# Image plugin for the Spider image format. This format is is used +# by the SPIDER software, in processing image data from electron +# microscopy and tomography. +## + +# +# SpiderImagePlugin.py +# +# The Spider image format is used by SPIDER software, in processing +# image data from electron microscopy and tomography. +# +# Spider home page: +# https://spider.wadsworth.org/spider_doc/spider/docs/spider.html +# +# Details about the Spider image format: +# https://spider.wadsworth.org/spider_doc/spider/docs/image_doc.html +# +import os +import struct +import sys + +from PIL import Image, ImageFile + + +def isInt(f): + try: + i = int(f) + if f - i == 0: + return 1 + else: + return 0 + except (ValueError, OverflowError): + return 0 + + +iforms = [1, 3, -11, -12, -21, -22] + + +# There is no magic number to identify Spider files, so just check a +# series of header locations to see if they have reasonable values. 
+# Returns no. of bytes in the header, if it is a valid Spider header, +# otherwise returns 0 + + +def isSpiderHeader(t): + h = (99,) + t # add 1 value so can use spider header index start=1 + # header values 1,2,5,12,13,22,23 should be integers + for i in [1, 2, 5, 12, 13, 22, 23]: + if not isInt(h[i]): + return 0 + # check iform + iform = int(h[5]) + if iform not in iforms: + return 0 + # check other header values + labrec = int(h[13]) # no. records in file header + labbyt = int(h[22]) # total no. of bytes in header + lenbyt = int(h[23]) # record length in bytes + if labbyt != (labrec * lenbyt): + return 0 + # looks like a valid header + return labbyt + + +def isSpiderImage(filename): + with open(filename, "rb") as fp: + f = fp.read(92) # read 23 * 4 bytes + t = struct.unpack(">23f", f) # try big-endian first + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + t = struct.unpack("<23f", f) # little-endian + hdrlen = isSpiderHeader(t) + return hdrlen + + +class SpiderImageFile(ImageFile.ImageFile): + + format = "SPIDER" + format_description = "Spider 2D image" + _close_exclusive_fp_after_loading = False + + def _open(self): + # check header + n = 27 * 4 # read 27 float values + f = self.fp.read(n) + + try: + self.bigendian = 1 + t = struct.unpack(">27f", f) # try big-endian first + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + self.bigendian = 0 + t = struct.unpack("<27f", f) # little-endian + hdrlen = isSpiderHeader(t) + if hdrlen == 0: + raise SyntaxError("not a valid Spider file") + except struct.error as e: + raise SyntaxError("not a valid Spider file") from e + + h = (99,) + t # add 1 value : spider header index starts at 1 + iform = int(h[5]) + if iform != 1: + raise SyntaxError("not a Spider 2D image") + + self._size = int(h[12]), int(h[2]) # size in pixels (width, height) + self.istack = int(h[24]) + self.imgnumber = int(h[27]) + + if self.istack == 0 and self.imgnumber == 0: + # stk=0, img=0: a regular 2D image + offset = hdrlen + self._nimages = 1 + elif 
self.istack > 0 and self.imgnumber == 0: + # stk>0, img=0: Opening the stack for the first time + self.imgbytes = int(h[12]) * int(h[2]) * 4 + self.hdrlen = hdrlen + self._nimages = int(h[26]) + # Point to the first image in the stack + offset = hdrlen * 2 + self.imgnumber = 1 + elif self.istack == 0 and self.imgnumber > 0: + # stk=0, img>0: an image within the stack + offset = hdrlen + self.stkoffset + self.istack = 2 # So Image knows it's still a stack + else: + raise SyntaxError("inconsistent stack header values") + + if self.bigendian: + self.rawmode = "F;32BF" + else: + self.rawmode = "F;32F" + self.mode = "F" + + self.tile = [("raw", (0, 0) + self.size, offset, (self.rawmode, 0, 1))] + self.__fp = self.fp # FIXME: hack + + @property + def n_frames(self): + return self._nimages + + @property + def is_animated(self): + return self._nimages > 1 + + # 1st image index is zero (although SPIDER imgnumber starts at 1) + def tell(self): + if self.imgnumber < 1: + return 0 + else: + return self.imgnumber - 1 + + def seek(self, frame): + if self.istack == 0: + raise EOFError("attempt to seek in a non-stack file") + if not self._seek_check(frame): + return + self.stkoffset = self.hdrlen + frame * (self.hdrlen + self.imgbytes) + self.fp = self.__fp + self.fp.seek(self.stkoffset) + self._open() + + # returns a byte image after rescaling to 0..255 + def convert2byte(self, depth=255): + (minimum, maximum) = self.getextrema() + m = 1 + if maximum != minimum: + m = depth / (maximum - minimum) + b = -m * minimum + return self.point(lambda i, m=m, b=b: i * m + b).convert("L") + + # returns a ImageTk.PhotoImage object, after rescaling to 0..255 + def tkPhotoImage(self): + from PIL import ImageTk + + return ImageTk.PhotoImage(self.convert2byte(), palette=256) + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# -------------------------------------------------------------------- +# 
Image series + +# given a list of filenames, return a list of images +def loadImageSeries(filelist=None): + """create a list of :py:class:`~PIL.Image.Image` objects for use in a montage""" + if filelist is None or len(filelist) < 1: + return + + imglist = [] + for img in filelist: + if not os.path.exists(img): + print("unable to find %s" % img) + continue + try: + with Image.open(img) as im: + im = im.convert2byte() + except Exception: + if not isSpiderImage(img): + print(img + " is not a Spider image file") + continue + im.info["filename"] = img + imglist.append(im) + return imglist + + +# -------------------------------------------------------------------- +# For saving images in Spider format + + +def makeSpiderHeader(im): + nsam, nrow = im.size + lenbyt = nsam * 4 # There are labrec records in the header + labrec = int(1024 / lenbyt) + if 1024 % lenbyt != 0: + labrec += 1 + labbyt = labrec * lenbyt + hdr = [] + nvalues = int(labbyt / 4) + for i in range(nvalues): + hdr.append(0.0) + + if len(hdr) < 23: + return [] + + # NB these are Fortran indices + hdr[1] = 1.0 # nslice (=1 for an image) + hdr[2] = float(nrow) # number of rows per slice + hdr[5] = 1.0 # iform for 2D image + hdr[12] = float(nsam) # number of pixels per line + hdr[13] = float(labrec) # number of records in file header + hdr[22] = float(labbyt) # total number of bytes in header + hdr[23] = float(lenbyt) # record length in bytes + + # adjust for Fortran indexing + hdr = hdr[1:] + hdr.append(0.0) + # pack binary data into a string + hdrstr = [] + for v in hdr: + hdrstr.append(struct.pack("f", v)) + return hdrstr + + +def _save(im, fp, filename): + if im.mode[0] != "F": + im = im.convert("F") + + hdr = makeSpiderHeader(im) + if len(hdr) < 256: + raise OSError("Error creating Spider header") + + # write the SPIDER header + fp.writelines(hdr) + + rawmode = "F;32NF" # 32-bit native floating point + ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))]) + + +def _save_spider(im, fp, 
filename): + # get the filename extension and register it with Image + ext = os.path.splitext(filename)[1] + Image.register_extension(SpiderImageFile.format, ext) + _save(im, fp, filename) + + +# -------------------------------------------------------------------- + + +Image.register_open(SpiderImageFile.format, SpiderImageFile) +Image.register_save(SpiderImageFile.format, _save_spider) + +if __name__ == "__main__": + + if len(sys.argv) < 2: + print("Syntax: python SpiderImagePlugin.py [infile] [outfile]") + sys.exit() + + filename = sys.argv[1] + if not isSpiderImage(filename): + print("input image must be in Spider format") + sys.exit() + + with Image.open(filename) as im: + print("image: " + str(im)) + print("format: " + str(im.format)) + print("size: " + str(im.size)) + print("mode: " + str(im.mode)) + print("max, min: ", end=" ") + print(im.getextrema()) + + if len(sys.argv) > 2: + outfile = sys.argv[2] + + # perform some image operation + im = im.transpose(Image.FLIP_LEFT_RIGHT) + print( + "saving a flipped version of %s as %s " + % (os.path.basename(filename), outfile) + ) + im.save(outfile, SpiderImageFile.format) diff --git a/venv/Lib/site-packages/PIL/SunImagePlugin.py b/venv/Lib/site-packages/PIL/SunImagePlugin.py new file mode 100644 index 000000000..d99884293 --- /dev/null +++ b/venv/Lib/site-packages/PIL/SunImagePlugin.py @@ -0,0 +1,136 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Sun image file handling +# +# History: +# 1995-09-10 fl Created +# 1996-05-28 fl Fixed 32-bit alignment +# 1998-12-29 fl Import ImagePalette module +# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault) +# +# Copyright (c) 1997-2001 by Secret Labs AB +# Copyright (c) 1995-1996 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + + +from . 
import Image, ImageFile, ImagePalette +from ._binary import i32be as i32 + + +def _accept(prefix): + return len(prefix) >= 4 and i32(prefix) == 0x59A66A95 + + +## +# Image plugin for Sun raster files. + + +class SunImageFile(ImageFile.ImageFile): + + format = "SUN" + format_description = "Sun Raster File" + + def _open(self): + + # The Sun Raster file header is 32 bytes in length + # and has the following format: + + # typedef struct _SunRaster + # { + # DWORD MagicNumber; /* Magic (identification) number */ + # DWORD Width; /* Width of image in pixels */ + # DWORD Height; /* Height of image in pixels */ + # DWORD Depth; /* Number of bits per pixel */ + # DWORD Length; /* Size of image data in bytes */ + # DWORD Type; /* Type of raster file */ + # DWORD ColorMapType; /* Type of color map */ + # DWORD ColorMapLength; /* Size of the color map in bytes */ + # } SUNRASTER; + + # HEAD + s = self.fp.read(32) + if not _accept(s): + raise SyntaxError("not an SUN raster file") + + offset = 32 + + self._size = i32(s[4:8]), i32(s[8:12]) + + depth = i32(s[12:16]) + # data_length = i32(s[16:20]) # unreliable, ignore. 
+ file_type = i32(s[20:24]) + palette_type = i32(s[24:28]) # 0: None, 1: RGB, 2: Raw/arbitrary + palette_length = i32(s[28:32]) + + if depth == 1: + self.mode, rawmode = "1", "1;I" + elif depth == 4: + self.mode, rawmode = "L", "L;4" + elif depth == 8: + self.mode = rawmode = "L" + elif depth == 24: + if file_type == 3: + self.mode, rawmode = "RGB", "RGB" + else: + self.mode, rawmode = "RGB", "BGR" + elif depth == 32: + if file_type == 3: + self.mode, rawmode = "RGB", "RGBX" + else: + self.mode, rawmode = "RGB", "BGRX" + else: + raise SyntaxError("Unsupported Mode/Bit Depth") + + if palette_length: + if palette_length > 1024: + raise SyntaxError("Unsupported Color Palette Length") + + if palette_type != 1: + raise SyntaxError("Unsupported Palette Type") + + offset = offset + palette_length + self.palette = ImagePalette.raw("RGB;L", self.fp.read(palette_length)) + if self.mode == "L": + self.mode = "P" + rawmode = rawmode.replace("L", "P") + + # 16 bit boundaries on stride + stride = ((self.size[0] * depth + 15) // 16) * 2 + + # file type: Type is the version (or flavor) of the bitmap + # file. The following values are typically found in the Type + # field: + # 0000h Old + # 0001h Standard + # 0002h Byte-encoded + # 0003h RGB format + # 0004h TIFF format + # 0005h IFF format + # FFFFh Experimental + + # Old and standard are the same, except for the length tag. + # byte-encoded is run-length-encoded + # RGB looks similar to standard, but RGB byte order + # TIFF and IFF mean that they were converted from T/IFF + # Experimental means that it's something else. 
+ # (https://www.fileformat.info/format/sunraster/egff.htm) + + if file_type in (0, 1, 3, 4, 5): + self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride))] + elif file_type == 2: + self.tile = [("sun_rle", (0, 0) + self.size, offset, rawmode)] + else: + raise SyntaxError("Unsupported Sun Raster file type") + + +# +# registry + + +Image.register_open(SunImageFile.format, SunImageFile, _accept) + +Image.register_extension(SunImageFile.format, ".ras") diff --git a/venv/Lib/site-packages/PIL/TarIO.py b/venv/Lib/site-packages/PIL/TarIO.py new file mode 100644 index 000000000..d108362fc --- /dev/null +++ b/venv/Lib/site-packages/PIL/TarIO.py @@ -0,0 +1,65 @@ +# +# The Python Imaging Library. +# $Id$ +# +# read files from within a tar file +# +# History: +# 95-06-18 fl Created +# 96-05-28 fl Open files in binary mode +# +# Copyright (c) Secret Labs AB 1997. +# Copyright (c) Fredrik Lundh 1995-96. +# +# See the README file for information on usage and redistribution. +# + +import io + +from . import ContainerIO + + +class TarIO(ContainerIO.ContainerIO): + """A file object that provides read access to a given member of a TAR file.""" + + def __init__(self, tarfile, file): + """ + Create file object. + + :param tarfile: Name of TAR file. + :param file: Name of member file. 
+ """ + self.fh = open(tarfile, "rb") + + while True: + + s = self.fh.read(512) + if len(s) != 512: + raise OSError("unexpected end of tar file") + + name = s[:100].decode("utf-8") + i = name.find("\0") + if i == 0: + raise OSError("cannot find subfile") + if i > 0: + name = name[:i] + + size = int(s[124:135], 8) + + if file == name: + break + + self.fh.seek((size + 511) & (~511), io.SEEK_CUR) + + # Open region + super().__init__(self.fh, self.fh.tell(), size) + + # Context manager support + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def close(self): + self.fh.close() diff --git a/venv/Lib/site-packages/PIL/TgaImagePlugin.py b/venv/Lib/site-packages/PIL/TgaImagePlugin.py new file mode 100644 index 000000000..566f0ac18 --- /dev/null +++ b/venv/Lib/site-packages/PIL/TgaImagePlugin.py @@ -0,0 +1,246 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TGA file handling +# +# History: +# 95-09-01 fl created (reads 24-bit files only) +# 97-01-04 fl support more TGA versions, including compressed images +# 98-07-04 fl fixed orientation and alpha layer bugs +# 98-09-11 fl fixed orientation for runlength decoder +# +# Copyright (c) Secret Labs AB 1997-98. +# Copyright (c) Fredrik Lundh 1995-97. +# +# See the README file for information on usage and redistribution. +# + + +import warnings + +from . import Image, ImageFile, ImagePalette +from ._binary import i8, i16le as i16, o8, o16le as o16 + +# +# -------------------------------------------------------------------- +# Read RGA file + + +MODES = { + # map imagetype/depth to rawmode + (1, 8): "P", + (3, 1): "1", + (3, 8): "L", + (3, 16): "LA", + (2, 16): "BGR;5", + (2, 24): "BGR", + (2, 32): "BGRA", +} + + +## +# Image plugin for Targa files. 
+ + +class TgaImageFile(ImageFile.ImageFile): + + format = "TGA" + format_description = "Targa" + + def _open(self): + + # process header + s = self.fp.read(18) + + id_len = i8(s[0]) + + colormaptype = i8(s[1]) + imagetype = i8(s[2]) + + depth = i8(s[16]) + + flags = i8(s[17]) + + self._size = i16(s[12:]), i16(s[14:]) + + # validate header fields + if ( + colormaptype not in (0, 1) + or self.size[0] <= 0 + or self.size[1] <= 0 + or depth not in (1, 8, 16, 24, 32) + ): + raise SyntaxError("not a TGA file") + + # image mode + if imagetype in (3, 11): + self.mode = "L" + if depth == 1: + self.mode = "1" # ??? + elif depth == 16: + self.mode = "LA" + elif imagetype in (1, 9): + self.mode = "P" + elif imagetype in (2, 10): + self.mode = "RGB" + if depth == 32: + self.mode = "RGBA" + else: + raise SyntaxError("unknown TGA mode") + + # orientation + orientation = flags & 0x30 + if orientation == 0x20: + orientation = 1 + elif not orientation: + orientation = -1 + else: + raise SyntaxError("unknown TGA orientation") + + self.info["orientation"] = orientation + + if imagetype & 8: + self.info["compression"] = "tga_rle" + + if id_len: + self.info["id_section"] = self.fp.read(id_len) + + if colormaptype: + # read palette + start, size, mapdepth = i16(s[3:]), i16(s[5:]), i16(s[7:]) + if mapdepth == 16: + self.palette = ImagePalette.raw( + "BGR;16", b"\0" * 2 * start + self.fp.read(2 * size) + ) + elif mapdepth == 24: + self.palette = ImagePalette.raw( + "BGR", b"\0" * 3 * start + self.fp.read(3 * size) + ) + elif mapdepth == 32: + self.palette = ImagePalette.raw( + "BGRA", b"\0" * 4 * start + self.fp.read(4 * size) + ) + + # setup tile descriptor + try: + rawmode = MODES[(imagetype & 7, depth)] + if imagetype & 8: + # compressed + self.tile = [ + ( + "tga_rle", + (0, 0) + self.size, + self.fp.tell(), + (rawmode, orientation, depth), + ) + ] + else: + self.tile = [ + ( + "raw", + (0, 0) + self.size, + self.fp.tell(), + (rawmode, 0, orientation), + ) + ] + except KeyError: + 
pass # cannot decode + + +# +# -------------------------------------------------------------------- +# Write TGA file + + +SAVE = { + "1": ("1", 1, 0, 3), + "L": ("L", 8, 0, 3), + "LA": ("LA", 16, 0, 3), + "P": ("P", 8, 1, 1), + "RGB": ("BGR", 24, 0, 2), + "RGBA": ("BGRA", 32, 0, 2), +} + + +def _save(im, fp, filename): + + try: + rawmode, bits, colormaptype, imagetype = SAVE[im.mode] + except KeyError as e: + raise OSError("cannot write mode %s as TGA" % im.mode) from e + + if "rle" in im.encoderinfo: + rle = im.encoderinfo["rle"] + else: + compression = im.encoderinfo.get("compression", im.info.get("compression")) + rle = compression == "tga_rle" + if rle: + imagetype += 8 + + id_section = im.encoderinfo.get("id_section", im.info.get("id_section", "")) + id_len = len(id_section) + if id_len > 255: + id_len = 255 + id_section = id_section[:255] + warnings.warn("id_section has been trimmed to 255 characters") + + if colormaptype: + colormapfirst, colormaplength, colormapentry = 0, 256, 24 + else: + colormapfirst, colormaplength, colormapentry = 0, 0, 0 + + if im.mode in ("LA", "RGBA"): + flags = 8 + else: + flags = 0 + + orientation = im.encoderinfo.get("orientation", im.info.get("orientation", -1)) + if orientation > 0: + flags = flags | 0x20 + + fp.write( + o8(id_len) + + o8(colormaptype) + + o8(imagetype) + + o16(colormapfirst) + + o16(colormaplength) + + o8(colormapentry) + + o16(0) + + o16(0) + + o16(im.size[0]) + + o16(im.size[1]) + + o8(bits) + + o8(flags) + ) + + if id_section: + fp.write(id_section) + + if colormaptype: + fp.write(im.im.getpalette("RGB", "BGR")) + + if rle: + ImageFile._save( + im, fp, [("tga_rle", (0, 0) + im.size, 0, (rawmode, orientation))] + ) + else: + ImageFile._save( + im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, orientation))] + ) + + # write targa version 2 footer + fp.write(b"\000" * 8 + b"TRUEVISION-XFILE." 
+ b"\000") + + +# +# -------------------------------------------------------------------- +# Registry + + +Image.register_open(TgaImageFile.format, TgaImageFile) +Image.register_save(TgaImageFile.format, _save) + +Image.register_extensions(TgaImageFile.format, [".tga", ".icb", ".vda", ".vst"]) + +Image.register_mime(TgaImageFile.format, "image/x-tga") diff --git a/venv/Lib/site-packages/PIL/TiffImagePlugin.py b/venv/Lib/site-packages/PIL/TiffImagePlugin.py new file mode 100644 index 000000000..73e9a2763 --- /dev/null +++ b/venv/Lib/site-packages/PIL/TiffImagePlugin.py @@ -0,0 +1,1891 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TIFF file handling +# +# TIFF is a flexible, if somewhat aged, image file format originally +# defined by Aldus. Although TIFF supports a wide variety of pixel +# layouts and compression methods, the name doesn't really stand for +# "thousands of incompatible file formats," it just feels that way. +# +# To read TIFF data from a stream, the stream must be seekable. For +# progressive decoding, make sure to use TIFF files where the tag +# directory is placed first in the file. +# +# History: +# 1995-09-01 fl Created +# 1996-05-04 fl Handle JPEGTABLES tag +# 1996-05-18 fl Fixed COLORMAP support +# 1997-01-05 fl Fixed PREDICTOR support +# 1997-08-27 fl Added support for rational tags (from Perry Stoll) +# 1998-01-10 fl Fixed seek/tell (from Jan Blom) +# 1998-07-15 fl Use private names for internal variables +# 1999-06-13 fl Rewritten for PIL 1.0 (1.0) +# 2000-10-11 fl Additional fixes for Python 2.0 (1.1) +# 2001-04-17 fl Fixed rewind support (seek to frame 0) (1.2) +# 2001-05-12 fl Added write support for more tags (from Greg Couch) (1.3) +# 2001-12-18 fl Added workaround for broken Matrox library +# 2002-01-18 fl Don't mess up if photometric tag is missing (D. 
Alan Stewart) +# 2003-05-19 fl Check FILLORDER tag +# 2003-09-26 fl Added RGBa support +# 2004-02-24 fl Added DPI support; fixed rational write support +# 2005-02-07 fl Added workaround for broken Corel Draw 10 files +# 2006-01-09 fl Added support for float/double tags (from Russell Nelson) +# +# Copyright (c) 1997-2006 by Secret Labs AB. All rights reserved. +# Copyright (c) 1995-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# +import io +import itertools +import logging +import os +import struct +import warnings +from collections.abc import MutableMapping +from fractions import Fraction +from numbers import Number, Rational + +from . import Image, ImageFile, ImagePalette, TiffTags +from ._binary import i8, o8 +from .TiffTags import TYPES + +logger = logging.getLogger(__name__) + +# Set these to true to force use of libtiff for reading or writing. +READ_LIBTIFF = False +WRITE_LIBTIFF = False +IFD_LEGACY_API = True + +II = b"II" # little-endian (Intel style) +MM = b"MM" # big-endian (Motorola style) + +# +# -------------------------------------------------------------------- +# Read TIFF files + +# a few tag names, just to make the code below a bit more readable +IMAGEWIDTH = 256 +IMAGELENGTH = 257 +BITSPERSAMPLE = 258 +COMPRESSION = 259 +PHOTOMETRIC_INTERPRETATION = 262 +FILLORDER = 266 +IMAGEDESCRIPTION = 270 +STRIPOFFSETS = 273 +SAMPLESPERPIXEL = 277 +ROWSPERSTRIP = 278 +STRIPBYTECOUNTS = 279 +X_RESOLUTION = 282 +Y_RESOLUTION = 283 +PLANAR_CONFIGURATION = 284 +RESOLUTION_UNIT = 296 +TRANSFERFUNCTION = 301 +SOFTWARE = 305 +DATE_TIME = 306 +ARTIST = 315 +PREDICTOR = 317 +COLORMAP = 320 +TILEOFFSETS = 324 +EXTRASAMPLES = 338 +SAMPLEFORMAT = 339 +JPEGTABLES = 347 +REFERENCEBLACKWHITE = 532 +COPYRIGHT = 33432 +IPTC_NAA_CHUNK = 33723 # newsphoto properties +PHOTOSHOP_CHUNK = 34377 # photoshop properties +ICCPROFILE = 34675 +EXIFIFD = 34665 +XMP = 700 +JPEGQUALITY = 65537 # pseudo-tag by libtiff + +# 
https://github.com/imagej/ImageJA/blob/master/src/main/java/ij/io/TiffDecoder.java +IMAGEJ_META_DATA_BYTE_COUNTS = 50838 +IMAGEJ_META_DATA = 50839 + +COMPRESSION_INFO = { + # Compression => pil compression name + 1: "raw", + 2: "tiff_ccitt", + 3: "group3", + 4: "group4", + 5: "tiff_lzw", + 6: "tiff_jpeg", # obsolete + 7: "jpeg", + 8: "tiff_adobe_deflate", + 32771: "tiff_raw_16", # 16-bit padding + 32773: "packbits", + 32809: "tiff_thunderscan", + 32946: "tiff_deflate", + 34676: "tiff_sgilog", + 34677: "tiff_sgilog24", + 34925: "lzma", + 50000: "zstd", + 50001: "webp", +} + +COMPRESSION_INFO_REV = {v: k for k, v in COMPRESSION_INFO.items()} + +OPEN_INFO = { + # (ByteOrder, PhotoInterpretation, SampleFormat, FillOrder, BitsPerSample, + # ExtraSamples) => mode, rawmode + (II, 0, (1,), 1, (1,), ()): ("1", "1;I"), + (MM, 0, (1,), 1, (1,), ()): ("1", "1;I"), + (II, 0, (1,), 2, (1,), ()): ("1", "1;IR"), + (MM, 0, (1,), 2, (1,), ()): ("1", "1;IR"), + (II, 1, (1,), 1, (1,), ()): ("1", "1"), + (MM, 1, (1,), 1, (1,), ()): ("1", "1"), + (II, 1, (1,), 2, (1,), ()): ("1", "1;R"), + (MM, 1, (1,), 2, (1,), ()): ("1", "1;R"), + (II, 0, (1,), 1, (2,), ()): ("L", "L;2I"), + (MM, 0, (1,), 1, (2,), ()): ("L", "L;2I"), + (II, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), + (MM, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), + (II, 1, (1,), 1, (2,), ()): ("L", "L;2"), + (MM, 1, (1,), 1, (2,), ()): ("L", "L;2"), + (II, 1, (1,), 2, (2,), ()): ("L", "L;2R"), + (MM, 1, (1,), 2, (2,), ()): ("L", "L;2R"), + (II, 0, (1,), 1, (4,), ()): ("L", "L;4I"), + (MM, 0, (1,), 1, (4,), ()): ("L", "L;4I"), + (II, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), + (MM, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), + (II, 1, (1,), 1, (4,), ()): ("L", "L;4"), + (MM, 1, (1,), 1, (4,), ()): ("L", "L;4"), + (II, 1, (1,), 2, (4,), ()): ("L", "L;4R"), + (MM, 1, (1,), 2, (4,), ()): ("L", "L;4R"), + (II, 0, (1,), 1, (8,), ()): ("L", "L;I"), + (MM, 0, (1,), 1, (8,), ()): ("L", "L;I"), + (II, 0, (1,), 2, (8,), ()): ("L", "L;IR"), + (MM, 0, (1,), 2, 
(8,), ()): ("L", "L;IR"), + (II, 1, (1,), 1, (8,), ()): ("L", "L"), + (MM, 1, (1,), 1, (8,), ()): ("L", "L"), + (II, 1, (1,), 2, (8,), ()): ("L", "L;R"), + (MM, 1, (1,), 2, (8,), ()): ("L", "L;R"), + (II, 1, (1,), 1, (12,), ()): ("I;16", "I;12"), + (II, 1, (1,), 1, (16,), ()): ("I;16", "I;16"), + (MM, 1, (1,), 1, (16,), ()): ("I;16B", "I;16B"), + (II, 1, (2,), 1, (16,), ()): ("I", "I;16S"), + (MM, 1, (2,), 1, (16,), ()): ("I", "I;16BS"), + (II, 0, (3,), 1, (32,), ()): ("F", "F;32F"), + (MM, 0, (3,), 1, (32,), ()): ("F", "F;32BF"), + (II, 1, (1,), 1, (32,), ()): ("I", "I;32N"), + (II, 1, (2,), 1, (32,), ()): ("I", "I;32S"), + (MM, 1, (2,), 1, (32,), ()): ("I", "I;32BS"), + (II, 1, (3,), 1, (32,), ()): ("F", "F;32F"), + (MM, 1, (3,), 1, (32,), ()): ("F", "F;32BF"), + (II, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), + (MM, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), + (II, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), + (MM, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), + (II, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), + (MM, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), + (II, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples + (MM, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples + (II, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGBX", "RGBX"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGBX", "RGBX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGBX", "RGBXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGBX", "RGBXX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGBX", "RGBXXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGBX", "RGBXXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", "RGBaXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", 
"RGBaXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), + (MM, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"), + (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"), + (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"), + (II, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 + (MM, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 + (II, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16L"), + (MM, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGBX", "RGBX;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGBX", "RGBX;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16B"), + (II, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16L"), + (MM, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16B"), + (II, 3, (1,), 1, (1,), ()): ("P", "P;1"), + (MM, 3, (1,), 1, (1,), ()): ("P", "P;1"), + (II, 3, (1,), 2, (1,), ()): ("P", "P;1R"), + (MM, 3, (1,), 2, (1,), ()): ("P", "P;1R"), + (II, 3, (1,), 1, (2,), ()): ("P", "P;2"), + (MM, 3, (1,), 1, (2,), ()): ("P", "P;2"), + (II, 3, (1,), 2, (2,), ()): ("P", "P;2R"), + (MM, 3, (1,), 2, (2,), ()): ("P", "P;2R"), + (II, 3, (1,), 1, (4,), ()): ("P", "P;4"), + (MM, 3, (1,), 1, (4,), ()): ("P", "P;4"), + (II, 3, (1,), 2, (4,), ()): ("P", "P;4R"), + (MM, 3, (1,), 2, (4,), ()): ("P", "P;4R"), + (II, 3, (1,), 1, (8,), ()): ("P", "P"), + (MM, 3, (1,), 1, (8,), ()): ("P", "P"), + (II, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), + (MM, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), + (II, 3, (1,), 2, (8,), ()): ("P", "P;R"), + (MM, 3, (1,), 2, (8,), ()): ("P", "P;R"), + (II, 
5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), + (MM, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), + (II, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), + (MM, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), + (II, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), + (MM, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), + (II, 5, (1,), 1, (16, 16, 16, 16), ()): ("CMYK", "CMYK;16L"), + # JPEG compressed images handled by LibTiff and auto-converted to RGBX + # Minimal Baseline TIFF requires YCbCr images to have 3 SamplesPerPixel + (II, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"), + (MM, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"), + (II, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), + (MM, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), +} + +PREFIXES = [ + b"MM\x00\x2A", # Valid TIFF header with big-endian byte order + b"II\x2A\x00", # Valid TIFF header with little-endian byte order + b"MM\x2A\x00", # Invalid TIFF header, assume big-endian + b"II\x00\x2A", # Invalid TIFF header, assume little-endian +] + + +def _accept(prefix): + return prefix[:4] in PREFIXES + + +def _limit_rational(val, max_val): + inv = abs(val) > 1 + n_d = IFDRational(1 / val if inv else val).limit_rational(max_val) + return n_d[::-1] if inv else n_d + + +def _limit_signed_rational(val, max_val, min_val): + frac = Fraction(val) + n_d = frac.numerator, frac.denominator + + if min(n_d) < min_val: + n_d = _limit_rational(val, abs(min_val)) + + if max(n_d) > max_val: + val = Fraction(*n_d) + n_d = _limit_rational(val, max_val) + + return n_d + + +## +# Wrapper for TIFF IFDs. + +_load_dispatch = {} +_write_dispatch = {} + + +class IFDRational(Rational): + """ Implements a rational class where 0/0 is a legal value to match + the in the wild use of exif rationals. + + e.g., DigitalZoomRatio - 0.00/0.00 indicates that no digital zoom was used + """ + + """ If the denominator is 0, store this as a float('nan'), otherwise store + as a fractions.Fraction(). 
Delegate as appropriate + + """ + + __slots__ = ("_numerator", "_denominator", "_val") + + def __init__(self, value, denominator=1): + """ + :param value: either an integer numerator, a + float/rational/other number, or an IFDRational + :param denominator: Optional integer denominator + """ + if isinstance(value, IFDRational): + self._numerator = value.numerator + self._denominator = value.denominator + self._val = value._val + return + + if isinstance(value, Fraction): + self._numerator = value.numerator + self._denominator = value.denominator + else: + self._numerator = value + self._denominator = denominator + + if denominator == 0: + self._val = float("nan") + elif denominator == 1: + self._val = Fraction(value) + else: + self._val = Fraction(value, denominator) + + @property + def numerator(a): + return a._numerator + + @property + def denominator(a): + return a._denominator + + def limit_rational(self, max_denominator): + """ + + :param max_denominator: Integer, the maximum denominator value + :returns: Tuple of (numerator, denominator) + """ + + if self.denominator == 0: + return (self.numerator, self.denominator) + + f = self._val.limit_denominator(max_denominator) + return (f.numerator, f.denominator) + + def __repr__(self): + return str(float(self._val)) + + def __hash__(self): + return self._val.__hash__() + + def __eq__(self, other): + return self._val == other + + def _delegate(op): + def delegate(self, *args): + return getattr(self._val, op)(*args) + + return delegate + + """ a = ['add','radd', 'sub', 'rsub', 'mul', 'rmul', + 'truediv', 'rtruediv', 'floordiv', 'rfloordiv', + 'mod','rmod', 'pow','rpow', 'pos', 'neg', + 'abs', 'trunc', 'lt', 'gt', 'le', 'ge', 'bool', + 'ceil', 'floor', 'round'] + print("\n".join("__%s__ = _delegate('__%s__')" % (s,s) for s in a)) + """ + + __add__ = _delegate("__add__") + __radd__ = _delegate("__radd__") + __sub__ = _delegate("__sub__") + __rsub__ = _delegate("__rsub__") + __mul__ = _delegate("__mul__") + __rmul__ = 
_delegate("__rmul__") + __truediv__ = _delegate("__truediv__") + __rtruediv__ = _delegate("__rtruediv__") + __floordiv__ = _delegate("__floordiv__") + __rfloordiv__ = _delegate("__rfloordiv__") + __mod__ = _delegate("__mod__") + __rmod__ = _delegate("__rmod__") + __pow__ = _delegate("__pow__") + __rpow__ = _delegate("__rpow__") + __pos__ = _delegate("__pos__") + __neg__ = _delegate("__neg__") + __abs__ = _delegate("__abs__") + __trunc__ = _delegate("__trunc__") + __lt__ = _delegate("__lt__") + __gt__ = _delegate("__gt__") + __le__ = _delegate("__le__") + __ge__ = _delegate("__ge__") + __bool__ = _delegate("__bool__") + __ceil__ = _delegate("__ceil__") + __floor__ = _delegate("__floor__") + __round__ = _delegate("__round__") + + +class ImageFileDirectory_v2(MutableMapping): + """This class represents a TIFF tag directory. To speed things up, we + don't decode tags unless they're asked for. + + Exposes a dictionary interface of the tags in the directory:: + + ifd = ImageFileDirectory_v2() + ifd[key] = 'Some Data' + ifd.tagtype[key] = TiffTags.ASCII + print(ifd[key]) + 'Some Data' + + Individual values are returned as the strings or numbers, sequences are + returned as tuples of the values. + + The tiff metadata type of each item is stored in a dictionary of + tag types in + `~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype`. The types + are read from a tiff file, guessed from the type added, or added + manually. + + Data Structures: + + * self.tagtype = {} + + * Key: numerical tiff tag number + * Value: integer corresponding to the data type from + ~PIL.TiffTags.TYPES` + + .. 
versionadded:: 3.0.0 + """ + + """ + Documentation: + + 'internal' data structures: + * self._tags_v2 = {} Key: numerical tiff tag number + Value: decoded data, as tuple for multiple values + * self._tagdata = {} Key: numerical tiff tag number + Value: undecoded byte string from file + * self._tags_v1 = {} Key: numerical tiff tag number + Value: decoded data in the v1 format + + Tags will be found in the private attributes self._tagdata, and in + self._tags_v2 once decoded. + + Self.legacy_api is a value for internal use, and shouldn't be + changed from outside code. In cooperation with the + ImageFileDirectory_v1 class, if legacy_api is true, then decoded + tags will be populated into both _tags_v1 and _tags_v2. _Tags_v2 + will be used if this IFD is used in the TIFF save routine. Tags + should be read from tags_v1 if legacy_api == true. + + """ + + def __init__(self, ifh=b"II\052\0\0\0\0\0", prefix=None): + """Initialize an ImageFileDirectory. + + To construct an ImageFileDirectory from a real file, pass the 8-byte + magic header to the constructor. To only set the endianness, pass it + as the 'prefix' keyword argument. + + :param ifh: One of the accepted magic headers (cf. PREFIXES); also sets + endianness. + :param prefix: Override the endianness of the file. 
+ """ + if ifh[:4] not in PREFIXES: + raise SyntaxError("not a TIFF file (header %r not valid)" % ifh) + self._prefix = prefix if prefix is not None else ifh[:2] + if self._prefix == MM: + self._endian = ">" + elif self._prefix == II: + self._endian = "<" + else: + raise SyntaxError("not a TIFF IFD") + self.reset() + (self.next,) = self._unpack("L", ifh[4:]) + self._legacy_api = False + + prefix = property(lambda self: self._prefix) + offset = property(lambda self: self._offset) + legacy_api = property(lambda self: self._legacy_api) + + @legacy_api.setter + def legacy_api(self, value): + raise Exception("Not allowing setting of legacy api") + + def reset(self): + self._tags_v1 = {} # will remain empty if legacy_api is false + self._tags_v2 = {} # main tag storage + self._tagdata = {} + self.tagtype = {} # added 2008-06-05 by Florian Hoech + self._next = None + self._offset = None + + def __str__(self): + return str(dict(self)) + + def named(self): + """ + :returns: dict of name|key: value + + Returns the complete tag dictionary, with named tags where possible. 
+ """ + return {TiffTags.lookup(code).name: value for code, value in self.items()} + + def __len__(self): + return len(set(self._tagdata) | set(self._tags_v2)) + + def __getitem__(self, tag): + if tag not in self._tags_v2: # unpack on the fly + data = self._tagdata[tag] + typ = self.tagtype[tag] + size, handler = self._load_dispatch[typ] + self[tag] = handler(self, data, self.legacy_api) # check type + val = self._tags_v2[tag] + if self.legacy_api and not isinstance(val, (tuple, bytes)): + val = (val,) + return val + + def __contains__(self, tag): + return tag in self._tags_v2 or tag in self._tagdata + + def __setitem__(self, tag, value): + self._setitem(tag, value, self.legacy_api) + + def _setitem(self, tag, value, legacy_api): + basetypes = (Number, bytes, str) + + info = TiffTags.lookup(tag) + values = [value] if isinstance(value, basetypes) else value + + if tag not in self.tagtype: + if info.type: + self.tagtype[tag] = info.type + else: + self.tagtype[tag] = TiffTags.UNDEFINED + if all(isinstance(v, IFDRational) for v in values): + self.tagtype[tag] = ( + TiffTags.RATIONAL + if all(v >= 0 for v in values) + else TiffTags.SIGNED_RATIONAL + ) + elif all(isinstance(v, int) for v in values): + if all(0 <= v < 2 ** 16 for v in values): + self.tagtype[tag] = TiffTags.SHORT + elif all(-(2 ** 15) < v < 2 ** 15 for v in values): + self.tagtype[tag] = TiffTags.SIGNED_SHORT + else: + self.tagtype[tag] = ( + TiffTags.LONG + if all(v >= 0 for v in values) + else TiffTags.SIGNED_LONG + ) + elif all(isinstance(v, float) for v in values): + self.tagtype[tag] = TiffTags.DOUBLE + elif all(isinstance(v, str) for v in values): + self.tagtype[tag] = TiffTags.ASCII + elif all(isinstance(v, bytes) for v in values): + self.tagtype[tag] = TiffTags.BYTE + + if self.tagtype[tag] == TiffTags.UNDEFINED: + values = [ + value.encode("ascii", "replace") if isinstance(value, str) else value + ] + elif self.tagtype[tag] == TiffTags.RATIONAL: + values = [float(v) if isinstance(v, int) else v 
for v in values] + + values = tuple(info.cvt_enum(value) for value in values) + + dest = self._tags_v1 if legacy_api else self._tags_v2 + + # Three branches: + # Spec'd length == 1, Actual length 1, store as element + # Spec'd length == 1, Actual > 1, Warn and truncate. Formerly barfed. + # No Spec, Actual length 1, Formerly (<4.2) returned a 1 element tuple. + # Don't mess with the legacy api, since it's frozen. + if ( + (info.length == 1) + or self.tagtype[tag] == TiffTags.BYTE + or (info.length is None and len(values) == 1 and not legacy_api) + ): + # Don't mess with the legacy api, since it's frozen. + if legacy_api and self.tagtype[tag] in [ + TiffTags.RATIONAL, + TiffTags.SIGNED_RATIONAL, + ]: # rationals + values = (values,) + try: + (dest[tag],) = values + except ValueError: + # We've got a builtin tag with 1 expected entry + warnings.warn( + "Metadata Warning, tag %s had too many entries: %s, expected 1" + % (tag, len(values)) + ) + dest[tag] = values[0] + + else: + # Spec'd length > 1 or undefined + # Unspec'd, and length > 1 + dest[tag] = values + + def __delitem__(self, tag): + self._tags_v2.pop(tag, None) + self._tags_v1.pop(tag, None) + self._tagdata.pop(tag, None) + + def __iter__(self): + return iter(set(self._tagdata) | set(self._tags_v2)) + + def _unpack(self, fmt, data): + return struct.unpack(self._endian + fmt, data) + + def _pack(self, fmt, *values): + return struct.pack(self._endian + fmt, *values) + + def _register_loader(idx, size): + def decorator(func): + from .TiffTags import TYPES + + if func.__name__.startswith("load_"): + TYPES[idx] = func.__name__[5:].replace("_", " ") + _load_dispatch[idx] = size, func # noqa: F821 + return func + + return decorator + + def _register_writer(idx): + def decorator(func): + _write_dispatch[idx] = func # noqa: F821 + return func + + return decorator + + def _register_basic(idx_fmt_name): + from .TiffTags import TYPES + + idx, fmt, name = idx_fmt_name + TYPES[idx] = name + size = struct.calcsize("=" + 
fmt) + _load_dispatch[idx] = ( # noqa: F821 + size, + lambda self, data, legacy_api=True: ( + self._unpack("{}{}".format(len(data) // size, fmt), data) + ), + ) + _write_dispatch[idx] = lambda self, *values: ( # noqa: F821 + b"".join(self._pack(fmt, value) for value in values) + ) + + list( + map( + _register_basic, + [ + (TiffTags.SHORT, "H", "short"), + (TiffTags.LONG, "L", "long"), + (TiffTags.SIGNED_BYTE, "b", "signed byte"), + (TiffTags.SIGNED_SHORT, "h", "signed short"), + (TiffTags.SIGNED_LONG, "l", "signed long"), + (TiffTags.FLOAT, "f", "float"), + (TiffTags.DOUBLE, "d", "double"), + ], + ) + ) + + @_register_loader(1, 1) # Basic type, except for the legacy API. + def load_byte(self, data, legacy_api=True): + return data + + @_register_writer(1) # Basic type, except for the legacy API. + def write_byte(self, data): + return data + + @_register_loader(2, 1) + def load_string(self, data, legacy_api=True): + if data.endswith(b"\0"): + data = data[:-1] + return data.decode("latin-1", "replace") + + @_register_writer(2) + def write_string(self, value): + # remerge of https://github.com/python-pillow/Pillow/pull/1416 + return b"" + value.encode("ascii", "replace") + b"\0" + + @_register_loader(5, 8) + def load_rational(self, data, legacy_api=True): + vals = self._unpack("{}L".format(len(data) // 4), data) + + def combine(a, b): + return (a, b) if legacy_api else IFDRational(a, b) + + return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) + + @_register_writer(5) + def write_rational(self, *values): + return b"".join( + self._pack("2L", *_limit_rational(frac, 2 ** 32 - 1)) for frac in values + ) + + @_register_loader(7, 1) + def load_undefined(self, data, legacy_api=True): + return data + + @_register_writer(7) + def write_undefined(self, value): + return value + + @_register_loader(10, 8) + def load_signed_rational(self, data, legacy_api=True): + vals = self._unpack("{}l".format(len(data) // 4), data) + + def combine(a, b): + return (a, 
b) if legacy_api else IFDRational(a, b) + + return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) + + @_register_writer(10) + def write_signed_rational(self, *values): + return b"".join( + self._pack("2l", *_limit_signed_rational(frac, 2 ** 31 - 1, -(2 ** 31))) + for frac in values + ) + + def _ensure_read(self, fp, size): + ret = fp.read(size) + if len(ret) != size: + raise OSError( + "Corrupt EXIF data. " + + "Expecting to read %d bytes but only got %d. " % (size, len(ret)) + ) + return ret + + def load(self, fp): + + self.reset() + self._offset = fp.tell() + + try: + for i in range(self._unpack("H", self._ensure_read(fp, 2))[0]): + tag, typ, count, data = self._unpack("HHL4s", self._ensure_read(fp, 12)) + + tagname = TiffTags.lookup(tag).name + typname = TYPES.get(typ, "unknown") + msg = "tag: %s (%d) - type: %s (%d)" % (tagname, tag, typname, typ) + + try: + unit_size, handler = self._load_dispatch[typ] + except KeyError: + logger.debug(msg + " - unsupported type {}".format(typ)) + continue # ignore unsupported type + size = count * unit_size + if size > 4: + here = fp.tell() + (offset,) = self._unpack("L", data) + msg += " Tag Location: {} - Data Location: {}".format(here, offset) + fp.seek(offset) + data = ImageFile._safe_read(fp, size) + fp.seek(here) + else: + data = data[:size] + + if len(data) != size: + warnings.warn( + "Possibly corrupt EXIF data. " + "Expecting to read %d bytes but only got %d." + " Skipping tag %s" % (size, len(data), tag) + ) + logger.debug(msg) + continue + + if not data: + logger.debug(msg) + continue + + self._tagdata[tag] = data + self.tagtype[tag] = typ + + msg += " - value: " + ( + "" % size if size > 32 else str(data) + ) + logger.debug(msg) + + (self.next,) = self._unpack("L", self._ensure_read(fp, 4)) + except OSError as msg: + warnings.warn(str(msg)) + return + + def tobytes(self, offset=0): + # FIXME What about tagdata? 
+ result = self._pack("H", len(self._tags_v2)) + + entries = [] + offset = offset + len(result) + len(self._tags_v2) * 12 + 4 + stripoffsets = None + + # pass 1: convert tags to binary format + # always write tags in ascending order + for tag, value in sorted(self._tags_v2.items()): + if tag == STRIPOFFSETS: + stripoffsets = len(entries) + typ = self.tagtype.get(tag) + logger.debug("Tag {}, Type: {}, Value: {}".format(tag, typ, value)) + values = value if isinstance(value, tuple) else (value,) + data = self._write_dispatch[typ](self, *values) + + tagname = TiffTags.lookup(tag).name + typname = TYPES.get(typ, "unknown") + msg = "save: %s (%d) - type: %s (%d)" % (tagname, tag, typname, typ) + msg += " - value: " + ( + "" % len(data) if len(data) >= 16 else str(values) + ) + logger.debug(msg) + + # count is sum of lengths for string and arbitrary data + if typ in [TiffTags.BYTE, TiffTags.ASCII, TiffTags.UNDEFINED]: + count = len(data) + else: + count = len(values) + # figure out if data fits into the entry + if len(data) <= 4: + entries.append((tag, typ, count, data.ljust(4, b"\0"), b"")) + else: + entries.append((tag, typ, count, self._pack("L", offset), data)) + offset += (len(data) + 1) // 2 * 2 # pad to word + + # update strip offset data to point beyond auxiliary data + if stripoffsets is not None: + tag, typ, count, value, data = entries[stripoffsets] + if data: + raise NotImplementedError("multistrip support not yet implemented") + value = self._pack("L", self._unpack("L", value)[0] + offset) + entries[stripoffsets] = tag, typ, count, value, data + + # pass 2: write entries to file + for tag, typ, count, value, data in entries: + logger.debug( + "{} {} {} {} {}".format(tag, typ, count, repr(value), repr(data)) + ) + result += self._pack("HHL4s", tag, typ, count, value) + + # -- overwrite here for multi-page -- + result += b"\0\0\0\0" # end of entries + + # pass 3: write auxiliary data to file + for tag, typ, count, value, data in entries: + result += data + if 
len(data) & 1: + result += b"\0" + + return result + + def save(self, fp): + + if fp.tell() == 0: # skip TIFF header on subsequent pages + # tiff header -- PIL always starts the first IFD at offset 8 + fp.write(self._prefix + self._pack("HL", 42, 8)) + + offset = fp.tell() + result = self.tobytes(offset) + fp.write(result) + return offset + len(result) + + +ImageFileDirectory_v2._load_dispatch = _load_dispatch +ImageFileDirectory_v2._write_dispatch = _write_dispatch +for idx, name in TYPES.items(): + name = name.replace(" ", "_") + setattr(ImageFileDirectory_v2, "load_" + name, _load_dispatch[idx][1]) + setattr(ImageFileDirectory_v2, "write_" + name, _write_dispatch[idx]) +del _load_dispatch, _write_dispatch, idx, name + + +# Legacy ImageFileDirectory support. +class ImageFileDirectory_v1(ImageFileDirectory_v2): + """This class represents the **legacy** interface to a TIFF tag directory. + + Exposes a dictionary interface of the tags in the directory:: + + ifd = ImageFileDirectory_v1() + ifd[key] = 'Some Data' + ifd.tagtype[key] = TiffTags.ASCII + print(ifd[key]) + ('Some Data',) + + Also contains a dictionary of tag types as read from the tiff image file, + `~PIL.TiffImagePlugin.ImageFileDirectory_v1.tagtype`. + + Values are returned as a tuple. + + .. deprecated:: 3.0.0 + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._legacy_api = True + + tags = property(lambda self: self._tags_v1) + tagdata = property(lambda self: self._tagdata) + + @classmethod + def from_v2(cls, original): + """ Returns an + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + instance with the same data as is contained in the original + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + instance. 
+ + :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + + """ + + ifd = cls(prefix=original.prefix) + ifd._tagdata = original._tagdata + ifd.tagtype = original.tagtype + ifd.next = original.next # an indicator for multipage tiffs + return ifd + + def to_v2(self): + """ Returns an + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + instance with the same data as is contained in the original + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` + instance. + + :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + + """ + + ifd = ImageFileDirectory_v2(prefix=self.prefix) + ifd._tagdata = dict(self._tagdata) + ifd.tagtype = dict(self.tagtype) + ifd._tags_v2 = dict(self._tags_v2) + return ifd + + def __contains__(self, tag): + return tag in self._tags_v1 or tag in self._tagdata + + def __len__(self): + return len(set(self._tagdata) | set(self._tags_v1)) + + def __iter__(self): + return iter(set(self._tagdata) | set(self._tags_v1)) + + def __setitem__(self, tag, value): + for legacy_api in (False, True): + self._setitem(tag, value, legacy_api) + + def __getitem__(self, tag): + if tag not in self._tags_v1: # unpack on the fly + data = self._tagdata[tag] + typ = self.tagtype[tag] + size, handler = self._load_dispatch[typ] + for legacy in (False, True): + self._setitem(tag, handler(self, data, legacy), legacy) + val = self._tags_v1[tag] + if not isinstance(val, (tuple, bytes)): + val = (val,) + return val + + +# undone -- switch this pointer when IFD_LEGACY_API == False +ImageFileDirectory = ImageFileDirectory_v1 + + +## +# Image plugin for TIFF files. 
+ + +class TiffImageFile(ImageFile.ImageFile): + + format = "TIFF" + format_description = "Adobe TIFF" + _close_exclusive_fp_after_loading = False + + def _open(self): + """Open the first image in a TIFF file""" + + # Header + ifh = self.fp.read(8) + + # image file directory (tag dictionary) + self.tag_v2 = ImageFileDirectory_v2(ifh) + + # legacy tag/ifd entries will be filled in later + self.tag = self.ifd = None + + # setup frame pointers + self.__first = self.__next = self.tag_v2.next + self.__frame = -1 + self.__fp = self.fp + self._frame_pos = [] + self._n_frames = None + + logger.debug("*** TiffImageFile._open ***") + logger.debug("- __first: {}".format(self.__first)) + logger.debug("- ifh: {}".format(ifh)) + + # and load the first frame + self._seek(0) + + @property + def n_frames(self): + if self._n_frames is None: + current = self.tell() + self._seek(len(self._frame_pos)) + while self._n_frames is None: + self._seek(self.tell() + 1) + self.seek(current) + return self._n_frames + + def seek(self, frame): + """Select a given frame as current image""" + if not self._seek_check(frame): + return + self._seek(frame) + # Create a new core image object on second and + # subsequent frames in the image. Image may be + # different size/mode. 
+ Image._decompression_bomb_check(self.size) + self.im = Image.core.new(self.mode, self.size) + + def _seek(self, frame): + self.fp = self.__fp + while len(self._frame_pos) <= frame: + if not self.__next: + raise EOFError("no more images in TIFF file") + logger.debug( + "Seeking to frame %s, on frame %s, __next %s, location: %s" + % (frame, self.__frame, self.__next, self.fp.tell()) + ) + # reset buffered io handle in case fp + # was passed to libtiff, invalidating the buffer + self.fp.tell() + self.fp.seek(self.__next) + self._frame_pos.append(self.__next) + logger.debug("Loading tags, location: %s" % self.fp.tell()) + self.tag_v2.load(self.fp) + self.__next = self.tag_v2.next + if self.__next == 0: + self._n_frames = frame + 1 + if len(self._frame_pos) == 1: + self.is_animated = self.__next != 0 + self.__frame += 1 + self.fp.seek(self._frame_pos[frame]) + self.tag_v2.load(self.fp) + # fill the legacy tag/ifd entries + self.tag = self.ifd = ImageFileDirectory_v1.from_v2(self.tag_v2) + self.__frame = frame + self._setup() + + def tell(self): + """Return the current frame number""" + return self.__frame + + def load(self): + if self.tile and self.use_load_libtiff: + return self._load_libtiff() + return super().load() + + def load_end(self): + if self._tile_orientation: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(self._tile_orientation) + if method is not None: + self.im = self.im.transpose(method) + self._size = self.im.size + + # allow closing if we're on the first frame, there's no next + # This is the ImageFile.load path only, libtiff specific below. 
+ if not self.is_animated: + self._close_exclusive_fp_after_loading = True + + def _load_libtiff(self): + """ Overload method triggered when we detect a compressed tiff + Calls out to libtiff """ + + Image.Image.load(self) + + self.load_prepare() + + if not len(self.tile) == 1: + raise OSError("Not exactly one tile") + + # (self._compression, (extents tuple), + # 0, (rawmode, self._compression, fp)) + extents = self.tile[0][1] + args = list(self.tile[0][3]) + + # To be nice on memory footprint, if there's a + # file descriptor, use that instead of reading + # into a string in python. + # libtiff closes the file descriptor, so pass in a dup. + try: + fp = hasattr(self.fp, "fileno") and os.dup(self.fp.fileno()) + # flush the file descriptor, prevents error on pypy 2.4+ + # should also eliminate the need for fp.tell + # in _seek + if hasattr(self.fp, "flush"): + self.fp.flush() + except OSError: + # io.BytesIO have a fileno, but returns an OSError if + # it doesn't use a file descriptor. + fp = False + + if fp: + args[2] = fp + + decoder = Image._getdecoder( + self.mode, "libtiff", tuple(args), self.decoderconfig + ) + try: + decoder.setimage(self.im, extents) + except ValueError as e: + raise OSError("Couldn't set the image") from e + + close_self_fp = self._exclusive_fp and not self.is_animated + if hasattr(self.fp, "getvalue"): + # We've got a stringio like thing passed in. Yay for all in memory. + # The decoder needs the entire file in one shot, so there's not + # a lot we can do here other than give it the entire file. + # unless we could do something like get the address of the + # underlying string for stringio. + # + # Rearranging for supporting byteio items, since they have a fileno + # that returns an OSError if there's no underlying fp. Easier to + # deal with here by reordering. + logger.debug("have getvalue. 
just sending in a string from getvalue") + n, err = decoder.decode(self.fp.getvalue()) + elif fp: + # we've got a actual file on disk, pass in the fp. + logger.debug("have fileno, calling fileno version of the decoder.") + if not close_self_fp: + self.fp.seek(0) + # 4 bytes, otherwise the trace might error out + n, err = decoder.decode(b"fpfp") + else: + # we have something else. + logger.debug("don't have fileno or getvalue. just reading") + self.fp.seek(0) + # UNDONE -- so much for that buffer size thing. + n, err = decoder.decode(self.fp.read()) + + self.tile = [] + self.readonly = 0 + + self.load_end() + + # libtiff closed the fp in a, we need to close self.fp, if possible + if close_self_fp: + self.fp.close() + self.fp = None # might be shared + + if err < 0: + raise OSError(err) + + return Image.Image.load(self) + + def _setup(self): + """Setup this image object based on current tags""" + + if 0xBC01 in self.tag_v2: + raise OSError("Windows Media Photo files not yet supported") + + # extract relevant tags + self._compression = COMPRESSION_INFO[self.tag_v2.get(COMPRESSION, 1)] + self._planar_configuration = self.tag_v2.get(PLANAR_CONFIGURATION, 1) + + # photometric is a required tag, but not everyone is reading + # the specification + photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0) + + # old style jpeg compression images most certainly are YCbCr + if self._compression == "tiff_jpeg": + photo = 6 + + fillorder = self.tag_v2.get(FILLORDER, 1) + + logger.debug("*** Summary ***") + logger.debug("- compression: {}".format(self._compression)) + logger.debug("- photometric_interpretation: {}".format(photo)) + logger.debug("- planar_configuration: {}".format(self._planar_configuration)) + logger.debug("- fill_order: {}".format(fillorder)) + logger.debug("- YCbCr subsampling: {}".format(self.tag.get(530))) + + # size + xsize = int(self.tag_v2.get(IMAGEWIDTH)) + ysize = int(self.tag_v2.get(IMAGELENGTH)) + self._size = xsize, ysize + + logger.debug("- size: 
{}".format(self.size)) + + sampleFormat = self.tag_v2.get(SAMPLEFORMAT, (1,)) + if len(sampleFormat) > 1 and max(sampleFormat) == min(sampleFormat) == 1: + # SAMPLEFORMAT is properly per band, so an RGB image will + # be (1,1,1). But, we don't support per band pixel types, + # and anything more than one band is a uint8. So, just + # take the first element. Revisit this if adding support + # for more exotic images. + sampleFormat = (1,) + + bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,)) + extra_tuple = self.tag_v2.get(EXTRASAMPLES, ()) + if photo in (2, 6, 8): # RGB, YCbCr, LAB + bps_count = 3 + elif photo == 5: # CMYK + bps_count = 4 + else: + bps_count = 1 + bps_count += len(extra_tuple) + # Some files have only one value in bps_tuple, + # while should have more. Fix it + if bps_count > len(bps_tuple) and len(bps_tuple) == 1: + bps_tuple = bps_tuple * bps_count + + # mode: check photometric interpretation and bits per pixel + key = ( + self.tag_v2.prefix, + photo, + sampleFormat, + fillorder, + bps_tuple, + extra_tuple, + ) + logger.debug("format key: {}".format(key)) + try: + self.mode, rawmode = OPEN_INFO[key] + except KeyError as e: + logger.debug("- unsupported format") + raise SyntaxError("unknown pixel mode") from e + + logger.debug("- raw mode: {}".format(rawmode)) + logger.debug("- pil mode: {}".format(self.mode)) + + self.info["compression"] = self._compression + + xres = self.tag_v2.get(X_RESOLUTION, 1) + yres = self.tag_v2.get(Y_RESOLUTION, 1) + + if xres and yres: + resunit = self.tag_v2.get(RESOLUTION_UNIT) + if resunit == 2: # dots per inch + self.info["dpi"] = int(xres + 0.5), int(yres + 0.5) + elif resunit == 3: # dots per centimeter. 
convert to dpi + self.info["dpi"] = int(xres * 2.54 + 0.5), int(yres * 2.54 + 0.5) + elif resunit is None: # used to default to 1, but now 2) + self.info["dpi"] = int(xres + 0.5), int(yres + 0.5) + # For backward compatibility, + # we also preserve the old behavior + self.info["resolution"] = xres, yres + else: # No absolute unit of measurement + self.info["resolution"] = xres, yres + + # build tile descriptors + x = y = layer = 0 + self.tile = [] + self.use_load_libtiff = READ_LIBTIFF or self._compression != "raw" + if self.use_load_libtiff: + # Decoder expects entire file as one tile. + # There's a buffer size limit in load (64k) + # so large g4 images will fail if we use that + # function. + # + # Setup the one tile for the whole image, then + # use the _load_libtiff function. + + # libtiff handles the fillmode for us, so 1;IR should + # actually be 1;I. Including the R double reverses the + # bits, so stripes of the image are reversed. See + # https://github.com/python-pillow/Pillow/issues/279 + if fillorder == 2: + # Replace fillorder with fillorder=1 + key = key[:3] + (1,) + key[4:] + logger.debug("format key: {}".format(key)) + # this should always work, since all the + # fillorder==2 modes have a corresponding + # fillorder=1 mode + self.mode, rawmode = OPEN_INFO[key] + # libtiff always returns the bytes in native order. + # we're expecting image byte order. So, if the rawmode + # contains I;16, we need to convert from native to image + # byte order. 
+ if rawmode == "I;16": + rawmode = "I;16N" + if ";16B" in rawmode: + rawmode = rawmode.replace(";16B", ";16N") + if ";16L" in rawmode: + rawmode = rawmode.replace(";16L", ";16N") + + # Offset in the tile tuple is 0, we go from 0,0 to + # w,h, and we only do this once -- eds + a = (rawmode, self._compression, False, self.tag_v2.offset) + self.tile.append(("libtiff", (0, 0, xsize, ysize), 0, a)) + + elif STRIPOFFSETS in self.tag_v2 or TILEOFFSETS in self.tag_v2: + # striped image + if STRIPOFFSETS in self.tag_v2: + offsets = self.tag_v2[STRIPOFFSETS] + h = self.tag_v2.get(ROWSPERSTRIP, ysize) + w = self.size[0] + else: + # tiled image + offsets = self.tag_v2[TILEOFFSETS] + w = self.tag_v2.get(322) + h = self.tag_v2.get(323) + + for offset in offsets: + if x + w > xsize: + stride = w * sum(bps_tuple) / 8 # bytes per line + else: + stride = 0 + + tile_rawmode = rawmode + if self._planar_configuration == 2: + # each band on it's own layer + tile_rawmode = rawmode[layer] + # adjust stride width accordingly + stride /= bps_count + + a = (tile_rawmode, int(stride), 1) + self.tile.append( + ( + self._compression, + (x, y, min(x + w, xsize), min(y + h, ysize)), + offset, + a, + ) + ) + x = x + w + if x >= self.size[0]: + x, y = 0, y + h + if y >= self.size[1]: + x = y = 0 + layer += 1 + else: + logger.debug("- unsupported data organization") + raise SyntaxError("unknown data organization") + + # Fix up info. 
+ if ICCPROFILE in self.tag_v2: + self.info["icc_profile"] = self.tag_v2[ICCPROFILE] + + # fixup palette descriptor + + if self.mode in ["P", "PA"]: + palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]] + self.palette = ImagePalette.raw("RGB;L", b"".join(palette)) + + self._tile_orientation = self.tag_v2.get(0x0112) + + def _close__fp(self): + try: + if self.__fp != self.fp: + self.__fp.close() + except AttributeError: + pass + finally: + self.__fp = None + + +# +# -------------------------------------------------------------------- +# Write TIFF files + +# little endian is default except for image modes with +# explicit big endian byte-order + +SAVE_INFO = { + # mode => rawmode, byteorder, photometrics, + # sampleformat, bitspersample, extra + "1": ("1", II, 1, 1, (1,), None), + "L": ("L", II, 1, 1, (8,), None), + "LA": ("LA", II, 1, 1, (8, 8), 2), + "P": ("P", II, 3, 1, (8,), None), + "PA": ("PA", II, 3, 1, (8, 8), 2), + "I": ("I;32S", II, 1, 2, (32,), None), + "I;16": ("I;16", II, 1, 1, (16,), None), + "I;16S": ("I;16S", II, 1, 2, (16,), None), + "F": ("F;32F", II, 1, 3, (32,), None), + "RGB": ("RGB", II, 2, 1, (8, 8, 8), None), + "RGBX": ("RGBX", II, 2, 1, (8, 8, 8, 8), 0), + "RGBA": ("RGBA", II, 2, 1, (8, 8, 8, 8), 2), + "CMYK": ("CMYK", II, 5, 1, (8, 8, 8, 8), None), + "YCbCr": ("YCbCr", II, 6, 1, (8, 8, 8), None), + "LAB": ("LAB", II, 8, 1, (8, 8, 8), None), + "I;32BS": ("I;32BS", MM, 1, 2, (32,), None), + "I;16B": ("I;16B", MM, 1, 1, (16,), None), + "I;16BS": ("I;16BS", MM, 1, 2, (16,), None), + "F;32BF": ("F;32BF", MM, 1, 3, (32,), None), +} + + +def _save(im, fp, filename): + + try: + rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode] + except KeyError as e: + raise OSError("cannot write mode %s as TIFF" % im.mode) from e + + ifd = ImageFileDirectory_v2(prefix=prefix) + + compression = im.encoderinfo.get("compression", im.info.get("compression")) + if compression is None: + compression = "raw" + elif compression == "tiff_jpeg": + # OJPEG 
is obsolete, so use new-style JPEG compression instead + compression = "jpeg" + + libtiff = WRITE_LIBTIFF or compression != "raw" + + # required for color libtiff images + ifd[PLANAR_CONFIGURATION] = getattr(im, "_planar_configuration", 1) + + ifd[IMAGEWIDTH] = im.size[0] + ifd[IMAGELENGTH] = im.size[1] + + # write any arbitrary tags passed in as an ImageFileDirectory + info = im.encoderinfo.get("tiffinfo", {}) + logger.debug("Tiffinfo Keys: %s" % list(info)) + if isinstance(info, ImageFileDirectory_v1): + info = info.to_v2() + for key in info: + ifd[key] = info.get(key) + try: + ifd.tagtype[key] = info.tagtype[key] + except Exception: + pass # might not be an IFD. Might not have populated type + + # additions written by Greg Couch, gregc@cgl.ucsf.edu + # inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com + if hasattr(im, "tag_v2"): + # preserve tags from original TIFF image file + for key in ( + RESOLUTION_UNIT, + X_RESOLUTION, + Y_RESOLUTION, + IPTC_NAA_CHUNK, + PHOTOSHOP_CHUNK, + XMP, + ): + if key in im.tag_v2: + ifd[key] = im.tag_v2[key] + ifd.tagtype[key] = im.tag_v2.tagtype[key] + + # preserve ICC profile (should also work when saving other formats + # which support profiles as TIFF) -- 2008-06-06 Florian Hoech + if "icc_profile" in im.info: + ifd[ICCPROFILE] = im.info["icc_profile"] + + for key, name in [ + (IMAGEDESCRIPTION, "description"), + (X_RESOLUTION, "resolution"), + (Y_RESOLUTION, "resolution"), + (X_RESOLUTION, "x_resolution"), + (Y_RESOLUTION, "y_resolution"), + (RESOLUTION_UNIT, "resolution_unit"), + (SOFTWARE, "software"), + (DATE_TIME, "date_time"), + (ARTIST, "artist"), + (COPYRIGHT, "copyright"), + ]: + if name in im.encoderinfo: + ifd[key] = im.encoderinfo[name] + + dpi = im.encoderinfo.get("dpi") + if dpi: + ifd[RESOLUTION_UNIT] = 2 + ifd[X_RESOLUTION] = int(dpi[0] + 0.5) + ifd[Y_RESOLUTION] = int(dpi[1] + 0.5) + + if bits != (1,): + ifd[BITSPERSAMPLE] = bits + if len(bits) != 1: + ifd[SAMPLESPERPIXEL] = len(bits) + if 
extra is not None: + ifd[EXTRASAMPLES] = extra + if format != 1: + ifd[SAMPLEFORMAT] = format + + ifd[PHOTOMETRIC_INTERPRETATION] = photo + + if im.mode in ["P", "PA"]: + lut = im.im.getpalette("RGB", "RGB;L") + ifd[COLORMAP] = tuple(i8(v) * 256 for v in lut) + # data orientation + stride = len(bits) * ((im.size[0] * bits[0] + 7) // 8) + ifd[ROWSPERSTRIP] = im.size[1] + strip_byte_counts = stride * im.size[1] + if strip_byte_counts >= 2 ** 16: + ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG + ifd[STRIPBYTECOUNTS] = strip_byte_counts + ifd[STRIPOFFSETS] = 0 # this is adjusted by IFD writer + # no compression by default: + ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1) + + if libtiff: + if "quality" in im.encoderinfo: + quality = im.encoderinfo["quality"] + if not isinstance(quality, int) or quality < 0 or quality > 100: + raise ValueError("Invalid quality setting") + if compression != "jpeg": + raise ValueError( + "quality setting only supported for 'jpeg' compression" + ) + ifd[JPEGQUALITY] = quality + + logger.debug("Saving using libtiff encoder") + logger.debug("Items: %s" % sorted(ifd.items())) + _fp = 0 + if hasattr(fp, "fileno"): + try: + fp.seek(0) + _fp = os.dup(fp.fileno()) + except io.UnsupportedOperation: + pass + + # optional types for non core tags + types = {} + # SAMPLEFORMAT is determined by the image format and should not be copied + # from legacy_ifd. + # STRIPOFFSETS and STRIPBYTECOUNTS are added by the library + # based on the data in the strip. + # The other tags expect arrays with a certain length (fixed or depending on + # BITSPERSAMPLE, etc), passing arrays with a different length will result in + # segfaults. Block these tags until we add extra validation. + blocklist = [ + REFERENCEBLACKWHITE, + SAMPLEFORMAT, + STRIPBYTECOUNTS, + STRIPOFFSETS, + TRANSFERFUNCTION, + ] + + atts = {} + # bits per sample is a single short in the tiff directory, not a list. 
+ atts[BITSPERSAMPLE] = bits[0] + # Merge the ones that we have with (optional) more bits from + # the original file, e.g x,y resolution so that we can + # save(load('')) == original file. + legacy_ifd = {} + if hasattr(im, "tag"): + legacy_ifd = im.tag.to_v2() + for tag, value in itertools.chain( + ifd.items(), getattr(im, "tag_v2", {}).items(), legacy_ifd.items() + ): + # Libtiff can only process certain core items without adding + # them to the custom dictionary. + # Custom items are supported for int, float, unicode, string and byte + # values. Other types and tuples require a tagtype. + if tag not in TiffTags.LIBTIFF_CORE: + if not Image.core.libtiff_support_custom_tags: + continue + + if tag in ifd.tagtype: + types[tag] = ifd.tagtype[tag] + elif not (isinstance(value, (int, float, str, bytes))): + continue + else: + type = TiffTags.lookup(tag).type + if type: + types[tag] = type + if tag not in atts and tag not in blocklist: + if isinstance(value, str): + atts[tag] = value.encode("ascii", "replace") + b"\0" + elif isinstance(value, IFDRational): + atts[tag] = float(value) + else: + atts[tag] = value + + logger.debug("Converted items: %s" % sorted(atts.items())) + + # libtiff always expects the bytes in native order. + # we're storing image byte order. So, if the rawmode + # contains I;16, we need to convert from native to image + # byte order. + if im.mode in ("I;16B", "I;16"): + rawmode = "I;16N" + + # Pass tags as sorted list so that the tags are set in a fixed order. + # This is required by libtiff for some tags. For example, the JPEGQUALITY + # pseudo tag requires that the COMPRESS tag was already set. 
+ tags = list(atts.items()) + tags.sort() + a = (rawmode, compression, _fp, filename, tags, types) + e = Image._getencoder(im.mode, "libtiff", a, im.encoderconfig) + e.setimage(im.im, (0, 0) + im.size) + while True: + # undone, change to self.decodermaxblock: + l, s, d = e.encode(16 * 1024) + if not _fp: + fp.write(d) + if s: + break + if s < 0: + raise OSError("encoder error %d when writing image file" % s) + + else: + offset = ifd.save(fp) + + ImageFile._save( + im, fp, [("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))] + ) + + # -- helper for multi-page save -- + if "_debug_multipage" in im.encoderinfo: + # just to access o32 and o16 (using correct byte order) + im._debug_multipage = ifd + + +class AppendingTiffWriter: + fieldSizes = [ + 0, # None + 1, # byte + 1, # ascii + 2, # short + 4, # long + 8, # rational + 1, # sbyte + 1, # undefined + 2, # sshort + 4, # slong + 8, # srational + 4, # float + 8, # double + ] + + # StripOffsets = 273 + # FreeOffsets = 288 + # TileOffsets = 324 + # JPEGQTables = 519 + # JPEGDCTables = 520 + # JPEGACTables = 521 + Tags = {273, 288, 324, 519, 520, 521} + + def __init__(self, fn, new=False): + if hasattr(fn, "read"): + self.f = fn + self.close_fp = False + else: + self.name = fn + self.close_fp = True + try: + self.f = open(fn, "w+b" if new else "r+b") + except OSError: + self.f = open(fn, "w+b") + self.beginning = self.f.tell() + self.setup() + + def setup(self): + # Reset everything. 
+ self.f.seek(self.beginning, os.SEEK_SET) + + self.whereToWriteNewIFDOffset = None + self.offsetOfNewPage = 0 + + self.IIMM = IIMM = self.f.read(4) + if not IIMM: + # empty file - first page + self.isFirst = True + return + + self.isFirst = False + if IIMM == b"II\x2a\x00": + self.setEndian("<") + elif IIMM == b"MM\x00\x2a": + self.setEndian(">") + else: + raise RuntimeError("Invalid TIFF file header") + + self.skipIFDs() + self.goToEnd() + + def finalize(self): + if self.isFirst: + return + + # fix offsets + self.f.seek(self.offsetOfNewPage) + + IIMM = self.f.read(4) + if not IIMM: + # raise RuntimeError("nothing written into new page") + # Make it easy to finish a frame without committing to a new one. + return + + if IIMM != self.IIMM: + raise RuntimeError("IIMM of new page doesn't match IIMM of first page") + + IFDoffset = self.readLong() + IFDoffset += self.offsetOfNewPage + self.f.seek(self.whereToWriteNewIFDOffset) + self.writeLong(IFDoffset) + self.f.seek(IFDoffset) + self.fixIFD() + + def newFrame(self): + # Call this to finish a frame. 
+ self.finalize() + self.setup() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if self.close_fp: + self.close() + return False + + def tell(self): + return self.f.tell() - self.offsetOfNewPage + + def seek(self, offset, whence=io.SEEK_SET): + if whence == os.SEEK_SET: + offset += self.offsetOfNewPage + + self.f.seek(offset, whence) + return self.tell() + + def goToEnd(self): + self.f.seek(0, os.SEEK_END) + pos = self.f.tell() + + # pad to 16 byte boundary + padBytes = 16 - pos % 16 + if 0 < padBytes < 16: + self.f.write(bytes(padBytes)) + self.offsetOfNewPage = self.f.tell() + + def setEndian(self, endian): + self.endian = endian + self.longFmt = self.endian + "L" + self.shortFmt = self.endian + "H" + self.tagFormat = self.endian + "HHL" + + def skipIFDs(self): + while True: + IFDoffset = self.readLong() + if IFDoffset == 0: + self.whereToWriteNewIFDOffset = self.f.tell() - 4 + break + + self.f.seek(IFDoffset) + numTags = self.readShort() + self.f.seek(numTags * 12, os.SEEK_CUR) + + def write(self, data): + return self.f.write(data) + + def readShort(self): + (value,) = struct.unpack(self.shortFmt, self.f.read(2)) + return value + + def readLong(self): + (value,) = struct.unpack(self.longFmt, self.f.read(4)) + return value + + def rewriteLastShortToLong(self, value): + self.f.seek(-2, os.SEEK_CUR) + bytesWritten = self.f.write(struct.pack(self.longFmt, value)) + if bytesWritten is not None and bytesWritten != 4: + raise RuntimeError("wrote only %u bytes but wanted 4" % bytesWritten) + + def rewriteLastShort(self, value): + self.f.seek(-2, os.SEEK_CUR) + bytesWritten = self.f.write(struct.pack(self.shortFmt, value)) + if bytesWritten is not None and bytesWritten != 2: + raise RuntimeError("wrote only %u bytes but wanted 2" % bytesWritten) + + def rewriteLastLong(self, value): + self.f.seek(-4, os.SEEK_CUR) + bytesWritten = self.f.write(struct.pack(self.longFmt, value)) + if bytesWritten is not None and bytesWritten 
!= 4: + raise RuntimeError("wrote only %u bytes but wanted 4" % bytesWritten) + + def writeShort(self, value): + bytesWritten = self.f.write(struct.pack(self.shortFmt, value)) + if bytesWritten is not None and bytesWritten != 2: + raise RuntimeError("wrote only %u bytes but wanted 2" % bytesWritten) + + def writeLong(self, value): + bytesWritten = self.f.write(struct.pack(self.longFmt, value)) + if bytesWritten is not None and bytesWritten != 4: + raise RuntimeError("wrote only %u bytes but wanted 4" % bytesWritten) + + def close(self): + self.finalize() + self.f.close() + + def fixIFD(self): + numTags = self.readShort() + + for i in range(numTags): + tag, fieldType, count = struct.unpack(self.tagFormat, self.f.read(8)) + + fieldSize = self.fieldSizes[fieldType] + totalSize = fieldSize * count + isLocal = totalSize <= 4 + if not isLocal: + offset = self.readLong() + offset += self.offsetOfNewPage + self.rewriteLastLong(offset) + + if tag in self.Tags: + curPos = self.f.tell() + + if isLocal: + self.fixOffsets( + count, isShort=(fieldSize == 2), isLong=(fieldSize == 4) + ) + self.f.seek(curPos + 4) + else: + self.f.seek(offset) + self.fixOffsets( + count, isShort=(fieldSize == 2), isLong=(fieldSize == 4) + ) + self.f.seek(curPos) + + offset = curPos = None + + elif isLocal: + # skip the locally stored value that is not an offset + self.f.seek(4, os.SEEK_CUR) + + def fixOffsets(self, count, isShort=False, isLong=False): + if not isShort and not isLong: + raise RuntimeError("offset is neither short nor long") + + for i in range(count): + offset = self.readShort() if isShort else self.readLong() + offset += self.offsetOfNewPage + if isShort and offset >= 65536: + # offset is now too large - we must convert shorts to longs + if count != 1: + raise RuntimeError("not implemented") # XXX TODO + + # simple case - the offset is just one and therefore it is + # local (not referenced with another offset) + self.rewriteLastShortToLong(offset) + self.f.seek(-10, os.SEEK_CUR) + 
self.writeShort(TiffTags.LONG) # rewrite the type to LONG + self.f.seek(8, os.SEEK_CUR) + elif isShort: + self.rewriteLastShort(offset) + else: + self.rewriteLastLong(offset) + + +def _save_all(im, fp, filename): + encoderinfo = im.encoderinfo.copy() + encoderconfig = im.encoderconfig + append_images = list(encoderinfo.get("append_images", [])) + if not hasattr(im, "n_frames") and not append_images: + return _save(im, fp, filename) + + cur_idx = im.tell() + try: + with AppendingTiffWriter(fp) as tf: + for ims in [im] + append_images: + ims.encoderinfo = encoderinfo + ims.encoderconfig = encoderconfig + if not hasattr(ims, "n_frames"): + nfr = 1 + else: + nfr = ims.n_frames + + for idx in range(nfr): + ims.seek(idx) + ims.load() + _save(ims, tf, filename) + tf.newFrame() + finally: + im.seek(cur_idx) + + +# +# -------------------------------------------------------------------- +# Register + +Image.register_open(TiffImageFile.format, TiffImageFile, _accept) +Image.register_save(TiffImageFile.format, _save) +Image.register_save_all(TiffImageFile.format, _save_all) + +Image.register_extensions(TiffImageFile.format, [".tif", ".tiff"]) + +Image.register_mime(TiffImageFile.format, "image/tiff") diff --git a/venv/Lib/site-packages/PIL/TiffTags.py b/venv/Lib/site-packages/PIL/TiffTags.py new file mode 100644 index 000000000..e1c1b701b --- /dev/null +++ b/venv/Lib/site-packages/PIL/TiffTags.py @@ -0,0 +1,498 @@ +# +# The Python Imaging Library. +# $Id$ +# +# TIFF tags +# +# This module provides clear-text names for various well-known +# TIFF tags. the TIFF codec works just fine without it. +# +# Copyright (c) Secret Labs AB 1999. +# +# See the README file for information on usage and redistribution. +# + +## +# This module provides constants and clear-text names for various +# well-known TIFF tags. 
+## + +from collections import namedtuple + + +class TagInfo(namedtuple("_TagInfo", "value name type length enum")): + __slots__ = [] + + def __new__(cls, value=None, name="unknown", type=None, length=None, enum=None): + return super().__new__(cls, value, name, type, length, enum or {}) + + def cvt_enum(self, value): + # Using get will call hash(value), which can be expensive + # for some types (e.g. Fraction). Since self.enum is rarely + # used, it's usually better to test it first. + return self.enum.get(value, value) if self.enum else value + + +def lookup(tag): + """ + :param tag: Integer tag number + :returns: Taginfo namedtuple, From the TAGS_V2 info if possible, + otherwise just populating the value and name from TAGS. + If the tag is not recognized, "unknown" is returned for the name + + """ + + return TAGS_V2.get(tag, TagInfo(tag, TAGS.get(tag, "unknown"))) + + +## +# Map tag numbers to tag info. +# +# id: (Name, Type, Length, enum_values) +# +# The length here differs from the length in the tiff spec. For +# numbers, the tiff spec is for the number of fields returned. We +# agree here. For string-like types, the tiff spec uses the length of +# field in bytes. In Pillow, we are using the number of expected +# fields, in general 1 for string-like types. 
+ + +BYTE = 1 +ASCII = 2 +SHORT = 3 +LONG = 4 +RATIONAL = 5 +SIGNED_BYTE = 6 +UNDEFINED = 7 +SIGNED_SHORT = 8 +SIGNED_LONG = 9 +SIGNED_RATIONAL = 10 +FLOAT = 11 +DOUBLE = 12 + +TAGS_V2 = { + 254: ("NewSubfileType", LONG, 1), + 255: ("SubfileType", SHORT, 1), + 256: ("ImageWidth", LONG, 1), + 257: ("ImageLength", LONG, 1), + 258: ("BitsPerSample", SHORT, 0), + 259: ( + "Compression", + SHORT, + 1, + { + "Uncompressed": 1, + "CCITT 1d": 2, + "Group 3 Fax": 3, + "Group 4 Fax": 4, + "LZW": 5, + "JPEG": 6, + "PackBits": 32773, + }, + ), + 262: ( + "PhotometricInterpretation", + SHORT, + 1, + { + "WhiteIsZero": 0, + "BlackIsZero": 1, + "RGB": 2, + "RGB Palette": 3, + "Transparency Mask": 4, + "CMYK": 5, + "YCbCr": 6, + "CieLAB": 8, + "CFA": 32803, # TIFF/EP, Adobe DNG + "LinearRaw": 32892, # Adobe DNG + }, + ), + 263: ("Threshholding", SHORT, 1), + 264: ("CellWidth", SHORT, 1), + 265: ("CellLength", SHORT, 1), + 266: ("FillOrder", SHORT, 1), + 269: ("DocumentName", ASCII, 1), + 270: ("ImageDescription", ASCII, 1), + 271: ("Make", ASCII, 1), + 272: ("Model", ASCII, 1), + 273: ("StripOffsets", LONG, 0), + 274: ("Orientation", SHORT, 1), + 277: ("SamplesPerPixel", SHORT, 1), + 278: ("RowsPerStrip", LONG, 1), + 279: ("StripByteCounts", LONG, 0), + 280: ("MinSampleValue", SHORT, 0), + 281: ("MaxSampleValue", SHORT, 0), + 282: ("XResolution", RATIONAL, 1), + 283: ("YResolution", RATIONAL, 1), + 284: ("PlanarConfiguration", SHORT, 1, {"Contiguous": 1, "Separate": 2}), + 285: ("PageName", ASCII, 1), + 286: ("XPosition", RATIONAL, 1), + 287: ("YPosition", RATIONAL, 1), + 288: ("FreeOffsets", LONG, 1), + 289: ("FreeByteCounts", LONG, 1), + 290: ("GrayResponseUnit", SHORT, 1), + 291: ("GrayResponseCurve", SHORT, 0), + 292: ("T4Options", LONG, 1), + 293: ("T6Options", LONG, 1), + 296: ("ResolutionUnit", SHORT, 1, {"none": 1, "inch": 2, "cm": 3}), + 297: ("PageNumber", SHORT, 2), + 301: ("TransferFunction", SHORT, 0), + 305: ("Software", ASCII, 1), + 306: ("DateTime", ASCII, 1), + 
315: ("Artist", ASCII, 1), + 316: ("HostComputer", ASCII, 1), + 317: ("Predictor", SHORT, 1, {"none": 1, "Horizontal Differencing": 2}), + 318: ("WhitePoint", RATIONAL, 2), + 319: ("PrimaryChromaticities", RATIONAL, 6), + 320: ("ColorMap", SHORT, 0), + 321: ("HalftoneHints", SHORT, 2), + 322: ("TileWidth", LONG, 1), + 323: ("TileLength", LONG, 1), + 324: ("TileOffsets", LONG, 0), + 325: ("TileByteCounts", LONG, 0), + 332: ("InkSet", SHORT, 1), + 333: ("InkNames", ASCII, 1), + 334: ("NumberOfInks", SHORT, 1), + 336: ("DotRange", SHORT, 0), + 337: ("TargetPrinter", ASCII, 1), + 338: ("ExtraSamples", SHORT, 0), + 339: ("SampleFormat", SHORT, 0), + 340: ("SMinSampleValue", DOUBLE, 0), + 341: ("SMaxSampleValue", DOUBLE, 0), + 342: ("TransferRange", SHORT, 6), + 347: ("JPEGTables", UNDEFINED, 1), + # obsolete JPEG tags + 512: ("JPEGProc", SHORT, 1), + 513: ("JPEGInterchangeFormat", LONG, 1), + 514: ("JPEGInterchangeFormatLength", LONG, 1), + 515: ("JPEGRestartInterval", SHORT, 1), + 517: ("JPEGLosslessPredictors", SHORT, 0), + 518: ("JPEGPointTransforms", SHORT, 0), + 519: ("JPEGQTables", LONG, 0), + 520: ("JPEGDCTables", LONG, 0), + 521: ("JPEGACTables", LONG, 0), + 529: ("YCbCrCoefficients", RATIONAL, 3), + 530: ("YCbCrSubSampling", SHORT, 2), + 531: ("YCbCrPositioning", SHORT, 1), + 532: ("ReferenceBlackWhite", RATIONAL, 6), + 700: ("XMP", BYTE, 0), + 33432: ("Copyright", ASCII, 1), + 33723: ("IptcNaaInfo", UNDEFINED, 0), + 34377: ("PhotoshopInfo", BYTE, 0), + # FIXME add more tags here + 34665: ("ExifIFD", LONG, 1), + 34675: ("ICCProfile", UNDEFINED, 1), + 34853: ("GPSInfoIFD", LONG, 1), + # MPInfo + 45056: ("MPFVersion", UNDEFINED, 1), + 45057: ("NumberOfImages", LONG, 1), + 45058: ("MPEntry", UNDEFINED, 1), + 45059: ("ImageUIDList", UNDEFINED, 0), # UNDONE, check + 45060: ("TotalFrames", LONG, 1), + 45313: ("MPIndividualNum", LONG, 1), + 45569: ("PanOrientation", LONG, 1), + 45570: ("PanOverlap_H", RATIONAL, 1), + 45571: ("PanOverlap_V", RATIONAL, 1), + 45572: 
("BaseViewpointNum", LONG, 1), + 45573: ("ConvergenceAngle", SIGNED_RATIONAL, 1), + 45574: ("BaselineLength", RATIONAL, 1), + 45575: ("VerticalDivergence", SIGNED_RATIONAL, 1), + 45576: ("AxisDistance_X", SIGNED_RATIONAL, 1), + 45577: ("AxisDistance_Y", SIGNED_RATIONAL, 1), + 45578: ("AxisDistance_Z", SIGNED_RATIONAL, 1), + 45579: ("YawAngle", SIGNED_RATIONAL, 1), + 45580: ("PitchAngle", SIGNED_RATIONAL, 1), + 45581: ("RollAngle", SIGNED_RATIONAL, 1), + 50741: ("MakerNoteSafety", SHORT, 1, {"Unsafe": 0, "Safe": 1}), + 50780: ("BestQualityScale", RATIONAL, 1), + 50838: ("ImageJMetaDataByteCounts", LONG, 0), # Can be more than one + 50839: ("ImageJMetaData", UNDEFINED, 1), # see Issue #2006 +} + +# Legacy Tags structure +# these tags aren't included above, but were in the previous versions +TAGS = { + 347: "JPEGTables", + 700: "XMP", + # Additional Exif Info + 32932: "Wang Annotation", + 33434: "ExposureTime", + 33437: "FNumber", + 33445: "MD FileTag", + 33446: "MD ScalePixel", + 33447: "MD ColorTable", + 33448: "MD LabName", + 33449: "MD SampleInfo", + 33450: "MD PrepDate", + 33451: "MD PrepTime", + 33452: "MD FileUnits", + 33550: "ModelPixelScaleTag", + 33723: "IptcNaaInfo", + 33918: "INGR Packet Data Tag", + 33919: "INGR Flag Registers", + 33920: "IrasB Transformation Matrix", + 33922: "ModelTiepointTag", + 34264: "ModelTransformationTag", + 34377: "PhotoshopInfo", + 34735: "GeoKeyDirectoryTag", + 34736: "GeoDoubleParamsTag", + 34737: "GeoAsciiParamsTag", + 34850: "ExposureProgram", + 34852: "SpectralSensitivity", + 34855: "ISOSpeedRatings", + 34856: "OECF", + 34864: "SensitivityType", + 34865: "StandardOutputSensitivity", + 34866: "RecommendedExposureIndex", + 34867: "ISOSpeed", + 34868: "ISOSpeedLatitudeyyy", + 34869: "ISOSpeedLatitudezzz", + 34908: "HylaFAX FaxRecvParams", + 34909: "HylaFAX FaxSubAddress", + 34910: "HylaFAX FaxRecvTime", + 36864: "ExifVersion", + 36867: "DateTimeOriginal", + 36868: "DateTImeDigitized", + 37121: "ComponentsConfiguration", + 
37122: "CompressedBitsPerPixel", + 37724: "ImageSourceData", + 37377: "ShutterSpeedValue", + 37378: "ApertureValue", + 37379: "BrightnessValue", + 37380: "ExposureBiasValue", + 37381: "MaxApertureValue", + 37382: "SubjectDistance", + 37383: "MeteringMode", + 37384: "LightSource", + 37385: "Flash", + 37386: "FocalLength", + 37396: "SubjectArea", + 37500: "MakerNote", + 37510: "UserComment", + 37520: "SubSec", + 37521: "SubSecTimeOriginal", + 37522: "SubsecTimeDigitized", + 40960: "FlashPixVersion", + 40961: "ColorSpace", + 40962: "PixelXDimension", + 40963: "PixelYDimension", + 40964: "RelatedSoundFile", + 40965: "InteroperabilityIFD", + 41483: "FlashEnergy", + 41484: "SpatialFrequencyResponse", + 41486: "FocalPlaneXResolution", + 41487: "FocalPlaneYResolution", + 41488: "FocalPlaneResolutionUnit", + 41492: "SubjectLocation", + 41493: "ExposureIndex", + 41495: "SensingMethod", + 41728: "FileSource", + 41729: "SceneType", + 41730: "CFAPattern", + 41985: "CustomRendered", + 41986: "ExposureMode", + 41987: "WhiteBalance", + 41988: "DigitalZoomRatio", + 41989: "FocalLengthIn35mmFilm", + 41990: "SceneCaptureType", + 41991: "GainControl", + 41992: "Contrast", + 41993: "Saturation", + 41994: "Sharpness", + 41995: "DeviceSettingDescription", + 41996: "SubjectDistanceRange", + 42016: "ImageUniqueID", + 42032: "CameraOwnerName", + 42033: "BodySerialNumber", + 42034: "LensSpecification", + 42035: "LensMake", + 42036: "LensModel", + 42037: "LensSerialNumber", + 42112: "GDAL_METADATA", + 42113: "GDAL_NODATA", + 42240: "Gamma", + 50215: "Oce Scanjob Description", + 50216: "Oce Application Selector", + 50217: "Oce Identification Number", + 50218: "Oce ImageLogic Characteristics", + # Adobe DNG + 50706: "DNGVersion", + 50707: "DNGBackwardVersion", + 50708: "UniqueCameraModel", + 50709: "LocalizedCameraModel", + 50710: "CFAPlaneColor", + 50711: "CFALayout", + 50712: "LinearizationTable", + 50713: "BlackLevelRepeatDim", + 50714: "BlackLevel", + 50715: "BlackLevelDeltaH", + 50716: 
"BlackLevelDeltaV", + 50717: "WhiteLevel", + 50718: "DefaultScale", + 50719: "DefaultCropOrigin", + 50720: "DefaultCropSize", + 50721: "ColorMatrix1", + 50722: "ColorMatrix2", + 50723: "CameraCalibration1", + 50724: "CameraCalibration2", + 50725: "ReductionMatrix1", + 50726: "ReductionMatrix2", + 50727: "AnalogBalance", + 50728: "AsShotNeutral", + 50729: "AsShotWhiteXY", + 50730: "BaselineExposure", + 50731: "BaselineNoise", + 50732: "BaselineSharpness", + 50733: "BayerGreenSplit", + 50734: "LinearResponseLimit", + 50735: "CameraSerialNumber", + 50736: "LensInfo", + 50737: "ChromaBlurRadius", + 50738: "AntiAliasStrength", + 50740: "DNGPrivateData", + 50778: "CalibrationIlluminant1", + 50779: "CalibrationIlluminant2", + 50784: "Alias Layer Metadata", +} + + +def _populate(): + for k, v in TAGS_V2.items(): + # Populate legacy structure. + TAGS[k] = v[0] + if len(v) == 4: + for sk, sv in v[3].items(): + TAGS[(k, sv)] = sk + + TAGS_V2[k] = TagInfo(k, *v) + + +_populate() +## +# Map type numbers to type names -- defined in ImageFileDirectory. + +TYPES = {} + +# was: +# TYPES = { +# 1: "byte", +# 2: "ascii", +# 3: "short", +# 4: "long", +# 5: "rational", +# 6: "signed byte", +# 7: "undefined", +# 8: "signed short", +# 9: "signed long", +# 10: "signed rational", +# 11: "float", +# 12: "double", +# } + +# +# These tags are handled by default in libtiff, without +# adding to the custom dictionary. From tif_dir.c, searching for +# case TIFFTAG in the _TIFFVSetField function: +# Line: item. 
+# 148: case TIFFTAG_SUBFILETYPE: +# 151: case TIFFTAG_IMAGEWIDTH: +# 154: case TIFFTAG_IMAGELENGTH: +# 157: case TIFFTAG_BITSPERSAMPLE: +# 181: case TIFFTAG_COMPRESSION: +# 202: case TIFFTAG_PHOTOMETRIC: +# 205: case TIFFTAG_THRESHHOLDING: +# 208: case TIFFTAG_FILLORDER: +# 214: case TIFFTAG_ORIENTATION: +# 221: case TIFFTAG_SAMPLESPERPIXEL: +# 228: case TIFFTAG_ROWSPERSTRIP: +# 238: case TIFFTAG_MINSAMPLEVALUE: +# 241: case TIFFTAG_MAXSAMPLEVALUE: +# 244: case TIFFTAG_SMINSAMPLEVALUE: +# 247: case TIFFTAG_SMAXSAMPLEVALUE: +# 250: case TIFFTAG_XRESOLUTION: +# 256: case TIFFTAG_YRESOLUTION: +# 262: case TIFFTAG_PLANARCONFIG: +# 268: case TIFFTAG_XPOSITION: +# 271: case TIFFTAG_YPOSITION: +# 274: case TIFFTAG_RESOLUTIONUNIT: +# 280: case TIFFTAG_PAGENUMBER: +# 284: case TIFFTAG_HALFTONEHINTS: +# 288: case TIFFTAG_COLORMAP: +# 294: case TIFFTAG_EXTRASAMPLES: +# 298: case TIFFTAG_MATTEING: +# 305: case TIFFTAG_TILEWIDTH: +# 316: case TIFFTAG_TILELENGTH: +# 327: case TIFFTAG_TILEDEPTH: +# 333: case TIFFTAG_DATATYPE: +# 344: case TIFFTAG_SAMPLEFORMAT: +# 361: case TIFFTAG_IMAGEDEPTH: +# 364: case TIFFTAG_SUBIFD: +# 376: case TIFFTAG_YCBCRPOSITIONING: +# 379: case TIFFTAG_YCBCRSUBSAMPLING: +# 383: case TIFFTAG_TRANSFERFUNCTION: +# 389: case TIFFTAG_REFERENCEBLACKWHITE: +# 393: case TIFFTAG_INKNAMES: + +# Following pseudo-tags are also handled by default in libtiff: +# TIFFTAG_JPEGQUALITY 65537 + +# some of these are not in our TAGS_V2 dict and were included from tiff.h + +# This list also exists in encode.c +LIBTIFF_CORE = { + 255, + 256, + 257, + 258, + 259, + 262, + 263, + 266, + 274, + 277, + 278, + 280, + 281, + 340, + 341, + 282, + 283, + 284, + 286, + 287, + 296, + 297, + 321, + 320, + 338, + 32995, + 322, + 323, + 32998, + 32996, + 339, + 32997, + 330, + 531, + 530, + 301, + 532, + 333, + # as above + 269, # this has been in our tests forever, and works + 65537, +} + +LIBTIFF_CORE.remove(301) # Array of short, crashes +LIBTIFF_CORE.remove(532) # Array of long, 
crashes + +LIBTIFF_CORE.remove(255) # We don't have support for subfiletypes +LIBTIFF_CORE.remove(322) # We don't have support for writing tiled images with libtiff +LIBTIFF_CORE.remove(323) # Tiled images +LIBTIFF_CORE.remove(333) # Ink Names either + +# Note to advanced users: There may be combinations of these +# parameters and values that when added properly, will work and +# produce valid tiff images that may work in your application. +# It is safe to add and remove tags from this set from Pillow's point +# of view so long as you test against libtiff. diff --git a/venv/Lib/site-packages/PIL/WalImageFile.py b/venv/Lib/site-packages/PIL/WalImageFile.py new file mode 100644 index 000000000..b578d6981 --- /dev/null +++ b/venv/Lib/site-packages/PIL/WalImageFile.py @@ -0,0 +1,126 @@ +# +# The Python Imaging Library. +# $Id$ +# +# WAL file handling +# +# History: +# 2003-04-23 fl created +# +# Copyright (c) 2003 by Fredrik Lundh. +# +# See the README file for information on usage and redistribution. +# + +""" +This reader is based on the specification available from: +https://www.flipcode.com/archives/Quake_2_BSP_File_Format.shtml +and has been tested with a few sample files found using google. + +.. note:: + This format cannot be automatically recognized, so the reader + is not registered for use with :py:func:`PIL.Image.open()`. + To open a WAL file, use the :py:func:`PIL.WalImageFile.open()` function instead. +""" + +import builtins + +from . import Image +from ._binary import i32le as i32 + + +def open(filename): + """ + Load texture from a Quake2 WAL texture file. + + By default, a Quake2 standard palette is attached to the texture. + To override the palette, use the :py:func:`PIL.Image.Image.putpalette()` method. + + :param filename: WAL file name, or an opened file handle. + :returns: An image instance. + """ + # FIXME: modify to return a WalImageFile instance instead of + # plain Image object ? 
+ + def imopen(fp): + # read header fields + header = fp.read(32 + 24 + 32 + 12) + size = i32(header, 32), i32(header, 36) + offset = i32(header, 40) + + # load pixel data + fp.seek(offset) + + Image._decompression_bomb_check(size) + im = Image.frombytes("P", size, fp.read(size[0] * size[1])) + im.putpalette(quake2palette) + + im.format = "WAL" + im.format_description = "Quake2 Texture" + + # strings are null-terminated + im.info["name"] = header[:32].split(b"\0", 1)[0] + next_name = header[56 : 56 + 32].split(b"\0", 1)[0] + if next_name: + im.info["next_name"] = next_name + + return im + + if hasattr(filename, "read"): + return imopen(filename) + else: + with builtins.open(filename, "rb") as fp: + return imopen(fp) + + +quake2palette = ( + # default palette taken from piffo 0.93 by Hans Häggström + b"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e" + b"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f" + b"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c" + b"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b" + b"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10" + b"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07" + b"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f" + b"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16" + b"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d" + b"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31" + b"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28" + b"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07" + b"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27" + b"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b" + b"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01" + b"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21" + b"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14" 
+ b"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07" + b"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14" + b"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f" + b"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34" + b"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d" + b"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14" + b"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01" + b"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24" + b"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10" + b"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01" + b"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27" + b"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c" + b"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a" + b"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26" + b"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d" + b"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01" + b"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20" + b"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17" + b"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07" + b"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25" + b"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c" + b"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01" + b"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23" + b"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f" + b"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b" + b"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37" + b"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b" + b"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01" + 
b"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10" + b"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b" + b"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20" +) diff --git a/venv/Lib/site-packages/PIL/WebPImagePlugin.py b/venv/Lib/site-packages/PIL/WebPImagePlugin.py new file mode 100644 index 000000000..2e9746fa3 --- /dev/null +++ b/venv/Lib/site-packages/PIL/WebPImagePlugin.py @@ -0,0 +1,351 @@ +from io import BytesIO + +from . import Image, ImageFile + +try: + from . import _webp + + SUPPORTED = True +except ImportError: + SUPPORTED = False + + +_VALID_WEBP_MODES = {"RGBX": True, "RGBA": True, "RGB": True} + +_VALID_WEBP_LEGACY_MODES = {"RGB": True, "RGBA": True} + +_VP8_MODES_BY_IDENTIFIER = { + b"VP8 ": "RGB", + b"VP8X": "RGBA", + b"VP8L": "RGBA", # lossless +} + + +def _accept(prefix): + is_riff_file_format = prefix[:4] == b"RIFF" + is_webp_file = prefix[8:12] == b"WEBP" + is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER + + if is_riff_file_format and is_webp_file and is_valid_vp8_mode: + if not SUPPORTED: + return ( + "image file could not be identified because WEBP support not installed" + ) + return True + + +class WebPImageFile(ImageFile.ImageFile): + + format = "WEBP" + format_description = "WebP image" + __loaded = 0 + __logical_frame = 0 + + def _open(self): + if not _webp.HAVE_WEBPANIM: + # Legacy mode + data, width, height, self.mode, icc_profile, exif = _webp.WebPDecode( + self.fp.read() + ) + if icc_profile: + self.info["icc_profile"] = icc_profile + if exif: + self.info["exif"] = exif + self._size = width, height + self.fp = BytesIO(data) + self.tile = [("raw", (0, 0) + self.size, 0, self.mode)] + self.n_frames = 1 + self.is_animated = False + return + + # Use the newer AnimDecoder API to parse the (possibly) animated file, + # and access muxed chunks like ICC/EXIF/XMP. 
+ self._decoder = _webp.WebPAnimDecoder(self.fp.read()) + + # Get info from decoder + width, height, loop_count, bgcolor, frame_count, mode = self._decoder.get_info() + self._size = width, height + self.info["loop"] = loop_count + bg_a, bg_r, bg_g, bg_b = ( + (bgcolor >> 24) & 0xFF, + (bgcolor >> 16) & 0xFF, + (bgcolor >> 8) & 0xFF, + bgcolor & 0xFF, + ) + self.info["background"] = (bg_r, bg_g, bg_b, bg_a) + self.n_frames = frame_count + self.is_animated = self.n_frames > 1 + self.mode = "RGB" if mode == "RGBX" else mode + self.rawmode = mode + self.tile = [] + + # Attempt to read ICC / EXIF / XMP chunks from file + icc_profile = self._decoder.get_chunk("ICCP") + exif = self._decoder.get_chunk("EXIF") + xmp = self._decoder.get_chunk("XMP ") + if icc_profile: + self.info["icc_profile"] = icc_profile + if exif: + self.info["exif"] = exif + if xmp: + self.info["xmp"] = xmp + + # Initialize seek state + self._reset(reset=False) + + def _getexif(self): + if "exif" not in self.info: + return None + return dict(self.getexif()) + + def seek(self, frame): + if not self._seek_check(frame): + return + + # Set logical frame to requested position + self.__logical_frame = frame + + def _reset(self, reset=True): + if reset: + self._decoder.reset() + self.__physical_frame = 0 + self.__loaded = -1 + self.__timestamp = 0 + + def _get_next(self): + # Get next frame + ret = self._decoder.get_next() + self.__physical_frame += 1 + + # Check if an error occurred + if ret is None: + self._reset() # Reset just to be safe + self.seek(0) + raise EOFError("failed to decode next frame in WebP file") + + # Compute duration + data, timestamp = ret + duration = timestamp - self.__timestamp + self.__timestamp = timestamp + + # libwebp gives frame end, adjust to start of frame + timestamp -= duration + return data, timestamp, duration + + def _seek(self, frame): + if self.__physical_frame == frame: + return # Nothing to do + if frame < self.__physical_frame: + self._reset() # Rewind to beginning + 
while self.__physical_frame < frame: + self._get_next() # Advance to the requested frame + + def load(self): + if _webp.HAVE_WEBPANIM: + if self.__loaded != self.__logical_frame: + self._seek(self.__logical_frame) + + # We need to load the image data for this frame + data, timestamp, duration = self._get_next() + self.info["timestamp"] = timestamp + self.info["duration"] = duration + self.__loaded = self.__logical_frame + + # Set tile + if self.fp and self._exclusive_fp: + self.fp.close() + self.fp = BytesIO(data) + self.tile = [("raw", (0, 0) + self.size, 0, self.rawmode)] + + return super().load() + + def tell(self): + if not _webp.HAVE_WEBPANIM: + return super().tell() + + return self.__logical_frame + + +def _save_all(im, fp, filename): + encoderinfo = im.encoderinfo.copy() + append_images = list(encoderinfo.get("append_images", [])) + + # If total frame count is 1, then save using the legacy API, which + # will preserve non-alpha modes + total = 0 + for ims in [im] + append_images: + total += getattr(ims, "n_frames", 1) + if total == 1: + _save(im, fp, filename) + return + + background = (0, 0, 0, 0) + if "background" in encoderinfo: + background = encoderinfo["background"] + elif "background" in im.info: + background = im.info["background"] + if isinstance(background, int): + # GifImagePlugin stores a global color table index in + # info["background"]. 
So it must be converted to an RGBA value + palette = im.getpalette() + if palette: + r, g, b = palette[background * 3 : (background + 1) * 3] + background = (r, g, b, 0) + + duration = im.encoderinfo.get("duration", 0) + loop = im.encoderinfo.get("loop", 0) + minimize_size = im.encoderinfo.get("minimize_size", False) + kmin = im.encoderinfo.get("kmin", None) + kmax = im.encoderinfo.get("kmax", None) + allow_mixed = im.encoderinfo.get("allow_mixed", False) + verbose = False + lossless = im.encoderinfo.get("lossless", False) + quality = im.encoderinfo.get("quality", 80) + method = im.encoderinfo.get("method", 0) + icc_profile = im.encoderinfo.get("icc_profile", "") + exif = im.encoderinfo.get("exif", "") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + xmp = im.encoderinfo.get("xmp", "") + if allow_mixed: + lossless = False + + # Sensible keyframe defaults are from gif2webp.c script + if kmin is None: + kmin = 9 if lossless else 3 + if kmax is None: + kmax = 17 if lossless else 5 + + # Validate background color + if ( + not isinstance(background, (list, tuple)) + or len(background) != 4 + or not all(v >= 0 and v < 256 for v in background) + ): + raise OSError( + "Background color is not an RGBA tuple clamped to (0-255): %s" + % str(background) + ) + + # Convert to packed uint + bg_r, bg_g, bg_b, bg_a = background + background = (bg_a << 24) | (bg_r << 16) | (bg_g << 8) | (bg_b << 0) + + # Setup the WebP animation encoder + enc = _webp.WebPAnimEncoder( + im.size[0], + im.size[1], + background, + loop, + minimize_size, + kmin, + kmax, + allow_mixed, + verbose, + ) + + # Add each frame + frame_idx = 0 + timestamp = 0 + cur_idx = im.tell() + try: + for ims in [im] + append_images: + # Get # of frames in this image + nfr = getattr(ims, "n_frames", 1) + + for idx in range(nfr): + ims.seek(idx) + ims.load() + + # Make sure image mode is supported + frame = ims + rawmode = ims.mode + if ims.mode not in _VALID_WEBP_MODES: + alpha = ( + "A" in ims.mode + or "a" in 
ims.mode + or (ims.mode == "P" and "A" in ims.im.getpalettemode()) + ) + rawmode = "RGBA" if alpha else "RGB" + frame = ims.convert(rawmode) + + if rawmode == "RGB": + # For faster conversion, use RGBX + rawmode = "RGBX" + + # Append the frame to the animation encoder + enc.add( + frame.tobytes("raw", rawmode), + timestamp, + frame.size[0], + frame.size[1], + rawmode, + lossless, + quality, + method, + ) + + # Update timestamp and frame index + if isinstance(duration, (list, tuple)): + timestamp += duration[frame_idx] + else: + timestamp += duration + frame_idx += 1 + + finally: + im.seek(cur_idx) + + # Force encoder to flush frames + enc.add(None, timestamp, 0, 0, "", lossless, quality, 0) + + # Get the final output from the encoder + data = enc.assemble(icc_profile, exif, xmp) + if data is None: + raise OSError("cannot write file as WebP (encoder returned None)") + + fp.write(data) + + +def _save(im, fp, filename): + lossless = im.encoderinfo.get("lossless", False) + quality = im.encoderinfo.get("quality", 80) + icc_profile = im.encoderinfo.get("icc_profile", "") + exif = im.encoderinfo.get("exif", "") + if isinstance(exif, Image.Exif): + exif = exif.tobytes() + xmp = im.encoderinfo.get("xmp", "") + method = im.encoderinfo.get("method", 0) + + if im.mode not in _VALID_WEBP_LEGACY_MODES: + alpha = ( + "A" in im.mode + or "a" in im.mode + or (im.mode == "P" and "A" in im.im.getpalettemode()) + ) + im = im.convert("RGBA" if alpha else "RGB") + + data = _webp.WebPEncode( + im.tobytes(), + im.size[0], + im.size[1], + lossless, + float(quality), + im.mode, + icc_profile, + method, + exif, + xmp, + ) + if data is None: + raise OSError("cannot write file as WebP (encoder returned None)") + + fp.write(data) + + +Image.register_open(WebPImageFile.format, WebPImageFile, _accept) +if SUPPORTED: + Image.register_save(WebPImageFile.format, _save) + if _webp.HAVE_WEBPANIM: + Image.register_save_all(WebPImageFile.format, _save_all) + 
Image.register_extension(WebPImageFile.format, ".webp") + Image.register_mime(WebPImageFile.format, "image/webp") diff --git a/venv/Lib/site-packages/PIL/WmfImagePlugin.py b/venv/Lib/site-packages/PIL/WmfImagePlugin.py new file mode 100644 index 000000000..024222c9b --- /dev/null +++ b/venv/Lib/site-packages/PIL/WmfImagePlugin.py @@ -0,0 +1,175 @@ +# +# The Python Imaging Library +# $Id$ +# +# WMF stub codec +# +# history: +# 1996-12-14 fl Created +# 2004-02-22 fl Turned into a stub driver +# 2004-02-23 fl Added EMF support +# +# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. +# Copyright (c) Fredrik Lundh 1996. +# +# See the README file for information on usage and redistribution. +# +# WMF/EMF reference documentation: +# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf +# http://wvware.sourceforge.net/caolan/index.html +# http://wvware.sourceforge.net/caolan/ora-wmf.html + +from . import Image, ImageFile +from ._binary import i16le as word, i32le as dword, si16le as short, si32le as _long + +_handler = None + + +def register_handler(handler): + """ + Install application-specific WMF image handler. + + :param handler: Handler object. + """ + global _handler + _handler = handler + + +if hasattr(Image.core, "drawwmf"): + # install default handler (windows only) + + class WmfHandler: + def open(self, im): + im.mode = "RGB" + self.bbox = im.info["wmf_bbox"] + + def load(self, im): + im.fp.seek(0) # rewind + return Image.frombytes( + "RGB", + im.size, + Image.core.drawwmf(im.fp.read(), im.size, self.bbox), + "raw", + "BGR", + (im.size[0] * 3 + 3) & -4, + -1, + ) + + register_handler(WmfHandler()) + +# +# -------------------------------------------------------------------- +# Read WMF file + + +def _accept(prefix): + return ( + prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or prefix[:4] == b"\x01\x00\x00\x00" + ) + + +## +# Image plugin for Windows metafiles. 
+ + +class WmfStubImageFile(ImageFile.StubImageFile): + + format = "WMF" + format_description = "Windows Metafile" + + def _open(self): + self._inch = None + + # check placable header + s = self.fp.read(80) + + if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00": + + # placeable windows metafile + + # get units per inch + self._inch = word(s, 14) + + # get bounding box + x0 = short(s, 6) + y0 = short(s, 8) + x1 = short(s, 10) + y1 = short(s, 12) + + # normalize size to 72 dots per inch + self.info["dpi"] = 72 + size = ( + (x1 - x0) * self.info["dpi"] // self._inch, + (y1 - y0) * self.info["dpi"] // self._inch, + ) + + self.info["wmf_bbox"] = x0, y0, x1, y1 + + # sanity check (standard metafile header) + if s[22:26] != b"\x01\x00\t\x00": + raise SyntaxError("Unsupported WMF file format") + + elif dword(s) == 1 and s[40:44] == b" EMF": + # enhanced metafile + + # get bounding box + x0 = _long(s, 8) + y0 = _long(s, 12) + x1 = _long(s, 16) + y1 = _long(s, 20) + + # get frame (in 0.01 millimeter units) + frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36) + + size = x1 - x0, y1 - y0 + + # calculate dots per inch from bbox and frame + xdpi = int(2540.0 * (x1 - y0) / (frame[2] - frame[0]) + 0.5) + ydpi = int(2540.0 * (y1 - y0) / (frame[3] - frame[1]) + 0.5) + + self.info["wmf_bbox"] = x0, y0, x1, y1 + + if xdpi == ydpi: + self.info["dpi"] = xdpi + else: + self.info["dpi"] = xdpi, ydpi + + else: + raise SyntaxError("Unsupported file format") + + self.mode = "RGB" + self._size = size + + loader = self._load() + if loader: + loader.open(self) + + def _load(self): + return _handler + + def load(self, dpi=None): + if dpi is not None and self._inch is not None: + self.info["dpi"] = int(dpi + 0.5) + x0, y0, x1, y1 = self.info["wmf_bbox"] + self._size = ( + (x1 - x0) * self.info["dpi"] // self._inch, + (y1 - y0) * self.info["dpi"] // self._inch, + ) + super().load() + + +def _save(im, fp, filename): + if _handler is None or not hasattr(_handler, "save"): + raise OSError("WMF save 
handler not installed") + _handler.save(im, fp, filename) + + +# +# -------------------------------------------------------------------- +# Registry stuff + + +Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept) +Image.register_save(WmfStubImageFile.format, _save) + +Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"]) diff --git a/venv/Lib/site-packages/PIL/XVThumbImagePlugin.py b/venv/Lib/site-packages/PIL/XVThumbImagePlugin.py new file mode 100644 index 000000000..c0d8db09a --- /dev/null +++ b/venv/Lib/site-packages/PIL/XVThumbImagePlugin.py @@ -0,0 +1,78 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XV Thumbnail file handler by Charles E. "Gene" Cash +# (gcash@magicnet.net) +# +# see xvcolor.c and xvbrowse.c in the sources to John Bradley's XV, +# available from ftp://ftp.cis.upenn.edu/pub/xv/ +# +# history: +# 98-08-15 cec created (b/w only) +# 98-12-09 cec added color palette +# 98-12-28 fl added to PIL (with only a few very minor modifications) +# +# To do: +# FIXME: make save work (this requires quantization support) +# + +from . import Image, ImageFile, ImagePalette +from ._binary import i8, o8 + +_MAGIC = b"P7 332" + +# standard color palette for thumbnails (RGB332) +PALETTE = b"" +for r in range(8): + for g in range(8): + for b in range(4): + PALETTE = PALETTE + ( + o8((r * 255) // 7) + o8((g * 255) // 7) + o8((b * 255) // 3) + ) + + +def _accept(prefix): + return prefix[:6] == _MAGIC + + +## +# Image plugin for XV thumbnail images. + + +class XVThumbImageFile(ImageFile.ImageFile): + + format = "XVThumb" + format_description = "XV thumbnail image" + + def _open(self): + + # check magic + if not _accept(self.fp.read(6)): + raise SyntaxError("not an XV thumbnail file") + + # Skip to beginning of next line + self.fp.readline() + + # skip info comments + while True: + s = self.fp.readline() + if not s: + raise SyntaxError("Unexpected EOF reading XV thumbnail file") + if i8(s[0]) != 35: # ie. 
when not a comment: '#' + break + + # parse header line (already read) + s = s.strip().split() + + self.mode = "P" + self._size = int(s[0]), int(s[1]) + + self.palette = ImagePalette.raw("RGB", PALETTE) + + self.tile = [("raw", (0, 0) + self.size, self.fp.tell(), (self.mode, 0, 1))] + + +# -------------------------------------------------------------------- + +Image.register_open(XVThumbImageFile.format, XVThumbImageFile, _accept) diff --git a/venv/Lib/site-packages/PIL/XbmImagePlugin.py b/venv/Lib/site-packages/PIL/XbmImagePlugin.py new file mode 100644 index 000000000..ead9722c8 --- /dev/null +++ b/venv/Lib/site-packages/PIL/XbmImagePlugin.py @@ -0,0 +1,94 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XBM File handling +# +# History: +# 1995-09-08 fl Created +# 1996-11-01 fl Added save support +# 1997-07-07 fl Made header parser more tolerant +# 1997-07-22 fl Fixed yet another parser bug +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4) +# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog) +# 2004-02-24 fl Allow some whitespace before first #define +# +# Copyright (c) 1997-2004 by Secret Labs AB +# Copyright (c) 1996-1997 by Fredrik Lundh +# +# See the README file for information on usage and redistribution. +# + +import re + +from . import Image, ImageFile + +# XBM header +xbm_head = re.compile( + br"\s*#define[ \t]+.*_width[ \t]+(?P[0-9]+)[\r\n]+" + b"#define[ \t]+.*_height[ \t]+(?P[0-9]+)[\r\n]+" + b"(?P" + b"#define[ \t]+[^_]*_x_hot[ \t]+(?P[0-9]+)[\r\n]+" + b"#define[ \t]+[^_]*_y_hot[ \t]+(?P[0-9]+)[\r\n]+" + b")?" + b"[\\000-\\377]*_bits\\[\\]" +) + + +def _accept(prefix): + return prefix.lstrip()[:7] == b"#define" + + +## +# Image plugin for X11 bitmaps. 
+ + +class XbmImageFile(ImageFile.ImageFile): + + format = "XBM" + format_description = "X11 Bitmap" + + def _open(self): + + m = xbm_head.match(self.fp.read(512)) + + if m: + + xsize = int(m.group("width")) + ysize = int(m.group("height")) + + if m.group("hotspot"): + self.info["hotspot"] = (int(m.group("xhot")), int(m.group("yhot"))) + + self.mode = "1" + self._size = xsize, ysize + + self.tile = [("xbm", (0, 0) + self.size, m.end(), None)] + + +def _save(im, fp, filename): + + if im.mode != "1": + raise OSError("cannot write mode %s as XBM" % im.mode) + + fp.write(("#define im_width %d\n" % im.size[0]).encode("ascii")) + fp.write(("#define im_height %d\n" % im.size[1]).encode("ascii")) + + hotspot = im.encoderinfo.get("hotspot") + if hotspot: + fp.write(("#define im_x_hot %d\n" % hotspot[0]).encode("ascii")) + fp.write(("#define im_y_hot %d\n" % hotspot[1]).encode("ascii")) + + fp.write(b"static char im_bits[] = {\n") + + ImageFile._save(im, fp, [("xbm", (0, 0) + im.size, 0, None)]) + + fp.write(b"};\n") + + +Image.register_open(XbmImageFile.format, XbmImageFile, _accept) +Image.register_save(XbmImageFile.format, _save) + +Image.register_extension(XbmImageFile.format, ".xbm") + +Image.register_mime(XbmImageFile.format, "image/xbm") diff --git a/venv/Lib/site-packages/PIL/XpmImagePlugin.py b/venv/Lib/site-packages/PIL/XpmImagePlugin.py new file mode 100644 index 000000000..d8bd00a1b --- /dev/null +++ b/venv/Lib/site-packages/PIL/XpmImagePlugin.py @@ -0,0 +1,130 @@ +# +# The Python Imaging Library. +# $Id$ +# +# XPM File handling +# +# History: +# 1996-12-29 fl Created +# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7) +# +# Copyright (c) Secret Labs AB 1997-2001. +# Copyright (c) Fredrik Lundh 1996-2001. +# +# See the README file for information on usage and redistribution. +# + + +import re + +from . 
import Image, ImageFile, ImagePalette +from ._binary import i8, o8 + +# XPM header +xpm_head = re.compile(b'"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)') + + +def _accept(prefix): + return prefix[:9] == b"/* XPM */" + + +## +# Image plugin for X11 pixel maps. + + +class XpmImageFile(ImageFile.ImageFile): + + format = "XPM" + format_description = "X11 Pixel Map" + + def _open(self): + + if not _accept(self.fp.read(9)): + raise SyntaxError("not an XPM file") + + # skip forward to next string + while True: + s = self.fp.readline() + if not s: + raise SyntaxError("broken XPM file") + m = xpm_head.match(s) + if m: + break + + self._size = int(m.group(1)), int(m.group(2)) + + pal = int(m.group(3)) + bpp = int(m.group(4)) + + if pal > 256 or bpp != 1: + raise ValueError("cannot read this XPM file") + + # + # load palette description + + palette = [b"\0\0\0"] * 256 + + for i in range(pal): + + s = self.fp.readline() + if s[-2:] == b"\r\n": + s = s[:-2] + elif s[-1:] in b"\r\n": + s = s[:-1] + + c = i8(s[1]) + s = s[2:-2].split() + + for i in range(0, len(s), 2): + + if s[i] == b"c": + + # process colour key + rgb = s[i + 1] + if rgb == b"None": + self.info["transparency"] = c + elif rgb[0:1] == b"#": + # FIXME: handle colour names (see ImagePalette.py) + rgb = int(rgb[1:], 16) + palette[c] = ( + o8((rgb >> 16) & 255) + o8((rgb >> 8) & 255) + o8(rgb & 255) + ) + else: + # unknown colour + raise ValueError("cannot read this XPM file") + break + + else: + + # missing colour key + raise ValueError("cannot read this XPM file") + + self.mode = "P" + self.palette = ImagePalette.raw("RGB", b"".join(palette)) + + self.tile = [("raw", (0, 0) + self.size, self.fp.tell(), ("P", 0, 1))] + + def load_read(self, bytes): + + # + # load all image data in one chunk + + xsize, ysize = self.size + + s = [None] * ysize + + for i in range(ysize): + s[i] = self.fp.readline()[1 : xsize + 1].ljust(xsize) + + return b"".join(s) + + +# +# Registry + + +Image.register_open(XpmImageFile.format, XpmImageFile, 
_accept) + +Image.register_extension(XpmImageFile.format, ".xpm") + +Image.register_mime(XpmImageFile.format, "image/xpm") diff --git a/venv/Lib/site-packages/PIL/__init__.py b/venv/Lib/site-packages/PIL/__init__.py new file mode 100644 index 000000000..d225ed134 --- /dev/null +++ b/venv/Lib/site-packages/PIL/__init__.py @@ -0,0 +1,139 @@ +"""Pillow (Fork of the Python Imaging Library) + +Pillow is the friendly PIL fork by Alex Clark and Contributors. + https://github.com/python-pillow/Pillow/ + +Pillow is forked from PIL 1.1.7. + +PIL is the Python Imaging Library by Fredrik Lundh and Contributors. +Copyright (c) 1999 by Secret Labs AB. + +Use PIL.__version__ for this Pillow version. + +;-) +""" + +import sys +import warnings + +from . import _version + +# VERSION was removed in Pillow 6.0.0. +__version__ = _version.__version__ + + +# PILLOW_VERSION is deprecated and will be removed in a future release. +# Use __version__ instead. +def _raise_version_warning(): + warnings.warn( + "PILLOW_VERSION is deprecated and will be removed in a future release. 
" + "Use __version__ instead.", + DeprecationWarning, + stacklevel=3, + ) + + +if sys.version_info >= (3, 7): + + def __getattr__(name): + if name == "PILLOW_VERSION": + _raise_version_warning() + return __version__ + raise AttributeError("module '{}' has no attribute '{}'".format(__name__, name)) + + +else: + + class _Deprecated_Version(str): + def __str__(self): + _raise_version_warning() + return super().__str__() + + def __getitem__(self, key): + _raise_version_warning() + return super().__getitem__(key) + + def __eq__(self, other): + _raise_version_warning() + return super().__eq__(other) + + def __ne__(self, other): + _raise_version_warning() + return super().__ne__(other) + + def __gt__(self, other): + _raise_version_warning() + return super().__gt__(other) + + def __lt__(self, other): + _raise_version_warning() + return super().__lt__(other) + + def __ge__(self, other): + _raise_version_warning() + return super().__gt__(other) + + def __le__(self, other): + _raise_version_warning() + return super().__lt__(other) + + PILLOW_VERSION = _Deprecated_Version(__version__) + +del _version + + +_plugins = [ + "BlpImagePlugin", + "BmpImagePlugin", + "BufrStubImagePlugin", + "CurImagePlugin", + "DcxImagePlugin", + "DdsImagePlugin", + "EpsImagePlugin", + "FitsStubImagePlugin", + "FliImagePlugin", + "FpxImagePlugin", + "FtexImagePlugin", + "GbrImagePlugin", + "GifImagePlugin", + "GribStubImagePlugin", + "Hdf5StubImagePlugin", + "IcnsImagePlugin", + "IcoImagePlugin", + "ImImagePlugin", + "ImtImagePlugin", + "IptcImagePlugin", + "JpegImagePlugin", + "Jpeg2KImagePlugin", + "McIdasImagePlugin", + "MicImagePlugin", + "MpegImagePlugin", + "MpoImagePlugin", + "MspImagePlugin", + "PalmImagePlugin", + "PcdImagePlugin", + "PcxImagePlugin", + "PdfImagePlugin", + "PixarImagePlugin", + "PngImagePlugin", + "PpmImagePlugin", + "PsdImagePlugin", + "SgiImagePlugin", + "SpiderImagePlugin", + "SunImagePlugin", + "TgaImagePlugin", + "TiffImagePlugin", + "WebPImagePlugin", + 
"WmfImagePlugin", + "XbmImagePlugin", + "XpmImagePlugin", + "XVThumbImagePlugin", +] + + +class UnidentifiedImageError(OSError): + """ + Raised in :py:meth:`PIL.Image.open` if an image cannot be opened and identified. + """ + + pass diff --git a/venv/Lib/site-packages/PIL/__main__.py b/venv/Lib/site-packages/PIL/__main__.py new file mode 100644 index 000000000..a05323f93 --- /dev/null +++ b/venv/Lib/site-packages/PIL/__main__.py @@ -0,0 +1,3 @@ +from .features import pilinfo + +pilinfo() diff --git a/venv/Lib/site-packages/PIL/__pycache__/BdfFontFile.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/BdfFontFile.cpython-36.pyc new file mode 100644 index 000000000..ccfe49524 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/BdfFontFile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/BlpImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/BlpImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..0dbedf613 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/BlpImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/BmpImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/BmpImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..1293368bf Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/BmpImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/BufrStubImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/BufrStubImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..a4d7b1f1b Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/BufrStubImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ContainerIO.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ContainerIO.cpython-36.pyc new file mode 100644 index 000000000..e48e70e72 Binary files /dev/null and 
b/venv/Lib/site-packages/PIL/__pycache__/ContainerIO.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/CurImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/CurImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..2c36bff39 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/CurImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/DcxImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/DcxImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..3d7c2f640 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/DcxImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/DdsImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/DdsImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..1e839c847 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/DdsImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/EpsImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/EpsImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..09c37e184 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/EpsImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ExifTags.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ExifTags.cpython-36.pyc new file mode 100644 index 000000000..6b42d8f58 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ExifTags.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/FitsStubImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/FitsStubImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..360b1203c Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/FitsStubImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/FliImagePlugin.cpython-36.pyc 
b/venv/Lib/site-packages/PIL/__pycache__/FliImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..2ac98672a Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/FliImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/FontFile.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/FontFile.cpython-36.pyc new file mode 100644 index 000000000..218e8eda4 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/FontFile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/FpxImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/FpxImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..355e569c7 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/FpxImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/FtexImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/FtexImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..72cce4986 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/FtexImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/GbrImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/GbrImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..325eb8570 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/GbrImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/GdImageFile.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/GdImageFile.cpython-36.pyc new file mode 100644 index 000000000..c0ecb7a70 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/GdImageFile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/GifImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/GifImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..1452ccd38 Binary files /dev/null and 
b/venv/Lib/site-packages/PIL/__pycache__/GifImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/GimpGradientFile.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/GimpGradientFile.cpython-36.pyc new file mode 100644 index 000000000..d6f703185 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/GimpGradientFile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/GimpPaletteFile.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/GimpPaletteFile.cpython-36.pyc new file mode 100644 index 000000000..a00a2a729 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/GimpPaletteFile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/GribStubImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/GribStubImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..4fe8dcd78 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/GribStubImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/Hdf5StubImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/Hdf5StubImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..130214551 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/Hdf5StubImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/IcnsImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/IcnsImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..4f7c9f6d4 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/IcnsImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/IcoImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/IcoImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..aa89f1153 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/IcoImagePlugin.cpython-36.pyc differ diff --git 
a/venv/Lib/site-packages/PIL/__pycache__/ImImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..4531fa6df Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/Image.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/Image.cpython-36.pyc new file mode 100644 index 000000000..6679450a0 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/Image.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageChops.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageChops.cpython-36.pyc new file mode 100644 index 000000000..423673e1f Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageChops.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageCms.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageCms.cpython-36.pyc new file mode 100644 index 000000000..36bf1ae86 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageCms.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageColor.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageColor.cpython-36.pyc new file mode 100644 index 000000000..1ae15dd38 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageColor.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageDraw.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageDraw.cpython-36.pyc new file mode 100644 index 000000000..0b3e14914 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageDraw.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageDraw2.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageDraw2.cpython-36.pyc new file mode 100644 index 000000000..9b3b558ab Binary files /dev/null and 
b/venv/Lib/site-packages/PIL/__pycache__/ImageDraw2.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageEnhance.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageEnhance.cpython-36.pyc new file mode 100644 index 000000000..4ad6ca2f5 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageEnhance.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageFile.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageFile.cpython-36.pyc new file mode 100644 index 000000000..20ebcec87 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageFile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageFilter.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageFilter.cpython-36.pyc new file mode 100644 index 000000000..1001bc44b Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageFilter.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageFont.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageFont.cpython-36.pyc new file mode 100644 index 000000000..362cf1460 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageFont.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageGrab.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageGrab.cpython-36.pyc new file mode 100644 index 000000000..3773a8b9b Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageGrab.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageMath.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageMath.cpython-36.pyc new file mode 100644 index 000000000..00f5b2371 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageMath.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageMode.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageMode.cpython-36.pyc new file mode 100644 
index 000000000..879dd5c3c Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageMode.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageMorph.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageMorph.cpython-36.pyc new file mode 100644 index 000000000..ea31a0f89 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageMorph.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageOps.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageOps.cpython-36.pyc new file mode 100644 index 000000000..191a57a2a Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageOps.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImagePalette.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImagePalette.cpython-36.pyc new file mode 100644 index 000000000..ccf8194cb Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImagePalette.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImagePath.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImagePath.cpython-36.pyc new file mode 100644 index 000000000..361ee0ac1 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImagePath.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageQt.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageQt.cpython-36.pyc new file mode 100644 index 000000000..dbc513ebb Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageQt.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageSequence.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageSequence.cpython-36.pyc new file mode 100644 index 000000000..0c25f5fa8 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageSequence.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageShow.cpython-36.pyc 
b/venv/Lib/site-packages/PIL/__pycache__/ImageShow.cpython-36.pyc new file mode 100644 index 000000000..4607ae14e Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageShow.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageStat.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageStat.cpython-36.pyc new file mode 100644 index 000000000..923e1eabd Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageStat.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageTk.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageTk.cpython-36.pyc new file mode 100644 index 000000000..4097844a3 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageTk.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageTransform.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageTransform.cpython-36.pyc new file mode 100644 index 000000000..ef77979fd Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageTransform.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImageWin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImageWin.cpython-36.pyc new file mode 100644 index 000000000..3be2024bf Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImageWin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/ImtImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/ImtImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..5c749b9cb Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/ImtImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/IptcImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/IptcImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..6887fb15c Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/IptcImagePlugin.cpython-36.pyc differ 
diff --git a/venv/Lib/site-packages/PIL/__pycache__/Jpeg2KImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/Jpeg2KImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..a0e8b756b Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/Jpeg2KImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/JpegImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/JpegImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..184569799 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/JpegImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/JpegPresets.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/JpegPresets.cpython-36.pyc new file mode 100644 index 000000000..92141a2b5 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/JpegPresets.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/McIdasImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/McIdasImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..3a0b9f8be Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/McIdasImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/MicImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/MicImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..064c5bfc9 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/MicImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/MpegImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/MpegImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..bc4008516 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/MpegImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/MpoImagePlugin.cpython-36.pyc 
b/venv/Lib/site-packages/PIL/__pycache__/MpoImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..e4153abc4 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/MpoImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/MspImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/MspImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..f151c0683 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/MspImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PSDraw.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PSDraw.cpython-36.pyc new file mode 100644 index 000000000..d5673e3df Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PSDraw.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PaletteFile.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PaletteFile.cpython-36.pyc new file mode 100644 index 000000000..02e59b3c8 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PaletteFile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PalmImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PalmImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..7692e5052 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PalmImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PcdImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PcdImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..9b1378096 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PcdImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PcfFontFile.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PcfFontFile.cpython-36.pyc new file mode 100644 index 000000000..398b42e93 Binary files /dev/null and 
b/venv/Lib/site-packages/PIL/__pycache__/PcfFontFile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PcxImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PcxImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..b46df320b Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PcxImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PdfImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PdfImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..e4d1b1c9c Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PdfImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PdfParser.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PdfParser.cpython-36.pyc new file mode 100644 index 000000000..98e902284 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PdfParser.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PixarImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PixarImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..6ada109fc Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PixarImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PngImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PngImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..d9744cd7f Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PngImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PpmImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PpmImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..d5103c84d Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PpmImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PsdImagePlugin.cpython-36.pyc 
b/venv/Lib/site-packages/PIL/__pycache__/PsdImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..0e789ebf7 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PsdImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/PyAccess.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/PyAccess.cpython-36.pyc new file mode 100644 index 000000000..2c6b9bb90 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/PyAccess.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/SgiImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/SgiImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..230bdb8b8 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/SgiImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/SpiderImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/SpiderImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..77a63f867 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/SpiderImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/SunImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/SunImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..d9150ad0f Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/SunImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/TarIO.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/TarIO.cpython-36.pyc new file mode 100644 index 000000000..8920901fe Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/TarIO.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/TgaImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/TgaImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..d23823d94 Binary files /dev/null and 
b/venv/Lib/site-packages/PIL/__pycache__/TgaImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/TiffImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/TiffImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..3e281aeb5 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/TiffImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/TiffTags.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/TiffTags.cpython-36.pyc new file mode 100644 index 000000000..66d799b4a Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/TiffTags.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/WalImageFile.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/WalImageFile.cpython-36.pyc new file mode 100644 index 000000000..38e4408c7 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/WalImageFile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/WebPImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/WebPImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..e8fafce16 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/WebPImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/WmfImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/WmfImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..dab725175 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/WmfImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/XVThumbImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/XVThumbImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..fe352d57e Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/XVThumbImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/XbmImagePlugin.cpython-36.pyc 
b/venv/Lib/site-packages/PIL/__pycache__/XbmImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..0103e2d7c Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/XbmImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/XpmImagePlugin.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/XpmImagePlugin.cpython-36.pyc new file mode 100644 index 000000000..6f08ab424 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/XpmImagePlugin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/__init__.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 000000000..dfc39087f Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/__main__.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/__main__.cpython-36.pyc new file mode 100644 index 000000000..5115024c0 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/__main__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/_binary.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/_binary.cpython-36.pyc new file mode 100644 index 000000000..a8c32c251 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/_binary.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/_tkinter_finder.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/_tkinter_finder.cpython-36.pyc new file mode 100644 index 000000000..7d7e76f02 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/_tkinter_finder.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/_util.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/_util.cpython-36.pyc new file mode 100644 index 000000000..706bce83a Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/_util.cpython-36.pyc differ diff --git 
a/venv/Lib/site-packages/PIL/__pycache__/_version.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/_version.cpython-36.pyc new file mode 100644 index 000000000..c3c6f3000 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/_version.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/__pycache__/features.cpython-36.pyc b/venv/Lib/site-packages/PIL/__pycache__/features.cpython-36.pyc new file mode 100644 index 000000000..821fe6f56 Binary files /dev/null and b/venv/Lib/site-packages/PIL/__pycache__/features.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/PIL/_binary.py b/venv/Lib/site-packages/PIL/_binary.py new file mode 100644 index 000000000..5564f450d --- /dev/null +++ b/venv/Lib/site-packages/PIL/_binary.py @@ -0,0 +1,92 @@ +# +# The Python Imaging Library. +# $Id$ +# +# Binary input/output support routines. +# +# Copyright (c) 1997-2003 by Secret Labs AB +# Copyright (c) 1995-2003 by Fredrik Lundh +# Copyright (c) 2012 by Brian Crowell +# +# See the README file for information on usage and redistribution. +# + + +"""Binary input/output support routines.""" + + +from struct import pack, unpack_from + + +def i8(c): + return c if c.__class__ is int else c[0] + + +def o8(i): + return bytes((i & 255,)) + + +# Input, le = little endian, be = big endian +def i16le(c, o=0): + """ + Converts a 2-bytes (16 bits) string to an unsigned integer. 
+ + :param c: string containing bytes to convert + :param o: offset of bytes to convert in string + """ + return unpack_from("H", c, o)[0] + + +def i32be(c, o=0): + return unpack_from(">I", c, o)[0] + + +# Output, le = little endian, be = big endian +def o16le(i): + return pack("H", i) + + +def o32be(i): + return pack(">I", i) diff --git a/venv/Lib/site-packages/PIL/_imaging.cp36-win32.pyd b/venv/Lib/site-packages/PIL/_imaging.cp36-win32.pyd new file mode 100644 index 000000000..43ca20350 Binary files /dev/null and b/venv/Lib/site-packages/PIL/_imaging.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/PIL/_imagingcms.cp36-win32.pyd b/venv/Lib/site-packages/PIL/_imagingcms.cp36-win32.pyd new file mode 100644 index 000000000..524c4b164 Binary files /dev/null and b/venv/Lib/site-packages/PIL/_imagingcms.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/PIL/_imagingft.cp36-win32.pyd b/venv/Lib/site-packages/PIL/_imagingft.cp36-win32.pyd new file mode 100644 index 000000000..5b1d6d882 Binary files /dev/null and b/venv/Lib/site-packages/PIL/_imagingft.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/PIL/_imagingmath.cp36-win32.pyd b/venv/Lib/site-packages/PIL/_imagingmath.cp36-win32.pyd new file mode 100644 index 000000000..8d3755c3b Binary files /dev/null and b/venv/Lib/site-packages/PIL/_imagingmath.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/PIL/_imagingmorph.cp36-win32.pyd b/venv/Lib/site-packages/PIL/_imagingmorph.cp36-win32.pyd new file mode 100644 index 000000000..47ff38e2c Binary files /dev/null and b/venv/Lib/site-packages/PIL/_imagingmorph.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/PIL/_imagingtk.cp36-win32.pyd b/venv/Lib/site-packages/PIL/_imagingtk.cp36-win32.pyd new file mode 100644 index 000000000..1775f2fc4 Binary files /dev/null and b/venv/Lib/site-packages/PIL/_imagingtk.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/PIL/_tkinter_finder.py b/venv/Lib/site-packages/PIL/_tkinter_finder.py new 
file mode 100644 index 000000000..30493066a --- /dev/null +++ b/venv/Lib/site-packages/PIL/_tkinter_finder.py @@ -0,0 +1,16 @@ +""" Find compiled module linking to Tcl / Tk libraries +""" +import sys +from tkinter import _tkinter as tk + +if hasattr(sys, "pypy_find_executable"): + # Tested with packages at https://bitbucket.org/pypy/pypy/downloads. + # PyPies 1.6, 2.0 do not have tkinter built in. PyPy3-2.3.1 gives an + # OSError trying to import tkinter. Otherwise: + try: # PyPy 5.1, 4.0.0, 2.6.1, 2.6.0 + TKINTER_LIB = tk.tklib_cffi.__file__ + except AttributeError: + # PyPy3 2.4, 2.1-beta1; PyPy 2.5.1, 2.5.0, 2.4.0, 2.3, 2.2, 2.1 + TKINTER_LIB = tk.tkffi.verifier.modulefilename +else: + TKINTER_LIB = tk.__file__ diff --git a/venv/Lib/site-packages/PIL/_util.py b/venv/Lib/site-packages/PIL/_util.py new file mode 100644 index 000000000..755b4b272 --- /dev/null +++ b/venv/Lib/site-packages/PIL/_util.py @@ -0,0 +1,30 @@ +import os +import sys + +py36 = sys.version_info[0:2] >= (3, 6) + + +if py36: + from pathlib import Path + + def isPath(f): + return isinstance(f, (bytes, str, Path)) + + +else: + + def isPath(f): + return isinstance(f, (bytes, str)) + + +# Checks if an object is a string, and that it points to a directory. 
+def isDirectory(f): + return isPath(f) and os.path.isdir(f) + + +class deferred_error: + def __init__(self, ex): + self.ex = ex + + def __getattr__(self, elt): + raise self.ex diff --git a/venv/Lib/site-packages/PIL/_version.py b/venv/Lib/site-packages/PIL/_version.py new file mode 100644 index 000000000..035deeba7 --- /dev/null +++ b/venv/Lib/site-packages/PIL/_version.py @@ -0,0 +1,2 @@ +# Master version for Pillow +__version__ = "7.2.0" diff --git a/venv/Lib/site-packages/PIL/_webp.cp36-win32.pyd b/venv/Lib/site-packages/PIL/_webp.cp36-win32.pyd new file mode 100644 index 000000000..93bc044ae Binary files /dev/null and b/venv/Lib/site-packages/PIL/_webp.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/PIL/features.py b/venv/Lib/site-packages/PIL/features.py new file mode 100644 index 000000000..66b093350 --- /dev/null +++ b/venv/Lib/site-packages/PIL/features.py @@ -0,0 +1,309 @@ +import collections +import os +import sys +import warnings + +import PIL + +from . import Image + +modules = { + "pil": ("PIL._imaging", "PILLOW_VERSION"), + "tkinter": ("PIL._tkinter_finder", None), + "freetype2": ("PIL._imagingft", "freetype2_version"), + "littlecms2": ("PIL._imagingcms", "littlecms_version"), + "webp": ("PIL._webp", "webpdecoder_version"), +} + + +def check_module(feature): + """ + Checks if a module is available. + + :param feature: The module to check for. + :returns: ``True`` if available, ``False`` otherwise. + :raises ValueError: If the module is not defined in this version of Pillow. + """ + if not (feature in modules): + raise ValueError("Unknown module %s" % feature) + + module, ver = modules[feature] + + try: + __import__(module) + return True + except ImportError: + return False + + +def version_module(feature): + """ + :param feature: The module to check for. + :returns: + The loaded version number as a string, or ``None`` if unknown or not available. + :raises ValueError: If the module is not defined in this version of Pillow. 
+ """ + if not check_module(feature): + return None + + module, ver = modules[feature] + + if ver is None: + return None + + return getattr(__import__(module, fromlist=[ver]), ver) + + +def get_supported_modules(): + """ + :returns: A list of all supported modules. + """ + return [f for f in modules if check_module(f)] + + +codecs = { + "jpg": ("jpeg", "jpeglib"), + "jpg_2000": ("jpeg2k", "jp2klib"), + "zlib": ("zip", "zlib"), + "libtiff": ("libtiff", "libtiff"), +} + + +def check_codec(feature): + """ + Checks if a codec is available. + + :param feature: The codec to check for. + :returns: ``True`` if available, ``False`` otherwise. + :raises ValueError: If the codec is not defined in this version of Pillow. + """ + if feature not in codecs: + raise ValueError("Unknown codec %s" % feature) + + codec, lib = codecs[feature] + + return codec + "_encoder" in dir(Image.core) + + +def version_codec(feature): + """ + :param feature: The codec to check for. + :returns: + The version number as a string, or ``None`` if not available. + Checked at compile time for ``jpg``, run-time otherwise. + :raises ValueError: If the codec is not defined in this version of Pillow. + """ + if not check_codec(feature): + return None + + codec, lib = codecs[feature] + + version = getattr(Image.core, lib + "_version") + + if feature == "libtiff": + return version.split("\n")[0].split("Version ")[1] + + return version + + +def get_supported_codecs(): + """ + :returns: A list of all supported codecs. 
+ """ + return [f for f in codecs if check_codec(f)] + + +features = { + "webp_anim": ("PIL._webp", "HAVE_WEBPANIM", None), + "webp_mux": ("PIL._webp", "HAVE_WEBPMUX", None), + "transp_webp": ("PIL._webp", "HAVE_TRANSPARENCY", None), + "raqm": ("PIL._imagingft", "HAVE_RAQM", "raqm_version"), + "libjpeg_turbo": ("PIL._imaging", "HAVE_LIBJPEGTURBO", "libjpeg_turbo_version"), + "libimagequant": ("PIL._imaging", "HAVE_LIBIMAGEQUANT", "imagequant_version"), + "xcb": ("PIL._imaging", "HAVE_XCB", None), +} + + +def check_feature(feature): + """ + Checks if a feature is available. + + :param feature: The feature to check for. + :returns: ``True`` if available, ``False`` if unavailable, ``None`` if unknown. + :raises ValueError: If the feature is not defined in this version of Pillow. + """ + if feature not in features: + raise ValueError("Unknown feature %s" % feature) + + module, flag, ver = features[feature] + + try: + imported_module = __import__(module, fromlist=["PIL"]) + return getattr(imported_module, flag) + except ImportError: + return None + + +def version_feature(feature): + """ + :param feature: The feature to check for. + :returns: The version number as a string, or ``None`` if not available. + :raises ValueError: If the feature is not defined in this version of Pillow. + """ + if not check_feature(feature): + return None + + module, flag, ver = features[feature] + + if ver is None: + return None + + return getattr(__import__(module, fromlist=[ver]), ver) + + +def get_supported_features(): + """ + :returns: A list of all supported features. + """ + return [f for f in features if check_feature(f)] + + +def check(feature): + """ + :param feature: A module, codec, or feature name. + :returns: + ``True`` if the module, codec, or feature is available, + ``False`` or ``None`` otherwise. 
+ """ + + if feature in modules: + return check_module(feature) + if feature in codecs: + return check_codec(feature) + if feature in features: + return check_feature(feature) + warnings.warn("Unknown feature '%s'." % feature, stacklevel=2) + return False + + +def version(feature): + """ + :param feature: + The module, codec, or feature to check for. + :returns: + The version number as a string, or ``None`` if unknown or not available. + """ + if feature in modules: + return version_module(feature) + if feature in codecs: + return version_codec(feature) + if feature in features: + return version_feature(feature) + return None + + +def get_supported(): + """ + :returns: A list of all supported modules, features, and codecs. + """ + + ret = get_supported_modules() + ret.extend(get_supported_features()) + ret.extend(get_supported_codecs()) + return ret + + +def pilinfo(out=None, supported_formats=True): + """ + Prints information about this installation of Pillow. + This function can be called with ``python -m PIL``. + + :param out: + The output stream to print to. Defaults to ``sys.stdout`` if ``None``. + :param supported_formats: + If ``True``, a list of all supported image file formats will be printed. 
+ """ + + if out is None: + out = sys.stdout + + Image.init() + + print("-" * 68, file=out) + print("Pillow {}".format(PIL.__version__), file=out) + py_version = sys.version.splitlines() + print("Python {}".format(py_version[0].strip()), file=out) + for py_version in py_version[1:]: + print(" {}".format(py_version.strip()), file=out) + print("-" * 68, file=out) + print( + "Python modules loaded from {}".format(os.path.dirname(Image.__file__)), + file=out, + ) + print( + "Binary modules loaded from {}".format(os.path.dirname(Image.core.__file__)), + file=out, + ) + print("-" * 68, file=out) + + for name, feature in [ + ("pil", "PIL CORE"), + ("tkinter", "TKINTER"), + ("freetype2", "FREETYPE2"), + ("littlecms2", "LITTLECMS2"), + ("webp", "WEBP"), + ("transp_webp", "WEBP Transparency"), + ("webp_mux", "WEBPMUX"), + ("webp_anim", "WEBP Animation"), + ("jpg", "JPEG"), + ("jpg_2000", "OPENJPEG (JPEG2000)"), + ("zlib", "ZLIB (PNG/ZIP)"), + ("libtiff", "LIBTIFF"), + ("raqm", "RAQM (Bidirectional Text)"), + ("libimagequant", "LIBIMAGEQUANT (Quantization method)"), + ("xcb", "XCB (X protocol)"), + ]: + if check(name): + if name == "jpg" and check_feature("libjpeg_turbo"): + v = "libjpeg-turbo " + version_feature("libjpeg_turbo") + else: + v = version(name) + if v is not None: + t = "compiled for" if name in ("pil", "jpg") else "loaded" + print("---", feature, "support ok,", t, v, file=out) + else: + print("---", feature, "support ok", file=out) + else: + print("***", feature, "support not installed", file=out) + print("-" * 68, file=out) + + if supported_formats: + extensions = collections.defaultdict(list) + for ext, i in Image.EXTENSION.items(): + extensions[i].append(ext) + + for i in sorted(Image.ID): + line = "{}".format(i) + if i in Image.MIME: + line = "{} {}".format(line, Image.MIME[i]) + print(line, file=out) + + if i in extensions: + print( + "Extensions: {}".format(", ".join(sorted(extensions[i]))), file=out + ) + + features = [] + if i in Image.OPEN: + 
features.append("open") + if i in Image.SAVE: + features.append("save") + if i in Image.SAVE_ALL: + features.append("save_all") + if i in Image.DECODERS: + features.append("decode") + if i in Image.ENCODERS: + features.append("encode") + + print("Features: {}".format(", ".join(features)), file=out) + print("-" * 68, file=out) diff --git a/venv/Lib/site-packages/Pillow-7.2.0.dist-info/INSTALLER b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/Pillow-7.2.0.dist-info/LICENSE b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/LICENSE new file mode 100644 index 000000000..4aac532f4 --- /dev/null +++ b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/LICENSE @@ -0,0 +1,30 @@ +The Python Imaging Library (PIL) is + + Copyright © 1997-2011 by Secret Labs AB + Copyright © 1995-2011 by Fredrik Lundh + +Pillow is the friendly PIL fork. It is + + Copyright © 2010-2020 by Alex Clark and contributors + +Like PIL, Pillow is licensed under the open source PIL Software License: + +By obtaining, using, and/or copying this software and/or its associated +documentation, you agree that you have read, understood, and will comply +with the following terms and conditions: + +Permission to use, copy, modify, and distribute this software and its +associated documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appears in all copies, and that +both that copyright notice and this permission notice appear in supporting +documentation, and that the name of Secret Labs AB or the author not be +used in advertising or publicity pertaining to distribution of the software +without specific, written prior permission. + +SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS +SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
+IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL, +INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/venv/Lib/site-packages/Pillow-7.2.0.dist-info/METADATA b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/METADATA new file mode 100644 index 000000000..ae02ed79c --- /dev/null +++ b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/METADATA @@ -0,0 +1,135 @@ +Metadata-Version: 2.1 +Name: Pillow +Version: 7.2.0 +Summary: Python Imaging Library (Fork) +Home-page: https://python-pillow.org +Author: Alex Clark (PIL Fork Author) +Author-email: aclark@python-pillow.org +License: HPND +Project-URL: Documentation, https://pillow.readthedocs.io +Project-URL: Source, https://github.com/python-pillow/Pillow +Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-pillow?utm_source=pypi-pillow&utm_medium=pypi +Keywords: Imaging +Platform: UNKNOWN +Classifier: Development Status :: 6 - Mature +Classifier: License :: OSI Approved :: Historical Permission Notice and Disclaimer (HPND) +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Multimedia :: Graphics +Classifier: Topic :: Multimedia :: Graphics :: Capture :: Digital Camera +Classifier: Topic :: Multimedia :: Graphics :: Capture :: Screen Capture +Classifier: Topic :: Multimedia :: Graphics :: Graphics Conversion +Classifier: Topic :: Multimedia :: Graphics :: 
Viewers +Requires-Python: >=3.5 + +Pillow +====== + +Python Imaging Library (Fork) +----------------------------- + +Pillow is the friendly PIL fork by `Alex Clark and Contributors `_. PIL is the Python Imaging Library by Fredrik Lundh and Contributors. As of 2019, Pillow development is `supported by Tidelift `_. + +.. start-badges + +.. list-table:: + :stub-columns: 1 + + * - docs + - |docs| + * - tests + - |linux| |macos| |windows| |gha_lint| |gha| |gha_windows| |gha_docker| |coverage| + * - package + - |zenodo| |tidelift| |version| |downloads| + * - social + - |gitter| |twitter| + +.. end-badges + +More Information +---------------- + +- `Documentation `_ + + - `Installation `_ + - `Handbook `_ + +- `Contribute `_ + + - `Issues `_ + - `Pull requests `_ + +- `Changelog `_ + + - `Pre-fork `_ + +Report a Vulnerability +---------------------- + +To report a security vulnerability, please follow the procedure described in the `Tidelift security policy `_. + +.. |docs| image:: https://readthedocs.org/projects/pillow/badge/?version=latest + :target: https://pillow.readthedocs.io/?badge=latest + :alt: Documentation Status + +.. |linux| image:: https://img.shields.io/travis/python-pillow/Pillow/master.svg?label=Linux%20build + :target: https://travis-ci.org/python-pillow/Pillow + :alt: Travis CI build status (Linux) + +.. |macos| image:: https://img.shields.io/travis/python-pillow/pillow-wheels/master.svg?label=macOS%20build + :target: https://travis-ci.org/python-pillow/pillow-wheels + :alt: Travis CI build status (macOS) + +.. |windows| image:: https://img.shields.io/appveyor/build/python-pillow/Pillow/master.svg?label=Windows%20build + :target: https://ci.appveyor.com/project/python-pillow/Pillow + :alt: AppVeyor CI build status (Windows) + +.. |gha_lint| image:: https://github.com/python-pillow/Pillow/workflows/Lint/badge.svg + :target: https://github.com/python-pillow/Pillow/actions?query=workflow%3ALint + :alt: GitHub Actions build status (Lint) + +.. 
|gha_docker| image:: https://github.com/python-pillow/Pillow/workflows/Test%20Docker/badge.svg + :target: https://github.com/python-pillow/Pillow/actions?query=workflow%3A%22Test+Docker%22 + :alt: GitHub Actions build status (Test Docker) + +.. |gha| image:: https://github.com/python-pillow/Pillow/workflows/Test/badge.svg + :target: https://github.com/python-pillow/Pillow/actions?query=workflow%3ATest + :alt: GitHub Actions build status (Test Linux and macOS) + +.. |gha_windows| image:: https://github.com/python-pillow/Pillow/workflows/Test%20Windows/badge.svg + :target: https://github.com/python-pillow/Pillow/actions?query=workflow%3A%22Test+Windows%22 + :alt: GitHub Actions build status (Test Windows) + +.. |coverage| image:: https://codecov.io/gh/python-pillow/Pillow/branch/master/graph/badge.svg + :target: https://codecov.io/gh/python-pillow/Pillow + :alt: Code coverage + +.. |zenodo| image:: https://zenodo.org/badge/17549/python-pillow/Pillow.svg + :target: https://zenodo.org/badge/latestdoi/17549/python-pillow/Pillow + +.. |tidelift| image:: https://tidelift.com/badges/package/pypi/Pillow?style=flat + :target: https://tidelift.com/subscription/pkg/pypi-pillow?utm_source=pypi-pillow&utm_medium=badge + +.. |version| image:: https://img.shields.io/pypi/v/pillow.svg + :target: https://pypi.org/project/Pillow/ + :alt: Latest PyPI version + +.. |downloads| image:: https://img.shields.io/pypi/dm/pillow.svg + :target: https://pypi.org/project/Pillow/ + :alt: Number of PyPI downloads + +.. |gitter| image:: https://badges.gitter.im/python-pillow/Pillow.svg + :target: https://gitter.im/python-pillow/Pillow?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + :alt: Join the chat at https://gitter.im/python-pillow/Pillow + +.. 
|twitter| image:: https://img.shields.io/badge/tweet-on%20Twitter-00aced.svg + :target: https://twitter.com/PythonPillow + :alt: Follow on https://twitter.com/PythonPillow + + diff --git a/venv/Lib/site-packages/Pillow-7.2.0.dist-info/RECORD b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/RECORD new file mode 100644 index 000000000..e76bcd10e --- /dev/null +++ b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/RECORD @@ -0,0 +1,198 @@ +PIL/BdfFontFile.py,sha256=hRnSgFZOIiTgWfJIaRHRQpU4TKVok2E31KJY6sbZPwc,2817 +PIL/BlpImagePlugin.py,sha256=haYEEl_DnCGzbSICvTjxRKRq93j1BJ49D6rON3VoMSE,14296 +PIL/BmpImagePlugin.py,sha256=zQOhwL2iIKntYKxkdLTYWO3OGqByaz4jAx9mZW0mSPw,14374 +PIL/BufrStubImagePlugin.py,sha256=Zq60GwcqQJTmZJrA9EQq94QvYpNqwYvQzHojh4U7SDw,1520 +PIL/ContainerIO.py,sha256=1U15zUXjWO8uWK-MyCp66Eh7djQEU-oUeCDoBqewNkA,2883 +PIL/CurImagePlugin.py,sha256=zhFOIWO0Id1kDqO3bL-6P27Y142mseLx9eOzsWs2hyQ,1681 +PIL/DcxImagePlugin.py,sha256=bfESLTji9GerqI4oYsy5oTFyRMlr2mjSsXzpY9IuLsk,2145 +PIL/DdsImagePlugin.py,sha256=llS29X6w-oFTyflc7ta7jiFHE4PWqnHgpXlrVO-Zmgo,5466 +PIL/EpsImagePlugin.py,sha256=exWRY-hn_NP2lvj7w11rHZADN-ubY93TcZ_KSkwgHts,12123 +PIL/ExifTags.py,sha256=fx7S0CnztT9ptHT2HGuMYteI99CMVrD73IHeRI5OFjU,9009 +PIL/FitsStubImagePlugin.py,sha256=8Zq2D9ReJE-stBppxB_ELX3wxcS0_BDGg6Xce7sWpaU,1624 +PIL/FliImagePlugin.py,sha256=fl-3mUGENpHSpI0NxJ7PzOOuS7vPtZgjnUipSWnX0rU,4272 +PIL/FontFile.py,sha256=5LQh5Rr62fTz8rHvanAccr_oQQt8E2M_BIO0ZeG1isQ,2769 +PIL/FpxImagePlugin.py,sha256=qn-JGrpfN_99egOSZAsCcx14RXeZaK2pKDomryEM2og,6670 +PIL/FtexImagePlugin.py,sha256=-iysUmqEEOORhWBQCou0gqrgSwsMBqcr6qyWbrNptSE,3307 +PIL/GbrImagePlugin.py,sha256=X8UAqQtyUPO3G8OUOlrYXROgsZyDZoUmXvk_RgpGLtw,2801 +PIL/GdImageFile.py,sha256=Ub6B3SP0BQlsBgLjZmYT68gKblXK1MPbtJynyPWez40,2487 +PIL/GifImagePlugin.py,sha256=Y-skmIQinP0W2pOMbEuUBolzZqt0EQX6MV09XuYPkpw,28872 +PIL/GimpGradientFile.py,sha256=G0ClRmjRHIJoU0nmG-P-tgehLHZip5i0rY4-5pjJ7bc,3353 
+PIL/GimpPaletteFile.py,sha256=_wWvNmB40AfQ1M5sTxoYYXOMApWQji7rrubqZhfd1dU,1274 +PIL/GribStubImagePlugin.py,sha256=gtLF7drAx66O9OOE_lJ1GgtLzjULoQDzFWT0sms7l98,1543 +PIL/Hdf5StubImagePlugin.py,sha256=zjtFPZIcVkWXvYRPnHow6XA9kElEi772w7PFSuEqmq4,1517 +PIL/IcnsImagePlugin.py,sha256=SC9TkWt0MVer-aBer5F95rD45KXL2o-mX4RJKi1m6Dk,11718 +PIL/IcoImagePlugin.py,sha256=BrFX4_d-HBe0Sn6htQNX5V3LI-HxZnW9QGYG9t3yr7A,10134 +PIL/ImImagePlugin.py,sha256=HCwMuVW03zj9aWNbK1Sw9gfqCDRVCJ3hUKXZk3E03Ds,10825 +PIL/Image.py,sha256=IbqOBdm0e5SaiGcu49gDCylMfZH2lSu0e5dfmF0gH80,115642 +PIL/ImageChops.py,sha256=XxYMb9xWRFF6N2nUcBrWdpZLTJhog9G0dKbepFaWVBQ,7309 +PIL/ImageCms.py,sha256=ffh825laLsZfH4Ojn9ADWbfx2Kqo2LVw3uc5UUAaqQo,36750 +PIL/ImageColor.py,sha256=g0yVp1VyRBieKf2n18gh5w9BaveRtp5LK-4WNYmZTE8,8634 +PIL/ImageDraw.py,sha256=SH8-SsBWk7mX66pzrYD3Jl7EMmwuLYDAXf-A1XSAcdQ,19319 +PIL/ImageDraw2.py,sha256=oBhpBTZhx3bd4D0s8E2kDjBzgThRkDU_TE_987l501k,5019 +PIL/ImageEnhance.py,sha256=CJnCouiBmxN2fE0xW7m_uMdBqcm-Fp0S3ruHhkygal4,3190 +PIL/ImageFile.py,sha256=oPXH-sNQU1rQ6goP67ZEbFBM56SxOR-ulPhbvfZyKwQ,21015 +PIL/ImageFilter.py,sha256=yyjUre0YHNOC-WRAFf060sZWnQ_JTvZxme3X-VQBEGM,15887 +PIL/ImageFont.py,sha256=4nv8YNcE-IGYWwESeBmQvK6Ii_OGzv1FjspGi5FOqfQ,36077 +PIL/ImageGrab.py,sha256=DdO1jppfTiwF4XtTLZ_dTeZm9flgOim4n3Hfk_rhpec,3625 +PIL/ImageMath.py,sha256=olaAaE7xJORKmQgQsfD7EhGREseKBZz7u4_zVW6WOdk,7054 +PIL/ImageMode.py,sha256=gI88wDgAc4y-m46vTA4zPmipG12wpYLNXPRHyPZBZaY,1638 +PIL/ImageMorph.py,sha256=S_ZN_u6QwmqphSnv6iqobbqv1-CKxNGNJqSQ7i8MyrM,7896 +PIL/ImageOps.py,sha256=Mm8dmGeaUJx2V3LViRO_yUNJA7I_tLtxf99kJitU-0M,18022 +PIL/ImagePalette.py,sha256=O-kWUT0q1ExW-1I26uggy-jC_DaDlur9wlf9DBCMzGU,6352 +PIL/ImagePath.py,sha256=lVmH1-lCd0SyrFoqyhlstAFW2iJuC14fPcW8iewvxCQ,336 +PIL/ImageQt.py,sha256=p5IwMwxc2kHzwXc30bXo4fIsTBG6fzQMClIjKCCdcro,5797 +PIL/ImageSequence.py,sha256=3djA7vDH6wafTGbt4e_lPlVhy2TaKfdSrA1XQ4n-Uoc,1850 +PIL/ImageShow.py,sha256=wnLjXbmwqrc1dNi4nxfFK64yUm_imMIxboE17ql9cIc,6358 
+PIL/ImageStat.py,sha256=PieQi44mRHE6jod7NqujwGr6WCntuZuNGmC2z9PaoDY,3901 +PIL/ImageTk.py,sha256=LqiRd0wkODL-7GI4gZm2fAlmtUDw7keLZj_dtRmqug8,9328 +PIL/ImageTransform.py,sha256=V2l6tsjmymMIF7HQBMI21UPn4mlicarrm4NF3Kazvio,2843 +PIL/ImageWin.py,sha256=WlwWoWKPkNkPCbKfjqQUsjLk3xknboqcx1rmRjBSWxI,7200 +PIL/ImtImagePlugin.py,sha256=cn60lqUVnK2oh_sPqPBORr_rZ4zuF_6FU0V96IAh8Ww,2203 +PIL/IptcImagePlugin.py,sha256=gs12EAOvOWaVYrv_LAZAlqhX_OsedSZom90MWQFNAmQ,5670 +PIL/Jpeg2KImagePlugin.py,sha256=3NAbqBmvSU_fHUIGspXFsVQV7uYMydN2Rj8jP2bGdiA,8722 +PIL/JpegImagePlugin.py,sha256=DUUQ9SW01fVj1W3O7nEj8-EJJ1MpVgoor1M-k9aFdZU,27898 +PIL/JpegPresets.py,sha256=59KxoMhAJGpdMtTaVSC_J7-JQgW1GtmWo5oYDP_ADII,12711 +PIL/McIdasImagePlugin.py,sha256=LrP5nA7l8IQG3WhlMI0Xs8fGXY_uf6IDmzNCERl3tGw,1754 +PIL/MicImagePlugin.py,sha256=t8iqakHjOilWVEOrjTISN2-ctxkTYSZgzmtxf4ufrfg,2606 +PIL/MpegImagePlugin.py,sha256=n16Zgdy8Hcfke16lQwZWs53PZq4BA_OxPCMPDkW62nw,1803 +PIL/MpoImagePlugin.py,sha256=lbBbUp-o6xVnfaX3sQYpd7RN4-5-KHcbwi0Km2vN0eg,4244 +PIL/MspImagePlugin.py,sha256=UlkKUCM7D0ZZZW-iSwsQxUrh44DOsTwjXLHzUhUPeZM,5530 +PIL/PSDraw.py,sha256=zySuABiEoP2TW_7DypJaS9ByWpxqPjmP43gCjXOghRA,6735 +PIL/PaletteFile.py,sha256=s3KtsDuY5S04MKDyiXK3iIbiOGzV9PvCDUpOQHI7yqc,1106 +PIL/PalmImagePlugin.py,sha256=SuMO5oCq1sgTFVsnJLvFRJFVAadez6UEJMbW65bjr5M,9092 +PIL/PcdImagePlugin.py,sha256=iBuIYSh2ff69V_DqG3yiMtvn9-wQkKkpdrmXbXYBluM,1504 +PIL/PcfFontFile.py,sha256=1GCvZM86-l8OmMhxDUeL66WqrrPckTJAeKySpck99GI,6268 +PIL/PcxImagePlugin.py,sha256=zfNKKUEIdykafo44h7ylZlANBPgt_8c-W2SkakxoQc4,5483 +PIL/PdfImagePlugin.py,sha256=sS6VL5uZvipBn3gkPEA8qEpb3gaKtNxhH6sHia11x4o,7574 +PIL/PdfParser.py,sha256=TQtymvzfdjrdYXYrVJ_ntM5A7rBqyDqLF-FQX26kcGE,34422 +PIL/PixarImagePlugin.py,sha256=PriyK2dGF7ecbm_ZNB6eWlL5CiVA2d0RkYw4TYS0n7Y,1647 +PIL/PngImagePlugin.py,sha256=jAwolRtePqHwCUd-a65p1mM7n4KZN_yHzlmuIHQzE-o,41790 +PIL/PpmImagePlugin.py,sha256=YrtSj-K7XQjb1dW7Y8G6I21-xXAVJ4YqPuNwOJosVTE,4451 
+PIL/PsdImagePlugin.py,sha256=KAAItucNljGjlBnTZX9PgONB7ViNCmbxbhp-8bxNqBo,7628 +PIL/PyAccess.py,sha256=U_N4WB6yg_qpWKo1X7avE98p6Ve3bqqnWOGX6DeyE4U,9592 +PIL/SgiImagePlugin.py,sha256=Jn7gWyIXI5s1M8dkVKkQCp0XweWHe8mVB-YmuPx-Wx0,6100 +PIL/SpiderImagePlugin.py,sha256=VwW-TCSjeqQ4QvhlDTKBqZPxTiHZpRTlrypRn95BTu8,9538 +PIL/SunImagePlugin.py,sha256=OYiU1zBBlN_lNqoGpxAPuZDDyBm31A_-Ck_wul7vIv8,4302 +PIL/TarIO.py,sha256=E_pjAxk9wHezXUuR_99liySBXfJoL2wjzdNDf0g1hTo,1440 +PIL/TgaImagePlugin.py,sha256=3j3c1DAMKDcS0GLYq0yuPuc3gF-cTyAmF52Hp5Nsdog,6268 +PIL/TiffImagePlugin.py,sha256=z-6FybmlNvXeP00iRw9tgGg2AZT7hwQ68oCqtVzhmIY,67498 +PIL/TiffTags.py,sha256=Kl6hB2v-IT-h01P_5aBafrsO1AIA0JxJI7fNJvQCuhk,14551 +PIL/WalImageFile.py,sha256=Mfwtpwi-CgRKGORZbdc35uVG0XdelIEIafmtzh0aTKw,5531 +PIL/WebPImagePlugin.py,sha256=RKHtxgrWjuxN1fQF8AX0ckl3cEJkltOiKJrHYh--gk4,10795 +PIL/WmfImagePlugin.py,sha256=D7kFCuEPLZcdOoGg5Ab3jGBRJiAPy6nYd0WbDfsYPp8,4612 +PIL/XVThumbImagePlugin.py,sha256=sYdEiHkRT5U8JG-VMzjTlErNUGLnMqFJxHVPheyqXgo,1948 +PIL/XbmImagePlugin.py,sha256=rCNKI-dem3N-NIvRvepuQ9MzAWkSWS7C5FNpNInYfOg,2448 +PIL/XpmImagePlugin.py,sha256=BTgbV-ij_1Lw5u5yCWVOI_ZWUoif1-3AoQ0uszigrfE,3070 +PIL/__init__.py,sha256=kmCZsISXCYqhkyIhg1xfddav65p4l3nNifzh6a9qgi0,3265 +PIL/__main__.py,sha256=axR7PO-HtXp-o0rBhKIxs0wark0rBfaDIhAIWqtWUo4,41 +PIL/__pycache__/BdfFontFile.cpython-36.pyc,, +PIL/__pycache__/BlpImagePlugin.cpython-36.pyc,, +PIL/__pycache__/BmpImagePlugin.cpython-36.pyc,, +PIL/__pycache__/BufrStubImagePlugin.cpython-36.pyc,, +PIL/__pycache__/ContainerIO.cpython-36.pyc,, +PIL/__pycache__/CurImagePlugin.cpython-36.pyc,, +PIL/__pycache__/DcxImagePlugin.cpython-36.pyc,, +PIL/__pycache__/DdsImagePlugin.cpython-36.pyc,, +PIL/__pycache__/EpsImagePlugin.cpython-36.pyc,, +PIL/__pycache__/ExifTags.cpython-36.pyc,, +PIL/__pycache__/FitsStubImagePlugin.cpython-36.pyc,, +PIL/__pycache__/FliImagePlugin.cpython-36.pyc,, +PIL/__pycache__/FontFile.cpython-36.pyc,, +PIL/__pycache__/FpxImagePlugin.cpython-36.pyc,, 
+PIL/__pycache__/FtexImagePlugin.cpython-36.pyc,, +PIL/__pycache__/GbrImagePlugin.cpython-36.pyc,, +PIL/__pycache__/GdImageFile.cpython-36.pyc,, +PIL/__pycache__/GifImagePlugin.cpython-36.pyc,, +PIL/__pycache__/GimpGradientFile.cpython-36.pyc,, +PIL/__pycache__/GimpPaletteFile.cpython-36.pyc,, +PIL/__pycache__/GribStubImagePlugin.cpython-36.pyc,, +PIL/__pycache__/Hdf5StubImagePlugin.cpython-36.pyc,, +PIL/__pycache__/IcnsImagePlugin.cpython-36.pyc,, +PIL/__pycache__/IcoImagePlugin.cpython-36.pyc,, +PIL/__pycache__/ImImagePlugin.cpython-36.pyc,, +PIL/__pycache__/Image.cpython-36.pyc,, +PIL/__pycache__/ImageChops.cpython-36.pyc,, +PIL/__pycache__/ImageCms.cpython-36.pyc,, +PIL/__pycache__/ImageColor.cpython-36.pyc,, +PIL/__pycache__/ImageDraw.cpython-36.pyc,, +PIL/__pycache__/ImageDraw2.cpython-36.pyc,, +PIL/__pycache__/ImageEnhance.cpython-36.pyc,, +PIL/__pycache__/ImageFile.cpython-36.pyc,, +PIL/__pycache__/ImageFilter.cpython-36.pyc,, +PIL/__pycache__/ImageFont.cpython-36.pyc,, +PIL/__pycache__/ImageGrab.cpython-36.pyc,, +PIL/__pycache__/ImageMath.cpython-36.pyc,, +PIL/__pycache__/ImageMode.cpython-36.pyc,, +PIL/__pycache__/ImageMorph.cpython-36.pyc,, +PIL/__pycache__/ImageOps.cpython-36.pyc,, +PIL/__pycache__/ImagePalette.cpython-36.pyc,, +PIL/__pycache__/ImagePath.cpython-36.pyc,, +PIL/__pycache__/ImageQt.cpython-36.pyc,, +PIL/__pycache__/ImageSequence.cpython-36.pyc,, +PIL/__pycache__/ImageShow.cpython-36.pyc,, +PIL/__pycache__/ImageStat.cpython-36.pyc,, +PIL/__pycache__/ImageTk.cpython-36.pyc,, +PIL/__pycache__/ImageTransform.cpython-36.pyc,, +PIL/__pycache__/ImageWin.cpython-36.pyc,, +PIL/__pycache__/ImtImagePlugin.cpython-36.pyc,, +PIL/__pycache__/IptcImagePlugin.cpython-36.pyc,, +PIL/__pycache__/Jpeg2KImagePlugin.cpython-36.pyc,, +PIL/__pycache__/JpegImagePlugin.cpython-36.pyc,, +PIL/__pycache__/JpegPresets.cpython-36.pyc,, +PIL/__pycache__/McIdasImagePlugin.cpython-36.pyc,, +PIL/__pycache__/MicImagePlugin.cpython-36.pyc,, 
+PIL/__pycache__/MpegImagePlugin.cpython-36.pyc,, +PIL/__pycache__/MpoImagePlugin.cpython-36.pyc,, +PIL/__pycache__/MspImagePlugin.cpython-36.pyc,, +PIL/__pycache__/PSDraw.cpython-36.pyc,, +PIL/__pycache__/PaletteFile.cpython-36.pyc,, +PIL/__pycache__/PalmImagePlugin.cpython-36.pyc,, +PIL/__pycache__/PcdImagePlugin.cpython-36.pyc,, +PIL/__pycache__/PcfFontFile.cpython-36.pyc,, +PIL/__pycache__/PcxImagePlugin.cpython-36.pyc,, +PIL/__pycache__/PdfImagePlugin.cpython-36.pyc,, +PIL/__pycache__/PdfParser.cpython-36.pyc,, +PIL/__pycache__/PixarImagePlugin.cpython-36.pyc,, +PIL/__pycache__/PngImagePlugin.cpython-36.pyc,, +PIL/__pycache__/PpmImagePlugin.cpython-36.pyc,, +PIL/__pycache__/PsdImagePlugin.cpython-36.pyc,, +PIL/__pycache__/PyAccess.cpython-36.pyc,, +PIL/__pycache__/SgiImagePlugin.cpython-36.pyc,, +PIL/__pycache__/SpiderImagePlugin.cpython-36.pyc,, +PIL/__pycache__/SunImagePlugin.cpython-36.pyc,, +PIL/__pycache__/TarIO.cpython-36.pyc,, +PIL/__pycache__/TgaImagePlugin.cpython-36.pyc,, +PIL/__pycache__/TiffImagePlugin.cpython-36.pyc,, +PIL/__pycache__/TiffTags.cpython-36.pyc,, +PIL/__pycache__/WalImageFile.cpython-36.pyc,, +PIL/__pycache__/WebPImagePlugin.cpython-36.pyc,, +PIL/__pycache__/WmfImagePlugin.cpython-36.pyc,, +PIL/__pycache__/XVThumbImagePlugin.cpython-36.pyc,, +PIL/__pycache__/XbmImagePlugin.cpython-36.pyc,, +PIL/__pycache__/XpmImagePlugin.cpython-36.pyc,, +PIL/__pycache__/__init__.cpython-36.pyc,, +PIL/__pycache__/__main__.cpython-36.pyc,, +PIL/__pycache__/_binary.cpython-36.pyc,, +PIL/__pycache__/_tkinter_finder.cpython-36.pyc,, +PIL/__pycache__/_util.cpython-36.pyc,, +PIL/__pycache__/_version.cpython-36.pyc,, +PIL/__pycache__/features.cpython-36.pyc,, +PIL/_binary.py,sha256=M_yObPVR_1rxnS5craSJsSbFJMykMYqJ0vNHeUpAmj4,1793 +PIL/_imaging.cp36-win32.pyd,sha256=UoQFgmpW0mRVCyUf0N_Df1lmWnTJE1ip5g7iXqW3JzM,2171392 +PIL/_imagingcms.cp36-win32.pyd,sha256=Cg2cFXTdsKW5Ezm4M6Y11xScy8D8vgLbPWnYyfvb7oE,190976 
+PIL/_imagingft.cp36-win32.pyd,sha256=WgIDhvTXZ7jdeYs0MYS3IJEwQ34nQx3vjoO8tCouYYM,513024 +PIL/_imagingmath.cp36-win32.pyd,sha256=bfaZ3wdMwT9wupWF8zkE4Hw1iL8waPrCh95-Jg49-p4,17920 +PIL/_imagingmorph.cp36-win32.pyd,sha256=AQXVSBc4_YvNthtF3L00AdQh0myJEPcBwMvw66MtVhg,11264 +PIL/_imagingtk.cp36-win32.pyd,sha256=-sw9JKZVRidgnIkO0CSeLASaxmXNfElVhW-VqB2dIQs,12288 +PIL/_tkinter_finder.py,sha256=H8lIY9JyNwACYSmxnPRyGwqSoolSxBoE_zMbQEmbe-o,622 +PIL/_util.py,sha256=VCm5WKSTI2hGMBDZdAY_XxBAbBYRwkKM_EbTLo0qJlc,503 +PIL/_version.py,sha256=oQcbLZ3YKpJ_oyN9BngQk3RukRqCAYLYIaUNjkAnM3A,50 +PIL/_webp.cp36-win32.pyd,sha256=gI52PpkjZiQBZA1gCIzePHX_VdmeLk0wqtoZ5JV-7qc,438272 +PIL/features.py,sha256=56xvqPTkaGDpeVBfIu38yQBxZE6cMe88QG-UPcptfbk,8826 +Pillow-7.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Pillow-7.2.0.dist-info/LICENSE,sha256=N95Cq-M6JH6PA9IxNleg8XSiOaGY9Sat1lRP8-JkO4E,1452 +Pillow-7.2.0.dist-info/METADATA,sha256=LQagWajafxhNsgoUYQs3GHiZKLQs8saJykYUaWnzjvs,5966 +Pillow-7.2.0.dist-info/RECORD,, +Pillow-7.2.0.dist-info/WHEEL,sha256=ZFeOeZQCWkgYx9PG5WAxk1yIHroxd2erWFNpu0USMOg,102 +Pillow-7.2.0.dist-info/top_level.txt,sha256=riZqrk-hyZqh5f1Z0Zwii3dKfxEsByhu9cU9IODF-NY,4 +Pillow-7.2.0.dist-info/zip-safe,sha256=frcCV1k9oG9oKj3dpUqdJg1PxRT2RSN_XKdLCPjaYaY,2 diff --git a/venv/Lib/site-packages/Pillow-7.2.0.dist-info/WHEEL b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/WHEEL new file mode 100644 index 000000000..f2456e30b --- /dev/null +++ b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: false +Tag: cp36-cp36m-win32 + diff --git a/venv/Lib/site-packages/Pillow-7.2.0.dist-info/top_level.txt b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/top_level.txt new file mode 100644 index 000000000..b338169ce --- /dev/null +++ b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +PIL diff --git a/venv/Lib/site-packages/Pillow-7.2.0.dist-info/zip-safe 
b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/zip-safe new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/venv/Lib/site-packages/Pillow-7.2.0.dist-info/zip-safe @@ -0,0 +1 @@ + diff --git a/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/INSTALLER b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/LICENSE b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/LICENSE new file mode 100644 index 000000000..c01d7d721 --- /dev/null +++ b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2006-2012 Filip Wasilewski +Copyright (c) 2012-2019 The PyWavelets Developers + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/LICENSES_bundled.txt b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/LICENSES_bundled.txt new file mode 100644 index 000000000..6b2ab7a02 --- /dev/null +++ b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/LICENSES_bundled.txt @@ -0,0 +1,10 @@ +The PyWavelets repository and source distributions bundle some code that is +adapted from compatibly licensed projects. We list these here. + +Name: NumPy +Files: pywt/_pytesttester.py +License: 3-clause BSD + +Name: SciPy +Files: setup.py, util/* +License: 3-clause BSD diff --git a/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/METADATA b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/METADATA new file mode 100644 index 000000000..8806f4ca3 --- /dev/null +++ b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/METADATA @@ -0,0 +1,47 @@ +Metadata-Version: 2.1 +Name: PyWavelets +Version: 1.1.1 +Summary: PyWavelets, wavelet transform module +Home-page: https://github.com/PyWavelets/pywt +Maintainer: The PyWavelets Developers +Maintainer-email: pywavelets@googlegroups.com +License: MIT +Download-URL: https://github.com/PyWavelets/pywt/releases +Keywords: wavelets,wavelet transform,DWT,SWT,CWT,scientific +Platform: Windows +Platform: Linux +Platform: Solaris +Platform: Mac OS-X +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: 
Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.5 +Requires-Dist: numpy (>=1.13.3) + +PyWavelets is a Python wavelet transforms module that includes: + +* nD Forward and Inverse Discrete Wavelet Transform (DWT and IDWT) +* 1D and 2D Forward and Inverse Stationary Wavelet Transform (Undecimated Wavelet Transform) +* 1D and 2D Wavelet Packet decomposition and reconstruction +* 1D Continuous Wavelet Tranfsorm +* Computing Approximations of wavelet and scaling functions +* Over 100 built-in wavelet filters and support for custom wavelets +* Single and double precision calculations +* Real and complex calculations +* Results compatible with Matlab Wavelet Toolbox (TM) + + diff --git a/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/RECORD b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/RECORD new file mode 100644 index 000000000..855d1fd40 --- /dev/null +++ b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/RECORD @@ -0,0 +1,103 @@ +PyWavelets-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +PyWavelets-1.1.1.dist-info/LICENSE,sha256=a2SvaoF5ToW2wxUg0EL0TSnDF-WpJM8tD1K8YnhC8SY,1171 +PyWavelets-1.1.1.dist-info/LICENSES_bundled.txt,sha256=2ahHJgofMijcqSeHR3D5M9hIh8fsFcy7Xg_dy_UHH_8,264 +PyWavelets-1.1.1.dist-info/METADATA,sha256=zn0hwdCtZDo4eqRBJk3RB0VugMBR7-4hvg00HCHvTTk,1902 +PyWavelets-1.1.1.dist-info/RECORD,, +PyWavelets-1.1.1.dist-info/WHEEL,sha256=BWBcgVHkeAa2gX50hi5tpUL8dDP1yeySqXAVuaCx3Jw,102 +PyWavelets-1.1.1.dist-info/top_level.txt,sha256=j3eDFZWsDkiyrSwsdc1H0AjhXkTzsuJLSx-blZUoOVo,5 +pywt/__init__.py,sha256=ECk0-vyIl8HiSSWqfq8v-JHcxG8RIAMHk-XtS5Jv9y4,1119 +pywt/__pycache__/__init__.cpython-36.pyc,, +pywt/__pycache__/_c99_config.cpython-36.pyc,, +pywt/__pycache__/_cwt.cpython-36.pyc,, +pywt/__pycache__/_doc_utils.cpython-36.pyc,, +pywt/__pycache__/_dwt.cpython-36.pyc,, 
+pywt/__pycache__/_functions.cpython-36.pyc,, +pywt/__pycache__/_multidim.cpython-36.pyc,, +pywt/__pycache__/_multilevel.cpython-36.pyc,, +pywt/__pycache__/_pytest.cpython-36.pyc,, +pywt/__pycache__/_pytesttester.cpython-36.pyc,, +pywt/__pycache__/_swt.cpython-36.pyc,, +pywt/__pycache__/_thresholding.cpython-36.pyc,, +pywt/__pycache__/_utils.cpython-36.pyc,, +pywt/__pycache__/_wavelet_packets.cpython-36.pyc,, +pywt/__pycache__/conftest.cpython-36.pyc,, +pywt/__pycache__/version.cpython-36.pyc,, +pywt/_c99_config.py,sha256=JBOPLzZKq3ZbCUpPiE-dKVFGD2U37qUn_Ulpg9z73GI,83 +pywt/_cwt.py,sha256=Zd1tNB-Jk_X046lPtILEi6-zwMwVU0flJ6IvlWPi8RI,7713 +pywt/_doc_utils.py,sha256=sHsQirfmNDWhT1qCWEVcliAx765nRH9g2vBNaGVOgrY,5823 +pywt/_dwt.py,sha256=pZYIXWstEnPaDV13fA2eqCM-I1iFN054LaLXkPB6Q9I,17223 +pywt/_extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pywt/_extensions/__pycache__/__init__.cpython-36.pyc,, +pywt/_extensions/_cwt.cp36-win32.pyd,sha256=-28PHHSJJiokd7hdN56JBmxaCzj1IOVwS__ezxK_OUQ,222208 +pywt/_extensions/_dwt.cp36-win32.pyd,sha256=EBtLb_UF21L_8EqNGfozMkPe7tQEIElVli4upnsvZuI,239616 +pywt/_extensions/_pywt.cp36-win32.pyd,sha256=q7pbhoV4r4DF6PYv0OYzUxZ5J6IdT81vR02Uis0GbD4,340992 +pywt/_extensions/_swt.cp36-win32.pyd,sha256=gGg4ziSk5tRB8q7gEof8Q65lUSi-Xn0eu9H9ziYy7jk,176128 +pywt/_functions.py,sha256=hvOQb4tn01j1s5_lNhbBBoCMeOvF1_3Q9mt0aEA1bI8,6999 +pywt/_multidim.py,sha256=xi_MS2wzyVJ1xfC9u2s8NKi3UDjQs1i1Xk34LLH2OVU,11320 +pywt/_multilevel.py,sha256=WES31sLveRtDIx-Uu_Th0fVn2FlgmShxmerUzgrAhjg,56551 +pywt/_pytest.py,sha256=yhHxov-VTpcokrHgfzZvYjMwpTQdDdiWTJUrbmJ2IxU,2513 +pywt/_pytesttester.py,sha256=bA2LQRmWoibTXD5ggR7FSFbrKt6IEO6-opG4ONiBCGw,4903 +pywt/_swt.py,sha256=Yq6LxbHXgNd9Zs1WWd0YokZpYUEbdccN1cGi0IVGcc8,29365 +pywt/_thresholding.py,sha256=74opJmTHL5ssbBRiQqir3cmrk4ZRg0ouWTcQ5cp1-Sc,8793 +pywt/_utils.py,sha256=gbcji_MoFHOZg94yq7qHU77eyAl6exTeVmWetntkwTM,3484 
+pywt/_wavelet_packets.py,sha256=lPVfFw4R1WiRSueMI_TDsHQZJuES9VImr51ajYUzsfc,24641 +pywt/conftest.py,sha256=cfdJT7ucDVr1E_2Zcl2wdjNQC-dyQW2IF6L21CCcN0E,143 +pywt/data/__init__.py,sha256=qoDFVHX0RNi91n3UwC7UwYU3UGzbgdGD2OCH9zGJ8mo,96 +pywt/data/__pycache__/__init__.cpython-36.pyc,, +pywt/data/__pycache__/_readers.cpython-36.pyc,, +pywt/data/__pycache__/_wavelab_signals.cpython-36.pyc,, +pywt/data/__pycache__/create_dat.cpython-36.pyc,, +pywt/data/_readers.py,sha256=PSkP5ejSNiHCtZkvv8xwjOVznIo_p-hixsq_oF_UloQ,4530 +pywt/data/_wavelab_signals.py,sha256=7ay4VKhYkMsdrmrMm_FeBKrewzGzrwuoy6sO_RkR8vY,9476 +pywt/data/aero.npz,sha256=34YmNXmLrJQia4ko8iTajO-LDQBJLB_fSPrG36-XqUs,227784 +pywt/data/ascent.npz,sha256=ptVryOllcdYSTzTO3rpJ8dNZlQf2yJCtm6U4VERU6Pc,170883 +pywt/data/camera.npz,sha256=CfIDuSct94Yvk3GR4d897-G0VSeL6SG3omdrbmaGyoA,160418 +pywt/data/create_dat.py,sha256=8BsF3dCoixafNSi5jxZnHdvK69FTH6dIGNJNIlv6c60,625 +pywt/data/ecg.npy,sha256=iS9GVe4jRwWTxs8c4X8Of0f2ywMBJKkvVQ5bFyrUPTk,4176 +pywt/data/sst_nino3.npz,sha256=-vMX2TEULdISSSkMpmevDecdiZ5_I4Zk3zC2xB0Qz1c,64200 +pywt/tests/__pycache__/test__pywt.cpython-36.pyc,, +pywt/tests/__pycache__/test_concurrent.cpython-36.pyc,, +pywt/tests/__pycache__/test_cwt_wavelets.cpython-36.pyc,, +pywt/tests/__pycache__/test_data.cpython-36.pyc,, +pywt/tests/__pycache__/test_deprecations.cpython-36.pyc,, +pywt/tests/__pycache__/test_doc.cpython-36.pyc,, +pywt/tests/__pycache__/test_dwt_idwt.cpython-36.pyc,, +pywt/tests/__pycache__/test_functions.cpython-36.pyc,, +pywt/tests/__pycache__/test_matlab_compatibility.cpython-36.pyc,, +pywt/tests/__pycache__/test_matlab_compatibility_cwt.cpython-36.pyc,, +pywt/tests/__pycache__/test_modes.cpython-36.pyc,, +pywt/tests/__pycache__/test_multidim.cpython-36.pyc,, +pywt/tests/__pycache__/test_multilevel.cpython-36.pyc,, +pywt/tests/__pycache__/test_perfect_reconstruction.cpython-36.pyc,, +pywt/tests/__pycache__/test_swt.cpython-36.pyc,, +pywt/tests/__pycache__/test_thresholding.cpython-36.pyc,, 
+pywt/tests/__pycache__/test_wavelet.cpython-36.pyc,, +pywt/tests/__pycache__/test_wp.cpython-36.pyc,, +pywt/tests/__pycache__/test_wp2d.cpython-36.pyc,, +pywt/tests/data/__pycache__/generate_matlab_data.cpython-36.pyc,, +pywt/tests/data/__pycache__/generate_matlab_data_cwt.cpython-36.pyc,, +pywt/tests/data/cwt_matlabR2015b_result.npz,sha256=FA1Tx-q_1k74bb7yERH_lq4hgZEVwdNP3RVm8lu-_Zw,1819506 +pywt/tests/data/dwt_matlabR2012a_result.npz,sha256=H3zj71AxK1VbM7HZArX5El_qnfH_LLHAlVWj9eLhMmM,3590870 +pywt/tests/data/generate_matlab_data.py,sha256=Spz3yi0kYJF9B1-nDTHhRDBwRexvyjoCT3iuNx7T4xc,3957 +pywt/tests/data/generate_matlab_data_cwt.py,sha256=wjHsR_qTcU9WTI1CmSNam-AM-d0Aomb5emDWlAH8XTU,3248 +pywt/tests/data/wavelab_test_signals.npz,sha256=-cx0ne9JdTcq6LiKBacjM_0_En72TAiKvvFUW1yiZYE,184818 +pywt/tests/test__pywt.py,sha256=3xnBENJo0WXpuWikaV7NnFBGBjXyw4rcRRAxXe84b2w,5469 +pywt/tests/test_concurrent.py,sha256=nPh_HttsDnfg4dhgvFaTAqYXLPPHI3XZ7R4BIjG0nKo,3987 +pywt/tests/test_cwt_wavelets.py,sha256=qpYl6p5c2lzZ15eZ6P3Iv6TJUkw74bGSLePZnrsOoT0,12745 +pywt/tests/test_data.py,sha256=GmX-UBvs9xGvtGS7VmzdbLkIZX198ObgDpL8kSzZNNU,2266 +pywt/tests/test_deprecations.py,sha256=gmMe6YE9scXBrw7liZAloqtc6woX87_c36jXfgrBAXo,2220 +pywt/tests/test_doc.py,sha256=vgJpPxuFQdeXBwKiiYecQVNB49Rj6yhulHZTa8D8rIE,622 +pywt/tests/test_dwt_idwt.py,sha256=ijmugjSVra-IfCC37YSL2tuxomyug24_tl091QRZT8Q,10052 +pywt/tests/test_functions.py,sha256=Fpbk9V7j4cFBOwNtivGd5jnTw_SJ4OfblmieE_DSIsE,1163 +pywt/tests/test_matlab_compatibility.py,sha256=ldfngu7e2DdJv-vtTkCOhkJ8AbDIGFxXT2y23_g5Odw,5885 +pywt/tests/test_matlab_compatibility_cwt.py,sha256=Js-ZFE8eJUQo0TYKpYi9IR3qazqi1aZiSADSwmBbxA0,6283 +pywt/tests/test_modes.py,sha256=_cwJtc-vSJAg15hmDxBmq-JRmCndEivcNEom4ZW2W_Y,4848 +pywt/tests/test_multidim.py,sha256=FInykgFTbXZ0mzHm5HfHBRhncb4QAcolfEoYT1pXxpU,14933 +pywt/tests/test_multilevel.py,sha256=R3ApQ1lZCGIOKqw8zuZvP767P0BT9s1KS_tuGe8ba-w,39025 
+pywt/tests/test_perfect_reconstruction.py,sha256=QMI18bJicfjhX65eMf1QaZM_UEwCbT6gwISOgS0CRHg,1795 +pywt/tests/test_swt.py,sha256=fqwfZUyIK4bcYvg3xD0m2PPhm9Q6gzXFJdHCp-mo8jE,24812 +pywt/tests/test_thresholding.py,sha256=tH8X6uqRvqhK0Z2clfvPCpKIpiLdg8eUhO1I4nwj2SQ,6533 +pywt/tests/test_wavelet.py,sha256=Y6oJVwzDVs-2d_P_2vZ_p-o2dGdCrnnNxDqxTdzueBM,11182 +pywt/tests/test_wp.py,sha256=9fiX7vji0IjVmMxAOfeNPPftofkZ_ukZgRS7xoautqE,6467 +pywt/tests/test_wp2d.py,sha256=i_88DQBs7ifk0DpMdVrA8TGQ82kdSSyOkT1vTxBye2k,6791 +pywt/version.py,sha256=k2yck5B2J4aCLbxes1bQGIG_8fN-CKfZuMWTggV4Wq8,243 diff --git a/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/WHEEL b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/WHEEL new file mode 100644 index 000000000..ad4b1030d --- /dev/null +++ b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.6) +Root-Is-Purelib: false +Tag: cp36-cp36m-win32 + diff --git a/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/top_level.txt b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/top_level.txt new file mode 100644 index 000000000..ce9775431 --- /dev/null +++ b/venv/Lib/site-packages/PyWavelets-1.1.1.dist-info/top_level.txt @@ -0,0 +1 @@ +pywt diff --git a/venv/Lib/site-packages/__pycache__/cycler.cpython-36.pyc b/venv/Lib/site-packages/__pycache__/cycler.cpython-36.pyc new file mode 100644 index 000000000..192642f6b Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/cycler.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/decorator.cpython-36.pyc b/venv/Lib/site-packages/__pycache__/decorator.cpython-36.pyc new file mode 100644 index 000000000..98a74c10e Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/decorator.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/pylab.cpython-36.pyc b/venv/Lib/site-packages/__pycache__/pylab.cpython-36.pyc new file mode 100644 index 000000000..af7d69ab7 Binary files /dev/null 
and b/venv/Lib/site-packages/__pycache__/pylab.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/pyparsing.cpython-36.pyc b/venv/Lib/site-packages/__pycache__/pyparsing.cpython-36.pyc new file mode 100644 index 000000000..ad452e478 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/pyparsing.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/cycler-0.10.0.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/cycler-0.10.0.dist-info/DESCRIPTION.rst new file mode 100644 index 000000000..e1187231a --- /dev/null +++ b/venv/Lib/site-packages/cycler-0.10.0.dist-info/DESCRIPTION.rst @@ -0,0 +1,3 @@ +UNKNOWN + + diff --git a/venv/Lib/site-packages/cycler-0.10.0.dist-info/INSTALLER b/venv/Lib/site-packages/cycler-0.10.0.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/venv/Lib/site-packages/cycler-0.10.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/cycler-0.10.0.dist-info/METADATA b/venv/Lib/site-packages/cycler-0.10.0.dist-info/METADATA new file mode 100644 index 000000000..b232cee59 --- /dev/null +++ b/venv/Lib/site-packages/cycler-0.10.0.dist-info/METADATA @@ -0,0 +1,25 @@ +Metadata-Version: 2.0 +Name: cycler +Version: 0.10.0 +Summary: Composable style cycles +Home-page: http://github.com/matplotlib/cycler +Author: Thomas A Caswell +Author-email: matplotlib-users@python.org +License: BSD +Keywords: cycle kwargs +Platform: Cross platform (Linux +Platform: Mac OSX +Platform: Windows) +Classifier: Development Status :: 4 - Beta +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Requires-Dist: six + +UNKNOWN + + diff --git 
a/venv/Lib/site-packages/cycler-0.10.0.dist-info/RECORD b/venv/Lib/site-packages/cycler-0.10.0.dist-info/RECORD new file mode 100644 index 000000000..8eaefc05b --- /dev/null +++ b/venv/Lib/site-packages/cycler-0.10.0.dist-info/RECORD @@ -0,0 +1,9 @@ +__pycache__/cycler.cpython-36.pyc,, +cycler-0.10.0.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10 +cycler-0.10.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +cycler-0.10.0.dist-info/METADATA,sha256=aWX1pyo7D2hSDNZ2Q6Zl7DxhUQdpyu1O5uNABnvz000,722 +cycler-0.10.0.dist-info/RECORD,, +cycler-0.10.0.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +cycler-0.10.0.dist-info/metadata.json,sha256=CCBpg-KQU-VRL1unJcHPWKQeQbB84G0j7-BeCj7YUbU,875 +cycler-0.10.0.dist-info/top_level.txt,sha256=D8BVVDdAAelLb2FOEz7lDpc6-AL21ylKPrMhtG6yzyE,7 +cycler.py,sha256=ed3G39unvVEBrBZVDwnE0FFroRNsOLkbJ_TwIT5CjCU,15959 diff --git a/venv/Lib/site-packages/cycler-0.10.0.dist-info/WHEEL b/venv/Lib/site-packages/cycler-0.10.0.dist-info/WHEEL new file mode 100644 index 000000000..8b6dd1b5a --- /dev/null +++ b/venv/Lib/site-packages/cycler-0.10.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/cycler-0.10.0.dist-info/metadata.json b/venv/Lib/site-packages/cycler-0.10.0.dist-info/metadata.json new file mode 100644 index 000000000..608212907 --- /dev/null +++ b/venv/Lib/site-packages/cycler-0.10.0.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 4 - Beta", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.details": {"contacts": [{"email": 
"matplotlib-users@python.org", "name": "Thomas A Caswell", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/matplotlib/cycler"}}}, "extras": [], "generator": "bdist_wheel (0.29.0)", "keywords": ["cycle", "kwargs"], "license": "BSD", "metadata_version": "2.0", "name": "cycler", "platform": "Cross platform (Linux", "run_requires": [{"requires": ["six"]}], "summary": "Composable style cycles", "version": "0.10.0"} \ No newline at end of file diff --git a/venv/Lib/site-packages/cycler-0.10.0.dist-info/top_level.txt b/venv/Lib/site-packages/cycler-0.10.0.dist-info/top_level.txt new file mode 100644 index 000000000..22546440f --- /dev/null +++ b/venv/Lib/site-packages/cycler-0.10.0.dist-info/top_level.txt @@ -0,0 +1 @@ +cycler diff --git a/venv/Lib/site-packages/cycler.py b/venv/Lib/site-packages/cycler.py new file mode 100644 index 000000000..3c3eb2d55 --- /dev/null +++ b/venv/Lib/site-packages/cycler.py @@ -0,0 +1,558 @@ +""" +Cycler +====== + +Cycling through combinations of values, producing dictionaries. 
+ +You can add cyclers:: + + from cycler import cycler + cc = (cycler(color=list('rgb')) + + cycler(linestyle=['-', '--', '-.'])) + for d in cc: + print(d) + +Results in:: + + {'color': 'r', 'linestyle': '-'} + {'color': 'g', 'linestyle': '--'} + {'color': 'b', 'linestyle': '-.'} + + +You can multiply cyclers:: + + from cycler import cycler + cc = (cycler(color=list('rgb')) * + cycler(linestyle=['-', '--', '-.'])) + for d in cc: + print(d) + +Results in:: + + {'color': 'r', 'linestyle': '-'} + {'color': 'r', 'linestyle': '--'} + {'color': 'r', 'linestyle': '-.'} + {'color': 'g', 'linestyle': '-'} + {'color': 'g', 'linestyle': '--'} + {'color': 'g', 'linestyle': '-.'} + {'color': 'b', 'linestyle': '-'} + {'color': 'b', 'linestyle': '--'} + {'color': 'b', 'linestyle': '-.'} +""" + +from __future__ import (absolute_import, division, print_function, + unicode_literals) + +import six +from itertools import product, cycle +from six.moves import zip, reduce +from operator import mul, add +import copy + +__version__ = '0.10.0' + + +def _process_keys(left, right): + """ + Helper function to compose cycler keys + + Parameters + ---------- + left, right : iterable of dictionaries or None + The cyclers to be composed + Returns + ------- + keys : set + The keys in the composition of the two cyclers + """ + l_peek = next(iter(left)) if left is not None else {} + r_peek = next(iter(right)) if right is not None else {} + l_key = set(l_peek.keys()) + r_key = set(r_peek.keys()) + if l_key & r_key: + raise ValueError("Can not compose overlapping cycles") + return l_key | r_key + + +class Cycler(object): + """ + Composable cycles + + This class has compositions methods: + + ``+`` + for 'inner' products (zip) + + ``+=`` + in-place ``+`` + + ``*`` + for outer products (itertools.product) and integer multiplication + + ``*=`` + in-place ``*`` + + and supports basic slicing via ``[]`` + + Parameters + ---------- + left : Cycler or None + The 'left' cycler + + right : Cycler or None + The 
'right' cycler + + op : func or None + Function which composes the 'left' and 'right' cyclers. + + """ + def __call__(self): + return cycle(self) + + def __init__(self, left, right=None, op=None): + """Semi-private init + + Do not use this directly, use `cycler` function instead. + """ + if isinstance(left, Cycler): + self._left = Cycler(left._left, left._right, left._op) + elif left is not None: + # Need to copy the dictionary or else that will be a residual + # mutable that could lead to strange errors + self._left = [copy.copy(v) for v in left] + else: + self._left = None + + if isinstance(right, Cycler): + self._right = Cycler(right._left, right._right, right._op) + elif right is not None: + # Need to copy the dictionary or else that will be a residual + # mutable that could lead to strange errors + self._right = [copy.copy(v) for v in right] + else: + self._right = None + + self._keys = _process_keys(self._left, self._right) + self._op = op + + @property + def keys(self): + """ + The keys this Cycler knows about + """ + return set(self._keys) + + def change_key(self, old, new): + """ + Change a key in this cycler to a new name. + Modification is performed in-place. + + Does nothing if the old key is the same as the new key. + Raises a ValueError if the new key is already a key. + Raises a KeyError if the old key isn't a key. + + """ + if old == new: + return + if new in self._keys: + raise ValueError("Can't replace %s with %s, %s is already a key" % + (old, new, new)) + if old not in self._keys: + raise KeyError("Can't replace %s with %s, %s is not a key" % + (old, new, old)) + + self._keys.remove(old) + self._keys.add(new) + + if self._right is not None and old in self._right.keys: + self._right.change_key(old, new) + + # self._left should always be non-None + # if self._keys is non-empty. 
+ elif isinstance(self._left, Cycler): + self._left.change_key(old, new) + else: + # It should be completely safe at this point to + # assume that the old key can be found in each + # iteration. + self._left = [{new: entry[old]} for entry in self._left] + + def _compose(self): + """ + Compose the 'left' and 'right' components of this cycle + with the proper operation (zip or product as of now) + """ + for a, b in self._op(self._left, self._right): + out = dict() + out.update(a) + out.update(b) + yield out + + @classmethod + def _from_iter(cls, label, itr): + """ + Class method to create 'base' Cycler objects + that do not have a 'right' or 'op' and for which + the 'left' object is not another Cycler. + + Parameters + ---------- + label : str + The property key. + + itr : iterable + Finite length iterable of the property values. + + Returns + ------- + cycler : Cycler + New 'base' `Cycler` + """ + ret = cls(None) + ret._left = list({label: v} for v in itr) + ret._keys = set([label]) + return ret + + def __getitem__(self, key): + # TODO : maybe add numpy style fancy slicing + if isinstance(key, slice): + trans = self.by_key() + return reduce(add, (_cycler(k, v[key]) + for k, v in six.iteritems(trans))) + else: + raise ValueError("Can only use slices with Cycler.__getitem__") + + def __iter__(self): + if self._right is None: + return iter(dict(l) for l in self._left) + + return self._compose() + + def __add__(self, other): + """ + Pair-wise combine two equal length cycles (zip) + + Parameters + ---------- + other : Cycler + The second Cycler + """ + if len(self) != len(other): + raise ValueError("Can only add equal length cycles, " + "not {0} and {1}".format(len(self), len(other))) + return Cycler(self, other, zip) + + def __mul__(self, other): + """ + Outer product of two cycles (`itertools.product`) or integer + multiplication. 
+ + Parameters + ---------- + other : Cycler or int + The second Cycler or integer + """ + if isinstance(other, Cycler): + return Cycler(self, other, product) + elif isinstance(other, int): + trans = self.by_key() + return reduce(add, (_cycler(k, v*other) + for k, v in six.iteritems(trans))) + else: + return NotImplemented + + def __rmul__(self, other): + return self * other + + def __len__(self): + op_dict = {zip: min, product: mul} + if self._right is None: + return len(self._left) + l_len = len(self._left) + r_len = len(self._right) + return op_dict[self._op](l_len, r_len) + + def __iadd__(self, other): + """ + In-place pair-wise combine two equal length cycles (zip) + + Parameters + ---------- + other : Cycler + The second Cycler + """ + if not isinstance(other, Cycler): + raise TypeError("Cannot += with a non-Cycler object") + # True shallow copy of self is fine since this is in-place + old_self = copy.copy(self) + self._keys = _process_keys(old_self, other) + self._left = old_self + self._op = zip + self._right = Cycler(other._left, other._right, other._op) + return self + + def __imul__(self, other): + """ + In-place outer product of two cycles (`itertools.product`) + + Parameters + ---------- + other : Cycler + The second Cycler + """ + if not isinstance(other, Cycler): + raise TypeError("Cannot *= with a non-Cycler object") + # True shallow copy of self is fine since this is in-place + old_self = copy.copy(self) + self._keys = _process_keys(old_self, other) + self._left = old_self + self._op = product + self._right = Cycler(other._left, other._right, other._op) + return self + + def __eq__(self, other): + """ + Check equality + """ + if len(self) != len(other): + return False + if self.keys ^ other.keys: + return False + + return all(a == b for a, b in zip(self, other)) + + def __repr__(self): + op_map = {zip: '+', product: '*'} + if self._right is None: + lab = self.keys.pop() + itr = list(v[lab] for v in self) + return "cycler({lab!r}, 
{itr!r})".format(lab=lab, itr=itr) + else: + op = op_map.get(self._op, '?') + msg = "({left!r} {op} {right!r})" + return msg.format(left=self._left, op=op, right=self._right) + + def _repr_html_(self): + # an table showing the value of each key through a full cycle + output = "" + sorted_keys = sorted(self.keys, key=repr) + for key in sorted_keys: + output += "".format(key=key) + for d in iter(self): + output += "" + for k in sorted_keys: + output += "".format(val=d[k]) + output += "" + output += "
{key!r}
{val!r}
" + return output + + def by_key(self): + """Values by key + + This returns the transposed values of the cycler. Iterating + over a `Cycler` yields dicts with a single value for each key, + this method returns a `dict` of `list` which are the values + for the given key. + + The returned value can be used to create an equivalent `Cycler` + using only `+`. + + Returns + ------- + transpose : dict + dict of lists of the values for each key. + """ + + # TODO : sort out if this is a bottle neck, if there is a better way + # and if we care. + + keys = self.keys + # change this to dict comprehension when drop 2.6 + out = dict((k, list()) for k in keys) + + for d in self: + for k in keys: + out[k].append(d[k]) + return out + + # for back compatibility + _transpose = by_key + + def simplify(self): + """Simplify the Cycler + + Returned as a composition using only sums (no multiplications) + + Returns + ------- + simple : Cycler + An equivalent cycler using only summation""" + # TODO: sort out if it is worth the effort to make sure this is + # balanced. Currently it is is + # (((a + b) + c) + d) vs + # ((a + b) + (c + d)) + # I would believe that there is some performance implications + + trans = self.by_key() + return reduce(add, (_cycler(k, v) for k, v in six.iteritems(trans))) + + def concat(self, other): + """Concatenate this cycler and an other. + + The keys must match exactly. + + This returns a single Cycler which is equivalent to + `itertools.chain(self, other)` + + Examples + -------- + + >>> num = cycler('a', range(3)) + >>> let = cycler('a', 'abc') + >>> num.concat(let) + cycler('a', [0, 1, 2, 'a', 'b', 'c']) + + Parameters + ---------- + other : `Cycler` + The `Cycler` to concatenate to this one. + + Returns + ------- + ret : `Cycler` + The concatenated `Cycler` + """ + return concat(self, other) + + +def concat(left, right): + """Concatenate two cyclers. + + The keys must match exactly. 
+ + This returns a single Cycler which is equivalent to + `itertools.chain(left, right)` + + Examples + -------- + + >>> num = cycler('a', range(3)) + >>> let = cycler('a', 'abc') + >>> num.concat(let) + cycler('a', [0, 1, 2, 'a', 'b', 'c']) + + Parameters + ---------- + left, right : `Cycler` + The two `Cycler` instances to concatenate + + Returns + ------- + ret : `Cycler` + The concatenated `Cycler` + """ + if left.keys != right.keys: + msg = '\n\t'.join(["Keys do not match:", + "Intersection: {both!r}", + "Disjoint: {just_one!r}"]).format( + both=left.keys & right.keys, + just_one=left.keys ^ right.keys) + + raise ValueError(msg) + + _l = left.by_key() + _r = right.by_key() + return reduce(add, (_cycler(k, _l[k] + _r[k]) for k in left.keys)) + + +def cycler(*args, **kwargs): + """ + Create a new `Cycler` object from a single positional argument, + a pair of positional arguments, or the combination of keyword arguments. + + cycler(arg) + cycler(label1=itr1[, label2=iter2[, ...]]) + cycler(label, itr) + + Form 1 simply copies a given `Cycler` object. + + Form 2 composes a `Cycler` as an inner product of the + pairs of keyword arguments. In other words, all of the + iterables are cycled simultaneously, as if through zip(). + + Form 3 creates a `Cycler` from a label and an iterable. + This is useful for when the label cannot be a keyword argument + (e.g., an integer or a name that has a space in it). + + Parameters + ---------- + arg : Cycler + Copy constructor for Cycler (does a shallow copy of iterables). + + label : name + The property key. In the 2-arg form of the function, + the label can be any hashable object. In the keyword argument + form of the function, it must be a valid python identifier. + + itr : iterable + Finite length iterable of the property values. + Can be a single-property `Cycler` that would + be like a key change, but as a shallow copy. 
+ + Returns + ------- + cycler : Cycler + New `Cycler` for the given property + + """ + if args and kwargs: + raise TypeError("cyl() can only accept positional OR keyword " + "arguments -- not both.") + + if len(args) == 1: + if not isinstance(args[0], Cycler): + raise TypeError("If only one positional argument given, it must " + " be a Cycler instance.") + return Cycler(args[0]) + elif len(args) == 2: + return _cycler(*args) + elif len(args) > 2: + raise TypeError("Only a single Cycler can be accepted as the lone " + "positional argument. Use keyword arguments instead.") + + if kwargs: + return reduce(add, (_cycler(k, v) for k, v in six.iteritems(kwargs))) + + raise TypeError("Must have at least a positional OR keyword arguments") + + +def _cycler(label, itr): + """ + Create a new `Cycler` object from a property name and + iterable of values. + + Parameters + ---------- + label : hashable + The property key. + + itr : iterable + Finite length iterable of the property values. + + Returns + ------- + cycler : Cycler + New `Cycler` for the given property + """ + if isinstance(itr, Cycler): + keys = itr.keys + if len(keys) != 1: + msg = "Can not create Cycler from a multi-property Cycler" + raise ValueError(msg) + + lab = keys.pop() + # Doesn't need to be a new list because + # _from_iter() will be creating that new list anyway. 
+ itr = (v[lab] for v in itr) + + return Cycler._from_iter(label, itr) diff --git a/venv/Lib/site-packages/dateutil/__init__.py b/venv/Lib/site-packages/dateutil/__init__.py new file mode 100644 index 000000000..0defb82e2 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +try: + from ._version import version as __version__ +except ImportError: + __version__ = 'unknown' + +__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', + 'utils', 'zoneinfo'] diff --git a/venv/Lib/site-packages/dateutil/__pycache__/__init__.cpython-36.pyc b/venv/Lib/site-packages/dateutil/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 000000000..378505811 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/__pycache__/_common.cpython-36.pyc b/venv/Lib/site-packages/dateutil/__pycache__/_common.cpython-36.pyc new file mode 100644 index 000000000..1aea16101 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/__pycache__/_common.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/__pycache__/_version.cpython-36.pyc b/venv/Lib/site-packages/dateutil/__pycache__/_version.cpython-36.pyc new file mode 100644 index 000000000..8952d2659 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/__pycache__/_version.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/__pycache__/easter.cpython-36.pyc b/venv/Lib/site-packages/dateutil/__pycache__/easter.cpython-36.pyc new file mode 100644 index 000000000..3b9ece8ed Binary files /dev/null and b/venv/Lib/site-packages/dateutil/__pycache__/easter.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/__pycache__/relativedelta.cpython-36.pyc b/venv/Lib/site-packages/dateutil/__pycache__/relativedelta.cpython-36.pyc new file mode 100644 index 000000000..792ae0b60 Binary files /dev/null and 
b/venv/Lib/site-packages/dateutil/__pycache__/relativedelta.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/__pycache__/rrule.cpython-36.pyc b/venv/Lib/site-packages/dateutil/__pycache__/rrule.cpython-36.pyc new file mode 100644 index 000000000..1c9cb3d9a Binary files /dev/null and b/venv/Lib/site-packages/dateutil/__pycache__/rrule.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/__pycache__/tzwin.cpython-36.pyc b/venv/Lib/site-packages/dateutil/__pycache__/tzwin.cpython-36.pyc new file mode 100644 index 000000000..eff0d1041 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/__pycache__/tzwin.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/__pycache__/utils.cpython-36.pyc b/venv/Lib/site-packages/dateutil/__pycache__/utils.cpython-36.pyc new file mode 100644 index 000000000..a08e14dee Binary files /dev/null and b/venv/Lib/site-packages/dateutil/__pycache__/utils.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/_common.py b/venv/Lib/site-packages/dateutil/_common.py new file mode 100644 index 000000000..4eb2659bd --- /dev/null +++ b/venv/Lib/site-packages/dateutil/_common.py @@ -0,0 +1,43 @@ +""" +Common code used in multiple modules. 
+""" + + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __hash__(self): + return hash(( + self.weekday, + self.n, + )) + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +# vim:ts=4:sw=4:et diff --git a/venv/Lib/site-packages/dateutil/_version.py b/venv/Lib/site-packages/dateutil/_version.py new file mode 100644 index 000000000..eac120969 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/_version.py @@ -0,0 +1,4 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '2.8.1' diff --git a/venv/Lib/site-packages/dateutil/easter.py b/venv/Lib/site-packages/dateutil/easter.py new file mode 100644 index 000000000..53b7c7893 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/easter.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic easter computing method for any given year, using +Western, Orthodox or Julian algorithms. +""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. 
+ + This algorithm implements three different easter + calculation methods: + + 1 - Original calculation in Julian calendar, valid in + dates after 326 AD + 2 - Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3 - Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + * ``EASTER_JULIAN = 1`` + * ``EASTER_ORTHODOX = 2`` + * ``EASTER_WESTERN = 3`` + + The default method is method 3. + + More about the algorithm may be found at: + + `GM Arts: Easter Algorithms `_ + + and + + `The Calendar FAQ: Easter `_ + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/venv/Lib/site-packages/dateutil/parser/__init__.py b/venv/Lib/site-packages/dateutil/parser/__init__.py new file mode 100644 index 000000000..d174b0e4d --- /dev/null +++ 
b/venv/Lib/site-packages/dateutil/parser/__init__.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +from ._parser import parse, parser, parserinfo, ParserError +from ._parser import DEFAULTPARSER, DEFAULTTZPARSER +from ._parser import UnknownTimezoneWarning + +from ._parser import __doc__ + +from .isoparser import isoparser, isoparse + +__all__ = ['parse', 'parser', 'parserinfo', + 'isoparse', 'isoparser', + 'ParserError', + 'UnknownTimezoneWarning'] + + +### +# Deprecate portions of the private interface so that downstream code that +# is improperly relying on it is given *some* notice. + + +def __deprecated_private_func(f): + from functools import wraps + import warnings + + msg = ('{name} is a private function and may break without warning, ' + 'it will be moved and or renamed in future versions.') + msg = msg.format(name=f.__name__) + + @wraps(f) + def deprecated_func(*args, **kwargs): + warnings.warn(msg, DeprecationWarning) + return f(*args, **kwargs) + + return deprecated_func + +def __deprecate_private_class(c): + import warnings + + msg = ('{name} is a private class and may break without warning, ' + 'it will be moved and or renamed in future versions.') + msg = msg.format(name=c.__name__) + + class private_class(c): + __doc__ = c.__doc__ + + def __init__(self, *args, **kwargs): + warnings.warn(msg, DeprecationWarning) + super(private_class, self).__init__(*args, **kwargs) + + private_class.__name__ = c.__name__ + + return private_class + + +from ._parser import _timelex, _resultbase +from ._parser import _tzparser, _parsetz + +_timelex = __deprecate_private_class(_timelex) +_tzparser = __deprecate_private_class(_tzparser) +_resultbase = __deprecate_private_class(_resultbase) +_parsetz = __deprecated_private_func(_parsetz) diff --git a/venv/Lib/site-packages/dateutil/parser/__pycache__/__init__.cpython-36.pyc b/venv/Lib/site-packages/dateutil/parser/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 000000000..fbb5e60b2 Binary files /dev/null and 
b/venv/Lib/site-packages/dateutil/parser/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/parser/__pycache__/_parser.cpython-36.pyc b/venv/Lib/site-packages/dateutil/parser/__pycache__/_parser.cpython-36.pyc new file mode 100644 index 000000000..219634605 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/parser/__pycache__/_parser.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/parser/__pycache__/isoparser.cpython-36.pyc b/venv/Lib/site-packages/dateutil/parser/__pycache__/isoparser.cpython-36.pyc new file mode 100644 index 000000000..c28f45b68 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/parser/__pycache__/isoparser.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/parser/_parser.py b/venv/Lib/site-packages/dateutil/parser/_parser.py new file mode 100644 index 000000000..458aa6a32 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/parser/_parser.py @@ -0,0 +1,1609 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic date/time string parser which is able to parse +most known formats to represent a date and/or time. + +This module attempts to be forgiving with regards to unlikely input formats, +returning a datetime object even for dates which are ambiguous. If an element +of a date/time stamp is omitted, the following rules are applied: + +- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour + on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is + specified. +- If a time zone is omitted, a timezone-naive datetime is returned. + +If any other elements are missing, they are taken from the +:class:`datetime.datetime` object passed to the parameter ``default``. If this +results in a day number exceeding the valid number of days per month, the +value falls back to the end of the month. 
+ +Additional resources about date/time string formats can be found below: + +- `A summary of the international standard date and time notation + `_ +- `W3C Date and Time Formats `_ +- `Time Formats (Planetary Rings Node) `_ +- `CPAN ParseDate module + `_ +- `Java SimpleDateFormat Class + `_ +""" +from __future__ import unicode_literals + +import datetime +import re +import string +import time +import warnings + +from calendar import monthrange +from io import StringIO + +import six +from six import integer_types, text_type + +from decimal import Decimal + +from warnings import warn + +from .. import relativedelta +from .. import tz + +__all__ = ["parse", "parserinfo", "ParserError"] + + +# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth +# making public and/or figuring out if there is something we can +# take off their plate. +class _timelex(object): + # Fractional seconds are sometimes split by a comma + _split_decimal = re.compile("([.,])") + + def __init__(self, instream): + if six.PY2: + # In Python 2, we can't duck type properly because unicode has + # a 'decode' function, and we'd be double-decoding + if isinstance(instream, (bytes, bytearray)): + instream = instream.decode() + else: + if getattr(instream, 'decode', None) is not None: + instream = instream.decode() + + if isinstance(instream, text_type): + instream = StringIO(instream) + elif getattr(instream, 'read', None) is None: + raise TypeError('Parser must be a string or character stream, not ' + '{itype}'.format(itype=instream.__class__.__name__)) + + self.instream = instream + self.charstack = [] + self.tokenstack = [] + self.eof = False + + def get_token(self): + """ + This function breaks the time string into lexical units (tokens), which + can be parsed by the parser. Lexical units are demarcated by changes in + the character set, so any continuous string of letters is considered + one unit, any continuous string of numbers is considered one unit. 
+ + The main complication arises from the fact that dots ('.') can be used + both as separators (e.g. "Sep.20.2009") or decimal points (e.g. + "4:30:21.447"). As such, it is necessary to read the full context of + any dot-separated strings before breaking it into tokens; as such, this + function maintains a "token stack", for when the ambiguous context + demands that multiple tokens be parsed at once. + """ + if self.tokenstack: + return self.tokenstack.pop(0) + + seenletters = False + token = None + state = None + + while not self.eof: + # We only realize that we've reached the end of a token when we + # find a character that's not part of the current token - since + # that character may be part of the next token, it's stored in the + # charstack. + if self.charstack: + nextchar = self.charstack.pop(0) + else: + nextchar = self.instream.read(1) + while nextchar == '\x00': + nextchar = self.instream.read(1) + + if not nextchar: + self.eof = True + break + elif not state: + # First character of the token - determines if we're starting + # to parse a word, a number or something else. + token = nextchar + if self.isword(nextchar): + state = 'a' + elif self.isnum(nextchar): + state = '0' + elif self.isspace(nextchar): + token = ' ' + break # emit token + else: + break # emit token + elif state == 'a': + # If we've already started reading a word, we keep reading + # letters until we find something that's not part of a word. + seenletters = True + if self.isword(nextchar): + token += nextchar + elif nextchar == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0': + # If we've already started reading a number, we keep reading + # numbers until we find something that doesn't fit. + if self.isnum(nextchar): + token += nextchar + elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): + token += nextchar + state = '0.' 
+ else: + self.charstack.append(nextchar) + break # emit token + elif state == 'a.': + # If we've seen some letters and a dot separator, continue + # parsing, and the tokens will be broken up later. + seenletters = True + if nextchar == '.' or self.isword(nextchar): + token += nextchar + elif self.isnum(nextchar) and token[-1] == '.': + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0.': + # If we've seen at least one dot separator, keep going, we'll + # break up the tokens later. + if nextchar == '.' or self.isnum(nextchar): + token += nextchar + elif self.isword(nextchar) and token[-1] == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + + if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or + token[-1] in '.,')): + l = self._split_decimal.split(token) + token = l[0] + for tok in l[1:]: + if tok: + self.tokenstack.append(tok) + + if state == '0.' and token.count('.') == 0: + token = token.replace(',', '.') + + return token + + def __iter__(self): + return self + + def __next__(self): + token = self.get_token() + if token is None: + raise StopIteration + + return token + + def next(self): + return self.__next__() # Python 2.x support + + @classmethod + def split(cls, s): + return list(cls(s)) + + @classmethod + def isword(cls, nextchar): + """ Whether or not the next character is part of a word """ + return nextchar.isalpha() + + @classmethod + def isnum(cls, nextchar): + """ Whether the next character is part of a number """ + return nextchar.isdigit() + + @classmethod + def isspace(cls, nextchar): + """ Whether the next character is whitespace """ + return nextchar.isspace() + + +class _resultbase(object): + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def _repr(self, classname): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + 
l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (classname, ", ".join(l)) + + def __len__(self): + return (sum(getattr(self, attr) is not None + for attr in self.__slots__)) + + def __repr__(self): + return self._repr(self.__class__.__name__) + + +class parserinfo(object): + """ + Class which handles what inputs are accepted. Subclass this to customize + the language and acceptable values for each parameter. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. Default is ``False``. + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + Default is ``False``. + """ + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), # TODO: "Tues" + ("Wed", "Wednesday"), + ("Thu", "Thursday"), # TODO: "Thurs" + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", "February"), # TODO: "Febr" + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "Sept", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z", "z"] + PERTAIN = ["of"] + TZOFFSET = {} + # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate", + # "Anno Domini", "Year of Our Lord"] + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = 
self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year // 100 * 100 + + def _convert(self, lst): + dct = {} + for i, v in enumerate(lst): + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + try: + return self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + try: + return self._months[name.lower()] + 1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + return None + + def ampm(self, name): + try: + return self._ampm[name.lower()] + except KeyError: + return None + + def pertain(self, name): + return name.lower() in self._pertain + + def utczone(self, name): + return name.lower() in self._utczone + + def tzoffset(self, name): + if name in self._utczone: + return 0 + + return self.TZOFFSET.get(name) + + def convertyear(self, year, century_specified=False): + """ + Converts two-digit years to year within [-50, 49] + range of self._year (current local time) + """ + + # Function contract is that the year is always positive + assert year >= 0 + + if year < 100 and not century_specified: + # assume current century to start + year += self._century + + if year >= self._year + 50: # if too far in future + year -= 100 + elif year < self._year - 50: # if too far in past + year += 100 + + return year + + def validate(self, res): + # move to info + if res.year is not None: + res.year = self.convertyear(res.year, res.century_specified) + + if ((res.tzoffset == 0 and not res.tzname) or + (res.tzname == 'Z' or 
res.tzname == 'z')): + res.tzname = "UTC" + res.tzoffset = 0 + elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): + res.tzoffset = 0 + return True + + +class _ymd(list): + def __init__(self, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + self.century_specified = False + self.dstridx = None + self.mstridx = None + self.ystridx = None + + @property + def has_year(self): + return self.ystridx is not None + + @property + def has_month(self): + return self.mstridx is not None + + @property + def has_day(self): + return self.dstridx is not None + + def could_be_day(self, value): + if self.has_day: + return False + elif not self.has_month: + return 1 <= value <= 31 + elif not self.has_year: + # Be permissive, assume leap year + month = self[self.mstridx] + return 1 <= value <= monthrange(2000, month)[1] + else: + month = self[self.mstridx] + year = self[self.ystridx] + return 1 <= value <= monthrange(year, month)[1] + + def append(self, val, label=None): + if hasattr(val, '__len__'): + if val.isdigit() and len(val) > 2: + self.century_specified = True + if label not in [None, 'Y']: # pragma: no cover + raise ValueError(label) + label = 'Y' + elif val > 100: + self.century_specified = True + if label not in [None, 'Y']: # pragma: no cover + raise ValueError(label) + label = 'Y' + + super(self.__class__, self).append(int(val)) + + if label == 'M': + if self.has_month: + raise ValueError('Month is already set') + self.mstridx = len(self) - 1 + elif label == 'D': + if self.has_day: + raise ValueError('Day is already set') + self.dstridx = len(self) - 1 + elif label == 'Y': + if self.has_year: + raise ValueError('Year is already set') + self.ystridx = len(self) - 1 + + def _resolve_from_stridxs(self, strids): + """ + Try to resolve the identities of year/month/day elements using + ystridx, mstridx, and dstridx, if enough of these are specified. 
+ """ + if len(self) == 3 and len(strids) == 2: + # we can back out the remaining stridx value + missing = [x for x in range(3) if x not in strids.values()] + key = [x for x in ['y', 'm', 'd'] if x not in strids] + assert len(missing) == len(key) == 1 + key = key[0] + val = missing[0] + strids[key] = val + + assert len(self) == len(strids) # otherwise this should not be called + out = {key: self[strids[key]] for key in strids} + return (out.get('y'), out.get('m'), out.get('d')) + + def resolve_ymd(self, yearfirst, dayfirst): + len_ymd = len(self) + year, month, day = (None, None, None) + + strids = (('y', self.ystridx), + ('m', self.mstridx), + ('d', self.dstridx)) + + strids = {key: val for key, val in strids if val is not None} + if (len(self) == len(strids) > 0 or + (len(self) == 3 and len(strids) == 2)): + return self._resolve_from_stridxs(strids) + + mstridx = self.mstridx + + if len_ymd > 3: + raise ValueError("More than three YMD values") + elif len_ymd == 1 or (mstridx is not None and len_ymd == 2): + # One member, or two members with a month string + if mstridx is not None: + month = self[mstridx] + # since mstridx is 0 or 1, self[mstridx-1] always + # looks up the other element + other = self[mstridx - 1] + else: + other = self[0] + + if len_ymd > 1 or mstridx is None: + if other > 31: + year = other + else: + day = other + + elif len_ymd == 2: + # Two members with numbers + if self[0] > 31: + # 99-01 + year, month = self + elif self[1] > 31: + # 01-99 + month, year = self + elif dayfirst and self[1] <= 12: + # 13-01 + day, month = self + else: + # 01-13 + month, day = self + + elif len_ymd == 3: + # Three members + if mstridx == 0: + if self[1] > 31: + # Apr-2003-25 + month, year, day = self + else: + month, day, year = self + elif mstridx == 1: + if self[0] > 31 or (yearfirst and self[2] <= 31): + # 99-Jan-01 + year, month, day = self + else: + # 01-Jan-01 + # Give precedence to day-first, since + # two-digit years is usually hand-written. 
+ day, month, year = self + + elif mstridx == 2: + # WTF!? + if self[1] > 31: + # 01-99-Jan + day, year, month = self + else: + # 99-01-Jan + year, day, month = self + + else: + if (self[0] > 31 or + self.ystridx == 0 or + (yearfirst and self[1] <= 12 and self[2] <= 31)): + # 99-01-01 + if dayfirst and self[2] <= 12: + year, day, month = self + else: + year, month, day = self + elif self[0] > 12 or (dayfirst and self[1] <= 12): + # 13-01-01 + day, month, year = self + else: + # 01-13-01 + month, day, year = self + + return year, month, day + + +class parser(object): + def __init__(self, info=None): + self.info = info or parserinfo() + + def parse(self, timestr, default=None, + ignoretz=False, tzinfos=None, **kwargs): + """ + Parse the date/time string into a :class:`datetime.datetime` object. + + :param timestr: + Any date/time string using the supported formats. + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a + naive :class:`datetime.datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. 
doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param \\*\\*kwargs: + Keyword arguments as passed to ``_parse()``. + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ParserError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises TypeError: + Raised for non-string or character stream input. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
+ """ + + if default is None: + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + + res, skipped_tokens = self._parse(timestr, **kwargs) + + if res is None: + raise ParserError("Unknown string format: %s", timestr) + + if len(res) == 0: + raise ParserError("String does not contain a date: %s", timestr) + + try: + ret = self._build_naive(res, default) + except ValueError as e: + six.raise_from(ParserError(e.args[0] + ": %s", timestr), e) + + if not ignoretz: + ret = self._build_tzaware(ret, res, tzinfos) + + if kwargs.get('fuzzy_with_tokens', False): + return ret, skipped_tokens + else: + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", "tzoffset", "ampm","any_unused_tokens"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, + fuzzy_with_tokens=False): + """ + Private method which performs the heavy lifting of parsing, called from + ``parse()``, which passes on its ``kwargs`` to this function. + + :param timestr: + The string to parse. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. If set to ``None``, this value is retrieved from the + current :class:`parserinfo` object (which itself defaults to + ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + If this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". 
+ + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + """ + if fuzzy_with_tokens: + fuzzy = True + + info = self.info + + if dayfirst is None: + dayfirst = info.dayfirst + + if yearfirst is None: + yearfirst = info.yearfirst + + res = self._result() + l = _timelex.split(timestr) # Splits the timestr into tokens + + skipped_idxs = [] + + # year/month/day list + ymd = _ymd() + + len_l = len(l) + i = 0 + try: + while i < len_l: + + # Check if it's a number + value_repr = l[i] + try: + value = float(value_repr) + except ValueError: + value = None + + if value is not None: + # Numeric token + i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy) + + # Check weekday + elif info.weekday(l[i]) is not None: + value = info.weekday(l[i]) + res.weekday = value + + # Check month name + elif info.month(l[i]) is not None: + value = info.month(l[i]) + ymd.append(value, 'M') + + if i + 1 < len_l: + if l[i + 1] in ('-', '/'): + # Jan-01[-99] + sep = l[i + 1] + ymd.append(l[i + 2]) + + if i + 3 < len_l and l[i + 3] == sep: + # Jan-01-99 + ymd.append(l[i + 4]) + i += 2 + + i += 2 + + elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and + info.pertain(l[i + 2])): + # Jan of 01 + # In this case, 01 is clearly year + if l[i + 4].isdigit(): + # Convert it here to become unambiguous + value = int(l[i + 4]) + year = str(info.convertyear(value)) + ymd.append(year, 'Y') + else: + # Wrong guess + pass + # TODO: not hit in tests + i += 4 + + # Check am/pm + elif info.ampm(l[i]) is not None: + value = info.ampm(l[i]) + val_is_ampm = 
self._ampm_valid(res.hour, res.ampm, fuzzy) + + if val_is_ampm: + res.hour = self._adjust_ampm(res.hour, value) + res.ampm = value + + elif fuzzy: + skipped_idxs.append(i) + + # Check for a timezone name + elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i + 1 < len_l and l[i + 1] in ('+', '-'): + l[i + 1] = ('+', '-')[l[i + 1] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. + res.tzname = None + + # Check for a numbered timezone + elif res.hour is not None and l[i] in ('+', '-'): + signal = (-1, 1)[l[i] == '+'] + len_li = len(l[i + 1]) + + # TODO: check that l[i + 1] is integer? + if len_li == 4: + # -0300 + hour_offset = int(l[i + 1][:2]) + min_offset = int(l[i + 1][2:]) + elif i + 2 < len_l and l[i + 2] == ':': + # -03:00 + hour_offset = int(l[i + 1]) + min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like? 
+ i += 2 + elif len_li <= 2: + # -[0]3 + hour_offset = int(l[i + 1][:2]) + min_offset = 0 + else: + raise ValueError(timestr) + + res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60) + + # Look for a timezone name between parenthesis + if (i + 5 < len_l and + info.jump(l[i + 2]) and l[i + 3] == '(' and + l[i + 5] == ')' and + 3 <= len(l[i + 4]) and + self._could_be_tzname(res.hour, res.tzname, + None, l[i + 4])): + # -0300 (BRST) + res.tzname = l[i + 4] + i += 4 + + i += 1 + + # Check jumps + elif not (info.jump(l[i]) or fuzzy): + raise ValueError(timestr) + + else: + skipped_idxs.append(i) + i += 1 + + # Process year/month/day + year, month, day = ymd.resolve_ymd(yearfirst, dayfirst) + + res.century_specified = ymd.century_specified + res.year = year + res.month = month + res.day = day + + except (IndexError, ValueError): + return None, None + + if not info.validate(res): + return None, None + + if fuzzy_with_tokens: + skipped_tokens = self._recombine_skipped(l, skipped_idxs) + return res, tuple(skipped_tokens) + else: + return res, None + + def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy): + # Token is a number + value_repr = tokens[idx] + try: + value = self._to_decimal(value_repr) + except Exception as e: + six.raise_from(ValueError('Unknown numeric token'), e) + + len_li = len(value_repr) + + len_l = len(tokens) + + if (len(ymd) == 3 and len_li in (2, 4) and + res.hour is None and + (idx + 1 >= len_l or + (tokens[idx + 1] != ':' and + info.hms(tokens[idx + 1]) is None))): + # 19990101T23[59] + s = tokens[idx] + res.hour = int(s[:2]) + + if len_li == 4: + res.minute = int(s[2:]) + + elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6): + # YYMMDD or HHMMSS[.ss] + s = tokens[idx] + + if not ymd and '.' not in tokens[idx]: + ymd.append(s[:2]) + ymd.append(s[2:4]) + ymd.append(s[4:]) + else: + # 19990101T235959[.59] + + # TODO: Check if res attributes already set. 
+ res.hour = int(s[:2]) + res.minute = int(s[2:4]) + res.second, res.microsecond = self._parsems(s[4:]) + + elif len_li in (8, 12, 14): + # YYYYMMDD + s = tokens[idx] + ymd.append(s[:4], 'Y') + ymd.append(s[4:6]) + ymd.append(s[6:8]) + + if len_li > 8: + res.hour = int(s[8:10]) + res.minute = int(s[10:12]) + + if len_li > 12: + res.second = int(s[12:]) + + elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None: + # HH[ ]h or MM[ ]m or SS[.ss][ ]s + hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True) + (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx) + if hms is not None: + # TODO: checking that hour/minute/second are not + # already set? + self._assign_hms(res, value_repr, hms) + + elif idx + 2 < len_l and tokens[idx + 1] == ':': + # HH:MM[:SS[.ss]] + res.hour = int(value) + value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this? + (res.minute, res.second) = self._parse_min_sec(value) + + if idx + 4 < len_l and tokens[idx + 3] == ':': + res.second, res.microsecond = self._parsems(tokens[idx + 4]) + + idx += 2 + + idx += 2 + + elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'): + sep = tokens[idx + 1] + ymd.append(value_repr) + + if idx + 2 < len_l and not info.jump(tokens[idx + 2]): + if tokens[idx + 2].isdigit(): + # 01-01[-01] + ymd.append(tokens[idx + 2]) + else: + # 01-Jan[-01] + value = info.month(tokens[idx + 2]) + + if value is not None: + ymd.append(value, 'M') + else: + raise ValueError() + + if idx + 3 < len_l and tokens[idx + 3] == sep: + # We have three members + value = info.month(tokens[idx + 4]) + + if value is not None: + ymd.append(value, 'M') + else: + ymd.append(tokens[idx + 4]) + idx += 2 + + idx += 1 + idx += 1 + + elif idx + 1 >= len_l or info.jump(tokens[idx + 1]): + if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None: + # 12 am + hour = int(value) + res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2])) + idx += 1 + else: + # Year, month or day + 
ymd.append(value) + idx += 1 + + elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24): + # 12am + hour = int(value) + res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1])) + idx += 1 + + elif ymd.could_be_day(value): + ymd.append(value) + + elif not fuzzy: + raise ValueError() + + return idx + + def _find_hms_idx(self, idx, tokens, info, allow_jump): + len_l = len(tokens) + + if idx+1 < len_l and info.hms(tokens[idx+1]) is not None: + # There is an "h", "m", or "s" label following this token. We take + # assign the upcoming label to the current token. + # e.g. the "12" in 12h" + hms_idx = idx + 1 + + elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and + info.hms(tokens[idx+2]) is not None): + # There is a space and then an "h", "m", or "s" label. + # e.g. the "12" in "12 h" + hms_idx = idx + 2 + + elif idx > 0 and info.hms(tokens[idx-1]) is not None: + # There is a "h", "m", or "s" preceding this token. Since neither + # of the previous cases was hit, there is no label following this + # token, so we use the previous label. + # e.g. the "04" in "12h04" + hms_idx = idx-1 + + elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and + info.hms(tokens[idx-2]) is not None): + # If we are looking at the final token, we allow for a + # backward-looking check to skip over a space. + # TODO: Are we sure this is the right condition here? 
+ hms_idx = idx - 2 + + else: + hms_idx = None + + return hms_idx + + def _assign_hms(self, res, value_repr, hms): + # See GH issue #427, fixing float rounding + value = self._to_decimal(value_repr) + + if hms == 0: + # Hour + res.hour = int(value) + if value % 1: + res.minute = int(60*(value % 1)) + + elif hms == 1: + (res.minute, res.second) = self._parse_min_sec(value) + + elif hms == 2: + (res.second, res.microsecond) = self._parsems(value_repr) + + def _could_be_tzname(self, hour, tzname, tzoffset, token): + return (hour is not None and + tzname is None and + tzoffset is None and + len(token) <= 5 and + (all(x in string.ascii_uppercase for x in token) + or token in self.info.UTCZONE)) + + def _ampm_valid(self, hour, ampm, fuzzy): + """ + For fuzzy parsing, 'a' or 'am' (both valid English words) + may erroneously trigger the AM/PM flag. Deal with that + here. + """ + val_is_ampm = True + + # If there's already an AM/PM flag, this one isn't one. + if fuzzy and ampm is not None: + val_is_ampm = False + + # If AM/PM is found and hour is not, raise a ValueError + if hour is None: + if fuzzy: + val_is_ampm = False + else: + raise ValueError('No hour specified with AM or PM flag.') + elif not 0 <= hour <= 12: + # If AM/PM is found, it's a 12 hour clock, so raise + # an error for invalid range + if fuzzy: + val_is_ampm = False + else: + raise ValueError('Invalid hour specified for 12-hour clock.') + + return val_is_ampm + + def _adjust_ampm(self, hour, ampm): + if hour < 12 and ampm == 1: + hour += 12 + elif hour == 12 and ampm == 0: + hour = 0 + return hour + + def _parse_min_sec(self, value): + # TODO: Every usage of this function sets res.second to the return + # value. Are there any cases where second will be returned as None and + # we *don't* want to set res.second = None? 
+ minute = int(value) + second = None + + sec_remainder = value % 1 + if sec_remainder: + second = int(60 * sec_remainder) + return (minute, second) + + def _parse_hms(self, idx, tokens, info, hms_idx): + # TODO: Is this going to admit a lot of false-positives for when we + # just happen to have digits and "h", "m" or "s" characters in non-date + # text? I guess hex hashes won't have that problem, but there's plenty + # of random junk out there. + if hms_idx is None: + hms = None + new_idx = idx + elif hms_idx > idx: + hms = info.hms(tokens[hms_idx]) + new_idx = hms_idx + else: + # Looking backwards, increment one. + hms = info.hms(tokens[hms_idx]) + 1 + new_idx = idx + + return (new_idx, hms) + + # ------------------------------------------------------------------ + # Handling for individual tokens. These are kept as methods instead + # of functions for the sake of customizability via subclassing. + + def _parsems(self, value): + """Parse a I[.F] seconds value into (seconds, microseconds).""" + if "." not in value: + return int(value), 0 + else: + i, f = value.split(".") + return int(i), int(f.ljust(6, "0")[:6]) + + def _to_decimal(self, val): + try: + decimal_value = Decimal(val) + # See GH 662, edge case, infinite value should not be converted + # via `_to_decimal` + if not decimal_value.is_finite(): + raise ValueError("Converted decimal value is infinite or NaN") + except Exception as e: + msg = "Could not convert %s to decimal" % val + six.raise_from(ValueError(msg), e) + else: + return decimal_value + + # ------------------------------------------------------------------ + # Post-Parsing construction of datetime output. These are kept as + # methods instead of functions for the sake of customizability via + # subclassing. 
+ + def _build_tzinfo(self, tzinfos, tzname, tzoffset): + if callable(tzinfos): + tzdata = tzinfos(tzname, tzoffset) + else: + tzdata = tzinfos.get(tzname) + # handle case where tzinfo is paased an options that returns None + # eg tzinfos = {'BRST' : None} + if isinstance(tzdata, datetime.tzinfo) or tzdata is None: + tzinfo = tzdata + elif isinstance(tzdata, text_type): + tzinfo = tz.tzstr(tzdata) + elif isinstance(tzdata, integer_types): + tzinfo = tz.tzoffset(tzname, tzdata) + else: + raise TypeError("Offset must be tzinfo subclass, tz string, " + "or int offset.") + return tzinfo + + def _build_tzaware(self, naive, res, tzinfos): + if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)): + tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset) + aware = naive.replace(tzinfo=tzinfo) + aware = self._assign_tzname(aware, res.tzname) + + elif res.tzname and res.tzname in time.tzname: + aware = naive.replace(tzinfo=tz.tzlocal()) + + # Handle ambiguous local datetime + aware = self._assign_tzname(aware, res.tzname) + + # This is mostly relevant for winter GMT zones parsed in the UK + if (aware.tzname() != res.tzname and + res.tzname in self.info.UTCZONE): + aware = aware.replace(tzinfo=tz.UTC) + + elif res.tzoffset == 0: + aware = naive.replace(tzinfo=tz.UTC) + + elif res.tzoffset: + aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + + elif not res.tzname and not res.tzoffset: + # i.e. no timezone information was found. + aware = naive + + elif res.tzname: + # tz-like string was parsed but we don't know what to do + # with it + warnings.warn("tzname {tzname} identified but not understood. " + "Pass `tzinfos` argument in order to correctly " + "return a timezone-aware datetime. 
In a future " + "version, this will raise an " + "exception.".format(tzname=res.tzname), + category=UnknownTimezoneWarning) + aware = naive + + return aware + + def _build_naive(self, res, default): + repl = {} + for attr in ("year", "month", "day", "hour", + "minute", "second", "microsecond"): + value = getattr(res, attr) + if value is not None: + repl[attr] = value + + if 'day' not in repl: + # If the default day exceeds the last day of the month, fall back + # to the end of the month. + cyear = default.year if res.year is None else res.year + cmonth = default.month if res.month is None else res.month + cday = default.day if res.day is None else res.day + + if cday > monthrange(cyear, cmonth)[1]: + repl['day'] = monthrange(cyear, cmonth)[1] + + naive = default.replace(**repl) + + if res.weekday is not None and not res.day: + naive = naive + relativedelta.relativedelta(weekday=res.weekday) + + return naive + + def _assign_tzname(self, dt, tzname): + if dt.tzname() != tzname: + new_dt = tz.enfold(dt, fold=1) + if new_dt.tzname() == tzname: + return new_dt + + return dt + + def _recombine_skipped(self, tokens, skipped_idxs): + """ + >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"] + >>> skipped_idxs = [0, 1, 2, 5] + >>> _recombine_skipped(tokens, skipped_idxs) + ["foo bar", "baz"] + """ + skipped_tokens = [] + for i, idx in enumerate(sorted(skipped_idxs)): + if i > 0 and idx - 1 == skipped_idxs[i - 1]: + skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx] + else: + skipped_tokens.append(tokens[idx]) + + return skipped_tokens + + +DEFAULTPARSER = parser() + + +def parse(timestr, parserinfo=None, **kwargs): + """ + + Parse a string in one of the supported formats, using the + ``parserinfo`` parameters. + + :param timestr: + A string containing a date/time stamp. + + :param parserinfo: + A :class:`parserinfo` object containing parameters for the parser. + If ``None``, the default arguments to the :class:`parserinfo` + constructor are used. 
+ + The ``**kwargs`` parameter takes the following keyword arguments: + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM and + YMD. If set to ``None``, this value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. 
If ``True``, the first number is taken to + be the year, otherwise the last number is taken to be the year. If + this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
+ """ + if parserinfo: + return parser(parserinfo).parse(timestr, **kwargs) + else: + return DEFAULTPARSER.parse(timestr, **kwargs) + + +class _tzparser(object): + + class _result(_resultbase): + + __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", + "start", "end"] + + class _attr(_resultbase): + __slots__ = ["month", "week", "weekday", + "yday", "jyday", "day", "time"] + + def __repr__(self): + return self._repr("") + + def __init__(self): + _resultbase.__init__(self) + self.start = self._attr() + self.end = self._attr() + + def parse(self, tzstr): + res = self._result() + l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x] + used_idxs = list() + try: + + len_l = len(l) + + i = 0 + while i < len_l: + # BRST+3[BRDT[+2]] + j = i + while j < len_l and not [x for x in l[j] + if x in "0123456789:,-+"]: + j += 1 + if j != i: + if not res.stdabbr: + offattr = "stdoffset" + res.stdabbr = "".join(l[i:j]) + else: + offattr = "dstoffset" + res.dstabbr = "".join(l[i:j]) + + for ii in range(j): + used_idxs.append(ii) + i = j + if (i < len_l and (l[i] in ('+', '-') or l[i][0] in + "0123456789")): + if l[i] in ('+', '-'): + # Yes, that's right. See the TZ variable + # documentation. 
+ signal = (1, -1)[l[i] == '+'] + used_idxs.append(i) + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) * signal) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i]) * 3600 + + int(l[i + 2]) * 60) * signal) + used_idxs.append(i) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2]) * 3600 * signal) + else: + return None + used_idxs.append(i) + i += 1 + if res.dstabbr: + break + else: + break + + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': + l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789+-"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + used_idxs.append(i) + i += 2 + if l[i] == '-': + value = int(l[i + 1]) * -1 + used_idxs.append(i) + i += 1 + else: + value = int(l[i]) + used_idxs.append(i) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i]) - 1) % 7 + else: + x.day = int(l[i]) + used_idxs.append(i) + i += 2 + x.time = int(l[i]) + used_idxs.append(i) + i += 2 + if i < len_l: + if l[i] in ('-', '+'): + signal = (-1, 1)[l[i] == "+"] + used_idxs.append(i) + i += 1 + else: + signal = 1 + used_idxs.append(i) + res.dstoffset = (res.stdoffset + int(l[i]) * signal) + + # This was a made-up format that is not in normal use + warn(('Parsed time zone "%s"' % tzstr) + + 'is in a non-standard dateutil-specific format, which ' + + 'is now deprecated; support for parsing this format ' + + 'will be removed in future versions. 
It is recommended ' + + 'that you switch to a standard format like the GNU ' + + 'TZ variable format.', tz.DeprecatedTzFormatWarning) + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',', '/', 'J', 'M', + '.', '-', ':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + used_idxs.append(i) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + used_idxs.append(i) + i += 1 + x.month = int(l[i]) + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.weekday = (int(l[i]) - 1) % 7 + else: + # year day (zero based) + x.yday = int(l[i]) + 1 + + used_idxs.append(i) + i += 1 + + if i < len_l and l[i] == '/': + used_idxs.append(i) + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60 + used_idxs.append(i) + i += 2 + if i + 1 < len_l and l[i + 1] == ':': + used_idxs.append(i) + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2]) * 3600) + else: + return None + used_idxs.append(i) + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + unused_idxs = set(range(len_l)).difference(used_idxs) + res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"}) + return res + + +DEFAULTTZPARSER = _tzparser() + + +def _parsetz(tzstr): + return DEFAULTTZPARSER.parse(tzstr) + + +class ParserError(ValueError): + """Error class for representing failure to parse a datetime string.""" + def __str__(self): + try: + return self.args[0] % self.args[1:] + except 
(TypeError, IndexError): + return super(ParserError, self).__str__() + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, str(self)) + + +class UnknownTimezoneWarning(RuntimeWarning): + """Raised when the parser finds a timezone it cannot parse into a tzinfo""" +# vim:ts=4:sw=4:et diff --git a/venv/Lib/site-packages/dateutil/parser/isoparser.py b/venv/Lib/site-packages/dateutil/parser/isoparser.py new file mode 100644 index 000000000..48f86a335 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/parser/isoparser.py @@ -0,0 +1,411 @@ +# -*- coding: utf-8 -*- +""" +This module offers a parser for ISO-8601 strings + +It is intended to support all valid date, time and datetime formats per the +ISO-8601 specification. + +..versionadded:: 2.7.0 +""" +from datetime import datetime, timedelta, time, date +import calendar +from dateutil import tz + +from functools import wraps + +import re +import six + +__all__ = ["isoparse", "isoparser"] + + +def _takes_ascii(f): + @wraps(f) + def func(self, str_in, *args, **kwargs): + # If it's a stream, read the whole thing + str_in = getattr(str_in, 'read', lambda: str_in)() + + # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII + if isinstance(str_in, six.text_type): + # ASCII is the same in UTF-8 + try: + str_in = str_in.encode('ascii') + except UnicodeEncodeError as e: + msg = 'ISO-8601 strings should contain only ASCII characters' + six.raise_from(ValueError(msg), e) + + return f(self, str_in, *args, **kwargs) + + return func + + +class isoparser(object): + def __init__(self, sep=None): + """ + :param sep: + A single character that separates date and time portions. If + ``None``, the parser will accept any single character. + For strict ISO-8601 adherence, pass ``'T'``. 
+ """ + if sep is not None: + if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): + raise ValueError('Separator must be a single, non-numeric ' + + 'ASCII character') + + sep = sep.encode('ascii') + + self._sep = sep + + @_takes_ascii + def isoparse(self, dt_str): + """ + Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. + + An ISO-8601 datetime string consists of a date portion, followed + optionally by a time portion - the date and time portions are separated + by a single character separator, which is ``T`` in the official + standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be + combined with a time portion. + + Supported date formats are: + + Common: + + - ``YYYY`` + - ``YYYY-MM`` or ``YYYYMM`` + - ``YYYY-MM-DD`` or ``YYYYMMDD`` + + Uncommon: + + - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) + - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day + + The ISO week and day numbering follows the same logic as + :func:`datetime.date.isocalendar`. + + Supported time formats are: + + - ``hh`` + - ``hh:mm`` or ``hhmm`` + - ``hh:mm:ss`` or ``hhmmss`` + - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits) + + Midnight is a special case for `hh`, as the standard supports both + 00:00 and 24:00 as a representation. The decimal separator can be + either a dot or a comma. + + + .. caution:: + + Support for fractional components other than seconds is part of the + ISO-8601 standard, but is not currently implemented in this parser. + + Supported time zone offset formats are: + + - `Z` (UTC) + - `±HH:MM` + - `±HHMM` + - `±HH` + + Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, + with the exception of UTC, which will be represented as + :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such + as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. 
+ + :param dt_str: + A string or stream containing only an ISO-8601 datetime string + + :return: + Returns a :class:`datetime.datetime` representing the string. + Unspecified components default to their lowest value. + + .. warning:: + + As of version 2.7.0, the strictness of the parser should not be + considered a stable part of the contract. Any valid ISO-8601 string + that parses correctly with the default settings will continue to + parse correctly in future versions, but invalid strings that + currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not + guaranteed to continue failing in future versions if they encode + a valid date. + + .. versionadded:: 2.7.0 + """ + components, pos = self._parse_isodate(dt_str) + + if len(dt_str) > pos: + if self._sep is None or dt_str[pos:pos + 1] == self._sep: + components += self._parse_isotime(dt_str[pos + 1:]) + else: + raise ValueError('String contains unknown ISO components') + + if len(components) > 3 and components[3] == 24: + components[3] = 0 + return datetime(*components) + timedelta(days=1) + + return datetime(*components) + + @_takes_ascii + def parse_isodate(self, datestr): + """ + Parse the date portion of an ISO string. + + :param datestr: + The string portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.date` object + """ + components, pos = self._parse_isodate(datestr) + if pos < len(datestr): + raise ValueError('String contains unknown ISO ' + + 'components: {}'.format(datestr)) + return date(*components) + + @_takes_ascii + def parse_isotime(self, timestr): + """ + Parse the time portion of an ISO string. + + :param timestr: + The time portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.time` object + """ + components = self._parse_isotime(timestr) + if components[0] == 24: + components[0] = 0 + return time(*components) + + @_takes_ascii + def parse_tzstr(self, tzstr, zero_as_utc=True): + """ + Parse a valid ISO time zone string. 
+ + See :func:`isoparser.isoparse` for details on supported formats. + + :param tzstr: + A string representing an ISO time zone offset + + :param zero_as_utc: + Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones + + :return: + Returns :class:`dateutil.tz.tzoffset` for offsets and + :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is + specified) offsets equivalent to UTC. + """ + return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) + + # Constants + _DATE_SEP = b'-' + _TIME_SEP = b':' + _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)') + + def _parse_isodate(self, dt_str): + try: + return self._parse_isodate_common(dt_str) + except ValueError: + return self._parse_isodate_uncommon(dt_str) + + def _parse_isodate_common(self, dt_str): + len_str = len(dt_str) + components = [1, 1, 1] + + if len_str < 4: + raise ValueError('ISO string too short') + + # Year + components[0] = int(dt_str[0:4]) + pos = 4 + if pos >= len_str: + return components, pos + + has_sep = dt_str[pos:pos + 1] == self._DATE_SEP + if has_sep: + pos += 1 + + # Month + if len_str - pos < 2: + raise ValueError('Invalid common month') + + components[1] = int(dt_str[pos:pos + 2]) + pos += 2 + + if pos >= len_str: + if has_sep: + return components, pos + else: + raise ValueError('Invalid ISO format') + + if has_sep: + if dt_str[pos:pos + 1] != self._DATE_SEP: + raise ValueError('Invalid separator in ISO string') + pos += 1 + + # Day + if len_str - pos < 2: + raise ValueError('Invalid common day') + components[2] = int(dt_str[pos:pos + 2]) + return components, pos + 2 + + def _parse_isodate_uncommon(self, dt_str): + if len(dt_str) < 4: + raise ValueError('ISO string too short') + + # All ISO formats start with the year + year = int(dt_str[0:4]) + + has_sep = dt_str[4:5] == self._DATE_SEP + + pos = 4 + has_sep # Skip '-' if it's there + if dt_str[pos:pos + 1] == b'W': + # YYYY-?Www-?D? 
+ pos += 1 + weekno = int(dt_str[pos:pos + 2]) + pos += 2 + + dayno = 1 + if len(dt_str) > pos: + if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: + raise ValueError('Inconsistent use of dash separator') + + pos += has_sep + + dayno = int(dt_str[pos:pos + 1]) + pos += 1 + + base_date = self._calculate_weekdate(year, weekno, dayno) + else: + # YYYYDDD or YYYY-DDD + if len(dt_str) - pos < 3: + raise ValueError('Invalid ordinal day') + + ordinal_day = int(dt_str[pos:pos + 3]) + pos += 3 + + if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): + raise ValueError('Invalid ordinal day' + + ' {} for year {}'.format(ordinal_day, year)) + + base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) + + components = [base_date.year, base_date.month, base_date.day] + return components, pos + + def _calculate_weekdate(self, year, week, day): + """ + Calculate the day of corresponding to the ISO year-week-day calendar. + + This function is effectively the inverse of + :func:`datetime.date.isocalendar`. 
+ + :param year: + The year in the ISO calendar + + :param week: + The week in the ISO calendar - range is [1, 53] + + :param day: + The day in the ISO calendar - range is [1 (MON), 7 (SUN)] + + :return: + Returns a :class:`datetime.date` + """ + if not 0 < week < 54: + raise ValueError('Invalid week: {}'.format(week)) + + if not 0 < day < 8: # Range is 1-7 + raise ValueError('Invalid weekday: {}'.format(day)) + + # Get week 1 for the specific year: + jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it + week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) + + # Now add the specific number of weeks and days to get what we want + week_offset = (week - 1) * 7 + (day - 1) + return week_1 + timedelta(days=week_offset) + + def _parse_isotime(self, timestr): + len_str = len(timestr) + components = [0, 0, 0, 0, None] + pos = 0 + comp = -1 + + if len(timestr) < 2: + raise ValueError('ISO time too short') + + has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP + + while pos < len_str and comp < 5: + comp += 1 + + if timestr[pos:pos + 1] in b'-+Zz': + # Detect time zone boundary + components[-1] = self._parse_tzstr(timestr[pos:]) + pos = len_str + break + + if comp < 3: + # Hour, minute, second + components[comp] = int(timestr[pos:pos + 2]) + pos += 2 + if (has_sep and pos < len_str and + timestr[pos:pos + 1] == self._TIME_SEP): + pos += 1 + + if comp == 3: + # Fraction of a second + frac = self._FRACTION_REGEX.match(timestr[pos:]) + if not frac: + continue + + us_str = frac.group(1)[:6] # Truncate to microseconds + components[comp] = int(us_str) * 10**(6 - len(us_str)) + pos += len(frac.group()) + + if pos < len_str: + raise ValueError('Unused components in ISO string') + + if components[0] == 24: + # Standard supports 00:00 and 24:00 as representations of midnight + if any(component != 0 for component in components[1:4]): + raise ValueError('Hour may only be 24 at 24:00:00.000') + + return components + + def _parse_tzstr(self, tzstr, 
zero_as_utc=True): + if tzstr == b'Z' or tzstr == b'z': + return tz.UTC + + if len(tzstr) not in {3, 5, 6}: + raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') + + if tzstr[0:1] == b'-': + mult = -1 + elif tzstr[0:1] == b'+': + mult = 1 + else: + raise ValueError('Time zone offset requires sign') + + hours = int(tzstr[1:3]) + if len(tzstr) == 3: + minutes = 0 + else: + minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) + + if zero_as_utc and hours == 0 and minutes == 0: + return tz.UTC + else: + if minutes > 59: + raise ValueError('Invalid minutes in time zone offset') + + if hours > 23: + raise ValueError('Invalid hours in time zone offset') + + return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) + + +DEFAULT_ISOPARSER = isoparser() +isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/venv/Lib/site-packages/dateutil/relativedelta.py b/venv/Lib/site-packages/dateutil/relativedelta.py new file mode 100644 index 000000000..a9e85f7e6 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/relativedelta.py @@ -0,0 +1,599 @@ +# -*- coding: utf-8 -*- +import datetime +import calendar + +import operator +from math import copysign + +from six import integer_types +from warnings import warn + +from ._common import weekday + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + + +class relativedelta(object): + """ + The relativedelta type is designed to be applied to an existing datetime and + can replace specific components of that datetime, or represents an interval + of time. + + It is based on the specification of the excellent work done by M.-A. Lemburg + in his + `mx.DateTime `_ extension. + However, notice that this type does *NOT* implement the same algorithm as + his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. + + There are two different ways to build a relativedelta instance. 
The + first one is passing it two date/datetime classes:: + + relativedelta(datetime1, datetime2) + + The second one is passing it any number of the following keyword arguments:: + + relativedelta(arg1=x,arg2=y,arg3=z...) + + year, month, day, hour, minute, second, microsecond: + Absolute information (argument is singular); adding or subtracting a + relativedelta with absolute information does not perform an arithmetic + operation, but rather REPLACES the corresponding value in the + original datetime with the value(s) in relativedelta. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative (argument is plural); adding + or subtracting a relativedelta with relative information performs + the corresponding arithmetic operation on the original datetime value + with the information in the relativedelta. + + weekday: + One of the weekday instances (MO, TU, etc) available in the + relativedelta module. These instances may receive a parameter N, + specifying the Nth weekday, which could be positive or negative + (like MO(+1) or MO(-2)). Not specifying it is the same as specifying + +1. You can also use an integer, where 0=MO. This argument is always + relative e.g. if the calculated date is already Monday, using MO(1) + or MO(-1) won't change the day. To effectively make it absolute, use + it in combination with the day argument (e.g. day=1, MO(1) for first + Monday of the month). + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is post 28 of february. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + + There are relative and absolute forms of the keyword + arguments. The plural is relative, and the singular is + absolute. 
For each argument in the order below, the absolute form + is applied first (by setting each attribute to that value) and + then the relative form (by adding the value to the attribute). + + The order of attributes considered when this relativedelta is + added to a datetime is: + + 1. Year + 2. Month + 3. Day + 4. Hours + 5. Minutes + 6. Seconds + 7. Microseconds + + Finally, weekday is applied, using the rule described above. + + For example + + >>> from datetime import datetime + >>> from dateutil.relativedelta import relativedelta, MO + >>> dt = datetime(2018, 4, 9, 13, 37, 0) + >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) + >>> dt + delta + datetime.datetime(2018, 4, 2, 14, 37) + + First, the day is set to 1 (the first of the month), then 25 hours + are added, to get to the 2nd day and 14th hour, finally the + weekday is applied, but since the 2nd is already a Monday there is + no effect. + + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + + if dt1 and dt2: + # datetime is a subclass of date. 
So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + # Relative information + self.years = int(years) + self.months = int(months) + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + 
self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. " + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or 
self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return int(self.days / 7.0) + + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=+1, hours=+14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. + """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + 
self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=(other.year if other.year is not None + else self.year), + month=(other.month if other.month is not None + else self.month), + day=(other.day if other.day is not None + else self.day), + weekday=(other.weekday if other.weekday is not None + else self.weekday), + hour=(other.hour if other.hour is not None + else self.hour), + minute=(other.minute if other.minute is not None + else self.minute), + second=(other.second if other.second is not None + else self.second), + microsecond=(other.microsecond if other.microsecond + is not None else + self.microsecond)) + if isinstance(other, datetime.timedelta): + return self.__class__(years=self.years, + months=self.months, + days=self.days + other.days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds + other.seconds, + microseconds=self.microseconds + other.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + if not isinstance(other, datetime.date): + return NotImplemented + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += 
self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented # In case the other object defines __rsub__ + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=(self.year if self.year is not None + else other.year), + month=(self.month if self.month is not None else + other.month), + day=(self.day if self.day is not None else + other.day), + weekday=(self.weekday if self.weekday is not None else + other.weekday), + hour=(self.hour if self.hour is not None else + other.hour), + minute=(self.minute if self.minute is not None else + other.minute), + second=(self.second if self.second is not None else + other.second), + microsecond=(self.microsecond if self.microsecond + is not None else + other.microsecond)) + + def __abs__(self): + return self.__class__(years=abs(self.years), + months=abs(self.months), + days=abs(self.days), + hours=abs(self.hours), + minutes=abs(self.minutes), + seconds=abs(self.seconds), + microseconds=abs(self.microseconds), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + 
hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + try: + f = float(other) + except TypeError: + return NotImplemented + + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + self.months == 
other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __hash__(self): + return hash(( + self.weekday, + self.years, + self.months, + self.days, + self.hours, + self.minutes, + self.seconds, + self.microseconds, + self.leapdays, + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + )) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + try: + reciprocal = 1 / float(other) + except TypeError: + return NotImplemented + + return self.__mul__(reciprocal) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/venv/Lib/site-packages/dateutil/rrule.py b/venv/Lib/site-packages/dateutil/rrule.py new file mode 100644 index 000000000..6bf0ea9c6 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/rrule.py @@ -0,0 +1,1735 @@ +# -*- coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the recurrence rules documented in the +`iCalendar 
RFC `_, +including support for caching of results. +""" +import itertools +import datetime +import calendar +import re +import sys + +try: + from math import gcd +except ImportError: + from fractions import gcd + +from six import advance_iterator, integer_types +from six.moves import _thread, range +import heapq + +from ._common import weekday as weekdaybase + +# For warning about deprecation of until and count +from warnings import warn + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. +M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) +WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY'] + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = list(range(7)) + +# Imported on demand. +easter = None +parser = None + + +class weekday(weekdaybase): + """ + This version of weekday does not allow n = 0. 
+ """ + def __init__(self, wkday, n=None): + if n == 0: + raise ValueError("Can't create weekday with n==0") + + super(weekday, self).__init__(wkday, n) + + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + + +def _invalidates_cache(f): + """ + Decorator for rruleset methods which may invalidate the + cached length. + """ + def inner_func(self, *args, **kwargs): + rv = f(self, *args, **kwargs) + self._invalidate_cache() + return rv + + return inner_func + + +class rrulebase(object): + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = _thread.allocate_lock() + self._invalidate_cache() + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _invalidate_cache(self): + if self._cache is not None: + self._cache = [] + self._cache_complete = False + self._cache_gen = self._iter() + + if self._cache_lock.locked(): + self._cache_lock.release() + + self._len = None + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(advance_iterator(gen)) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxsize, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i in range(item+1): + res = 
advance_iterator(gen) + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large performance penalty. + def count(self): + """ Returns the number of recurrences in this set. It will have go + trough the whole recurrence, if this hasn't been done before. """ + if self._len is None: + for x in self: + pass + return self._len + + def before(self, dt, inc=False): + """ Returns the last recurrence before the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + """ Returns the first recurrence after the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def xafter(self, dt, count=None, inc=False): + """ + Generator which yields up to `count` recurrences after the given + datetime instance, equivalent to `after`. + + :param dt: + The datetime at which to start generating recurrences. + + :param count: + The maximum number of recurrences to generate. If `None` (default), + dates are generated until the recurrence rule is exhausted. + + :param inc: + If `dt` is an instance of the rule and `inc` is `True`, it is + included in the output. 
+ + :yields: Yields a sequence of `datetime` objects. + """ + + if self._cache_complete: + gen = self._cache + else: + gen = self + + # Select the comparison function + if inc: + comp = lambda dc, dtc: dc >= dtc + else: + comp = lambda dc, dtc: dc > dtc + + # Generate dates + n = 0 + for d in gen: + if comp(d, dt): + if count is not None: + n += 1 + if n > count: + break + + yield d + + def between(self, after, before, inc=False, count=1): + """ Returns all the occurrences of the rrule between after and before. + The inc keyword defines what happens if after and/or before are + themselves occurrences. With inc=True, they will be included in the + list, if they are found in the recurrence set. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + + +class rrule(rrulebase): + """ + That's the base of the rrule operation. It accepts all the keywords + defined in the RFC as its constructor parameters (except byday, + which was renamed to byweekday) and more. The constructor prototype is:: + + rrule(freq) + + Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, + or SECONDLY. + + .. note:: + Per RFC section 3.3.10, recurrence instances falling on invalid dates + and times are ignored rather than coerced: + + Recurrence rules may generate recurrence instances with an invalid + date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM + on a day where the local time is moved forward by an hour at 1:00 + AM). Such recurrence instances MUST be ignored and MUST NOT be + counted as part of the recurrence set. 
+ + This can lead to possibly surprising behavior when, for example, the + start date occurs at the end of the month: + + >>> from dateutil.rrule import rrule, MONTHLY + >>> from datetime import datetime + >>> start_date = datetime(2014, 12, 31) + >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2014, 12, 31, 0, 0), + datetime.datetime(2015, 1, 31, 0, 0), + datetime.datetime(2015, 3, 31, 0, 0), + datetime.datetime(2015, 5, 31, 0, 0)] + + Additionally, it supports the following keyword arguments: + + :param dtstart: + The recurrence start. Besides being the base for the recurrence, + missing parameters in the final recurrence instances will also be + extracted from this date. If not given, datetime.now() will be used + instead. + :param interval: + The interval between each freq iteration. For example, when using + YEARLY, an interval of 2 means once every two years, but with HOURLY, + it means once every two hours. The default interval is 1. + :param wkst: + The week start day. Must be one of the MO, TU, WE constants, or an + integer, specifying the first day of the week. This will affect + recurrences based on weekly periods. The default week start is got + from calendar.firstweekday(), and may be modified by + calendar.setfirstweekday(). + :param count: + If given, this determines how many occurrences will be generated. + + .. note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param until: + If given, this must be a datetime instance specifying the upper-bound + limit of the recurrence. The last recurrence in the rule is the greatest + datetime that is less than or equal to the value specified in the + ``until`` parameter. + + .. 
note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param bysetpos: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each given integer will specify an occurrence + number, corresponding to the nth occurrence of the rule inside the + frequency period. For example, a bysetpos of -1 if combined with a + MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will + result in the last work day of every month. + :param bymonth: + If given, it must be either an integer, or a sequence of integers, + meaning the months to apply the recurrence to. + :param bymonthday: + If given, it must be either an integer, or a sequence of integers, + meaning the month days to apply the recurrence to. + :param byyearday: + If given, it must be either an integer, or a sequence of integers, + meaning the year days to apply the recurrence to. + :param byeaster: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each integer will define an offset from the + Easter Sunday. Passing the offset 0 to byeaster will yield the Easter + Sunday itself. This is an extension to the RFC specification. + :param byweekno: + If given, it must be either an integer, or a sequence of integers, + meaning the week numbers to apply the recurrence to. Week numbers + have the meaning described in ISO8601, that is, the first week of + the year is that containing at least four days of the new year. + :param byweekday: + If given, it must be either an integer (0 == MO), a sequence of + integers, one of the weekday constants (MO, TU, etc), or a sequence + of these constants. When given, these variables will define the + weekdays where the recurrence will be applied. 
It's also possible to + use an argument n for the weekday instances, which will mean the nth + occurrence of this weekday in the period. For example, with MONTHLY, + or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the + first friday of the month where the recurrence happens. Notice that in + the RFC documentation, this is specified as BYDAY, but was renamed to + avoid the ambiguity of that keyword. + :param byhour: + If given, it must be either an integer, or a sequence of integers, + meaning the hours to apply the recurrence to. + :param byminute: + If given, it must be either an integer, or a sequence of integers, + meaning the minutes to apply the recurrence to. + :param bysecond: + If given, it must be either an integer, or a sequence of integers, + meaning the seconds to apply the recurrence to. + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. 
+ """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + if until and until.tzinfo: + dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) + else: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if self._dtstart and self._until: + if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): + # According to RFC5545 Section 3.3.10: + # https://tools.ietf.org/html/rfc5545#section-3.3.10 + # + # > If the "DTSTART" property is specified as a date with UTC + # > time or a date with local time and time zone reference, + # > then the UNTIL rule part MUST be specified as a date with + # > UTC time. + raise ValueError( + 'RRULE UNTIL values must be specified in UTC when DTSTART ' + 'is timezone-aware' + ) + + if count is not None and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 5545" + " and has been deprecated in dateutil. 
Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + 
self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # Ensure it's unique + + self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) + self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. 
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = () + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = () + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = {dtstart.hour} + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = {dtstart.minute} + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + 
else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC5545, except for the + dateutil-specific extension BYEASTER. + """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) + + if self._count is not None: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC5545-compliant strings, so we should modify that. 
+ original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append('RRULE:' + ';'.join(parts)) + return '\n'.join(output) + + def replace(self, **kwargs): + """Return new rrule with same attributes except for those attributes given new + values by whichever keyword arguments are specified.""" + new_kwargs = {"interval": self._interval, + "count": self._count, + "dtstart": self._dtstart, + "freq": self._freq, + "until": self._until, + "wkst": self._wkst, + "cache": False if self._cache is None else True } + new_kwargs.update(self._original_rule) + new_kwargs.update(kwargs) + return rrule(**new_kwargs) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: 
ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res 
>= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + total += 1 + yield res + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + + total += 1 + yield res + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + 
filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. 
+ + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). 
+ + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. + + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. + """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. 
+ rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. 
If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. + if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. 
+ dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. 
""" + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + + + +class _rrulestr(object): + """ Parses a string representation of a recurrence rule or set of + recurrence rules. + + :param s: + Required, a string defining one or more recurrence rules. + + :param dtstart: + If given, used as the default recurrence start if not specified in the + rule string. + + :param cache: + If set ``True`` caching of results will be enabled, improving + performance of multiple queries considerably. + + :param unfold: + If set ``True`` indicates that a rule string is split over more + than one line and should be joined before processing. + + :param forceset: + If set ``True`` forces a :class:`dateutil.rrule.rruleset` to + be returned. + + :param compatible: + If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime.datetime` object is returned. + + :param tzids: + If given, a callable or mapping used to retrieve a + :class:`datetime.tzinfo` from a string representation. + Defaults to :func:`dateutil.tz.gettz`. 
+ + :param tzinfos: + Additional time zone names / aliases which may be present in a string + representation. See :func:`dateutil.parser.parse` for more + information. + + :return: + Returns a :class:`dateutil.rrule.rruleset` or + :class:`dateutil.rrule.rrule` + """ + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. 
+ splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_date_value(self, date_value, parms, rule_tzids, + ignoretz, tzids, tzinfos): + global parser + if not parser: + from dateutil import parser + + datevals = [] + value_found = False + TZID = None + + for parm in parms: + if parm.startswith("TZID="): + try: + tzkey = rule_tzids[parm.split('TZID=')[-1]] + except KeyError: + continue + if tzids is None: + from . import tz + tzlookup = tz.gettz + elif callable(tzids): + tzlookup = tzids + else: + tzlookup = getattr(tzids, 'get', None) + if tzlookup is None: + msg = ('tzids must be a callable, mapping, or None, ' + 'not %s' % tzids) + raise ValueError(msg) + + TZID = tzlookup(tzkey) + continue + + # RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found + # only once. 
+ if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}: + raise ValueError("unsupported parm: " + parm) + else: + if value_found: + msg = ("Duplicate value parameter found in: " + parm) + raise ValueError(msg) + value_found = True + + for datestr in date_value.split(','): + date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos) + if TZID is not None: + if date.tzinfo is None: + date = date.replace(tzinfo=TZID) + else: + raise ValueError('DTSTART/EXDATE specifies multiple timezone') + datevals.append(date) + + return datevals + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzids=None, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + + TZID_NAMES = dict(map( + lambda x: (x.upper(), x), + re.findall('TZID=(?P[^:]+):', s) + )) + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + 
rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + exdatevals.extend( + self._parse_date_value(value, parms, + TZID_NAMES, ignoretz, + tzids, tzinfos) + ) + elif name == "DTSTART": + dtvals = self._parse_date_value(value, parms, TZID_NAMES, + ignoretz, tzids, tzinfos) + if len(dtvals) != 1: + raise ValueError("Multiple DTSTART values specified:" + + value) + dtstart = dtvals[0] + else: + raise ValueError("unsupported property: "+name) + if (forceset or len(rrulevals) > 1 or rdatevals + or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + rset = rruleset(cache=cache) + for value in rrulevals: + rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + rset.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + rset.exdate(value) + if compatible and dtstart: + rset.rdate(dtstart) + return rset + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/venv/Lib/site-packages/dateutil/tz/__init__.py b/venv/Lib/site-packages/dateutil/tz/__init__.py new file mode 100644 index 000000000..af1352c47 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/tz/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +from .tz import * +from .tz import __doc__ + +__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", + "enfold", "datetime_ambiguous", "datetime_exists", + 
"resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"] + + +class DeprecatedTzFormatWarning(Warning): + """Warning raised when time zones are parsed from deprecated formats.""" diff --git a/venv/Lib/site-packages/dateutil/tz/__pycache__/__init__.cpython-36.pyc b/venv/Lib/site-packages/dateutil/tz/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 000000000..c57cd668c Binary files /dev/null and b/venv/Lib/site-packages/dateutil/tz/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/tz/__pycache__/_common.cpython-36.pyc b/venv/Lib/site-packages/dateutil/tz/__pycache__/_common.cpython-36.pyc new file mode 100644 index 000000000..f56200c78 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/tz/__pycache__/_common.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/tz/__pycache__/_factories.cpython-36.pyc b/venv/Lib/site-packages/dateutil/tz/__pycache__/_factories.cpython-36.pyc new file mode 100644 index 000000000..e035ab073 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/tz/__pycache__/_factories.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/tz/__pycache__/tz.cpython-36.pyc b/venv/Lib/site-packages/dateutil/tz/__pycache__/tz.cpython-36.pyc new file mode 100644 index 000000000..c2da45c65 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/tz/__pycache__/tz.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/tz/__pycache__/win.cpython-36.pyc b/venv/Lib/site-packages/dateutil/tz/__pycache__/win.cpython-36.pyc new file mode 100644 index 000000000..2b5e0cb8d Binary files /dev/null and b/venv/Lib/site-packages/dateutil/tz/__pycache__/win.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/tz/_common.py b/venv/Lib/site-packages/dateutil/tz/_common.py new file mode 100644 index 000000000..e6ac11831 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/tz/_common.py @@ -0,0 +1,419 @@ +from six import PY2 + +from functools import wraps 
+ +from datetime import datetime, timedelta, tzinfo + + +ZERO = timedelta(0) + +__all__ = ['tzname_in_python2', 'enfold'] + + +def tzname_in_python2(namefunc): + """Change unicode output into bytestrings in Python 2 + + tzname() API changed in Python 3. It used to return bytes, but was changed + to unicode strings + """ + if PY2: + @wraps(namefunc) + def adjust_encoding(*args, **kwargs): + name = namefunc(*args, **kwargs) + if name is not None: + name = name.encode() + + return name + + return adjust_encoding + else: + return namefunc + + +# The following is adapted from Alexander Belopolsky's tz library +# https://github.com/abalkin/tz +if hasattr(datetime, 'fold'): + # This is the pre-python 3.6 fold situation + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. versionadded:: 2.6.0 + """ + return dt.replace(fold=fold) + +else: + class _DatetimeWithFold(datetime): + """ + This is a class designed to provide a PEP 495-compliant interface for + Python versions before 3.6. It is used only for dates in a fold, so + the ``fold`` attribute is fixed at ``1``. + + .. versionadded:: 2.6.0 + """ + __slots__ = () + + def replace(self, *args, **kwargs): + """ + Return a datetime with the same attributes, except for those + attributes given new values by whichever keyword arguments are + specified. Note that tzinfo=None can be specified to create a naive + datetime from an aware datetime with no conversion of date and time + data. 
+ + This is reimplemented in ``_DatetimeWithFold`` because pypy3 will + return a ``datetime.datetime`` even if ``fold`` is unchanged. + """ + argnames = ( + 'year', 'month', 'day', 'hour', 'minute', 'second', + 'microsecond', 'tzinfo' + ) + + for arg, argname in zip(args, argnames): + if argname in kwargs: + raise TypeError('Duplicate argument: {}'.format(argname)) + + kwargs[argname] = arg + + for argname in argnames: + if argname not in kwargs: + kwargs[argname] = getattr(self, argname) + + dt_class = self.__class__ if kwargs.get('fold', 1) else datetime + + return dt_class(**kwargs) + + @property + def fold(self): + return 1 + + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. versionadded:: 2.6.0 + """ + if getattr(dt, 'fold', 0) == fold: + return dt + + args = dt.timetuple()[:6] + args += (dt.microsecond, dt.tzinfo) + + if fold: + return _DatetimeWithFold(*args) + else: + return datetime(*args) + + +def _validate_fromutc_inputs(f): + """ + The CPython version of ``fromutc`` checks that the input is a ``datetime`` + object and that ``self`` is attached as its ``tzinfo``. + """ + @wraps(f) + def fromutc(self, dt): + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + return f(self, dt) + + return fromutc + + +class _tzinfo(tzinfo): + """ + Base class for all ``dateutil`` ``tzinfo`` objects. 
+ """ + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + + dt = dt.replace(tzinfo=self) + + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) + + return same_dt and not same_offset + + def _fold_status(self, dt_utc, dt_wall): + """ + Determine the fold status of a "wall" datetime, given a representation + of the same datetime as a (naive) UTC datetime. This is calculated based + on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all + datetimes, and that this offset is the actual number of hours separating + ``dt_utc`` and ``dt_wall``. + + :param dt_utc: + Representation of the datetime as UTC + + :param dt_wall: + Representation of the datetime as "wall time". This parameter must + either have a `fold` attribute or have a fold-naive + :class:`datetime.tzinfo` attached, otherwise the calculation may + fail. + """ + if self.is_ambiguous(dt_wall): + delta_wall = dt_wall - dt_utc + _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) + else: + _fold = 0 + + return _fold + + def _fold(self, dt): + return getattr(dt, 'fold', 0) + + def _fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurrence, chronologically, of the ambiguous datetime). + + :param dt: + A timezone-aware :class:`datetime.datetime` object. 
+ """ + + # Re-implement the algorithm from Python's datetime.py + dtoff = dt.utcoffset() + if dtoff is None: + raise ValueError("fromutc() requires a non-None utcoffset() " + "result") + + # The original datetime.py code assumes that `dst()` defaults to + # zero during ambiguous times. PEP 495 inverts this presumption, so + # for pre-PEP 495 versions of python, we need to tweak the algorithm. + dtdst = dt.dst() + if dtdst is None: + raise ValueError("fromutc() requires a non-None dst() result") + delta = dtoff - dtdst + + dt += delta + # Set fold=1 so we can default to being in the fold for + # ambiguous dates. + dtdst = enfold(dt, fold=1).dst() + if dtdst is None: + raise ValueError("fromutc(): dt.dst gave inconsistent " + "results; cannot convert") + return dt + dtdst + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurrence, chronologically, of the ambiguous datetime). + + :param dt: + A timezone-aware :class:`datetime.datetime` object. + """ + dt_wall = self._fromutc(dt) + + # Calculate the fold status given the two datetimes. + _fold = self._fold_status(dt, dt_wall) + + # Set the default fold value for ambiguous dates + return enfold(dt_wall, fold=_fold) + + +class tzrangebase(_tzinfo): + """ + This is an abstract base class for time zones represented by an annual + transition into and out of DST. Child classes should implement the following + methods: + + * ``__init__(self, *args, **kwargs)`` + * ``transitions(self, year)`` - this is expected to return a tuple of + datetimes representing the DST on and off transitions in standard + time. 
+ + A fully initialized ``tzrangebase`` subclass should also provide the + following attributes: + * ``hasdst``: Boolean whether or not the zone uses DST. + * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects + representing the respective UTC offsets. + * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short + abbreviations in DST and STD, respectively. + * ``_hasdst``: Whether or not the zone has DST. + + .. versionadded:: 2.6.0 + """ + def __init__(self): + raise NotImplementedError('tzrangebase is an abstract base class') + + def utcoffset(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_base_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + if self._isdst(dt): + return self._dst_abbr + else: + return self._std_abbr + + def fromutc(self, dt): + """ Given a datetime in UTC, return local time """ + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # Get transitions - if there are none, fixed offset + transitions = self.transitions(dt.year) + if transitions is None: + return dt + self.utcoffset(dt) + + # Get the transition times in UTC + dston, dstoff = transitions + + dston -= self._std_offset + dstoff -= self._std_offset + + utc_transitions = (dston, dstoff) + dt_utc = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt_utc, utc_transitions) + + if isdst: + dt_wall = dt + self._dst_offset + else: + dt_wall = dt + self._std_offset + + _fold = int(not isdst and self.is_ambiguous(dt_wall)) + + return enfold(dt_wall, fold=_fold) + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. 
+ + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if not self.hasdst: + return False + + start, end = self.transitions(dt.year) + + dt = dt.replace(tzinfo=None) + return (end <= dt < end + self._dst_base_offset) + + def _isdst(self, dt): + if not self.hasdst: + return False + elif dt is None: + return None + + transitions = self.transitions(dt.year) + + if transitions is None: + return False + + dt = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt, transitions) + + # Handle ambiguous dates + if not isdst and self.is_ambiguous(dt): + return not self._fold(dt) + else: + return isdst + + def _naive_isdst(self, dt, transitions): + dston, dstoff = transitions + + dt = dt.replace(tzinfo=None) + + if dston < dstoff: + isdst = dston <= dt < dstoff + else: + isdst = not dstoff <= dt < dston + + return isdst + + @property + def _dst_base_offset(self): + return self._dst_offset - self._std_offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ diff --git a/venv/Lib/site-packages/dateutil/tz/_factories.py b/venv/Lib/site-packages/dateutil/tz/_factories.py new file mode 100644 index 000000000..f8a65891a --- /dev/null +++ b/venv/Lib/site-packages/dateutil/tz/_factories.py @@ -0,0 +1,80 @@ +from datetime import timedelta +import weakref +from collections import OrderedDict + +from six.moves import _thread + + +class _TzSingleton(type): + def __init__(cls, *args, **kwargs): + cls.__instance = None + super(_TzSingleton, cls).__init__(*args, **kwargs) + + def __call__(cls): + if cls.__instance is None: + cls.__instance = super(_TzSingleton, cls).__call__() + return cls.__instance + + +class _TzFactory(type): + def instance(cls, *args, **kwargs): + """Alternate constructor that returns a fresh instance""" + return 
type.__call__(cls, *args, **kwargs) + + +class _TzOffsetFactory(_TzFactory): + def __init__(cls, *args, **kwargs): + cls.__instances = weakref.WeakValueDictionary() + cls.__strong_cache = OrderedDict() + cls.__strong_cache_size = 8 + + cls._cache_lock = _thread.allocate_lock() + + def __call__(cls, name, offset): + if isinstance(offset, timedelta): + key = (name, offset.total_seconds()) + else: + key = (name, offset) + + instance = cls.__instances.get(key, None) + if instance is None: + instance = cls.__instances.setdefault(key, + cls.instance(name, offset)) + + # This lock may not be necessary in Python 3. See GH issue #901 + with cls._cache_lock: + cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) + + # Remove an item if the strong cache is overpopulated + if len(cls.__strong_cache) > cls.__strong_cache_size: + cls.__strong_cache.popitem(last=False) + + return instance + + +class _TzStrFactory(_TzFactory): + def __init__(cls, *args, **kwargs): + cls.__instances = weakref.WeakValueDictionary() + cls.__strong_cache = OrderedDict() + cls.__strong_cache_size = 8 + + cls.__cache_lock = _thread.allocate_lock() + + def __call__(cls, s, posix_offset=False): + key = (s, posix_offset) + instance = cls.__instances.get(key, None) + + if instance is None: + instance = cls.__instances.setdefault(key, + cls.instance(s, posix_offset)) + + # This lock may not be necessary in Python 3. 
See GH issue #901 + with cls.__cache_lock: + cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) + + # Remove an item if the strong cache is overpopulated + if len(cls.__strong_cache) > cls.__strong_cache_size: + cls.__strong_cache.popitem(last=False) + + return instance + diff --git a/venv/Lib/site-packages/dateutil/tz/tz.py b/venv/Lib/site-packages/dateutil/tz/tz.py new file mode 100644 index 000000000..af81e88e1 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/tz/tz.py @@ -0,0 +1,1849 @@ +# -*- coding: utf-8 -*- +""" +This module offers timezone implementations subclassing the abstract +:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format +files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, +etc), TZ environment string (in all known formats), given ranges (with help +from relative deltas), local machine timezone, fixed offset timezone, and UTC +timezone. +""" +import datetime +import struct +import time +import sys +import os +import bisect +import weakref +from collections import OrderedDict + +import six +from six import string_types +from six.moves import _thread +from ._common import tzname_in_python2, _tzinfo +from ._common import tzrangebase, enfold +from ._common import _validate_fromutc_inputs + +from ._factories import _TzSingleton, _TzOffsetFactory +from ._factories import _TzStrFactory +try: + from .win import tzwin, tzwinlocal +except ImportError: + tzwin = tzwinlocal = None + +# For warning about rounding tzinfo +from warnings import warn + +ZERO = datetime.timedelta(0) +EPOCH = datetime.datetime.utcfromtimestamp(0) +EPOCHORDINAL = EPOCH.toordinal() + + +@six.add_metaclass(_TzSingleton) +class tzutc(datetime.tzinfo): + """ + This is a tzinfo object that represents the UTC time zone. + + **Examples:** + + .. 
doctest:: + + >>> from datetime import * + >>> from dateutil.tz import * + + >>> datetime.now() + datetime.datetime(2003, 9, 27, 9, 40, 1, 521290) + + >>> datetime.now(tzutc()) + datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc()) + + >>> datetime.now(tzutc()).tzname() + 'UTC' + + .. versionchanged:: 2.7.0 + ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will + always return the same object. + + .. doctest:: + + >>> from dateutil.tz import tzutc, UTC + >>> tzutc() is tzutc() + True + >>> tzutc() is UTC + True + """ + def utcoffset(self, dt): + return ZERO + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return "UTC" + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + return False + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Fast track version of fromutc() returns the original ``dt`` object for + any valid :py:class:`datetime.datetime` object. + """ + return dt + + def __eq__(self, other): + if not isinstance(other, (tzutc, tzoffset)): + return NotImplemented + + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +#: Convenience constant providing a :class:`tzutc()` instance +#: +#: .. versionadded:: 2.7.0 +UTC = tzutc() + + +@six.add_metaclass(_TzOffsetFactory) +class tzoffset(datetime.tzinfo): + """ + A simple class for representing a fixed offset from UTC. + + :param name: + The timezone name, to be returned when ``tzname()`` is called. 
+ :param offset: + The time zone offset in seconds, or (since version 2.6.0, represented + as a :py:class:`datetime.timedelta` object). + """ + def __init__(self, name, offset): + self._name = name + + try: + # Allow a timedelta + offset = offset.total_seconds() + except (TypeError, AttributeError): + pass + + self._offset = datetime.timedelta(seconds=_get_supported_offset(offset)) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._name + + @_validate_fromutc_inputs + def fromutc(self, dt): + return dt + self._offset + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + return False + + def __eq__(self, other): + if not isinstance(other, tzoffset): + return NotImplemented + + return self._offset == other._offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + repr(self._name), + int(self._offset.total_seconds())) + + __reduce__ = object.__reduce__ + + +class tzlocal(_tzinfo): + """ + A :class:`tzinfo` subclass built around the ``time`` timezone functions. 
+ """ + def __init__(self): + super(tzlocal, self).__init__() + + self._std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + self._dst_offset = datetime.timedelta(seconds=-time.altzone) + else: + self._dst_offset = self._std_offset + + self._dst_saved = self._dst_offset - self._std_offset + self._hasdst = bool(self._dst_saved) + self._tznames = tuple(time.tzname) + + def utcoffset(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset - self._std_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._tznames[self._isdst(dt)] + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + naive_dst = self._naive_is_dst(dt) + return (not naive_dst and + (naive_dst != self._naive_is_dst(dt - self._dst_saved))) + + def _naive_is_dst(self, dt): + timestamp = _datetime_to_timestamp(dt) + return time.localtime(timestamp + time.timezone).tm_isdst + + def _isdst(self, dt, fold_naive=True): + # We can't use mktime here. It is unstable when deciding if + # the hour near to a change is DST or not. 
+ # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + # >>> import tz, datetime + # >>> t = tz.tzlocal() + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # + # Here is a more stable implementation: + # + if not self._hasdst: + return False + + # Check for ambiguous times: + dstval = self._naive_is_dst(dt) + fold = getattr(dt, 'fold', None) + + if self.is_ambiguous(dt): + if fold is not None: + return not self._fold(dt) + else: + return True + + return dstval + + def __eq__(self, other): + if isinstance(other, tzlocal): + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset) + elif isinstance(other, tzutc): + return (not self._hasdst and + self._tznames[0] in {'UTC', 'GMT'} and + self._std_offset == ZERO) + elif isinstance(other, tzoffset): + return (not self._hasdst and + self._tznames[0] == other._name and + self._std_offset == other._offset) + else: + return NotImplemented + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", + "isstd", "isgmt", "dstoffset"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def 
__eq__(self, other): + if not isinstance(other, _ttinfo): + return NotImplemented + + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt and + self.dstoffset == other.dstoffset) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + + +class _tzfile(object): + """ + Lightweight class for holding the relevant transition and time zone + information read from binary tzfiles. + """ + attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list', + 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] + + def __init__(self, **kwargs): + for attr in self.attrs: + setattr(self, attr, kwargs.get(attr, None)) + + +class tzfile(_tzinfo): + """ + This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)`` + format timezone files to extract current and historical zone information. + + :param fileobj: + This can be an opened file stream or a file name that the time zone + information can be read from. + + :param filename: + This is an optional parameter specifying the source of the time zone + information in the event that ``fileobj`` is a file object. If omitted + and ``fileobj`` is a file stream, this parameter will be set either to + ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. + + See `Sources for Time Zone and Daylight Saving Time Data + `_ for more information. + Time zone files can be compiled from the `IANA Time Zone database files + `_ with the `zic time zone compiler + `_ + + .. 
note:: + + Only construct a ``tzfile`` directly if you have a specific timezone + file on disk that you want to read into a Python ``tzinfo`` object. + If you want to get a ``tzfile`` representing a specific IANA zone, + (e.g. ``'America/New_York'``), you should call + :func:`dateutil.tz.gettz` with the zone identifier. + + + **Examples:** + + Using the US Eastern time zone as an example, we can see that a ``tzfile`` + provides time zone information for the standard Daylight Saving offsets: + + .. testsetup:: tzfile + + from dateutil.tz import gettz + from datetime import datetime + + .. doctest:: tzfile + + >>> NYC = gettz('America/New_York') + >>> NYC + tzfile('/usr/share/zoneinfo/America/New_York') + + >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST + 2016-01-03 00:00:00-05:00 + + >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT + 2016-07-07 00:00:00-04:00 + + + The ``tzfile`` structure contains a fully history of the time zone, + so historical dates will also have the right offsets. For example, before + the adoption of the UTC standards, New York used local solar mean time: + + .. doctest:: tzfile + + >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT + 1901-04-12 00:00:00-04:56 + + And during World War II, New York was on "Eastern War Time", which was a + state of permanent daylight saving time: + + .. 
doctest:: tzfile + + >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT + 1944-02-07 00:00:00-04:00 + + """ + + def __init__(self, fileobj, filename=None): + super(tzfile, self).__init__() + + file_opened_here = False + if isinstance(fileobj, string_types): + self._filename = fileobj + fileobj = open(fileobj, 'rb') + file_opened_here = True + elif filename is not None: + self._filename = filename + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = repr(fileobj) + + if fileobj is not None: + if not file_opened_here: + fileobj = _nullcontext(fileobj) + + with fileobj as file_stream: + tzobj = self._read_tzfile(file_stream) + + self._set_tzdata(tzobj) + + def _set_tzdata(self, tzobj): + """ Set the time zone data of this object from a _tzfile object """ + # Copy the relevant attributes over as private attributes + for attr in _tzfile.attrs: + setattr(self, '_' + attr, getattr(tzobj, attr)) + + def _read_tzfile(self, fileobj): + out = _tzfile() + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + if fileobj.read(4).decode() != "TZif": + raise ValueError("magic not found") + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. + leapcnt, + + # The number of "transition times" for which data + # is stored in the file. + timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). 
+ typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. + charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. + # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + out.trans_list_utc = list(struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4))) + else: + out.trans_list_utc = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + out.trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + out.trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. + + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt).decode() + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. 
+ # The pairs of values are sorted in ascending order + # by time. + + # Not used, for now (but seek for correct file position) + if leapcnt: + fileobj.seek(leapcnt * 8, os.SEEK_CUR) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # Build ttinfo list + out.ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + gmtoff = _get_supported_offset(gmtoff) + tti = _ttinfo() + tti.offset = gmtoff + tti.dstoffset = datetime.timedelta(0) + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + out.ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] + + # Set standard, dst, and before ttinfos. before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. 
+ out.ttinfo_std = None + out.ttinfo_dst = None + out.ttinfo_before = None + if out.ttinfo_list: + if not out.trans_list_utc: + out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] + else: + for i in range(timecnt-1, -1, -1): + tti = out.trans_idx[i] + if not out.ttinfo_std and not tti.isdst: + out.ttinfo_std = tti + elif not out.ttinfo_dst and tti.isdst: + out.ttinfo_dst = tti + + if out.ttinfo_std and out.ttinfo_dst: + break + else: + if out.ttinfo_dst and not out.ttinfo_std: + out.ttinfo_std = out.ttinfo_dst + + for tti in out.ttinfo_list: + if not tti.isdst: + out.ttinfo_before = tti + break + else: + out.ttinfo_before = out.ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. + lastdst = None + lastoffset = None + lastdstoffset = None + lastbaseoffset = None + out.trans_list = [] + + for i, tti in enumerate(out.trans_idx): + offset = tti.offset + dstoffset = 0 + + if lastdst is not None: + if tti.isdst: + if not lastdst: + dstoffset = offset - lastoffset + + if not dstoffset and lastdstoffset: + dstoffset = lastdstoffset + + tti.dstoffset = datetime.timedelta(seconds=dstoffset) + lastdstoffset = dstoffset + + # If a time zone changes its base offset during a DST transition, + # then you need to adjust by the previous base offset to get the + # transition time in local time. Otherwise you use the current + # base offset. Ideally, I would have some mathematical proof of + # why this is true, but I haven't really thought about it enough. 
+ baseoffset = offset - dstoffset + adjustment = baseoffset + if (lastbaseoffset is not None and baseoffset != lastbaseoffset + and tti.isdst != lastdst): + # The base DST has changed + adjustment = lastbaseoffset + + lastdst = tti.isdst + lastoffset = offset + lastbaseoffset = baseoffset + + out.trans_list.append(out.trans_list_utc[i] + adjustment) + + out.trans_idx = tuple(out.trans_idx) + out.trans_list = tuple(out.trans_list) + out.trans_list_utc = tuple(out.trans_list_utc) + + return out + + def _find_last_transition(self, dt, in_utc=False): + # If there's no list, there are no transitions to find + if not self._trans_list: + return None + + timestamp = _datetime_to_timestamp(dt) + + # Find where the timestamp fits in the transition list - if the + # timestamp is a transition time, it's part of the "after" period. + trans_list = self._trans_list_utc if in_utc else self._trans_list + idx = bisect.bisect_right(trans_list, timestamp) + + # We want to know when the previous transition was, so subtract off 1 + return idx - 1 + + def _get_ttinfo(self, idx): + # For no list or after the last transition, default to _ttinfo_std + if idx is None or (idx + 1) >= len(self._trans_list): + return self._ttinfo_std + + # If there is a list and the time is before it, return _ttinfo_before + if idx < 0: + return self._ttinfo_before + + return self._trans_idx[idx] + + def _find_ttinfo(self, dt): + idx = self._resolve_ambiguous_time(dt) + + return self._get_ttinfo(idx) + + def fromutc(self, dt): + """ + The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`. + + :param dt: + A :py:class:`datetime.datetime` object. + + :raises TypeError: + Raised if ``dt`` is not a :py:class:`datetime.datetime` object. + + :raises ValueError: + Raised if this is called with a ``dt`` which does not have this + ``tzinfo`` attached. + + :return: + Returns a :py:class:`datetime.datetime` object representing the + wall time in ``self``'s time zone. 
+ """ + # These isinstance checks are in datetime.tzinfo, so we'll preserve + # them, even if we don't care about duck typing. + if not isinstance(dt, datetime.datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # First treat UTC as wall time and get the transition we're in. + idx = self._find_last_transition(dt, in_utc=True) + tti = self._get_ttinfo(idx) + + dt_out = dt + datetime.timedelta(seconds=tti.offset) + + fold = self.is_ambiguous(dt_out, idx=idx) + + return enfold(dt_out, fold=int(fold)) + + def is_ambiguous(self, dt, idx=None): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if idx is None: + idx = self._find_last_transition(dt) + + # Calculate the difference in offsets from current to previous + timestamp = _datetime_to_timestamp(dt) + tti = self._get_ttinfo(idx) + + if idx is None or idx <= 0: + return False + + od = self._get_ttinfo(idx - 1).offset - tti.offset + tt = self._trans_list[idx] # Transition time + + return timestamp < tt + od + + def _resolve_ambiguous_time(self, dt): + idx = self._find_last_transition(dt) + + # If we have no transitions, return the index + _fold = self._fold(dt) + if idx is None or idx == 0: + return idx + + # If it's ambiguous and we're in a fold, shift to a different index. 
+ idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) + + return idx - idx_offset + + def utcoffset(self, dt): + if dt is None: + return None + + if not self._ttinfo_std: + return ZERO + + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if dt is None: + return None + + if not self._ttinfo_dst: + return ZERO + + tti = self._find_ttinfo(dt) + + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. + return tti.dstoffset + + @tzname_in_python2 + def tzname(self, dt): + if not self._ttinfo_std or dt is None: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return NotImplemented + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) + + def __reduce__(self): + return self.__reduce_ex__(None) + + def __reduce_ex__(self, protocol): + return (self.__class__, (None, self._filename), self.__dict__) + + +class tzrange(tzrangebase): + """ + The ``tzrange`` object is a time zone specified by a set of offsets and + abbreviations, equivalent to the way the ``TZ`` variable can be specified + in POSIX-like systems, but using Python delta objects to specify DST + start, end and offsets. + + :param stdabbr: + The abbreviation for standard time (e.g. ``'EST'``). + + :param stdoffset: + An integer or :class:`datetime.timedelta` object or equivalent + specifying the base offset from UTC. + + If unspecified, +00:00 is used. + + :param dstabbr: + The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). + + If specified, with no other DST information, DST is assumed to occur + and the default behavior or ``dstoffset``, ``start`` and ``end`` is + used. 
If unspecified and no other DST information is specified, it + is assumed that this zone has no DST. + + If this is unspecified and other DST information is *is* specified, + DST occurs in the zone but the time zone abbreviation is left + unchanged. + + :param dstoffset: + A an integer or :class:`datetime.timedelta` object or equivalent + specifying the UTC offset during DST. If unspecified and any other DST + information is specified, it is assumed to be the STD offset +1 hour. + + :param start: + A :class:`relativedelta.relativedelta` object or equivalent specifying + the time and time of year that daylight savings time starts. To + specify, for example, that DST starts at 2AM on the 2nd Sunday in + March, pass: + + ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` + + If unspecified and any other DST information is specified, the default + value is 2 AM on the first Sunday in April. + + :param end: + A :class:`relativedelta.relativedelta` object or equivalent + representing the time and time of year that daylight savings time + ends, with the same specification method as in ``start``. One note is + that this should point to the first time in the *standard* zone, so if + a transition occurs at 2AM in the DST zone and the clocks are set back + 1 hour to 1AM, set the ``hours`` parameter to +1. + + + **Examples:** + + .. testsetup:: tzrange + + from dateutil.tz import tzrange, tzstr + + .. doctest:: tzrange + + >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") + True + + >>> from dateutil.relativedelta import * + >>> range1 = tzrange("EST", -18000, "EDT") + >>> range2 = tzrange("EST", -18000, "EDT", -14400, + ... relativedelta(hours=+2, month=4, day=1, + ... weekday=SU(+1)), + ... relativedelta(hours=+1, month=10, day=31, + ... 
weekday=SU(-1))) + >>> tzstr('EST5EDT') == range1 == range2 + True + + """ + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + + global relativedelta + from dateutil import relativedelta + + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + + try: + stdoffset = stdoffset.total_seconds() + except (TypeError, AttributeError): + pass + + try: + dstoffset = dstoffset.total_seconds() + except (TypeError, AttributeError): + pass + + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = bool(self._start_delta) + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. 
+ """ + if not self.hasdst: + return None + + base_year = datetime.datetime(year, 1, 1) + + start = base_year + self._start_delta + end = base_year + self._end_delta + + return (start, end) + + def __eq__(self, other): + if not isinstance(other, tzrange): + return NotImplemented + + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +@six.add_metaclass(_TzStrFactory) +class tzstr(tzrange): + """ + ``tzstr`` objects are time zone objects specified by a time-zone string as + it would be passed to a ``TZ`` variable on POSIX-style systems (see + the `GNU C Library: TZ Variable`_ for more details). + + There is one notable exception, which is that POSIX-style time zones use an + inverted offset format, so normally ``GMT+3`` would be parsed as an offset + 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an + offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX + behavior, pass a ``True`` value to ``posix_offset``. + + The :class:`tzrange` object provides the same functionality, but is + specified using :class:`relativedelta.relativedelta` objects. rather than + strings. + + :param s: + A time zone string in ``TZ`` variable format. This can be a + :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: + :class:`unicode`) or a stream emitting unicode characters + (e.g. :class:`StringIO`). + + :param posix_offset: + Optional. If set to ``True``, interpret strings such as ``GMT+3`` or + ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the + POSIX standard. + + .. 
caution:: + + Prior to version 2.7.0, this function also supported time zones + in the format: + + * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600`` + * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600`` + + This format is non-standard and has been deprecated; this function + will raise a :class:`DeprecatedTZFormatWarning` until + support is removed in a future version. + + .. _`GNU C Library: TZ Variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + """ + def __init__(self, s, posix_offset=False): + global parser + from dateutil.parser import _parser as parser + + self._s = s + + res = parser._parsetz(s) + if res is None or res.any_unused_tokens: + raise ValueError("unknown string format") + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC") and not posix_offset: + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. + tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + self.hasdst = bool(self._start_delta) + + def _delta(self, x, isend=0): + from dateutil import relativedelta + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. 
+ if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. + delta = self._dst_offset - self._std_offset + kwargs["seconds"] -= delta.seconds + delta.days * 86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +class _tzicalvtzcomp(object): + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + + +class _tzicalvtz(_tzinfo): + def __init__(self, tzid, comps=[]): + super(_tzicalvtz, self).__init__() + + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + self._cache_lock = _thread.allocate_lock() + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + + dt = dt.replace(tzinfo=None) + + try: + with self._cache_lock: + return self._cachecomp[self._cachedate.index( + (dt, self._fold(dt)))] + except ValueError: + pass + + lastcompdt = None + lastcomp = None + + for comp in self._comps: + compdt = self._find_compdt(comp, dt) + + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. 
+ for comp in self._comps: + if not comp.isdst: + lastcomp = comp + break + else: + lastcomp = comp[0] + + with self._cache_lock: + self._cachedate.insert(0, (dt, self._fold(dt))) + self._cachecomp.insert(0, lastcomp) + + if len(self._cachedate) > 10: + self._cachedate.pop() + self._cachecomp.pop() + + return lastcomp + + def _find_compdt(self, comp, dt): + if comp.tzoffsetdiff < ZERO and self._fold(dt): + dt -= comp.tzoffsetdiff + + compdt = comp.rrule.before(dt, inc=True) + + return compdt + + def utcoffset(self, dt): + if dt is None: + return None + + return self._find_comp(dt).tzoffsetto + + def dst(self, dt): + comp = self._find_comp(dt) + if comp.isdst: + return comp.tzoffsetdiff + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._find_comp(dt).tzname + + def __repr__(self): + return "" % repr(self._tzid) + + __reduce__ = object.__reduce__ + + +class tzical(object): + """ + This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure + as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects. + + :param `fileobj`: + A file or stream in iCalendar format, which should be UTF-8 encoded + with CRLF endings. + + .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545 + """ + def __init__(self, fileobj): + global rrule + from dateutil import rrule + + if isinstance(fileobj, string_types): + self._s = fileobj + # ical should be encoded in UTF-8 with CRLF + fileobj = open(fileobj, 'r') + else: + self._s = getattr(fileobj, 'name', repr(fileobj)) + fileobj = _nullcontext(fileobj) + + self._vtz = {} + + with fileobj as fobj: + self._parse_rfc(fobj.read()) + + def keys(self): + """ + Retrieves the available time zones as a list. + """ + return list(self._vtz.keys()) + + def get(self, tzid=None): + """ + Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``. + + :param tzid: + If there is exactly one time zone available, omitting ``tzid`` + or passing :py:const:`None` value returns it. 
Otherwise a valid + key (which can be retrieved from :func:`keys`) is required. + + :raises ValueError: + Raised if ``tzid`` is not specified but there are either more + or fewer than 1 zone defined. + + :returns: + Returns either a :py:class:`datetime.tzinfo` object representing + the relevant time zone or :py:const:`None` if the ``tzid`` was + not found. + """ + if tzid is None: + if len(self._vtz) == 0: + raise ValueError("no timezones defined") + elif len(self._vtz) > 1: + raise ValueError("more than one timezone available") + tzid = next(iter(self._vtz)) + + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError("empty offset") + if s[0] in ('+', '-'): + signal = (-1, +1)[s[0] == '+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal + elif len(s) == 6: + return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal + else: + raise ValueError("invalid offset: " + s) + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError("empty string") + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + comptype = None + for line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError("unknown component: "+value) + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError("component not closed: "+comptype) + if not tzid: + raise 
ValueError("mandatory TZID not found") + if not comps: + raise ValueError( + "at least one component is needed") + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError("mandatory DTSTART not found") + if tzoffsetfrom is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + if tzoffsetto is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError("invalid component end: "+value) + elif comptype: + if name == "DTSTART": + # DTSTART in VTIMEZONE takes a subset of valid RRULE + # values under RFC 5545. + for parm in parms: + if parm != 'VALUE=DATE-TIME': + msg = ('Unsupported DTSTART param in ' + + 'VTIMEZONE: ' + parm) + raise ValueError(msg) + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError( + "unsupported %s parm: %s " % (name, parms[0])) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError( + "unsupported TZOFFSETTO parm: "+parms[0]) + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError( + "unsupported TZNAME parm: "+parms[0]) + tzname = value + elif name == "COMMENT": + pass + else: + raise ValueError("unsupported property: "+name) + else: + if name == "TZID": + if parms: + raise ValueError( + "unsupported TZID parm: "+parms[0]) + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + else: + raise ValueError("unsupported property: "+name) + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + 
comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +if sys.platform != "win32": + TZFILES = ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", + "/usr/lib/zoneinfo", + "/usr/share/lib/zoneinfo", + "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + + +def __get_gettz(): + tzlocal_classes = (tzlocal,) + if tzwinlocal is not None: + tzlocal_classes += (tzwinlocal,) + + class GettzFunc(object): + """ + Retrieve a time zone object from a string representation + + This function is intended to retrieve the :py:class:`tzinfo` subclass + that best represents the time zone that would be used if a POSIX + `TZ variable`_ were set to the same value. + + If no argument or an empty string is passed to ``gettz``, local time + is returned: + + .. code-block:: python3 + + >>> gettz() + tzfile('/etc/localtime') + + This function is also the preferred way to map IANA tz database keys + to :class:`tzfile` objects: + + .. code-block:: python3 + + >>> gettz('Pacific/Kiritimati') + tzfile('/usr/share/zoneinfo/Pacific/Kiritimati') + + On Windows, the standard is extended to include the Windows-specific + zone names provided by the operating system: + + .. code-block:: python3 + + >>> gettz('Egypt Standard Time') + tzwin('Egypt Standard Time') + + Passing a GNU ``TZ`` style string time zone specification returns a + :class:`tzstr` object: + + .. code-block:: python3 + + >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + + :param name: + A time zone name (IANA, or, on Windows, Windows keys), location of + a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone + specifier. An empty string, no argument or ``None`` is interpreted + as local time. + + :return: + Returns an instance of one of ``dateutil``'s :py:class:`tzinfo` + subclasses. + + .. 
versionchanged:: 2.7.0 + + After version 2.7.0, any two calls to ``gettz`` using the same + input strings will return the same object: + + .. code-block:: python3 + + >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago') + True + + In addition to improving performance, this ensures that + `"same zone" semantics`_ are used for datetimes in the same zone. + + + .. _`TZ variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + + .. _`"same zone" semantics`: + https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html + """ + def __init__(self): + + self.__instances = weakref.WeakValueDictionary() + self.__strong_cache_size = 8 + self.__strong_cache = OrderedDict() + self._cache_lock = _thread.allocate_lock() + + def __call__(self, name=None): + with self._cache_lock: + rv = self.__instances.get(name, None) + + if rv is None: + rv = self.nocache(name=name) + if not (name is None + or isinstance(rv, tzlocal_classes) + or rv is None): + # tzlocal is slightly more complicated than the other + # time zone providers because it depends on environment + # at construction time, so don't cache that. + # + # We also cannot store weak references to None, so we + # will also not store that. 
+ self.__instances[name] = rv + else: + # No need for strong caching, return immediately + return rv + + self.__strong_cache[name] = self.__strong_cache.pop(name, rv) + + if len(self.__strong_cache) > self.__strong_cache_size: + self.__strong_cache.popitem(last=False) + + return rv + + def set_cache_size(self, size): + with self._cache_lock: + self.__strong_cache_size = size + while len(self.__strong_cache) > size: + self.__strong_cache.popitem(last=False) + + def cache_clear(self): + with self._cache_lock: + self.__instances = weakref.WeakValueDictionary() + self.__strong_cache.clear() + + @staticmethod + def nocache(name=None): + """A non-cached version of gettz""" + tz = None + if not name: + try: + name = os.environ["TZ"] + except KeyError: + pass + if name is None or name == ":": + for filepath in TZFILES: + if not os.path.isabs(filepath): + filename = filepath + for path in TZPATHS: + filepath = os.path.join(path, filename) + if os.path.isfile(filepath): + break + else: + continue + if os.path.isfile(filepath): + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = tzlocal() + else: + try: + if name.startswith(":"): + name = name[1:] + except TypeError as e: + if isinstance(name, bytes): + new_msg = "gettz argument should be str, not bytes" + six.raise_from(TypeError(new_msg), e) + else: + raise + if os.path.isabs(name): + if os.path.isfile(name): + tz = tzfile(name) + else: + tz = None + else: + for path in TZPATHS: + filepath = os.path.join(path, name) + if not os.path.isfile(filepath): + filepath = filepath.replace(' ', '_') + if not os.path.isfile(filepath): + continue + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = None + if tzwin is not None: + try: + tz = tzwin(name) + except (WindowsError, UnicodeEncodeError): + # UnicodeEncodeError is for Python 2.7 compat + tz = None + + if not tz: + from dateutil.zoneinfo import get_zonefile_instance + tz = 
get_zonefile_instance().get(name) + + if not tz: + for c in name: + # name is not a tzstr unless it has at least + # one offset. For short values of "name", an + # explicit for loop seems to be the fastest way + # To determine if a string contains a digit + if c in "0123456789": + try: + tz = tzstr(name) + except ValueError: + pass + break + else: + if name in ("GMT", "UTC"): + tz = UTC + elif name in time.tzname: + tz = tzlocal() + return tz + + return GettzFunc() + + +gettz = __get_gettz() +del __get_gettz + + +def datetime_exists(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + would fall in a gap. + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" exists in + ``tz``. + + .. versionadded:: 2.7.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + tz = dt.tzinfo + + dt = dt.replace(tzinfo=None) + + # This is essentially a test of whether or not the datetime can survive + # a round trip to UTC. + dt_rt = dt.replace(tzinfo=tz).astimezone(UTC).astimezone(tz) + dt_rt = dt_rt.replace(tzinfo=None) + + return dt == dt_rt + + +def datetime_ambiguous(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + is ambiguous (i.e if there are two times differentiated only by their DST + status). + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. 
+ + :return: + Returns a boolean value whether or not the "wall time" is ambiguous in + ``tz``. + + .. versionadded:: 2.6.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + + tz = dt.tzinfo + + # If a time zone defines its own "is_ambiguous" function, we'll use that. + is_ambiguous_fn = getattr(tz, 'is_ambiguous', None) + if is_ambiguous_fn is not None: + try: + return tz.is_ambiguous(dt) + except Exception: + pass + + # If it doesn't come out and tell us it's ambiguous, we'll just check if + # the fold attribute has any effect on this particular date and time. + dt = dt.replace(tzinfo=tz) + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dst = wall_0.dst() == wall_1.dst() + + return not (same_offset and same_dst) + + +def resolve_imaginary(dt): + """ + Given a datetime that may be imaginary, return an existing datetime. + + This function assumes that an imaginary datetime represents what the + wall time would be in a zone had the offset transition not occurred, so + it will always fall forward by the transition's change in offset. + + .. doctest:: + + >>> from dateutil import tz + >>> from datetime import datetime + >>> NYC = tz.gettz('America/New_York') + >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC))) + 2017-03-12 03:30:00-04:00 + + >>> KIR = tz.gettz('Pacific/Kiritimati') + >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR))) + 1995-01-02 12:30:00+14:00 + + As a note, :func:`datetime.astimezone` is guaranteed to produce a valid, + existing datetime, so a round-trip to and from UTC is sufficient to get + an extant datetime, however, this generally "falls back" to an earlier time + rather than falling forward to the STD side (though no guarantees are made + about this behavior). + + :param dt: + A :class:`datetime.datetime` which may or may not exist. 
+ + :return: + Returns an existing :class:`datetime.datetime`. If ``dt`` was not + imaginary, the datetime returned is guaranteed to be the same object + passed to the function. + + .. versionadded:: 2.7.0 + """ + if dt.tzinfo is not None and not datetime_exists(dt): + + curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset() + old_offset = (dt - datetime.timedelta(hours=24)).utcoffset() + + dt += curr_offset - old_offset + + return dt + + +def _datetime_to_timestamp(dt): + """ + Convert a :class:`datetime.datetime` object to an epoch timestamp in + seconds since January 1, 1970, ignoring the time zone. + """ + return (dt.replace(tzinfo=None) - EPOCH).total_seconds() + + +if sys.version_info >= (3, 6): + def _get_supported_offset(second_offset): + return second_offset +else: + def _get_supported_offset(second_offset): + # For python pre-3.6, round to full-minutes if that's not the case. + # Python's datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 or https://bugs.python.org/issue5288 + # for some information. + old_offset = second_offset + calculated_offset = 60 * ((second_offset + 30) // 60) + return calculated_offset + + +try: + # Python 3.7 feature + from contextlib import nullcontext as _nullcontext +except ImportError: + class _nullcontext(object): + """ + Class for wrapping contexts so that they are passed through in a + with statement. + """ + def __init__(self, context): + self.context = context + + def __enter__(self): + return self.context + + def __exit__(*args, **kwargs): + pass + +# vim:ts=4:sw=4:et diff --git a/venv/Lib/site-packages/dateutil/tz/win.py b/venv/Lib/site-packages/dateutil/tz/win.py new file mode 100644 index 000000000..cde07ba79 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/tz/win.py @@ -0,0 +1,370 @@ +# -*- coding: utf-8 -*- +""" +This module provides an interface to the native time zone data on Windows, +including :py:class:`datetime.tzinfo` implementations. 
+ +Attempting to import this module on a non-Windows platform will raise an +:py:obj:`ImportError`. +""" +# This code was originally contributed by Jeffrey Harris. +import datetime +import struct + +from six.moves import winreg +from six import text_type + +try: + import ctypes + from ctypes import wintypes +except ValueError: + # ValueError is raised on non-Windows systems for some horrible reason. + raise ImportError("Running tzwin on non-Windows system") + +from ._common import tzrangebase + +__all__ = ["tzwin", "tzwinlocal", "tzres"] + +ONEWEEK = datetime.timedelta(7) + +TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" +TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" +TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" + + +def _settzkeyname(): + handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + try: + winreg.OpenKey(handle, TZKEYNAMENT).Close() + TZKEYNAME = TZKEYNAMENT + except WindowsError: + TZKEYNAME = TZKEYNAME9X + handle.Close() + return TZKEYNAME + + +TZKEYNAME = _settzkeyname() + + +class tzres(object): + """ + Class for accessing ``tzres.dll``, which contains timezone name related + resources. + + .. versionadded:: 2.5.0 + """ + p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char + + def __init__(self, tzres_loc='tzres.dll'): + # Load the user32 DLL so we can load strings from tzres + user32 = ctypes.WinDLL('user32') + + # Specify the LoadStringW function + user32.LoadStringW.argtypes = (wintypes.HINSTANCE, + wintypes.UINT, + wintypes.LPWSTR, + ctypes.c_int) + + self.LoadStringW = user32.LoadStringW + self._tzres = ctypes.WinDLL(tzres_loc) + self.tzres_loc = tzres_loc + + def load_name(self, offset): + """ + Load a timezone name from a DLL offset (integer). 
+ + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.load_name(112)) + 'Eastern Standard Time' + + :param offset: + A positive integer value referring to a string from the tzres dll. + + .. note:: + + Offsets found in the registry are generally of the form + ``@tzres.dll,-114``. The offset in this case is 114, not -114. + + """ + resource = self.p_wchar() + lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) + nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) + return resource[:nchar] + + def name_from_string(self, tzname_str): + """ + Parse strings as returned from the Windows registry into the time zone + name as defined in the registry. + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.name_from_string('@tzres.dll,-251')) + 'Dateline Daylight Time' + >>> print(tzr.name_from_string('Eastern Standard Time')) + 'Eastern Standard Time' + + :param tzname_str: + A timezone name string as returned from a Windows registry key. + + :return: + Returns the localized timezone string from tzres.dll if the string + is of the form `@tzres.dll,-offset`, else returns the input string. + """ + if not tzname_str.startswith('@'): + return tzname_str + + name_splt = tzname_str.split(',-') + try: + offset = int(name_splt[1]) + except: + raise ValueError("Malformed timezone string.") + + return self.load_name(offset) + + +class tzwinbase(tzrangebase): + """tzinfo class based on win32's timezones available in the registry.""" + def __init__(self): + raise NotImplementedError('tzwinbase is an abstract base class') + + def __eq__(self, other): + # Compare on all relevant dimensions, including name. 
+ if not isinstance(other, tzwinbase): + return NotImplemented + + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._stddayofweek == other._stddayofweek and + self._dstdayofweek == other._dstdayofweek and + self._stdweeknumber == other._stdweeknumber and + self._dstweeknumber == other._dstweeknumber and + self._stdhour == other._stdhour and + self._dsthour == other._dsthour and + self._stdminute == other._stdminute and + self._dstminute == other._dstminute and + self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr) + + @staticmethod + def list(): + """Return a list of all time zones known to the system.""" + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZKEYNAME) as tzkey: + result = [winreg.EnumKey(tzkey, i) + for i in range(winreg.QueryInfoKey(tzkey)[0])] + return result + + def display(self): + """ + Return the display name of the time zone. + """ + return self._display + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. 
+ """ + + if not self.hasdst: + return None + + dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, + self._dsthour, self._dstminute, + self._dstweeknumber) + + dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, + self._stdhour, self._stdminute, + self._stdweeknumber) + + # Ambiguous dates default to the STD side + dstoff -= self._dst_base_offset + + return dston, dstoff + + def _get_hasdst(self): + return self._dstmonth != 0 + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +class tzwin(tzwinbase): + """ + Time zone object created from the zone info in the Windows registry + + These are similar to :py:class:`dateutil.tz.tzrange` objects in that + the time zone data is provided in the format of a single offset rule + for either 0 or 2 time zone transitions per year. + + :param: name + The name of a Windows time zone key, e.g. "Eastern Standard Time". + The full list of keys can be retrieved with :func:`tzwin.list`. + """ + + def __init__(self, name): + self._name = name + + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + keydict = valuestodict(tzkey) + + self._std_abbr = keydict["Std"] + self._dst_abbr = keydict["Dlt"] + + self._display = keydict["Display"] + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=3l16h", keydict["TZI"]) + stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 + dstoffset = stdoffset-tup[2] # + DaylightBias * -1 + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs + # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) 
= tup[4:9] + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[12:17] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwin(%s)" % repr(self._name) + + def __reduce__(self): + return (self.__class__, (self._name,)) + + +class tzwinlocal(tzwinbase): + """ + Class representing the local time zone information in the Windows registry + + While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time` + module) to retrieve time zone information, ``tzwinlocal`` retrieves the + rules directly from the Windows registry and creates an object like + :class:`dateutil.tz.tzwin`. + + Because Windows does not have an equivalent of :func:`time.tzset`, on + Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the + time zone settings *at the time that the process was started*, meaning + changes to the machine's time zone settings during the run of a program + on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`. + Because ``tzwinlocal`` reads the registry directly, it is unaffected by + this issue. 
+ """ + def __init__(self): + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: + keydict = valuestodict(tzlocalkey) + + self._std_abbr = keydict["StandardName"] + self._dst_abbr = keydict["DaylightName"] + + try: + tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, + sn=self._std_abbr) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + except OSError: + self._display = None + + stdoffset = -keydict["Bias"]-keydict["StandardBias"] + dstoffset = stdoffset-keydict["DaylightBias"] + + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # For reasons unclear, in this particular key, the day of week has been + # moved to the END of the SYSTEMTIME structure. + tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:5] + + self._stddayofweek = tup[7] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:5] + + self._dstdayofweek = tup[7] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwinlocal()" + + def __str__(self): + # str will return the standard name, not the daylight name. 
+ return "tzwinlocal(%s)" % repr(self._std_abbr) + + def __reduce__(self): + return (self.__class__, ()) + + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ + first = datetime.datetime(year, month, 1, hour, minute) + + # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), + # Because 7 % 7 = 0 + weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) + wd = weekdayone + ((whichweek - 1) * ONEWEEK) + if (wd.month != month): + wd -= ONEWEEK + + return wd + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dout = {} + size = winreg.QueryInfoKey(key)[1] + tz_res = None + + for i in range(size): + key_name, value, dtype = winreg.EnumValue(key, i) + if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: + # If it's a DWORD (32-bit integer), it's stored as unsigned - convert + # that to a proper signed integer + if value & (1 << 31): + value = value - (1 << 32) + elif dtype == winreg.REG_SZ: + # If it's a reference to the tzres DLL, load the actual string + if value.startswith('@tzres'): + tz_res = tz_res or tzres() + value = tz_res.name_from_string(value) + + value = value.rstrip('\x00') # Remove trailing nulls + + dout[key_name] = value + + return dout diff --git a/venv/Lib/site-packages/dateutil/tzwin.py b/venv/Lib/site-packages/dateutil/tzwin.py new file mode 100644 index 000000000..cebc673e4 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * diff --git a/venv/Lib/site-packages/dateutil/utils.py b/venv/Lib/site-packages/dateutil/utils.py new file mode 100644 index 000000000..44d9c9945 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/utils.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" +This module offers general convenience and utility functions for dealing with +datetimes. + +.. 
versionadded:: 2.7.0 +""" +from __future__ import unicode_literals + +from datetime import datetime, time + + +def today(tzinfo=None): + """ + Returns a :py:class:`datetime` representing the current day at midnight + + :param tzinfo: + The time zone to attach (also used to determine the current day). + + :return: + A :py:class:`datetime.datetime` object representing the current day + at midnight. + """ + + dt = datetime.now(tzinfo) + return datetime.combine(dt.date(), time(0, tzinfo=tzinfo)) + + +def default_tzinfo(dt, tzinfo): + """ + Sets the ``tzinfo`` parameter on naive datetimes only + + This is useful for example when you are provided a datetime that may have + either an implicit or explicit time zone, such as when parsing a time zone + string. + + .. doctest:: + + >>> from dateutil.tz import tzoffset + >>> from dateutil.parser import parse + >>> from dateutil.utils import default_tzinfo + >>> dflt_tz = tzoffset("EST", -18000) + >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz)) + 2014-01-01 12:30:00+00:00 + >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz)) + 2014-01-01 12:30:00-05:00 + + :param dt: + The datetime on which to replace the time zone + + :param tzinfo: + The :py:class:`datetime.tzinfo` subclass instance to assign to + ``dt`` if (and only if) it is naive. + + :return: + Returns an aware :py:class:`datetime.datetime`. + """ + if dt.tzinfo is not None: + return dt + else: + return dt.replace(tzinfo=tzinfo) + + +def within_delta(dt1, dt2, delta): + """ + Useful for comparing two datetimes that may a negilible difference + to be considered equal. 
+ """ + delta = abs(delta) + difference = dt1 - dt2 + return -delta <= difference <= delta diff --git a/venv/Lib/site-packages/dateutil/zoneinfo/__init__.py b/venv/Lib/site-packages/dateutil/zoneinfo/__init__.py new file mode 100644 index 000000000..34f11ad66 --- /dev/null +++ b/venv/Lib/site-packages/dateutil/zoneinfo/__init__.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +import warnings +import json + +from tarfile import TarFile +from pkgutil import get_data +from io import BytesIO + +from dateutil.tz import tzfile as _tzfile + +__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"] + +ZONEFILENAME = "dateutil-zoneinfo.tar.gz" +METADATA_FN = 'METADATA' + + +class tzfile(_tzfile): + def __reduce__(self): + return (gettz, (self._filename,)) + + +def getzoneinfofile_stream(): + try: + return BytesIO(get_data(__name__, ZONEFILENAME)) + except IOError as e: # TODO switch to FileNotFoundError? + warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) + return None + + +class ZoneInfoFile(object): + def __init__(self, zonefile_stream=None): + if zonefile_stream is not None: + with TarFile.open(fileobj=zonefile_stream) as tf: + self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name) + for zf in tf.getmembers() + if zf.isfile() and zf.name != METADATA_FN} + # deal with links: They'll point to their parent object. Less + # waste of memory + links = {zl.name: self.zones[zl.linkname] + for zl in tf.getmembers() if + zl.islnk() or zl.issym()} + self.zones.update(links) + try: + metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) + metadata_str = metadata_json.read().decode('UTF-8') + self.metadata = json.loads(metadata_str) + except KeyError: + # no metadata in tar file + self.metadata = None + else: + self.zones = {} + self.metadata = None + + def get(self, name, default=None): + """ + Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method + for retrieving zones from the zone dictionary. 
+ + :param name: + The name of the zone to retrieve. (Generally IANA zone names) + + :param default: + The value to return in the event of a missing key. + + .. versionadded:: 2.6.0 + + """ + return self.zones.get(name, default) + + +# The current API has gettz as a module function, although in fact it taps into +# a stateful class. So as a workaround for now, without changing the API, we +# will create a new "global" class instance the first time a user requests a +# timezone. Ugly, but adheres to the api. +# +# TODO: Remove after deprecation period. +_CLASS_ZONE_INSTANCE = [] + + +def get_zonefile_instance(new_instance=False): + """ + This is a convenience function which provides a :class:`ZoneInfoFile` + instance using the data provided by the ``dateutil`` package. By default, it + caches a single instance of the ZoneInfoFile object and returns that. + + :param new_instance: + If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and + used as the cached instance for the next call. Otherwise, new instances + are created only as necessary. + + :return: + Returns a :class:`ZoneInfoFile` object. + + .. versionadded:: 2.6 + """ + if new_instance: + zif = None + else: + zif = getattr(get_zonefile_instance, '_cached_instance', None) + + if zif is None: + zif = ZoneInfoFile(getzoneinfofile_stream()) + + get_zonefile_instance._cached_instance = zif + + return zif + + +def gettz(name): + """ + This retrieves a time zone from the local zoneinfo tarball that is packaged + with dateutil. + + :param name: + An IANA-style time zone name, as found in the zoneinfo file. + + :return: + Returns a :class:`dateutil.tz.tzfile` time zone object. + + .. warning:: + It is generally inadvisable to use this function, and it is only + provided for API compatibility with earlier versions. This is *not* + equivalent to ``dateutil.tz.gettz()``, which selects an appropriate + time zone based on the inputs, favoring system zoneinfo. 
This is ONLY + for accessing the dateutil-specific zoneinfo (which may be out of + date compared to the system zoneinfo). + + .. deprecated:: 2.6 + If you need to use a specific zoneinfofile over the system zoneinfo, + instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call + :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. + + Use :func:`get_zonefile_instance` to retrieve an instance of the + dateutil-provided zoneinfo. + """ + warnings.warn("zoneinfo.gettz() will be removed in future versions, " + "to use the dateutil-provided zoneinfo files, instantiate a " + "ZoneInfoFile object and use ZoneInfoFile.zones.get() " + "instead. See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].zones.get(name) + + +def gettz_db_metadata(): + """ Get the zonefile metadata + + See `zonefile_metadata`_ + + :returns: + A dictionary with the database metadata + + .. deprecated:: 2.6 + See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, + query the attribute ``zoneinfo.ZoneInfoFile.metadata``. + """ + warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " + "versions, to use the dateutil-provided zoneinfo files, " + "ZoneInfoFile object and query the 'metadata' attribute " + "instead. 
See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/venv/Lib/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-36.pyc b/venv/Lib/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 000000000..beb108a67 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-36.pyc b/venv/Lib/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-36.pyc new file mode 100644 index 000000000..db5ee5b93 Binary files /dev/null and b/venv/Lib/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/venv/Lib/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz new file mode 100644 index 000000000..89e83517b Binary files /dev/null and b/venv/Lib/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz differ diff --git a/venv/Lib/site-packages/dateutil/zoneinfo/rebuild.py b/venv/Lib/site-packages/dateutil/zoneinfo/rebuild.py new file mode 100644 index 000000000..78f0d1a0c --- /dev/null +++ b/venv/Lib/site-packages/dateutil/zoneinfo/rebuild.py @@ -0,0 +1,53 @@ +import logging +import os +import tempfile +import shutil +import json +from subprocess import check_call +from tarfile import TarFile + +from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME + + +def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): + """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* + + filename is the timezone tarball from ``ftp.iana.org/tz``. 
+ + """ + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + try: + with TarFile.open(filename) as tf: + for name in zonegroups: + tf.extract(name, tmpdir) + filepaths = [os.path.join(tmpdir, n) for n in zonegroups] + try: + check_call(["zic", "-d", zonedir] + filepaths) + except OSError as e: + _print_on_nosuchfile(e) + raise + # write metadata file + with open(os.path.join(zonedir, METADATA_FN), 'w') as f: + json.dump(metadata, f, indent=4, sort_keys=True) + target = os.path.join(moduledir, ZONEFILENAME) + with TarFile.open(target, "w:%s" % format) as tf: + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + finally: + shutil.rmtree(tmpdir) + + +def _print_on_nosuchfile(e): + """Print helpful troubleshooting message + + e is an exception raised by subprocess.check_call() + + """ + if e.errno == 2: + logging.error( + "Could not find zic. Perhaps you need to install " + "libc-bin or some other package that provides it, " + "or it's not in your PATH?") diff --git a/venv/Lib/site-packages/decorator-4.4.2.dist-info/INSTALLER b/venv/Lib/site-packages/decorator-4.4.2.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/venv/Lib/site-packages/decorator-4.4.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/decorator-4.4.2.dist-info/LICENSE.txt b/venv/Lib/site-packages/decorator-4.4.2.dist-info/LICENSE.txt new file mode 100644 index 000000000..b0ade0487 --- /dev/null +++ b/venv/Lib/site-packages/decorator-4.4.2.dist-info/LICENSE.txt @@ -0,0 +1,26 @@ +Copyright (c) 2005-2018, Michele Simionato +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ Redistributions in bytecode form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. diff --git a/venv/Lib/site-packages/decorator-4.4.2.dist-info/METADATA b/venv/Lib/site-packages/decorator-4.4.2.dist-info/METADATA new file mode 100644 index 000000000..fd12277a0 --- /dev/null +++ b/venv/Lib/site-packages/decorator-4.4.2.dist-info/METADATA @@ -0,0 +1,131 @@ +Metadata-Version: 2.1 +Name: decorator +Version: 4.4.2 +Summary: Decorators for Humans +Home-page: https://github.com/micheles/decorator +Author: Michele Simionato +Author-email: michele.simionato@gmail.com +License: new BSD License +Keywords: decorators generic utility +Platform: All +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: 
Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Utilities +Requires-Python: >=2.6, !=3.0.*, !=3.1.* + +Decorators for Humans +===================== + +The goal of the decorator module is to make it easy to define +signature-preserving function decorators and decorator factories. +It also includes an implementation of multiple dispatch and other niceties +(please check the docs). It is released under a two-clauses +BSD license, i.e. basically you can do whatever you want with it but I am not +responsible. + +Installation +------------- + +If you are lazy, just perform + + ``$ pip install decorator`` + +which will install just the module on your system. + +If you prefer to install the full distribution from source, including +the documentation, clone the `GitHub repo`_ or download the tarball_, unpack it and run + + ``$ pip install .`` + +in the main directory, possibly as superuser. + +.. _tarball: https://pypi.org/project/decorator/#files +.. _GitHub repo: https://github.com/micheles/decorator + +Testing +-------- + +If you have the source code installation you can run the tests with + + `$ python src/tests/test.py -v` + +or (if you have setuptools installed) + + `$ python setup.py test` + +Notice that you may run into trouble if in your system there +is an older version of the decorator module; in such a case remove the +old version. It is safe even to copy the module `decorator.py` over +an existing one, since we kept backward-compatibility for a long time. + +Repository +--------------- + +The project is hosted on GitHub. 
You can look at the source here: + + https://github.com/micheles/decorator + +Documentation +--------------- + +The documentation has been moved to https://github.com/micheles/decorator/blob/master/docs/documentation.md + +From there you can get a PDF version by simply using the print +functionality of your browser. + +Here is the documentation for previous versions of the module: + +https://github.com/micheles/decorator/blob/4.3.2/docs/tests.documentation.rst +https://github.com/micheles/decorator/blob/4.2.1/docs/tests.documentation.rst +https://github.com/micheles/decorator/blob/4.1.2/docs/tests.documentation.rst +https://github.com/micheles/decorator/blob/4.0.0/documentation.rst +https://github.com/micheles/decorator/blob/3.4.2/documentation.rst + +For the impatient +----------------- + +Here is an example of how to define a family of decorators tracing slow +operations: + +.. code-block:: python + + from decorator import decorator + + @decorator + def warn_slow(func, timelimit=60, *args, **kw): + t0 = time.time() + result = func(*args, **kw) + dt = time.time() - t0 + if dt > timelimit: + logging.warn('%s took %d seconds', func.__name__, dt) + else: + logging.info('%s took %d seconds', func.__name__, dt) + return result + + @warn_slow # warn if it takes more than 1 minute + def preprocess_input_files(inputdir, tempdir): + ... + + @warn_slow(timelimit=600) # warn if it takes more than 10 minutes + def run_calculation(tempdir, outdir): + ... + +Enjoy! 
+ + diff --git a/venv/Lib/site-packages/decorator-4.4.2.dist-info/RECORD b/venv/Lib/site-packages/decorator-4.4.2.dist-info/RECORD new file mode 100644 index 000000000..775db2251 --- /dev/null +++ b/venv/Lib/site-packages/decorator-4.4.2.dist-info/RECORD @@ -0,0 +1,9 @@ +__pycache__/decorator.cpython-36.pyc,, +decorator-4.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +decorator-4.4.2.dist-info/LICENSE.txt,sha256=_RFmDKvwUyCCxFcGhi-vwpSQfsf44heBgkCkmZgGeC4,1309 +decorator-4.4.2.dist-info/METADATA,sha256=RYLh5Qy8XzYOcgCT6RsI_cTXG_PE1QvoAVT-u2vus80,4168 +decorator-4.4.2.dist-info/RECORD,, +decorator-4.4.2.dist-info/WHEEL,sha256=h_aVn5OB2IERUjMbi2pucmR_zzWJtk303YXvhh60NJ8,110 +decorator-4.4.2.dist-info/pbr.json,sha256=AL84oUUWQHwkd8OCPhLRo2NJjU5MDdmXMqRHv-posqs,47 +decorator-4.4.2.dist-info/top_level.txt,sha256=Kn6eQjo83ctWxXVyBMOYt0_YpjRjBznKYVuNyuC_DSI,10 +decorator.py,sha256=aQ8Ozc-EK26xBTOXVR5A-8Szgx99_bhaexZSGNn38Yc,17222 diff --git a/venv/Lib/site-packages/decorator-4.4.2.dist-info/WHEEL b/venv/Lib/site-packages/decorator-4.4.2.dist-info/WHEEL new file mode 100644 index 000000000..78e6f69d1 --- /dev/null +++ b/venv/Lib/site-packages/decorator-4.4.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.33.4) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/decorator-4.4.2.dist-info/pbr.json b/venv/Lib/site-packages/decorator-4.4.2.dist-info/pbr.json new file mode 100644 index 000000000..cd0459978 --- /dev/null +++ b/venv/Lib/site-packages/decorator-4.4.2.dist-info/pbr.json @@ -0,0 +1 @@ +{"is_release": false, "git_version": "8608a46"} \ No newline at end of file diff --git a/venv/Lib/site-packages/decorator-4.4.2.dist-info/top_level.txt b/venv/Lib/site-packages/decorator-4.4.2.dist-info/top_level.txt new file mode 100644 index 000000000..3fe18a4d1 --- /dev/null +++ b/venv/Lib/site-packages/decorator-4.4.2.dist-info/top_level.txt @@ -0,0 +1 @@ +decorator diff 
--git a/venv/Lib/site-packages/decorator.py b/venv/Lib/site-packages/decorator.py new file mode 100644 index 000000000..b1f8b567e --- /dev/null +++ b/venv/Lib/site-packages/decorator.py @@ -0,0 +1,454 @@ +# ######################### LICENSE ############################ # + +# Copyright (c) 2005-2018, Michele Simionato +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: + +# Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# Redistributions in bytecode form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +# DAMAGE. + +""" +Decorator module, see http://pypi.python.org/pypi/decorator +for the documentation. 
+""" +from __future__ import print_function + +import re +import sys +import inspect +import operator +import itertools +import collections + +__version__ = '4.4.2' + +if sys.version_info >= (3,): + from inspect import getfullargspec + + def get_init(cls): + return cls.__init__ +else: + FullArgSpec = collections.namedtuple( + 'FullArgSpec', 'args varargs varkw defaults ' + 'kwonlyargs kwonlydefaults annotations') + + def getfullargspec(f): + "A quick and dirty replacement for getfullargspec for Python 2.X" + return FullArgSpec._make(inspect.getargspec(f) + ([], None, {})) + + def get_init(cls): + return cls.__init__.__func__ + +try: + iscoroutinefunction = inspect.iscoroutinefunction +except AttributeError: + # let's assume there are no coroutine functions in old Python + def iscoroutinefunction(f): + return False +try: + from inspect import isgeneratorfunction +except ImportError: + # assume no generator function in old Python versions + def isgeneratorfunction(caller): + return False + + +DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(') + + +# basic functionality +class FunctionMaker(object): + """ + An object with the ability to create functions with a given signature. + It has attributes name, doc, module, signature, defaults, dict and + methods update and make. 
+ """ + + # Atomic get-and-increment provided by the GIL + _compile_count = itertools.count() + + # make pylint happy + args = varargs = varkw = defaults = kwonlyargs = kwonlydefaults = () + + def __init__(self, func=None, name=None, signature=None, + defaults=None, doc=None, module=None, funcdict=None): + self.shortsignature = signature + if func: + # func can be a class or a callable, but not an instance method + self.name = func.__name__ + if self.name == '': # small hack for lambda functions + self.name = '_lambda_' + self.doc = func.__doc__ + self.module = func.__module__ + if inspect.isfunction(func): + argspec = getfullargspec(func) + self.annotations = getattr(func, '__annotations__', {}) + for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', + 'kwonlydefaults'): + setattr(self, a, getattr(argspec, a)) + for i, arg in enumerate(self.args): + setattr(self, 'arg%d' % i, arg) + allargs = list(self.args) + allshortargs = list(self.args) + if self.varargs: + allargs.append('*' + self.varargs) + allshortargs.append('*' + self.varargs) + elif self.kwonlyargs: + allargs.append('*') # single star syntax + for a in self.kwonlyargs: + allargs.append('%s=None' % a) + allshortargs.append('%s=%s' % (a, a)) + if self.varkw: + allargs.append('**' + self.varkw) + allshortargs.append('**' + self.varkw) + self.signature = ', '.join(allargs) + self.shortsignature = ', '.join(allshortargs) + self.dict = func.__dict__.copy() + # func=None happens when decorating a caller + if name: + self.name = name + if signature is not None: + self.signature = signature + if defaults: + self.defaults = defaults + if doc: + self.doc = doc + if module: + self.module = module + if funcdict: + self.dict = funcdict + # check existence required attributes + assert hasattr(self, 'name') + if not hasattr(self, 'signature'): + raise TypeError('You are decorating a non function: %s' % func) + + def update(self, func, **kw): + "Update the signature of func with the data in self" + 
func.__name__ = self.name + func.__doc__ = getattr(self, 'doc', None) + func.__dict__ = getattr(self, 'dict', {}) + func.__defaults__ = self.defaults + func.__kwdefaults__ = self.kwonlydefaults or None + func.__annotations__ = getattr(self, 'annotations', None) + try: + frame = sys._getframe(3) + except AttributeError: # for IronPython and similar implementations + callermodule = '?' + else: + callermodule = frame.f_globals.get('__name__', '?') + func.__module__ = getattr(self, 'module', callermodule) + func.__dict__.update(kw) + + def make(self, src_templ, evaldict=None, addsource=False, **attrs): + "Make a new function from a given template and update the signature" + src = src_templ % vars(self) # expand name and signature + evaldict = evaldict or {} + mo = DEF.search(src) + if mo is None: + raise SyntaxError('not a valid function template\n%s' % src) + name = mo.group(1) # extract the function name + names = set([name] + [arg.strip(' *') for arg in + self.shortsignature.split(',')]) + for n in names: + if n in ('_func_', '_call_'): + raise NameError('%s is overridden in\n%s' % (n, src)) + + if not src.endswith('\n'): # add a newline for old Pythons + src += '\n' + + # Ensure each generated function has a unique filename for profilers + # (such as cProfile) that depend on the tuple of (, + # , ) being unique. + filename = '' % next(self._compile_count) + try: + code = compile(src, filename, 'single') + exec(code, evaldict) + except Exception: + print('Error in generated code:', file=sys.stderr) + print(src, file=sys.stderr) + raise + func = evaldict[name] + if addsource: + attrs['__source__'] = src + self.update(func, **attrs) + return func + + @classmethod + def create(cls, obj, body, evaldict, defaults=None, + doc=None, module=None, addsource=True, **attrs): + """ + Create a function from the strings name, signature and body. + evaldict is the evaluation dictionary. If addsource is true an + attribute __source__ is added to the result. 
The attributes attrs + are added, if any. + """ + if isinstance(obj, str): # "name(signature)" + name, rest = obj.strip().split('(', 1) + signature = rest[:-1] # strip a right parens + func = None + else: # a function + name = None + signature = None + func = obj + self = cls(func, name, signature, defaults, doc, module) + ibody = '\n'.join(' ' + line for line in body.splitlines()) + caller = evaldict.get('_call_') # when called from `decorate` + if caller and iscoroutinefunction(caller): + body = ('async def %(name)s(%(signature)s):\n' + ibody).replace( + 'return', 'return await') + else: + body = 'def %(name)s(%(signature)s):\n' + ibody + return self.make(body, evaldict, addsource, **attrs) + + +def decorate(func, caller, extras=()): + """ + decorate(func, caller) decorates a function using a caller. + If the caller is a generator function, the resulting function + will be a generator function. + """ + evaldict = dict(_call_=caller, _func_=func) + es = '' + for i, extra in enumerate(extras): + ex = '_e%d_' % i + evaldict[ex] = extra + es += ex + ', ' + + if '3.5' <= sys.version < '3.6': + # with Python 3.5 isgeneratorfunction returns True for all coroutines + # however we know that it is NOT possible to have a generator + # coroutine in python 3.5: PEP525 was not there yet + generatorcaller = isgeneratorfunction( + caller) and not iscoroutinefunction(caller) + else: + generatorcaller = isgeneratorfunction(caller) + if generatorcaller: + fun = FunctionMaker.create( + func, "for res in _call_(_func_, %s%%(shortsignature)s):\n" + " yield res" % es, evaldict, __wrapped__=func) + else: + fun = FunctionMaker.create( + func, "return _call_(_func_, %s%%(shortsignature)s)" % es, + evaldict, __wrapped__=func) + if hasattr(func, '__qualname__'): + fun.__qualname__ = func.__qualname__ + return fun + + +def decorator(caller, _func=None): + """decorator(caller) converts a caller function into a decorator""" + if _func is not None: # return a decorated function + # this is 
obsolete behavior; you should use decorate instead + return decorate(_func, caller) + # else return a decorator function + defaultargs, defaults = '', () + if inspect.isclass(caller): + name = caller.__name__.lower() + doc = 'decorator(%s) converts functions/generators into ' \ + 'factories of %s objects' % (caller.__name__, caller.__name__) + elif inspect.isfunction(caller): + if caller.__name__ == '': + name = '_lambda_' + else: + name = caller.__name__ + doc = caller.__doc__ + nargs = caller.__code__.co_argcount + ndefs = len(caller.__defaults__ or ()) + defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs]) + if defaultargs: + defaultargs += ',' + defaults = caller.__defaults__ + else: # assume caller is an object with a __call__ method + name = caller.__class__.__name__.lower() + doc = caller.__call__.__doc__ + evaldict = dict(_call=caller, _decorate_=decorate) + dec = FunctionMaker.create( + '%s(func, %s)' % (name, defaultargs), + 'if func is None: return lambda func: _decorate_(func, _call, (%s))\n' + 'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs), + evaldict, doc=doc, module=caller.__module__, __wrapped__=caller) + if defaults: + dec.__defaults__ = (None,) + defaults + return dec + + +# ####################### contextmanager ####################### # + +try: # Python >= 3.2 + from contextlib import _GeneratorContextManager +except ImportError: # Python >= 2.5 + from contextlib import GeneratorContextManager as _GeneratorContextManager + + +class ContextManager(_GeneratorContextManager): + def __call__(self, func): + """Context manager decorator""" + return FunctionMaker.create( + func, "with _self_: return _func_(%(shortsignature)s)", + dict(_self_=self, _func_=func), __wrapped__=func) + + +init = getfullargspec(_GeneratorContextManager.__init__) +n_args = len(init.args) +if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7 + def __init__(self, g, *a, **k): + return _GeneratorContextManager.__init__(self, 
g(*a, **k)) + ContextManager.__init__ = __init__ +elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4 + pass +elif n_args == 4: # (self, gen, args, kwds) Python 3.5 + def __init__(self, g, *a, **k): + return _GeneratorContextManager.__init__(self, g, a, k) + ContextManager.__init__ = __init__ + +_contextmanager = decorator(ContextManager) + + +def contextmanager(func): + # Enable Pylint config: contextmanager-decorators=decorator.contextmanager + return _contextmanager(func) + + +# ############################ dispatch_on ############################ # + +def append(a, vancestors): + """ + Append ``a`` to the list of the virtual ancestors, unless it is already + included. + """ + add = True + for j, va in enumerate(vancestors): + if issubclass(va, a): + add = False + break + if issubclass(a, va): + vancestors[j] = a + add = False + if add: + vancestors.append(a) + + +# inspired from simplegeneric by P.J. Eby and functools.singledispatch +def dispatch_on(*dispatch_args): + """ + Factory of decorators turning a function into a generic function + dispatching on the given arguments. 
+ """ + assert dispatch_args, 'No dispatch args passed' + dispatch_str = '(%s,)' % ', '.join(dispatch_args) + + def check(arguments, wrong=operator.ne, msg=''): + """Make sure one passes the expected number of arguments""" + if wrong(len(arguments), len(dispatch_args)): + raise TypeError('Expected %d arguments, got %d%s' % + (len(dispatch_args), len(arguments), msg)) + + def gen_func_dec(func): + """Decorator turning a function into a generic function""" + + # first check the dispatch arguments + argset = set(getfullargspec(func).args) + if not set(dispatch_args) <= argset: + raise NameError('Unknown dispatch arguments %s' % dispatch_str) + + typemap = {} + + def vancestors(*types): + """ + Get a list of sets of virtual ancestors for the given types + """ + check(types) + ras = [[] for _ in range(len(dispatch_args))] + for types_ in typemap: + for t, type_, ra in zip(types, types_, ras): + if issubclass(t, type_) and type_ not in t.mro(): + append(type_, ra) + return [set(ra) for ra in ras] + + def ancestors(*types): + """ + Get a list of virtual MROs, one for each type + """ + check(types) + lists = [] + for t, vas in zip(types, vancestors(*types)): + n_vas = len(vas) + if n_vas > 1: + raise RuntimeError( + 'Ambiguous dispatch for %s: %s' % (t, vas)) + elif n_vas == 1: + va, = vas + mro = type('t', (t, va), {}).mro()[1:] + else: + mro = t.mro() + lists.append(mro[:-1]) # discard t and object + return lists + + def register(*types): + """ + Decorator to register an implementation for the given types + """ + check(types) + + def dec(f): + check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__) + typemap[types] = f + return f + return dec + + def dispatch_info(*types): + """ + An utility to introspect the dispatch algorithm + """ + check(types) + lst = [] + for anc in itertools.product(*ancestors(*types)): + lst.append(tuple(a.__name__ for a in anc)) + return lst + + def _dispatch(dispatch_args, *args, **kw): + types = tuple(type(arg) for arg in 
dispatch_args) + try: # fast path + f = typemap[types] + except KeyError: + pass + else: + return f(*args, **kw) + combinations = itertools.product(*ancestors(*types)) + next(combinations) # the first one has been already tried + for types_ in combinations: + f = typemap.get(types_) + if f is not None: + return f(*args, **kw) + + # else call the default implementation + return func(*args, **kw) + + return FunctionMaker.create( + func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str, + dict(_f_=_dispatch), register=register, default=func, + typemap=typemap, vancestors=vancestors, ancestors=ancestors, + dispatch_info=dispatch_info, __wrapped__=func) + + gen_func_dec.__name__ = 'dispatch_on' + dispatch_str + return gen_func_dec diff --git a/venv/Lib/site-packages/imageio-2.9.0.dist-info/INSTALLER b/venv/Lib/site-packages/imageio-2.9.0.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/venv/Lib/site-packages/imageio-2.9.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/imageio-2.9.0.dist-info/LICENSE b/venv/Lib/site-packages/imageio-2.9.0.dist-info/LICENSE new file mode 100644 index 000000000..82f800219 --- /dev/null +++ b/venv/Lib/site-packages/imageio-2.9.0.dist-info/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2014-2020, imageio developers +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/venv/Lib/site-packages/imageio-2.9.0.dist-info/METADATA b/venv/Lib/site-packages/imageio-2.9.0.dist-info/METADATA new file mode 100644 index 000000000..9ec78d54d --- /dev/null +++ b/venv/Lib/site-packages/imageio-2.9.0.dist-info/METADATA @@ -0,0 +1,75 @@ +Metadata-Version: 2.1 +Name: imageio +Version: 2.9.0 +Summary: Library for reading and writing a wide range of image, video, scientific, and volumetric data formats. 
+Home-page: https://github.com/imageio/imageio +Author: imageio contributors +Author-email: almar.klein@gmail.com +License: BSD-2-Clause +Download-URL: http://pypi.python.org/pypi/imageio +Keywords: image video volume imread imwrite io animation ffmpeg +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Provides: imageio +Requires-Python: >=3.5 +Requires-Dist: numpy +Requires-Dist: pillow +Provides-Extra: ffmpeg +Requires-Dist: imageio-ffmpeg ; extra == 'ffmpeg' +Provides-Extra: fits +Requires-Dist: astropy ; extra == 'fits' +Provides-Extra: full +Requires-Dist: astropy ; extra == 'full' +Requires-Dist: gdal ; extra == 'full' +Requires-Dist: imageio-ffmpeg ; extra == 'full' +Requires-Dist: itk ; extra == 'full' +Provides-Extra: gdal +Requires-Dist: gdal ; extra == 'gdal' +Provides-Extra: itk +Requires-Dist: itk ; extra == 'itk' + + +.. image:: https://travis-ci.org/imageio/imageio.svg?branch=master + :target: https://travis-ci.org/imageio/imageio' + +.. image:: https://coveralls.io/repos/imageio/imageio/badge.png?branch=master + :target: https://coveralls.io/r/imageio/imageio?branch=master + + +Imageio is a Python library that provides an easy interface to read and +write a wide range of image data, including animated images, volumetric +data, and scientific formats. It is cross-platform, runs on Python 3.5+, +and is easy to install. 
+ +Main website: https://imageio.github.io + + +Release notes: hhttps://github.com/imageio/imageio/blob/master/CHANGELOG.md + +Example: + +.. code-block:: python + + >>> import imageio + >>> im = imageio.imread('imageio:astronaut.png') + >>> im.shape # im is a numpy array + (512, 512, 3) + >>> imageio.imwrite('astronaut-gray.jpg', im[:, :, 0]) + +See the `user API `_ +or `examples `_ +for more information. + + diff --git a/venv/Lib/site-packages/imageio-2.9.0.dist-info/RECORD b/venv/Lib/site-packages/imageio-2.9.0.dist-info/RECORD new file mode 100644 index 000000000..50d247899 --- /dev/null +++ b/venv/Lib/site-packages/imageio-2.9.0.dist-info/RECORD @@ -0,0 +1,89 @@ +../../Scripts/imageio_download_bin.exe,sha256=shSTMQKZt1jRD7suN6J7Xjfti16aeogdzMrrBMac4pQ,97257 +../../Scripts/imageio_remove_bin.exe,sha256=MDO5_J-gPkipT9lcmx5qzdmmPwSRCL77SJuBC1bD-JY,97253 +imageio-2.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +imageio-2.9.0.dist-info/LICENSE,sha256=-JMM5XFFSLVkGaco5Sd1EN9SocaukU4ksOscUwVEF6s,1307 +imageio-2.9.0.dist-info/METADATA,sha256=FMyK0QrRSRdT1NyAbRwvLYs2ITo4MxC3131A4PDdErk,2602 +imageio-2.9.0.dist-info/RECORD,, +imageio-2.9.0.dist-info/WHEEL,sha256=YUYzQ6UQdoqxXjimOitTqynltBCkwY6qlTfTh2IzqQU,97 +imageio-2.9.0.dist-info/entry_points.txt,sha256=wIv0WLZA9V-h0NF4ozbsQHo8Ym9-tj4lfOG6J9Pv13c,131 +imageio-2.9.0.dist-info/top_level.txt,sha256=iSUjc-wEw-xbMTvMOSKg85n0-E7Ms--Mo4FLMC-J2YM,8 +imageio/__init__.py,sha256=ZC5EAP5t2IbguZt0ScfmNDY2xVbsjgBMi9KHqJph3d4,1273 +imageio/__main__.py,sha256=t-PU7fPN_ocsM_jPCNlRHilVHvFwxxT5sNByse5RBzI,5399 +imageio/__pycache__/__init__.cpython-36.pyc,, +imageio/__pycache__/__main__.cpython-36.pyc,, +imageio/__pycache__/freeze.cpython-36.pyc,, +imageio/__pycache__/testing.cpython-36.pyc,, +imageio/core/__init__.py,sha256=PSkGH8K76ntSWhwM4j7W49UmCSZf_OGaSl9fNbQP7uQ,639 +imageio/core/__pycache__/__init__.cpython-36.pyc,, +imageio/core/__pycache__/fetching.cpython-36.pyc,, 
+imageio/core/__pycache__/findlib.cpython-36.pyc,, +imageio/core/__pycache__/format.cpython-36.pyc,, +imageio/core/__pycache__/functions.cpython-36.pyc,, +imageio/core/__pycache__/request.cpython-36.pyc,, +imageio/core/__pycache__/util.cpython-36.pyc,, +imageio/core/fetching.py,sha256=gpVVTHaYXYMmrOgUMq3o2sxwn7pQvkxpJKSzw9fmL-g,9187 +imageio/core/findlib.py,sha256=pp9NXWaVaUdJeGRBY-SAVAkNcvb72WrNU_7oe9SFxRo,5544 +imageio/core/format.py,sha256=6wSf9y8dkk4UXJ_LuN0begWmBvVXOnV6Nxr2eIX1JmU,26004 +imageio/core/functions.py,sha256=dvtS_D3CwyYSVbrom9nNO4DNcZ_nIWG77JERcP3JEDc,21221 +imageio/core/request.py,sha256=Kg_haOxj1g5KwOI0paG2F9xUWWX4ywMyVcl479F_nG0,21190 +imageio/core/util.py,sha256=552H_9xQPRcA0rGta4urVot9ZUNsbokEWHRBmOv1NAU,18663 +imageio/freeze.py,sha256=hi9MNZz-ridgQBWcAqnd92sULek2lgmBSTmuott5lus,170 +imageio/plugins/__init__.py,sha256=MmNuerg2kaNVVC64wOfMXFtBCXpDFleRh1qQ6t9uCpQ,3674 +imageio/plugins/__pycache__/__init__.cpython-36.pyc,, +imageio/plugins/__pycache__/_bsdf.cpython-36.pyc,, +imageio/plugins/__pycache__/_dicom.cpython-36.pyc,, +imageio/plugins/__pycache__/_freeimage.cpython-36.pyc,, +imageio/plugins/__pycache__/_swf.cpython-36.pyc,, +imageio/plugins/__pycache__/_tifffile.cpython-36.pyc,, +imageio/plugins/__pycache__/bsdf.cpython-36.pyc,, +imageio/plugins/__pycache__/dicom.cpython-36.pyc,, +imageio/plugins/__pycache__/example.cpython-36.pyc,, +imageio/plugins/__pycache__/feisem.cpython-36.pyc,, +imageio/plugins/__pycache__/ffmpeg.cpython-36.pyc,, +imageio/plugins/__pycache__/fits.cpython-36.pyc,, +imageio/plugins/__pycache__/freeimage.cpython-36.pyc,, +imageio/plugins/__pycache__/freeimagemulti.cpython-36.pyc,, +imageio/plugins/__pycache__/gdal.cpython-36.pyc,, +imageio/plugins/__pycache__/grab.cpython-36.pyc,, +imageio/plugins/__pycache__/lytro.cpython-36.pyc,, +imageio/plugins/__pycache__/npz.cpython-36.pyc,, +imageio/plugins/__pycache__/pillow.cpython-36.pyc,, +imageio/plugins/__pycache__/pillow_info.cpython-36.pyc,, 
+imageio/plugins/__pycache__/pillowmulti.cpython-36.pyc,, +imageio/plugins/__pycache__/simpleitk.cpython-36.pyc,, +imageio/plugins/__pycache__/spe.cpython-36.pyc,, +imageio/plugins/__pycache__/swf.cpython-36.pyc,, +imageio/plugins/__pycache__/tifffile.cpython-36.pyc,, +imageio/plugins/_bsdf.py,sha256=pSXfsu83lU9dSi7S_Qa4-i-LQsgCa0StcVPxAU7fSZw,32971 +imageio/plugins/_dicom.py,sha256=F5IHB2MSiHvA0a4YCc806BKn4W_0RhS5vkopq1ZCEYs,33935 +imageio/plugins/_freeimage.py,sha256=3AlxvaS2LwN2U85oaYehw8Py83opxJmzAO7f4ygN1S0,51834 +imageio/plugins/_swf.py,sha256=xoPiBOvS-PhrlOBnD-ANoCH3x3GmZ-crDF3Fjaxs98s,25758 +imageio/plugins/_tifffile.py,sha256=zrl54cfCYWiT4EqQyVFsigWHEkr0BY8VUWFHBSQQ0Q8,367400 +imageio/plugins/bsdf.py,sha256=EnEmdKb4dbIyihsS8H7jYFgOct4B5KlinH6hmFWEy9Y,11422 +imageio/plugins/dicom.py,sha256=RFLZ_1xf7Uf0kaJisIqhsB_b4MqDhm5tZku6Y0-_8q0,12190 +imageio/plugins/example.py,sha256=JX8O92x_RJhAikDjTuYneamHrds1HKgvaSCpJeyY2-U,5696 +imageio/plugins/feisem.py,sha256=ezfKArS9f-PRRHkZZCdwb3Y3bn6LnPFf1iDyPqVFjAw,3392 +imageio/plugins/ffmpeg.py,sha256=4HF9n6RD2aB3AsGTY3-CBjre0rv96h7Y01UV2cp2kZQ,28647 +imageio/plugins/fits.py,sha256=HZVXCCKZ5iQb01HYCG91NAKTGrycMJmeli5PfMqi8IU,4775 +imageio/plugins/freeimage.py,sha256=9v9wz7o21SEz-X0-JdWKopMQWUHFbn4o8krB_qhn08E,18887 +imageio/plugins/freeimagemulti.py,sha256=raUSrdQrdb76mafxfwnuf0DHtpPCxc95Q8Rtmwh2frA,11745 +imageio/plugins/gdal.py,sha256=8VyKFuzhhJQeY8cjh1QRv9-DEh0OrHWIlcDENOtzR-8,1711 +imageio/plugins/grab.py,sha256=HGQTPSye4FTgfv5gUCVQAOvizIxgfVYO-nf5DwpTNJ4,3250 +imageio/plugins/lytro.py,sha256=Oj7ZjWBrVB_gBPiYbvgN-M86f1nefj0-wm7yUZcrCz4,24708 +imageio/plugins/npz.py,sha256=ILzIa4m9LeNUjl1014pDtSedzB0NWq7aGHkIU57QNRY,3122 +imageio/plugins/pillow.py,sha256=NnXTR25z7k8H-VhvS6oPcv_v8016CJ9nTfsCVH20MTI,33002 +imageio/plugins/pillow_info.py,sha256=bTe2UPFxhvTuAMPxy3U-ti4BjpL373XmlMxAIeRn_5w,37399 +imageio/plugins/pillowmulti.py,sha256=oZj1zivjjhV6zTRkXa_YCJhHCpqAO1PiVoNrY-JBiQI,12571 
+imageio/plugins/simpleitk.py,sha256=bToRGHEwcym_bzh4ZQJiugV1pVMO1_yX2cxH93GL-Xw,4349 +imageio/plugins/spe.py,sha256=PDCLirGR28TgyHmghC2MY1TEhuRLAT1Kfs9B8d-TvHs,15441 +imageio/plugins/swf.py,sha256=XkyQg8kSqkhJDOAyPshC-tMQVl1VZGiV_eu257hC15o,12295 +imageio/plugins/tifffile.py,sha256=CJCMsLE_gfz6CMBuEKMOQJUK1r5jEhl1kH0VUuZDcMU,11436 +imageio/resources/images/astronaut.png,sha256=iEMc2WU8zVOXQbVV-wpGthVYswHUEQQStbwotePqbLU,791555 +imageio/resources/images/chelsea.png,sha256=l0A8nBcdGu3SAmx1FeICCO-GXqq1bUYsPC7vrem313k,221294 +imageio/resources/images/chelsea.zip,sha256=ieIbNItsviHa0hRghW_MBOgCXdnr1Sp7MvC_vXEDGJo,221318 +imageio/resources/images/cockatoo.mp4,sha256=X9419aKIyobiFtLcKBiKtktFYNMCHyc_rv3w3oDziqU,728751 +imageio/resources/images/newtonscradle.gif,sha256=pmPE4Ha1xI4KrFjHd30rsxk8swU8CY0I2ieKYtAv8xQ,583374 +imageio/resources/images/realshort.mp4,sha256=qLNcLCEwRTueoRcq1K9orAJ7wkg-8FRXaWhHIhJ7_hg,96822 +imageio/resources/images/stent.npz,sha256=YKg9Ipa1HualMVPpupa6kCA5GwyJUoldnWCgpimsa7Y,824612 +imageio/resources/shipped_resources_go_here,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +imageio/testing.py,sha256=DkuiiAY_gXK9pVtumFx1bBBwPKnm2bS-Ub7pj9REces,3419 diff --git a/venv/Lib/site-packages/imageio-2.9.0.dist-info/WHEEL b/venv/Lib/site-packages/imageio-2.9.0.dist-info/WHEEL new file mode 100644 index 000000000..b552003ff --- /dev/null +++ b/venv/Lib/site-packages/imageio-2.9.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/imageio-2.9.0.dist-info/entry_points.txt b/venv/Lib/site-packages/imageio-2.9.0.dist-info/entry_points.txt new file mode 100644 index 000000000..d17059abd --- /dev/null +++ b/venv/Lib/site-packages/imageio-2.9.0.dist-info/entry_points.txt @@ -0,0 +1,4 @@ +[console_scripts] +imageio_download_bin = imageio.__main__:download_bin_main +imageio_remove_bin = imageio.__main__:remove_bin_main + diff --git 
a/venv/Lib/site-packages/imageio-2.9.0.dist-info/top_level.txt b/venv/Lib/site-packages/imageio-2.9.0.dist-info/top_level.txt new file mode 100644 index 000000000..a464e4cd3 --- /dev/null +++ b/venv/Lib/site-packages/imageio-2.9.0.dist-info/top_level.txt @@ -0,0 +1 @@ +imageio diff --git a/venv/Lib/site-packages/imageio/__init__.py b/venv/Lib/site-packages/imageio/__init__.py new file mode 100644 index 000000000..0462a4083 --- /dev/null +++ b/venv/Lib/site-packages/imageio/__init__.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2014-2020, imageio contributors +# imageio is distributed under the terms of the (new) BSD License. + +# This docstring is used at the index of the documentation pages, and +# gets inserted into a slightly larger description (in setup.py) for +# the page on Pypi: +""" +Imageio is a Python library that provides an easy interface to read and +write a wide range of image data, including animated images, volumetric +data, and scientific formats. It is cross-platform, runs on Python 3.5+, +and is easy to install. + +Main website: https://imageio.github.io +""" + +# flake8: noqa + +__version__ = "2.9.0" + +# Load some bits from core +from .core import FormatManager, RETURN_BYTES + +# Instantiate format manager +formats = FormatManager() + +# Load the functions +from .core.functions import help +from .core.functions import get_reader, get_writer +from .core.functions import imread, mimread, volread, mvolread +from .core.functions import imwrite, mimwrite, volwrite, mvolwrite + +# Load function aliases +from .core.functions import read, save +from .core.functions import imsave, mimsave, volsave, mvolsave + +# Load all the plugins +from . 
import plugins + +# expose the show method of formats +show_formats = formats.show + +# Clean up some names +del FormatManager diff --git a/venv/Lib/site-packages/imageio/__main__.py b/venv/Lib/site-packages/imageio/__main__.py new file mode 100644 index 000000000..e0344e711 --- /dev/null +++ b/venv/Lib/site-packages/imageio/__main__.py @@ -0,0 +1,169 @@ +""" +Console scripts and associated helper methods for imageio. +""" + +import argparse +import os +from os import path as op +import shutil +import sys + + +from . import plugins +from .core import util + +# A list of plugins that require binaries from the imageio-binaries +# repository. These plugins must implement the `download` method. +PLUGINS_WITH_BINARIES = ["freeimage"] + + +def download_bin(plugin_names=["all"], package_dir=False): + """ Download binary dependencies of plugins + + This is a convenience method for downloading the binaries + (e.g. for freeimage) from the imageio-binaries + repository. + + Parameters + ---------- + plugin_names: list + A list of imageio plugin names. If it contains "all", all + binary dependencies are downloaded. + package_dir: bool + If set to `True`, the binaries will be downloaded to the + `resources` directory of the imageio package instead of + to the users application data directory. Note that this + might require administrative rights if imageio is installed + in a system directory. + """ + if plugin_names.count("all"): + # Use all plugins + plugin_names = PLUGINS_WITH_BINARIES + + plugin_names.sort() + print("Ascertaining binaries for: {}.".format(", ".join(plugin_names))) + + if package_dir: + # Download the binaries to the `resources` directory + # of imageio. If imageio comes as an .egg, then a cache + # directory will be created by pkg_resources (requires setuptools). 
+ # see `imageio.core.util.resource_dirs` + # and `imageio.core.utilresource_package_dir` + directory = util.resource_package_dir() + else: + directory = None + + for plg in plugin_names: + if plg not in PLUGINS_WITH_BINARIES: + msg = "Plugin {} not registered for binary download!".format(plg) + raise Exception(msg) + mod = getattr(plugins, plg) + mod.download(directory=directory) + + +def download_bin_main(): + """ Argument-parsing wrapper for `download_bin` """ + description = "Download plugin binary dependencies" + phelp = ( + "Plugin name for which to download the binary. " + + "If no argument is given, all binaries are downloaded." + ) + dhelp = ( + "Download the binaries to the package directory " + + "(default is the users application data directory). " + + "This might require administrative rights." + ) + example_text = ( + "examples:\n" + + " imageio_download_bin all\n" + + " imageio_download_bin freeimage\n" + ) + parser = argparse.ArgumentParser( + description=description, + epilog=example_text, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("plugin", type=str, nargs="*", default="all", help=phelp) + parser.add_argument( + "--package-dir", + dest="package_dir", + action="store_true", + default=False, + help=dhelp, + ) + args = parser.parse_args() + download_bin(plugin_names=args.plugin, package_dir=args.package_dir) + + +def remove_bin(plugin_names=["all"]): + """ Remove binary dependencies of plugins + + This is a convenience method that removes all binaries + dependencies for plugins downloaded by imageio. + + Notes + ----- + It only makes sense to use this method if the binaries + are corrupt. 
+ """ + if plugin_names.count("all"): + # Use all plugins + plugin_names = PLUGINS_WITH_BINARIES + + print("Removing binaries for: {}.".format(", ".join(plugin_names))) + + rdirs = util.resource_dirs() + + for plg in plugin_names: + if plg not in PLUGINS_WITH_BINARIES: + msg = "Plugin {} not registered for binary download!".format(plg) + raise Exception(msg) + + not_removed = [] + for rd in rdirs: + # plugin name is in subdirectories + for rsub in os.listdir(rd): + if rsub in plugin_names: + plgdir = op.join(rd, rsub) + try: + shutil.rmtree(plgdir) + except Exception: + not_removed.append(plgdir) + if not_removed: + nrs = ",".join(not_removed) + msg2 = ( + "These plugins files could not be removed: {}\n".format(nrs) + + "Make sure they are not used by any program and try again." + ) + raise Exception(msg2) + + +def remove_bin_main(): + """ Argument-parsing wrapper for `remove_bin` """ + description = "Remove plugin binary dependencies" + phelp = ( + "Plugin name for which to remove the binary. " + + "If no argument is given, all binaries are removed." 
+ ) + example_text = ( + "examples:\n" + + " imageio_remove_bin all\n" + + " imageio_remove_bin freeimage\n" + ) + parser = argparse.ArgumentParser( + description=description, + epilog=example_text, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument("plugin", type=str, nargs="*", default="all", help=phelp) + args = parser.parse_args() + remove_bin(plugin_names=args.plugin) + + +if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1] == "download_bin": + download_bin_main() + elif len(sys.argv) > 1 and sys.argv[1] == "remove_bin": + remove_bin_main() + else: + raise RuntimeError("Invalid use of the imageio CLI") diff --git a/venv/Lib/site-packages/imageio/__pycache__/__init__.cpython-36.pyc b/venv/Lib/site-packages/imageio/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 000000000..fc3e83e2d Binary files /dev/null and b/venv/Lib/site-packages/imageio/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/__pycache__/__main__.cpython-36.pyc b/venv/Lib/site-packages/imageio/__pycache__/__main__.cpython-36.pyc new file mode 100644 index 000000000..6def5ace2 Binary files /dev/null and b/venv/Lib/site-packages/imageio/__pycache__/__main__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/__pycache__/freeze.cpython-36.pyc b/venv/Lib/site-packages/imageio/__pycache__/freeze.cpython-36.pyc new file mode 100644 index 000000000..06762e6ac Binary files /dev/null and b/venv/Lib/site-packages/imageio/__pycache__/freeze.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/__pycache__/testing.cpython-36.pyc b/venv/Lib/site-packages/imageio/__pycache__/testing.cpython-36.pyc new file mode 100644 index 000000000..f517ba147 Binary files /dev/null and b/venv/Lib/site-packages/imageio/__pycache__/testing.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/core/__init__.py b/venv/Lib/site-packages/imageio/core/__init__.py new file mode 100644 index 
000000000..80bedab11 --- /dev/null +++ b/venv/Lib/site-packages/imageio/core/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Distributed under the (new) BSD License. See LICENSE.txt for more info. + +""" This subpackage provides the core functionality of imageio +(everything but the plugins). +""" + +# flake8: noqa + +from .util import Image, Array, Dict, asarray, image_as_uint, urlopen +from .util import BaseProgressIndicator, StdoutProgressIndicator, IS_PYPY +from .util import get_platform, appdata_dir, resource_dirs, has_module +from .findlib import load_lib +from .fetching import get_remote_file, InternetNotAllowedError, NeedDownloadError +from .request import Request, read_n_bytes, RETURN_BYTES +from .format import Format, FormatManager diff --git a/venv/Lib/site-packages/imageio/core/__pycache__/__init__.cpython-36.pyc b/venv/Lib/site-packages/imageio/core/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 000000000..3b25ff691 Binary files /dev/null and b/venv/Lib/site-packages/imageio/core/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/core/__pycache__/fetching.cpython-36.pyc b/venv/Lib/site-packages/imageio/core/__pycache__/fetching.cpython-36.pyc new file mode 100644 index 000000000..7acf4f683 Binary files /dev/null and b/venv/Lib/site-packages/imageio/core/__pycache__/fetching.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/core/__pycache__/findlib.cpython-36.pyc b/venv/Lib/site-packages/imageio/core/__pycache__/findlib.cpython-36.pyc new file mode 100644 index 000000000..cdb336271 Binary files /dev/null and b/venv/Lib/site-packages/imageio/core/__pycache__/findlib.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/core/__pycache__/format.cpython-36.pyc b/venv/Lib/site-packages/imageio/core/__pycache__/format.cpython-36.pyc new file mode 100644 index 000000000..14a2ba278 Binary files /dev/null and 
b/venv/Lib/site-packages/imageio/core/__pycache__/format.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/core/__pycache__/functions.cpython-36.pyc b/venv/Lib/site-packages/imageio/core/__pycache__/functions.cpython-36.pyc new file mode 100644 index 000000000..b7af1e08e Binary files /dev/null and b/venv/Lib/site-packages/imageio/core/__pycache__/functions.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/core/__pycache__/request.cpython-36.pyc b/venv/Lib/site-packages/imageio/core/__pycache__/request.cpython-36.pyc new file mode 100644 index 000000000..640730e68 Binary files /dev/null and b/venv/Lib/site-packages/imageio/core/__pycache__/request.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/core/__pycache__/util.cpython-36.pyc b/venv/Lib/site-packages/imageio/core/__pycache__/util.cpython-36.pyc new file mode 100644 index 000000000..39f897e41 Binary files /dev/null and b/venv/Lib/site-packages/imageio/core/__pycache__/util.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/core/fetching.py b/venv/Lib/site-packages/imageio/core/fetching.py new file mode 100644 index 000000000..aee7d531a --- /dev/null +++ b/venv/Lib/site-packages/imageio/core/fetching.py @@ -0,0 +1,247 @@ +# -*- coding: utf-8 -*- +# Based on code from the vispy project +# Distributed under the (new) BSD License. See LICENSE.txt for more info. + +"""Data downloading and reading functions +""" + +from math import log +import os +from os import path as op +import sys +import shutil +import time + +from . import appdata_dir, resource_dirs +from . import StdoutProgressIndicator, urlopen + + +class InternetNotAllowedError(IOError): + """ Plugins that need resources can just use get_remote_file(), but + should catch this error and silently ignore it. + """ + + pass + + +class NeedDownloadError(IOError): + """ Is raised when a remote file is requested that is not locally + available, but which needs to be explicitly downloaded by the user. 
+ """ + + +def get_remote_file(fname, directory=None, force_download=False, auto=True): + """ Get a the filename for the local version of a file from the web + + Parameters + ---------- + fname : str + The relative filename on the remote data repository to download. + These correspond to paths on + ``https://github.com/imageio/imageio-binaries/``. + directory : str | None + The directory where the file will be cached if a download was + required to obtain the file. By default, the appdata directory + is used. This is also the first directory that is checked for + a local version of the file. If the directory does not exist, + it will be created. + force_download : bool | str + If True, the file will be downloaded even if a local copy exists + (and this copy will be overwritten). Can also be a YYYY-MM-DD date + to ensure a file is up-to-date (modified date of a file on disk, + if present, is checked). + auto : bool + Whether to auto-download the file if its not present locally. Default + True. If False and a download is needed, raises NeedDownloadError. + + Returns + ------- + fname : str + The path to the file on the local system. 
+ """ + _url_root = "https://github.com/imageio/imageio-binaries/raw/master/" + url = _url_root + fname + nfname = op.normcase(fname) # convert to native + # Get dirs to look for the resource + given_directory = directory + directory = given_directory or appdata_dir("imageio") + dirs = resource_dirs() + dirs.insert(0, directory) # Given dir has preference + # Try to find the resource locally + for dir in dirs: + filename = op.join(dir, nfname) + if op.isfile(filename): + if not force_download: # we're done + if given_directory and given_directory != dir: + filename2 = os.path.join(given_directory, nfname) + # Make sure the output directory exists + if not op.isdir(op.dirname(filename2)): + os.makedirs(op.abspath(op.dirname(filename2))) + shutil.copy(filename, filename2) + return filename2 + return filename + if isinstance(force_download, str): + ntime = time.strptime(force_download, "%Y-%m-%d") + ftime = time.gmtime(op.getctime(filename)) + if ftime >= ntime: + if given_directory and given_directory != dir: + filename2 = os.path.join(given_directory, nfname) + # Make sure the output directory exists + if not op.isdir(op.dirname(filename2)): + os.makedirs(op.abspath(op.dirname(filename2))) + shutil.copy(filename, filename2) + return filename2 + return filename + else: + print("File older than %s, updating..." % force_download) + break + + # If we get here, we're going to try to download the file + if os.getenv("IMAGEIO_NO_INTERNET", "").lower() in ("1", "true", "yes"): + raise InternetNotAllowedError( + "Will not download resource from the " + "internet because environment variable " + "IMAGEIO_NO_INTERNET is set." + ) + + # Can we proceed with auto-download? 
+ if not auto: + raise NeedDownloadError() + + # Get filename to store to and make sure the dir exists + filename = op.join(directory, nfname) + if not op.isdir(op.dirname(filename)): + os.makedirs(op.abspath(op.dirname(filename))) + # let's go get the file + if os.getenv("CONTINUOUS_INTEGRATION", False): # pragma: no cover + # On Travis, we retry a few times ... + for i in range(2): + try: + _fetch_file(url, filename) + return filename + except IOError: + time.sleep(0.5) + else: + _fetch_file(url, filename) + return filename + else: # pragma: no cover + _fetch_file(url, filename) + return filename + + +def _fetch_file(url, file_name, print_destination=True): + """Load requested file, downloading it if needed or requested + + Parameters + ---------- + url: string + The url of file to be downloaded. + file_name: string + Name, along with the path, of where downloaded file will be saved. + print_destination: bool, optional + If true, destination of where file was saved will be printed after + download finishes. + resume: bool, optional + If true, try to resume partially downloaded files. + """ + # Adapted from NISL: + # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py + + print( + "Imageio: %r was not found on your computer; " + "downloading it now." % os.path.basename(file_name) + ) + + temp_file_name = file_name + ".part" + local_file = None + initial_size = 0 + errors = [] + for tries in range(4): + try: + # Checking file size and displaying it alongside the download url + remote_file = urlopen(url, timeout=5.0) + file_size = int(remote_file.headers["Content-Length"].strip()) + size_str = _sizeof_fmt(file_size) + print("Try %i. 
Download from %s (%s)" % (tries + 1, url, size_str)) + # Downloading data (can be extended to resume if need be) + local_file = open(temp_file_name, "wb") + _chunk_read(remote_file, local_file, initial_size=initial_size) + # temp file must be closed prior to the move + if not local_file.closed: + local_file.close() + shutil.move(temp_file_name, file_name) + if print_destination is True: + sys.stdout.write("File saved as %s.\n" % file_name) + break + except Exception as e: + errors.append(e) + print("Error while fetching file: %s." % str(e)) + finally: + if local_file is not None: + if not local_file.closed: + local_file.close() + else: + raise IOError( + "Unable to download %r. Perhaps there is a no internet " + "connection? If there is, please report this problem." + % os.path.basename(file_name) + ) + + +def _chunk_read(response, local_file, chunk_size=8192, initial_size=0): + """Download a file chunk by chunk and show advancement + + Can also be used when resuming downloads over http. + + Parameters + ---------- + response: urllib.response.addinfourl + Response to the download request in order to get file size. + local_file: file + Hard disk file where data should be written. + chunk_size: integer, optional + Size of downloaded chunks. Default: 8192 + initial_size: int, optional + If resuming, indicate the initial size of the file. 
+ """ + # Adapted from NISL: + # https://github.com/nisl/tutorial/blob/master/nisl/datasets.py + + bytes_so_far = initial_size + # Returns only amount left to download when resuming, not the size of the + # entire file + total_size = int(response.headers["Content-Length"].strip()) + total_size += initial_size + + progress = StdoutProgressIndicator("Downloading") + progress.start("", "bytes", total_size) + + while True: + chunk = response.read(chunk_size) + bytes_so_far += len(chunk) + if not chunk: + break + _chunk_write(chunk, local_file, progress) + progress.finish("Done") + + +def _chunk_write(chunk, local_file, progress): + """Write a chunk to file and update the progress bar""" + local_file.write(chunk) + progress.increase_progress(len(chunk)) + time.sleep(0) # Give other threads a chance, e.g. those that handle stdout pipes + + +def _sizeof_fmt(num): + """Turn number of bytes into human-readable str""" + units = ["bytes", "kB", "MB", "GB", "TB", "PB"] + decimals = [0, 0, 1, 2, 2, 2] + """Human friendly file size""" + if num > 1: + exponent = min(int(log(num, 1024)), len(units) - 1) + quotient = float(num) / 1024 ** exponent + unit = units[exponent] + num_decimals = decimals[exponent] + format_string = "{0:.%sf} {1}" % num_decimals + return format_string.format(quotient, unit) + return "0 bytes" if num == 0 else "1 byte" diff --git a/venv/Lib/site-packages/imageio/core/findlib.py b/venv/Lib/site-packages/imageio/core/findlib.py new file mode 100644 index 000000000..144f40fb3 --- /dev/null +++ b/venv/Lib/site-packages/imageio/core/findlib.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2015-1018, imageio contributors +# Copyright (C) 2013, Zach Pincus, Almar Klein and others + +""" This module contains generic code to find and load a dynamic library. 
+""" + +import os +import sys +import ctypes + + +LOCALDIR = os.path.abspath(os.path.dirname(__file__)) + +# Flag that can be patched / set to True to disable loading non-system libs +SYSTEM_LIBS_ONLY = False + + +def looks_lib(fname): + """ Returns True if the given filename looks like a dynamic library. + Based on extension, but cross-platform and more flexible. + """ + fname = fname.lower() + if sys.platform.startswith("win"): + return fname.endswith(".dll") + elif sys.platform.startswith("darwin"): + return fname.endswith(".dylib") + else: + return fname.endswith(".so") or ".so." in fname + + +def generate_candidate_libs(lib_names, lib_dirs=None): + """ Generate a list of candidate filenames of what might be the dynamic + library corresponding with the given list of names. + Returns (lib_dirs, lib_paths) + """ + lib_dirs = lib_dirs or [] + + # Get system dirs to search + sys_lib_dirs = [ + "/lib", + "/usr/lib", + "/usr/lib/x86_64-linux-gnu", + "/usr/lib/aarch64-linux-gnu", + "/usr/local/lib", + "/opt/local/lib", + ] + + # Get Python dirs to search (shared if for Pyzo) + py_sub_dirs = ["lib", "DLLs", "Library/bin", "shared"] + py_lib_dirs = [os.path.join(sys.prefix, d) for d in py_sub_dirs] + if hasattr(sys, "base_prefix"): + py_lib_dirs += [os.path.join(sys.base_prefix, d) for d in py_sub_dirs] + + # Get user dirs to search (i.e. 
HOME) + home_dir = os.path.expanduser("~") + user_lib_dirs = [os.path.join(home_dir, d) for d in ["lib"]] + + # Select only the dirs for which a directory exists, and remove duplicates + potential_lib_dirs = lib_dirs + sys_lib_dirs + py_lib_dirs + user_lib_dirs + lib_dirs = [] + for ld in potential_lib_dirs: + if os.path.isdir(ld) and ld not in lib_dirs: + lib_dirs.append(ld) + + # Now attempt to find libraries of that name in the given directory + # (case-insensitive) + lib_paths = [] + for lib_dir in lib_dirs: + # Get files, prefer short names, last version + files = os.listdir(lib_dir) + files = reversed(sorted(files)) + files = sorted(files, key=len) + for lib_name in lib_names: + # Test all filenames for name and ext + for fname in files: + if fname.lower().startswith(lib_name) and looks_lib(fname): + lib_paths.append(os.path.join(lib_dir, fname)) + + # Return (only the items which are files) + lib_paths = [lp for lp in lib_paths if os.path.isfile(lp)] + return lib_dirs, lib_paths + + +def load_lib(exact_lib_names, lib_names, lib_dirs=None): + """ load_lib(exact_lib_names, lib_names, lib_dirs=None) + + Load a dynamic library. + + This function first tries to load the library from the given exact + names. When that fails, it tries to find the library in common + locations. It searches for files that start with one of the names + given in lib_names (case insensitive). The search is performed in + the given lib_dirs and a set of common library dirs. 
+ + Returns ``(ctypes_library, library_path)`` + """ + + # Checks + assert isinstance(exact_lib_names, list) + assert isinstance(lib_names, list) + if lib_dirs is not None: + assert isinstance(lib_dirs, list) + exact_lib_names = [n for n in exact_lib_names if n] + lib_names = [n for n in lib_names if n] + + # Get reference name (for better messages) + if lib_names: + the_lib_name = lib_names[0] + elif exact_lib_names: + the_lib_name = exact_lib_names[0] + else: + raise ValueError("No library name given.") + + # Collect filenames of potential libraries + # First try a few bare library names that ctypes might be able to find + # in the default locations for each platform. + if SYSTEM_LIBS_ONLY: + lib_dirs, lib_paths = [], [] + else: + lib_dirs, lib_paths = generate_candidate_libs(lib_names, lib_dirs) + lib_paths = exact_lib_names + lib_paths + + # Select loader + if sys.platform.startswith("win"): + loader = ctypes.windll + else: + loader = ctypes.cdll + + # Try to load until success + the_lib = None + errors = [] + for fname in lib_paths: + try: + the_lib = loader.LoadLibrary(fname) + break + except Exception as err: + # Don't record errors when it couldn't load the library from an + # exact name -- this fails often, and doesn't provide any useful + # debugging information anyway, beyond "couldn't find library..." + if fname not in exact_lib_names: + errors.append((fname, err)) + + # No success ... + if the_lib is None: + if errors: + # No library loaded, and load-errors reported for some + # candidate libs + err_txt = ["%s:\n%s" % (l, str(e)) for l, e in errors] + msg = ( + "One or more %s libraries were found, but " + + "could not be loaded due to the following errors:\n%s" + ) + raise OSError(msg % (the_lib_name, "\n\n".join(err_txt))) + else: + # No errors, because no potential libraries found at all! 
+ msg = "Could not find a %s library in any of:\n%s" + raise OSError(msg % (the_lib_name, "\n".join(lib_dirs))) + + # Done + return the_lib, fname diff --git a/venv/Lib/site-packages/imageio/core/format.py b/venv/Lib/site-packages/imageio/core/format.py new file mode 100644 index 000000000..c5e093a67 --- /dev/null +++ b/venv/Lib/site-packages/imageio/core/format.py @@ -0,0 +1,735 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" + +.. note:: + imageio is under construction, some details with regard to the + Reader and Writer classes may change. + +These are the main classes of imageio. They expose an interface for +advanced users and plugin developers. A brief overview: + + * imageio.FormatManager - for keeping track of registered formats. + * imageio.Format - representation of a file format reader/writer + * imageio.Format.Reader - object used during the reading of a file. + * imageio.Format.Writer - object used during saving a file. + * imageio.Request - used to store the filename and other info. + +Plugins need to implement a Format class and register +a format object using ``imageio.formats.add_format()``. + +""" + +# todo: do we even use the known extensions? + +# Some notes: +# +# The classes in this module use the Request object to pass filename and +# related info around. This request object is instantiated in +# imageio.get_reader and imageio.get_writer. + +import os +import sys + +import numpy as np + +from . import Array, asarray + + +MODENAMES = { + "i": "single-image", + "I": "multi-image", + "v": "single-volume", + "V": "multi-volume", + "?": "any-mode", +} + + +class Format(object): + """ Represents an implementation to read/write a particular file format + + A format instance is responsible for 1) providing information about + a format; 2) determining whether a certain file can be read/written + with this format; 3) providing a reader/writer class. 
+ + Generally, imageio will select the right format and use that to + read/write an image. A format can also be explicitly chosen in all + read/write functions. Use ``print(format)``, or ``help(format_name)`` + to see its documentation. + + To implement a specific format, one should create a subclass of + Format and the Format.Reader and Format.Writer classes. see + :doc:`plugins` for details. + + Parameters + ---------- + name : str + A short name of this format. Users can select a format using its name. + description : str + A one-line description of the format. + extensions : str | list | None + List of filename extensions that this format supports. If a + string is passed it should be space or comma separated. The + extensions are used in the documentation and to allow users to + select a format by file extension. It is not used to determine + what format to use for reading/saving a file. + modes : str + A string containing the modes that this format can handle ('iIvV'), + “i” for an image, “I” for multiple images, “v” for a volume, + “V” for multiple volumes. + This attribute is used in the documentation and to select the + formats when reading/saving a file. + """ + + def __init__(self, name, description, extensions=None, modes=None): + + # Store name and description + self._name = name.upper() + self._description = description + + # Store extensions, do some effort to normalize them. + # They are stored as a list of lowercase strings without leading dots. + if extensions is None: + extensions = [] + elif isinstance(extensions, str): + extensions = extensions.replace(",", " ").split(" ") + # + if isinstance(extensions, (tuple, list)): + self._extensions = tuple( + ["." 
+ e.strip(".").lower() for e in extensions if e] + ) + else: + raise ValueError("Invalid value for extensions given.") + + # Store mode + self._modes = modes or "" + if not isinstance(self._modes, str): + raise ValueError("Invalid value for modes given.") + for m in self._modes: + if m not in "iIvV?": + raise ValueError("Invalid value for mode given.") + + def __repr__(self): + # Short description + return "" % (self.name, self.description) + + def __str__(self): + return self.doc + + @property + def doc(self): + """ The documentation for this format (name + description + docstring). + """ + # Our docsring is assumed to be indented by four spaces. The + # first line needs special attention. + return "%s - %s\n\n %s\n" % ( + self.name, + self.description, + self.__doc__.strip(), + ) + + @property + def name(self): + """ The name of this format. + """ + return self._name + + @property + def description(self): + """ A short description of this format. + """ + return self._description + + @property + def extensions(self): + """ A list of file extensions supported by this plugin. + These are all lowercase with a leading dot. + """ + return self._extensions + + @property + def modes(self): + """ A string specifying the modes that this format can handle. + """ + return self._modes + + def get_reader(self, request): + """ get_reader(request) + + Return a reader object that can be used to read data and info + from the given file. Users are encouraged to use + imageio.get_reader() instead. + """ + select_mode = request.mode[1] if request.mode[1] in "iIvV" else "" + if select_mode not in self.modes: + modename = MODENAMES.get(select_mode, select_mode) + raise RuntimeError( + "Format %s cannot read in %s mode" % (self.name, modename) + ) + return self.Reader(self, request) + + def get_writer(self, request): + """ get_writer(request) + + Return a writer object that can be used to write data and info + to the given file. 
Users are encouraged to use + imageio.get_writer() instead. + """ + select_mode = request.mode[1] if request.mode[1] in "iIvV" else "" + if select_mode not in self.modes: + modename = MODENAMES.get(select_mode, select_mode) + raise RuntimeError( + "Format %s cannot write in %s mode" % (self.name, modename) + ) + return self.Writer(self, request) + + def can_read(self, request): + """ can_read(request) + + Get whether this format can read data from the specified uri. + """ + return self._can_read(request) + + def can_write(self, request): + """ can_write(request) + + Get whether this format can write data to the speciefed uri. + """ + return self._can_write(request) + + def _can_read(self, request): # pragma: no cover + return None # Plugins must implement this + + def _can_write(self, request): # pragma: no cover + return None # Plugins must implement this + + # ----- + + class _BaseReaderWriter(object): + """ Base class for the Reader and Writer class to implement common + functionality. It implements a similar approach for opening/closing + and context management as Python's file objects. + """ + + def __init__(self, format, request): + self.__closed = False + self._BaseReaderWriter_last_index = -1 + self._format = format + self._request = request + # Open the reader/writer + self._open(**self.request.kwargs.copy()) + + @property + def format(self): + """ The :class:`.Format` object corresponding to the current + read/write operation. + """ + return self._format + + @property + def request(self): + """ The :class:`.Request` object corresponding to the + current read/write operation. + """ + return self._request + + def __enter__(self): + self._checkClosed() + return self + + def __exit__(self, type, value, traceback): + if value is None: + # Otherwise error in close hide the real error. 
+ self.close() + + def __del__(self): + try: + self.close() + except Exception: # pragma: no cover + pass # Supress noise when called during interpreter shutdown + + def close(self): + """ Flush and close the reader/writer. + This method has no effect if it is already closed. + """ + if self.__closed: + return + self.__closed = True + self._close() + # Process results and clean request object + self.request.finish() + + @property + def closed(self): + """ Whether the reader/writer is closed. + """ + return self.__closed + + def _checkClosed(self, msg=None): + """Internal: raise an ValueError if reader/writer is closed + """ + if self.closed: + what = self.__class__.__name__ + msg = msg or ("I/O operation on closed %s." % what) + raise RuntimeError(msg) + + # To implement + + def _open(self, **kwargs): + """ _open(**kwargs) + + Plugins should probably implement this. + + It is called when reader/writer is created. Here the + plugin can do its initialization. The given keyword arguments + are those that were given by the user at imageio.read() or + imageio.write(). + """ + raise NotImplementedError() + + def _close(self): + """ _close() + + Plugins should probably implement this. + + It is called when the reader/writer is closed. Here the plugin + can do a cleanup, flush, etc. + + """ + raise NotImplementedError() + + # ----- + + class Reader(_BaseReaderWriter): + """ + The purpose of a reader object is to read data from an image + resource, and should be obtained by calling :func:`.get_reader`. + + A reader can be used as an iterator to read multiple images, + and (if the format permits) only reads data from the file when + new data is requested (i.e. streaming). A reader can also be + used as a context manager so that it is automatically closed. + + Plugins implement Reader's for different formats. Though rare, + plugins may provide additional functionality (beyond what is + provided by the base reader class). 
+ """ + + def get_length(self): + """ get_length() + + Get the number of images in the file. (Note: you can also + use ``len(reader_object)``.) + + The result can be: + * 0 for files that only have meta data + * 1 for singleton images (e.g. in PNG, JPEG, etc.) + * N for image series + * inf for streams (series of unknown length) + """ + return self._get_length() + + def get_data(self, index, **kwargs): + """ get_data(index, **kwargs) + + Read image data from the file, using the image index. The + returned image has a 'meta' attribute with the meta data. + Raises IndexError if the index is out of range. + + Some formats may support additional keyword arguments. These are + listed in the documentation of those formats. + """ + self._checkClosed() + self._BaseReaderWriter_last_index = index + try: + im, meta = self._get_data(index, **kwargs) + except StopIteration: + raise IndexError(index) + return Array(im, meta) # Array tests im and meta + + def get_next_data(self, **kwargs): + """ get_next_data(**kwargs) + + Read the next image from the series. + + Some formats may support additional keyword arguments. These are + listed in the documentation of those formats. + """ + return self.get_data(self._BaseReaderWriter_last_index + 1, **kwargs) + + def set_image_index(self, index, **kwargs): + """ set_image_index(index) + + Set the internal pointer such that the next call to + get_next_data() returns the image specified by the index + """ + self._checkClosed() + n = self.get_length() + if index <= n: + self._BaseReaderWriter_last_index = index - 1 + + def get_meta_data(self, index=None): + """ get_meta_data(index=None) + + Read meta data from the file. using the image index. If the + index is omitted or None, return the file's (global) meta data. + + Note that ``get_data`` also provides the meta data for the returned + image as an atrribute of that image. + + The meta data is a dict, which shape depends on the format. + E.g. 
for JPEG, the dict maps group names to subdicts and each + group is a dict with name-value pairs. The groups represent + the different metadata formats (EXIF, XMP, etc.). + """ + self._checkClosed() + meta = self._get_meta_data(index) + if not isinstance(meta, dict): + raise ValueError( + "Meta data must be a dict, not %r" % meta.__class__.__name__ + ) + return meta + + def iter_data(self): + """ iter_data() + + Iterate over all images in the series. (Note: you can also + iterate over the reader object.) + + """ + self._checkClosed() + n = self.get_length() + i = 0 + while i < n: + try: + im, meta = self._get_data(i) + except StopIteration: + return + except IndexError: + if n == float("inf"): + return + raise + yield Array(im, meta) + i += 1 + + # Compatibility + + def __iter__(self): + return self.iter_data() + + def __len__(self): + n = self.get_length() + if n == float("inf"): + n = sys.maxsize + return n + + # To implement + + def _get_length(self): + """ _get_length() + + Plugins must implement this. + + The retured scalar specifies the number of images in the series. + See Reader.get_length for more information. + """ + raise NotImplementedError() + + def _get_data(self, index): + """ _get_data() + + Plugins must implement this, but may raise an IndexError in + case the plugin does not support random access. + + It should return the image and meta data: (ndarray, dict). + """ + raise NotImplementedError() + + def _get_meta_data(self, index): + """ _get_meta_data(index) + + Plugins must implement this. + + It should return the meta data as a dict, corresponding to the + given index, or to the file's (global) meta data if index is + None. + """ + raise NotImplementedError() + + # ----- + + class Writer(_BaseReaderWriter): + """ + The purpose of a writer object is to write data to an image + resource, and should be obtained by calling :func:`.get_writer`. + + A writer will (if the format permits) write data to the file + as soon as new data is provided (i.e. 
streaming). A writer can + also be used as a context manager so that it is automatically + closed. + + Plugins implement Writer's for different formats. Though rare, + plugins may provide additional functionality (beyond what is + provided by the base writer class). + """ + + def append_data(self, im, meta=None): + """ append_data(im, meta={}) + + Append an image (and meta data) to the file. The final meta + data that is used consists of the meta data on the given + image (if applicable), updated with the given meta data. + """ + self._checkClosed() + # Check image data + if not isinstance(im, np.ndarray): + raise ValueError("append_data requires ndarray as first arg") + # Get total meta dict + total_meta = {} + if hasattr(im, "meta") and isinstance(im.meta, dict): + total_meta.update(im.meta) + if meta is None: + pass + elif not isinstance(meta, dict): + raise ValueError("Meta must be a dict.") + else: + total_meta.update(meta) + + # Decouple meta info + im = asarray(im) + # Call + return self._append_data(im, total_meta) + + def set_meta_data(self, meta): + """ set_meta_data(meta) + + Sets the file's (global) meta data. The meta data is a dict which + shape depends on the format. E.g. for JPEG the dict maps + group names to subdicts, and each group is a dict with + name-value pairs. The groups represents the different + metadata formats (EXIF, XMP, etc.). + + Note that some meta formats may not be supported for + writing, and individual fields may be ignored without + warning if they are invalid. 
+ """ + self._checkClosed() + if not isinstance(meta, dict): + raise ValueError("Meta must be a dict.") + else: + return self._set_meta_data(meta) + + # To implement + + def _append_data(self, im, meta): + # Plugins must implement this + raise NotImplementedError() + + def _set_meta_data(self, meta): + # Plugins must implement this + raise NotImplementedError() + + +class FormatManager(object): + """ + There is exactly one FormatManager object in imageio: ``imageio.formats``. + Its purpose it to keep track of the registered formats. + + The format manager supports getting a format object using indexing (by + format name or extension). When used as an iterator, this object + yields all registered format objects. + + See also :func:`.help`. + """ + + def __init__(self): + self._formats = [] + self._formats_sorted = [] + + def __repr__(self): + return "" % len(self) + + def __iter__(self): + return iter(self._formats_sorted) + + def __len__(self): + return len(self._formats) + + def __str__(self): + ss = [] + for format in self: + ext = ", ".join(format.extensions) + s = "%s - %s [%s]" % (format.name, format.description, ext) + ss.append(s) + return "\n".join(ss) + + def __getitem__(self, name): + # Check + if not isinstance(name, str): + raise ValueError( + "Looking up a format should be done by name " "or by extension." + ) + if not name: + raise ValueError("No format matches the empty string.") + + # Test if name is existing file + if os.path.isfile(name): + from . import Request + + format = self.search_read_format(Request(name, "r?")) + if format is not None: + return format + + if "." 
in name: + # Look for extension + e1, e2 = os.path.splitext(name.lower()) + name = e2 or e1 + # Search for format that supports this extension + for format in self: + if name in format.extensions: + return format + else: + # Look for name + name = name.upper() + for format in self: + if name == format.name: + return format + for format in self: + if name == format.name.rsplit("-", 1)[0]: + return format + else: + # Maybe the user meant to specify an extension + try: + return self["." + name.lower()] + except IndexError: + pass # Fail using original name below + + # Nothing found ... + raise IndexError("No format known by name %s." % name) + + def sort(self, *names): + """ sort(name1, name2, name3, ...) + + Sort the formats based on zero or more given names; a format with + a name that matches one of the given names will take precedence + over other formats. A match means an equal name, or ending with + that name (though the former counts higher). Case insensitive. + + Format preference will match the order of the given names: using + ``sort('TIFF', '-FI', '-PIL')`` would prefer the FreeImage formats + over the Pillow formats, but prefer TIFF even more. Each time + this is called, the starting point is the default format order, + and calling ``sort()`` with no arguments will reset the order. + + Be aware that using the function can affect the behavior of + other code that makes use of imageio. + + Also see the ``IMAGEIO_FORMAT_ORDER`` environment variable. + """ + # Check and sanitize imput + for name in names: + if not isinstance(name, str): + raise TypeError("formats.sort() accepts only string names.") + if any(c in name for c in ".,"): + raise ValueError( + "Names given to formats.sort() should not " + "contain dots or commas." 
+ ) + names = [name.strip().upper() for name in names] + # Reset + self._formats_sorted = list(self._formats) + # Sort + for name in reversed(names): + sorter = lambda f: -((f.name == name) + (f.name.endswith(name))) + self._formats_sorted.sort(key=sorter) + + def add_format(self, format, overwrite=False): + """ add_format(format, overwrite=False) + + Register a format, so that imageio can use it. If a format with the + same name already exists, an error is raised, unless overwrite is True, + in which case the current format is replaced. + """ + if not isinstance(format, Format): + raise ValueError("add_format needs argument to be a Format object") + elif format in self._formats: + raise ValueError("Given Format instance is already registered") + elif format.name in self.get_format_names(): + if overwrite: + old_format = self[format.name] + self._formats.remove(old_format) + if old_format in self._formats_sorted: + self._formats_sorted.remove(old_format) + else: + raise ValueError( + "A Format named %r is already registered, use" + " overwrite=True to replace." % format.name + ) + self._formats.append(format) + self._formats_sorted.append(format) + + def search_read_format(self, request): + """ search_read_format(request) + + Search a format that can read a file according to the given request. + Returns None if no appropriate format was found. (used internally) + """ + select_mode = request.mode[1] if request.mode[1] in "iIvV" else "" + + # Select formats that seem to be able to read it + selected_formats = [] + for format in self: + if select_mode in format.modes: + if request.extension in format.extensions: + selected_formats.append(format) + + # Select the first that can + for format in selected_formats: + if format.can_read(request): + return format + + # If no format could read it, it could be that file has no or + # the wrong extension. We ask all formats again. 
+ for format in self: + if format not in selected_formats: + if format.can_read(request): + return format + + def search_write_format(self, request): + """ search_write_format(request) + + Search a format that can write a file according to the given request. + Returns None if no appropriate format was found. (used internally) + """ + select_mode = request.mode[1] if request.mode[1] in "iIvV" else "" + + # Select formats that seem to be able to write it + selected_formats = [] + for format in self: + if select_mode in format.modes: + if request.extension in format.extensions: + selected_formats.append(format) + + # Select the first that can + for format in selected_formats: + if format.can_write(request): + return format + + # If none of the selected formats could write it, maybe another + # format can still write it. It might prefer a different mode, + # or be able to handle more formats than it says by its extensions. + for format in self: + if format not in selected_formats: + if format.can_write(request): + return format + + def get_format_names(self): + """ Get the names of all registered formats. + """ + return [f.name for f in self] + + def show(self): + """ Show a nicely formatted list of available formats + """ + print(self) diff --git a/venv/Lib/site-packages/imageio/core/functions.py b/venv/Lib/site-packages/imageio/core/functions.py new file mode 100644 index 000000000..3a579b04d --- /dev/null +++ b/venv/Lib/site-packages/imageio/core/functions.py @@ -0,0 +1,625 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" +These functions represent imageio's main interface for the user. They +provide a common API to read and write image data for a large +variety of formats. All read and write functions accept keyword +arguments, which are passed on to the format that does the actual work. +To see what keyword arguments are supported by a specific format, use +the :func:`.help` function. 
+ +Functions for reading: + + * :func:`.imread` - read an image from the specified uri + * :func:`.mimread` - read a series of images from the specified uri + * :func:`.volread` - read a volume from the specified uri + * :func:`.mvolread` - read a series of volumes from the specified uri + +Functions for saving: + + * :func:`.imwrite` - write an image to the specified uri + * :func:`.mimwrite` - write a series of images to the specified uri + * :func:`.volwrite` - write a volume to the specified uri + * :func:`.mvolwrite` - write a series of volumes to the specified uri + +More control: + +For a larger degree of control, imageio provides functions +:func:`.get_reader` and :func:`.get_writer`. They respectively return an +:class:`.Reader` and an :class:`.Writer` object, which can +be used to read/write data and meta data in a more controlled manner. +This also allows specific scientific formats to be exposed in a way +that best suits that file-format. + +---- + +All read-functions return images as numpy arrays, and have a ``meta`` +attribute; the meta-data dictionary can be accessed with ``im.meta``. +To make this work, imageio actually makes use of a subclass of +``np.ndarray``. If needed, the image can be converted to a plain numpy +array using ``np.asarray(im)``. + +---- + +Supported resource URI's: + +All functions described here accept a URI to describe the resource to +read from or write to. These can be a wide range of things. (Imageio +takes care of handling the URI so that plugins can access the data in +an easy way.) + +For reading and writing: + +* a normal filename, e.g. ``'c:\\foo\\bar.png'`` +* a file in a zipfile, e.g. ``'c:\\foo\\bar.zip\\eggs.png'`` +* a file object with a ``read()`` / ``write()`` method. + +For reading: + +* an http/ftp address, e.g. ``'http://example.com/foo.png'`` +* the raw bytes of an image file +* ``get_reader("")`` to grab images from a (web) camera. +* ``imread("")`` to grab a screenshot (on Windows or OS X). 
+* ``imread("")`` to grab an image from the clipboard (on Windows). + +For writing one can also use ``''`` or ``imageio.RETURN_BYTES`` to +make a write function return the bytes instead of writing to a file. + +Note that reading from HTTP and zipfiles works for many formats including +png and jpeg, but may not work for all formats (some plugins "seek" the +file object, which HTTP/zip streams do not support). In such a case one +can download/extract the file first. For HTTP one can use something +like ``imageio.imread(imageio.core.urlopen(url).read(), '.gif')``. + +""" + +from numbers import Number +import re + +import numpy as np + +from . import Request, RETURN_BYTES +from .. import formats +from .format import MODENAMES + + +MEMTEST_DEFAULT_MIM = "256MB" +MEMTEST_DEFAULT_MVOL = "1GB" + + +mem_re = re.compile(r"^(\d+\.?\d*)\s*([kKMGTPEZY]?i?)B?$") +sizes = {"": 1, None: 1} +for i, si in enumerate([""] + list("kMGTPEZY")): + sizes[si] = 1000 ** i + if si: + sizes[si.upper() + "i"] = 1024 ** i + + +def to_nbytes(arg, default=None): + if not arg: + return None + + if arg is True: + return default + + if isinstance(arg, Number): + return arg + + match = mem_re.match(arg) + if match is None: + raise ValueError( + "Memory size could not be parsed " + "(is your capitalisation correct?): {}".format(arg) + ) + + num, unit = match.groups() + + try: + return float(num) * sizes[unit] + except KeyError: + raise ValueError( + "Memory size unit not recognised " + "(is your capitalisation correct?): {}".format(unit) + ) + + +def help(name=None): + """ help(name=None) + + Print the documentation of the format specified by name, or a list + of supported formats if name is omitted. + + Parameters + ---------- + name : str + Can be the name of a format, a filename extension, or a full + filename. See also the :doc:`formats page `. 
+ """ + if not name: + print(formats) + else: + print(formats[name]) + + +## Base functions that return a reader/writer + + +def get_reader(uri, format=None, mode="?", **kwargs): + """ get_reader(uri, format=None, mode='?', **kwargs) + + Returns a :class:`.Reader` object which can be used to read data + and meta data from the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + mode : {'i', 'I', 'v', 'V', '?'} + Used to give the reader a hint on what the user expects (default "?"): + "i" for an image, "I" for multiple images, "v" for a volume, + "V" for multiple volumes, "?" for don't care. + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Create request object + request = Request(uri, "r" + mode, **kwargs) + + # Get format + if format is not None: + format = formats[format] + else: + format = formats.search_read_format(request) + if format is None: + modename = MODENAMES.get(mode, mode) + raise ValueError( + "Could not find a format to read the specified file in %s mode" % modename + ) + + # Return its reader object + return format.get_reader(request) + + +def get_writer(uri, format=None, mode="?", **kwargs): + """ get_writer(uri, format=None, mode='?', **kwargs) + + Returns a :class:`.Writer` object which can be used to write data + and meta data to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the image to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + format : str + The format to use to write the file. 
By default imageio selects + the appropriate for you based on the filename. + mode : {'i', 'I', 'v', 'V', '?'} + Used to give the writer a hint on what the user expects (default '?'): + "i" for an image, "I" for multiple images, "v" for a volume, + "V" for multiple volumes, "?" for don't care. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Signal extension when returning as bytes, needed by e.g. ffmpeg + if uri == RETURN_BYTES and isinstance(format, str): + uri = RETURN_BYTES + "." + format.strip(". ") + + # Create request object + request = Request(uri, "w" + mode, **kwargs) + + # Get format + if format is not None: + format = formats[format] + else: + format = formats.search_write_format(request) + if format is None: + modename = MODENAMES.get(mode, mode) + raise ValueError( + "Could not find a format to write the specified file in %s mode" % modename + ) + + # Return its writer object + return format.get_writer(request) + + +## Images + + +def imread(uri, format=None, **kwargs): + """ imread(uri, format=None, **kwargs) + + Reads an image from the specified file. Returns a numpy array, which + comes with a dict of meta data at its 'meta' attribute. + + Note that the image data is returned as-is, and may not always have + a dtype of uint8 (and thus may differ from what e.g. PIL returns). + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the image from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. 
+ """ + + if "mode" in kwargs: + raise TypeError( + 'Invalid keyword argument "mode", ' 'perhaps you mean "pilmode"?' + ) + + # Get reader and read first + reader = read(uri, format, "i", **kwargs) + with reader: + return reader.get_data(0) + + +def imwrite(uri, im, format=None, **kwargs): + """ imwrite(uri, im, format=None, **kwargs) + + Write an image to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the image to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + im : numpy.ndarray + The image data. Must be NxM, NxMx3 or NxMx4. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Test image + imt = type(im) + im = np.asanyarray(im) + if not np.issubdtype(im.dtype, np.number): + raise ValueError("Image is not numeric, but {}.".format(imt.__name__)) + elif im.ndim == 2: + pass + elif im.ndim == 3 and im.shape[2] in [1, 3, 4]: + pass + else: + raise ValueError("Image must be 2D (grayscale, RGB, or RGBA).") + + # Get writer and write first + writer = get_writer(uri, format, "i", **kwargs) + with writer: + writer.append_data(im) + + # Return a result if there is any + return writer.request.get_result() + + +## Multiple images + + +def mimread(uri, format=None, memtest=MEMTEST_DEFAULT_MIM, **kwargs): + """ mimread(uri, format=None, memtest="256MB", **kwargs) + + Reads multiple images from the specified file. Returns a list of + numpy arrays, each with a dict of meta data at its 'meta' attribute. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the images from, e.g. a filename,pathlib.Path, + http address or file object, see the docs for more info. 
+ format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + memtest : {bool, int, float, str} + If truthy, this function will raise an error if the resulting + list of images consumes greater than the amount of memory specified. + This is to protect the system from using so much memory that it needs + to resort to swapping, and thereby stall the computer. E.g. + ``mimread('hunger_games.avi')``. + + If the argument is a number, that will be used as the threshold number + of bytes. + + If the argument is a string, it will be interpreted as a number of bytes with + SI/IEC prefixed units (e.g. '1kB', '250MiB', '80.3YB'). + + - Units are case sensitive + - k, M etc. represent a 1000-fold change, where Ki, Mi etc. represent 1024-fold + - The "B" is optional, but if present, must be capitalised + + If the argument is True, the default will be used, for compatibility reasons. + + Default: '256MB' + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Get reader + reader = read(uri, format, "I", **kwargs) + nbyte_limit = to_nbytes(memtest, MEMTEST_DEFAULT_MIM) + + # Read + ims = [] + nbytes = 0 + for im in reader: + ims.append(im) + # Memory check + nbytes += im.nbytes + if nbyte_limit and nbytes > nbyte_limit: + ims[:] = [] # clear to free the memory + raise RuntimeError( + "imageio.mimread() has read over {}B of " + "image data.\nStopped to avoid memory problems." + " Use imageio.get_reader(), increase threshold, or memtest=False".format( + int(nbyte_limit) + ) + ) + + return ims + + +def mimwrite(uri, ims, format=None, **kwargs): + """ mimwrite(uri, ims, format=None, **kwargs) + + Write multiple images to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the images to, e.g. 
a filename, pathlib.Path + or file object, see the docs for more info. + ims : sequence of numpy arrays + The image data. Each array must be NxM, NxMx3 or NxMx4. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + # Get writer + writer = get_writer(uri, format, "I", **kwargs) + written = 0 + + with writer: + + # Iterate over images (ims may be a generator) + for im in ims: + + # Test image + imt = type(im) + im = np.asanyarray(im) + if not np.issubdtype(im.dtype, np.number): + raise ValueError("Image is not numeric, but {}.".format(imt.__name__)) + elif im.ndim == 2: + pass + elif im.ndim == 3 and im.shape[2] in [1, 3, 4]: + pass + else: + raise ValueError("Image must be 2D " "(grayscale, RGB, or RGBA).") + + # Add image + writer.append_data(im) + written += 1 + + # Check that something was written. Check after writing, because ims might + # be a generator. The damage is done, but we want to error when it happens. + if not written: + raise RuntimeError("Zero images were written.") + + # Return a result if there is any + return writer.request.get_result() + + +## Volumes + + +def volread(uri, format=None, **kwargs): + """ volread(uri, format=None, **kwargs) + + Reads a volume from the specified file. Returns a numpy array, which + comes with a dict of meta data at its 'meta' attribute. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the volume from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the reader. 
See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Get reader and read first + reader = read(uri, format, "v", **kwargs) + with reader: + return reader.get_data(0) + + +def volwrite(uri, im, format=None, **kwargs): + """ volwrite(uri, vol, format=None, **kwargs) + + Write a volume to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the image to, e.g. a filename, pathlib.Path + or file object, see the docs for more info. + vol : numpy.ndarray + The image data. Must be NxMxL (or NxMxLxK if each voxel is a tuple). + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Test image + imt = type(im) + im = np.asanyarray(im) + if not np.issubdtype(im.dtype, np.number): + raise ValueError("Image is not numeric, but {}.".format(imt.__name__)) + elif im.ndim == 3: + pass + elif im.ndim == 4 and im.shape[3] < 32: # How large can a tuple be? + pass + else: + raise ValueError("Image must be 3D, or 4D if each voxel is a tuple.") + + # Get writer and write first + writer = get_writer(uri, format, "v", **kwargs) + with writer: + writer.append_data(im) + + # Return a result if there is any + return writer.request.get_result() + + +## Multiple volumes + + +def mvolread(uri, format=None, memtest=MEMTEST_DEFAULT_MVOL, **kwargs): + """ mvolread(uri, format=None, memtest='1GB', **kwargs) + + Reads multiple volumes from the specified file. Returns a list of + numpy arrays, each with a dict of meta data at its 'meta' attribute. + + Parameters + ---------- + uri : {str, pathlib.Path, bytes, file} + The resource to load the volumes from, e.g. a filename, pathlib.Path, + http address or file object, see the docs for more info. 
+ format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + memtest : {bool, int, float, str} + If truthy, this function will raise an error if the resulting + list of images consumes greater than the amount of memory specified. + This is to protect the system from using so much memory that it needs + to resort to swapping, and thereby stall the computer. E.g. + ``mimread('hunger_games.avi')``. + + If the argument is a number, that will be used as the threshold number + of bytes. + + If the argument is a string, it will be interpreted as a number of bytes with + SI/IEC prefixed units (e.g. '1kB', '250MiB', '80.3YB'). + + - Units are case sensitive + - k, M etc. represent a 1000-fold change, where Ki, Mi etc. represent 1024-fold + - The "B" is optional, but if present, must be capitalised + + If the argument is True, the default will be used, for compatibility reasons. + + Default: '1GB' + kwargs : ... + Further keyword arguments are passed to the reader. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Get reader and read all + reader = read(uri, format, "V", **kwargs) + nbyte_limit = to_nbytes(memtest, MEMTEST_DEFAULT_MVOL) + + ims = [] + nbytes = 0 + for im in reader: + ims.append(im) + # Memory check + nbytes += im.nbytes + if nbyte_limit and nbytes > nbyte_limit: # pragma: no cover + ims[:] = [] # clear to free the memory + raise RuntimeError( + "imageio.mvolread() has read over {}B of " + "image data.\nStopped to avoid memory problems." + " Use imageio.get_reader(), increase threshold, or memtest=False".format( + int(nbyte_limit) + ) + ) + + return ims + + +def mvolwrite(uri, ims, format=None, **kwargs): + """ mvolwrite(uri, vols, format=None, **kwargs) + + Write multiple volumes to the specified file. + + Parameters + ---------- + uri : {str, pathlib.Path, file} + The resource to write the volumes to, e.g. 
a filename, pathlib.Path + or file object, see the docs for more info. + ims : sequence of numpy arrays + The image data. Each array must be NxMxL (or NxMxLxK if each + voxel is a tuple). + format : str + The format to use to read the file. By default imageio selects + the appropriate for you based on the filename and its contents. + kwargs : ... + Further keyword arguments are passed to the writer. See :func:`.help` + to see what arguments are available for a particular format. + """ + + # Get writer + writer = get_writer(uri, format, "V", **kwargs) + written = 0 + + with writer: + + # Iterate over images (ims may be a generator) + for im in ims: + # Test image + imt = type(im) + im = np.asanyarray(im) + if not np.issubdtype(im.dtype, np.number): + raise ValueError("Image is not numeric, but {}.".format(imt.__name__)) + elif im.ndim == 3: + pass + elif im.ndim == 4 and im.shape[3] < 32: + pass # How large can a tuple be? + else: + raise ValueError("Image must be 3D, or 4D if each voxel is a tuple.") + + # Add image + writer.append_data(im) + written += 1 + + # Check that something was written. Check after writing, because ims might + # be a generator. The damage is done, but we want to error when it happens. + if not written: + raise RuntimeError("Zero volumes were written.") + + # Return a result if there is any + return writer.request.get_result() + + +## Aliases + +read = get_reader +save = get_writer +imsave = imwrite +mimsave = mimwrite +volsave = volwrite +mvolsave = mvolwrite diff --git a/venv/Lib/site-packages/imageio/core/request.py b/venv/Lib/site-packages/imageio/core/request.py new file mode 100644 index 000000000..83e1c938e --- /dev/null +++ b/venv/Lib/site-packages/imageio/core/request.py @@ -0,0 +1,577 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" +Definition of the Request object, which acts as a kind of bridge between +what the user wants and what the plugins can. 
+""" + +import os +from io import BytesIO +import zipfile +import tempfile +import shutil + +from ..core import urlopen, get_remote_file + +try: + from pathlib import Path +except ImportError: + Path = None + +# URI types +URI_BYTES = 1 +URI_FILE = 2 +URI_FILENAME = 3 +URI_ZIPPED = 4 +URI_HTTP = 5 +URI_FTP = 6 + +SPECIAL_READ_URIS = "", "" + +# The user can use this string in a write call to get the data back as bytes. +RETURN_BYTES = "" + +# Example images that will be auto-downloaded +EXAMPLE_IMAGES = { + "astronaut.png": "Image of the astronaut Eileen Collins", + "camera.png": "Classic grayscale image of a photographer", + "checkerboard.png": "Black and white image of a chekerboard", + "wood.jpg": "A (repeatable) texture of wooden planks", + "bricks.jpg": "A (repeatable) texture of stone bricks", + "clock.png": "Photo of a clock with motion blur (Stefan van der Walt)", + "coffee.png": "Image of a cup of coffee (Rachel Michetti)", + "chelsea.png": "Image of Stefan's cat", + "wikkie.png": "Image of Almar's cat", + "coins.png": "Image showing greek coins from Pompeii", + "horse.png": "Image showing the silhouette of a horse (Andreas Preuss)", + "hubble_deep_field.png": "Photograph taken by Hubble telescope (NASA)", + "immunohistochemistry.png": "Immunohistochemical (IHC) staining", + "moon.png": "Image showing a portion of the surface of the moon", + "page.png": "A scanned page of text", + "text.png": "A photograph of handdrawn text", + "chelsea.zip": "The chelsea.png in a zipfile (for testing)", + "chelsea.bsdf": "The chelsea.png in a BSDF file(for testing)", + "newtonscradle.gif": "Animated GIF of a newton's cradle", + "cockatoo.mp4": "Video file of a cockatoo", + "stent.npz": "Volumetric image showing a stented abdominal aorta", + "meadow_cube.jpg": "A cubemap image of a meadow, e.g. to render a skybox.", +} + + +class Request(object): + """ Request(uri, mode, **kwargs) + + Represents a request for reading or saving an image resource. 
This + object wraps information to that request and acts as an interface + for the plugins to several resources; it allows the user to read + from filenames, files, http, zipfiles, raw bytes, etc., but offer + a simple interface to the plugins via ``get_file()`` and + ``get_local_filename()``. + + For each read/write operation a single Request instance is used and passed + to the can_read/can_write method of a format, and subsequently to + the Reader/Writer class. This allows rudimentary passing of + information between different formats and between a format and + associated reader/writer. + + parameters + ---------- + uri : {str, bytes, file} + The resource to load the image from. + mode : str + The first character is "r" or "w", indicating a read or write + request. The second character is used to indicate the kind of data: + "i" for an image, "I" for multiple images, "v" for a volume, + "V" for multiple volumes, "?" for don't care. + """ + + def __init__(self, uri, mode, **kwargs): + + # General + self._uri_type = None + self._filename = None + self._extension = None + self._kwargs = kwargs + self._result = None # Some write actions may have a result + + # To handle the user-side + self._filename_zip = None # not None if a zipfile is used + self._bytes = None # Incoming bytes + self._zipfile = None # To store a zipfile instance (if used) + + # To handle the plugin side + self._file = None # To store the file instance + self._file_is_local = False # whether the data needs to be copied at end + self._filename_local = None # not None if using tempfile on this FS + self._firstbytes = None # For easy header parsing + + # To store formats that may be able to fulfil this request + # self._potential_formats = [] + + # Check mode + self._mode = mode + if not isinstance(mode, str): + raise ValueError("Request requires mode must be a string") + if not len(mode) == 2: + raise ValueError("Request requires mode to have two chars") + if mode[0] not in "rw": + raise 
ValueError('Request requires mode[0] to be "r" or "w"') + if mode[1] not in "iIvV?": + raise ValueError('Request requires mode[1] to be in "iIvV?"') + + # Parse what was given + self._parse_uri(uri) + + # Set extension + if self._filename is not None: + ext = self._filename + if self._filename.startswith(("http://", "https://", "ftp://", "ftps://")): + ext = ext.split("?")[0] + self._extension = "." + ext.split(".")[-1].lower() + + def _parse_uri(self, uri): + """ Try to figure our what we were given + """ + is_read_request = self.mode[0] == "r" + is_write_request = self.mode[0] == "w" + + if isinstance(uri, str): + # Explicit + if uri.startswith("imageio:"): + if is_write_request: + raise RuntimeError("Cannot write to the standard images.") + fn = uri.split(":", 1)[-1].lower() + fn, _, zip_part = fn.partition(".zip/") + if zip_part: + fn += ".zip" + if fn not in EXAMPLE_IMAGES: + raise ValueError("Unknown standard image %r." % fn) + self._uri_type = URI_FILENAME + self._filename = get_remote_file("images/" + fn, auto=True) + if zip_part: + self._filename += "/" + zip_part + elif uri.startswith("http://") or uri.startswith("https://"): + self._uri_type = URI_HTTP + self._filename = uri + elif uri.startswith("ftp://") or uri.startswith("ftps://"): + self._uri_type = URI_FTP + self._filename = uri + elif uri.startswith("file://"): + self._uri_type = URI_FILENAME + self._filename = uri[7:] + elif uri.startswith(SPECIAL_READ_URIS) and is_read_request: + self._uri_type = URI_BYTES + self._filename = uri + elif uri.startswith(RETURN_BYTES) and is_write_request: + self._uri_type = URI_BYTES + self._filename = uri + else: + self._uri_type = URI_FILENAME + self._filename = uri + + elif isinstance(uri, memoryview) and is_read_request: + self._uri_type = URI_BYTES + self._filename = "" + self._bytes = uri.tobytes() + elif isinstance(uri, bytes) and is_read_request: + self._uri_type = URI_BYTES + self._filename = "" + self._bytes = uri + elif Path is not None and 
isinstance(uri, Path): + self._uri_type = URI_FILENAME + self._filename = str(uri) + # Files + elif is_read_request: + if hasattr(uri, "read") and hasattr(uri, "close"): + self._uri_type = URI_FILE + self._filename = "" + self._file = uri # Data must be read from here + elif is_write_request: + if hasattr(uri, "write") and hasattr(uri, "close"): + self._uri_type = URI_FILE + self._filename = "" + self._file = uri # Data must be written here + + # Expand user dir + if self._uri_type == URI_FILENAME and self._filename.startswith("~"): + self._filename = os.path.expanduser(self._filename) + + # Check if a zipfile + if self._uri_type == URI_FILENAME: + # Search for zip extension followed by a path separater + for needle in [".zip/", ".zip\\"]: + zip_i = self._filename.lower().find(needle) + if zip_i > 0: + zip_i += 4 + zip_path = self._filename[:zip_i] + if is_write_request or os.path.isfile(zip_path): + self._uri_type = URI_ZIPPED + + self._filename_zip = ( + zip_path, + self._filename[zip_i:].lstrip("/\\"), + ) + break + + # Check if we could read it + if self._uri_type is None: + uri_r = repr(uri) + if len(uri_r) > 60: + uri_r = uri_r[:57] + "..." + raise IOError("Cannot understand given URI: %s." % uri_r) + + # Check if this is supported + noWriting = [URI_HTTP, URI_FTP] + if is_write_request and self._uri_type in noWriting: + raise IOError("imageio does not support writing to http/ftp.") + + # Deprecated way to load standard images, give a sensible error message + if is_read_request and self._uri_type in [URI_FILENAME, URI_ZIPPED]: + fn = self._filename + if self._filename_zip: + fn = self._filename_zip[0] + if (not os.path.exists(fn)) and (fn in EXAMPLE_IMAGES): + raise IOError( + "No such file: %r. This file looks like one of " + "the standard images, but from imageio 2.1, " + "standard images have to be specified using " + '"imageio:%s".' 
% (fn, fn) + ) + + # Make filename absolute + if self._uri_type in [URI_FILENAME, URI_ZIPPED]: + if self._filename_zip: + self._filename_zip = ( + os.path.abspath(self._filename_zip[0]), + self._filename_zip[1], + ) + else: + self._filename = os.path.abspath(self._filename) + + # Check whether file name is valid + if self._uri_type in [URI_FILENAME, URI_ZIPPED]: + fn = self._filename + if self._filename_zip: + fn = self._filename_zip[0] + if is_read_request: + # Reading: check that the file exists (but is allowed a dir) + if not os.path.exists(fn): + raise FileNotFoundError("No such file: '%s'" % fn) + else: + # Writing: check that the directory to write to does exist + dn = os.path.dirname(fn) + if not os.path.exists(dn): + raise FileNotFoundError("The directory %r does not exist" % dn) + + @property + def filename(self): + """ The uri for which reading/saving was requested. This + can be a filename, an http address, or other resource + identifier. Do not rely on the filename to obtain the data, + but use ``get_file()`` or ``get_local_filename()`` instead. + """ + return self._filename + + @property + def extension(self): + """ The (lowercase) extension of the requested filename. + Suffixes in url's are stripped. Can be None if the request is + not based on a filename. + """ + return self._extension + + @property + def mode(self): + """ The mode of the request. The first character is "r" or "w", + indicating a read or write request. The second character is + used to indicate the kind of data: + "i" for an image, "I" for multiple images, "v" for a volume, + "V" for multiple volumes, "?" for don't care. + """ + return self._mode + + @property + def kwargs(self): + """ The dict of keyword arguments supplied by the user. + """ + return self._kwargs + + ## For obtaining data + + def get_file(self): + """ get_file() + Get a file object for the resource associated with this request. + If this is a reading request, the file is in read mode, + otherwise in write mode. 
This method is not thread safe. Plugins + should not close the file when done. + + This is the preferred way to read/write the data. But if a + format cannot handle file-like objects, they should use + ``get_local_filename()``. + """ + want_to_write = self.mode[0] == "w" + + # Is there already a file? + # Either _uri_type == URI_FILE, or we already opened the file, + # e.g. by using firstbytes + if self._file is not None: + return self._file + + if self._uri_type == URI_BYTES: + if want_to_write: + # Create new file object, we catch the bytes in finish() + self._file = BytesIO() + self._file_is_local = True + else: + self._file = BytesIO(self._bytes) + + elif self._uri_type == URI_FILENAME: + if want_to_write: + self._file = open(self.filename, "wb") + else: + self._file = open(self.filename, "rb") + + elif self._uri_type == URI_ZIPPED: + # Get the correct filename + filename, name = self._filename_zip + if want_to_write: + # Create new file object, we catch the bytes in finish() + self._file = BytesIO() + self._file_is_local = True + else: + # Open zipfile and open new file object for specific file + self._zipfile = zipfile.ZipFile(filename, "r") + self._file = self._zipfile.open(name, "r") + self._file = SeekableFileObject(self._file) + + elif self._uri_type in [URI_HTTP or URI_FTP]: + assert not want_to_write # This should have been tested in init + timeout = os.getenv('IMAGEIO_REQUEST_TIMEOUT') + if timeout is None or not timeout.isdigit(): + timeout = 5 + self._file = urlopen(self.filename, timeout=float(timeout)) + self._file = SeekableFileObject(self._file) + + return self._file + + def get_local_filename(self): + """ get_local_filename() + If the filename is an existing file on this filesystem, return + that. Otherwise a temporary file is created on the local file + system which can be used by the format to read from or write to. 
+ """ + + if self._uri_type == URI_FILENAME: + return self._filename + else: + # Get filename + if self._uri_type in (URI_HTTP, URI_FTP): + ext = os.path.splitext(self._filename.split("?")[0])[1] + else: + ext = os.path.splitext(self._filename)[1] + self._filename_local = tempfile.mktemp(ext, "imageio_") + # Write stuff to it? + if self.mode[0] == "r": + with open(self._filename_local, "wb") as file: + shutil.copyfileobj(self.get_file(), file) + return self._filename_local + + def finish(self): + """ finish() + For internal use (called when the context of the reader/writer + exits). Finishes this request. Close open files and process + results. + """ + + if self.mode[0] == "w": + + # See if we "own" the data and must put it somewhere + bytes = None + if self._filename_local: + with open(self._filename_local, "rb") as file: + bytes = file.read() + elif self._file_is_local: + bytes = self._file.getvalue() + + # Put the data in the right place + if bytes is not None: + if self._uri_type == URI_BYTES: + self._result = bytes # Picked up by imread function + elif self._uri_type == URI_FILE: + self._file.write(bytes) + elif self._uri_type == URI_ZIPPED: + zf = zipfile.ZipFile(self._filename_zip[0], "a") + zf.writestr(self._filename_zip[1], bytes) + zf.close() + # elif self._uri_type == URI_FILENAME: -> is always direct + # elif self._uri_type == URI_FTP/HTTP: -> write not supported + + # Close open files that we know of (and are responsible for) + if self._file and self._uri_type != URI_FILE: + self._file.close() + self._file = None + if self._zipfile: + self._zipfile.close() + self._zipfile = None + + # Remove temp file + if self._filename_local: + try: + os.remove(self._filename_local) + except Exception: # pragma: no cover + pass + self._filename_local = None + + # Detach so gc can clean even if a reference of self lingers + self._bytes = None + + def get_result(self): + """ For internal use. In some situations a write action can have + a result (bytes data). 
That is obtained with this function. + """ + self._result, res = None, self._result + return res + + @property + def firstbytes(self): + """ The first 256 bytes of the file. These can be used to + parse the header to determine the file-format. + """ + if self._firstbytes is None: + self._read_first_bytes() + return self._firstbytes + + def _read_first_bytes(self, N=256): + if self._bytes is not None: + self._firstbytes = self._bytes[:N] + else: + # Prepare + try: + f = self.get_file() + except IOError: + if os.path.isdir(self.filename): # A directory, e.g. for DICOM + self._firstbytes = bytes() + return + raise + try: + i = f.tell() + except Exception: + i = None + # Read + self._firstbytes = read_n_bytes(f, N) + # Set back + try: + if i is None: + raise Exception("cannot seek with None") + f.seek(i) + except Exception: + # Prevent get_file() from reusing the file + self._file = None + # If the given URI was a file object, we have a problem, + if self._uri_type == URI_FILE: + raise IOError("Cannot seek back after getting firstbytes!") + + +def read_n_bytes(f, N): + """ read_n_bytes(file, n) + + Read n bytes from the given file, or less if the file has less + bytes. Returns zero bytes if the file is closed. + """ + bb = bytes() + while len(bb) < N: + extra_bytes = f.read(N - len(bb)) + if not extra_bytes: + break + bb += extra_bytes + return bb + + +class SeekableFileObject: + """ A readonly wrapper file object that add support for seeking, even if + the wrapped file object does not. The allows us to stream from http and + still use Pillow. + """ + + def __init__(self, f): + self.f = f + self._i = 0 # >=0 but can exceed buffer + self._buffer = b"" + self._have_all = False + self.closed = False + + def read(self, n=None): + + # Fix up n + if n is None: + pass + else: + n = int(n) + if n < 0: + n = None + + # Can and must we read more? 
+ if not self._have_all: + more = b"" + if n is None: + more = self.f.read() + self._have_all = True + else: + want_i = self._i + n + want_more = want_i - len(self._buffer) + if want_more > 0: + more = self.f.read(want_more) + if len(more) < want_more: + self._have_all = True + self._buffer += more + + # Read data from buffer and update pointer + if n is None: + res = self._buffer[self._i :] + else: + res = self._buffer[self._i : self._i + n] + self._i += len(res) + + return res + + def tell(self): + return self._i + + def seek(self, i, mode=0): + # Mimic BytesIO behavior + + # Get the absolute new position + i = int(i) + if mode == 0: + if i < 0: + raise ValueError("negative seek value " + str(i)) + real_i = i + elif mode == 1: + real_i = max(0, self._i + i) # negative ok here + elif mode == 2: + if not self._have_all: + self.read() + real_i = max(0, len(self._buffer) + i) + else: + raise ValueError("invalid whence (%s, should be 0, 1 or 2)" % i) + + # Read some? + if real_i <= len(self._buffer): + pass # no need to read + elif not self._have_all: + assert real_i > self._i # if we don't have all, _i cannot be > _buffer + self.read(real_i - self._i) # sets self._i + + self._i = real_i + return self._i + + def close(self): + self.closed = True + self.f.close() + + def isatty(self): + return False + + def seekable(self): + return True diff --git a/venv/Lib/site-packages/imageio/core/util.py b/venv/Lib/site-packages/imageio/core/util.py new file mode 100644 index 000000000..3b18ed862 --- /dev/null +++ b/venv/Lib/site-packages/imageio/core/util.py @@ -0,0 +1,563 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. 
+ +""" +Various utilities for imageio +""" + + +import os +import re +import struct +import sys +import time +import logging + +logger = logging.getLogger("imageio") + + +import numpy as np + +IS_PYPY = "__pypy__" in sys.builtin_module_names +THIS_DIR = os.path.abspath(os.path.dirname(__file__)) + + +def urlopen(*args, **kwargs): + """ Compatibility function for the urlopen function. Raises an + RuntimeError if urlopen could not be imported (which can occur in + frozen applications. + """ + try: + from urllib.request import urlopen + except ImportError: + raise RuntimeError("Could not import urlopen.") + return urlopen(*args, **kwargs) + + +def _precision_warn(p1, p2, extra=""): + t = ( + "Lossy conversion from {} to {}. {} Convert image to {} prior to " + "saving to suppress this warning." + ) + logger.warning(t.format(p1, p2, extra, p2)) + + +def image_as_uint(im, bitdepth=None): + """ Convert the given image to uint (default: uint8) + + If the dtype already matches the desired format, it is returned + as-is. If the image is float, and all values are between 0 and 1, + the values are multiplied by np.power(2.0, bitdepth). In all other + situations, the values are scaled such that the minimum value + becomes 0 and the maximum value becomes np.power(2.0, bitdepth)-1 + (255 for 8-bit and 65535 for 16-bit). + """ + if not bitdepth: + bitdepth = 8 + if not isinstance(im, np.ndarray): + raise ValueError("Image must be a numpy array") + if bitdepth == 8: + out_type = np.uint8 + elif bitdepth == 16: + out_type = np.uint16 + else: + raise ValueError("Bitdepth must be either 8 or 16") + dtype_str1 = str(im.dtype) + dtype_str2 = out_type.__name__ + if (im.dtype == np.uint8 and bitdepth == 8) or ( + im.dtype == np.uint16 and bitdepth == 16 + ): + # Already the correct format? 
Return as-is + return im + if dtype_str1.startswith("float") and np.nanmin(im) >= 0 and np.nanmax(im) <= 1: + _precision_warn(dtype_str1, dtype_str2, "Range [0, 1].") + im = im.astype(np.float64) * (np.power(2.0, bitdepth) - 1) + 0.499999999 + elif im.dtype == np.uint16 and bitdepth == 8: + _precision_warn(dtype_str1, dtype_str2, "Losing 8 bits of resolution.") + im = np.right_shift(im, 8) + elif im.dtype == np.uint32: + _precision_warn( + dtype_str1, + dtype_str2, + "Losing {} bits of resolution.".format(32 - bitdepth), + ) + im = np.right_shift(im, 32 - bitdepth) + elif im.dtype == np.uint64: + _precision_warn( + dtype_str1, + dtype_str2, + "Losing {} bits of resolution.".format(64 - bitdepth), + ) + im = np.right_shift(im, 64 - bitdepth) + else: + mi = np.nanmin(im) + ma = np.nanmax(im) + if not np.isfinite(mi): + raise ValueError("Minimum image value is not finite") + if not np.isfinite(ma): + raise ValueError("Maximum image value is not finite") + if ma == mi: + return im.astype(out_type) + _precision_warn(dtype_str1, dtype_str2, "Range [{}, {}].".format(mi, ma)) + # Now make float copy before we scale + im = im.astype("float64") + # Scale the values between 0 and 1 then multiply by the max value + im = (im - mi) / (ma - mi) * (np.power(2.0, bitdepth) - 1) + 0.499999999 + assert np.nanmin(im) >= 0 + assert np.nanmax(im) < np.power(2.0, bitdepth) + return im.astype(out_type) + + +class Array(np.ndarray): + """ Array(array, meta=None) + + A subclass of np.ndarray that has a meta attribute. Get the dictionary + that contains the meta data using ``im.meta``. Convert to a plain numpy + array using ``np.asarray(im)``. 
+ + """ + + def __new__(cls, array, meta=None): + # Check + if not isinstance(array, np.ndarray): + raise ValueError("Array expects a numpy array.") + if not (meta is None or isinstance(meta, dict)): + raise ValueError("Array expects meta data to be a dict.") + # Convert and return + meta = meta if meta is not None else {} + try: + ob = array.view(cls) + except AttributeError: # pragma: no cover + # Just return the original; no metadata on the array in Pypy! + return array + ob._copy_meta(meta) + return ob + + def _copy_meta(self, meta): + """ Make a 2-level deep copy of the meta dictionary. + """ + self._meta = Dict() + for key, val in meta.items(): + if isinstance(val, dict): + val = Dict(val) # Copy this level + self._meta[key] = val + + @property + def meta(self): + """ The dict with the meta data of this image. + """ + return self._meta + + def __array_finalize__(self, ob): + """ So the meta info is maintained when doing calculations with + the array. + """ + if isinstance(ob, Array): + self._copy_meta(ob.meta) + else: + self._copy_meta({}) + + def __array_wrap__(self, out, context=None): + """ So that we return a native numpy array (or scalar) when a + reducting ufunc is applied (such as sum(), std(), etc.) + """ + if not out.shape: + return out.dtype.type(out) # Scalar + elif out.shape != self.shape: + return out.view(type=np.ndarray) + else: + return out # Type Array + + +Image = Array # Alias for backwards compatibility + + +def asarray(a): + """ Pypy-safe version of np.asarray. Pypy's np.asarray consumes a + *lot* of memory if the given array is an ndarray subclass. This + function does not. + """ + if isinstance(a, np.ndarray): + if IS_PYPY: # pragma: no cover + a = a.copy() # pypy has issues with base views + plain = a.view(type=np.ndarray) + return plain + return np.asarray(a) + + +from collections import OrderedDict + + +class Dict(OrderedDict): + """ A dict in which the keys can be get and set as if they were + attributes. 
Very convenient in combination with autocompletion. + + This Dict still behaves as much as possible as a normal dict, and + keys can be anything that are otherwise valid keys. However, + keys that are not valid identifiers or that are names of the dict + class (such as 'items' and 'copy') cannot be get/set as attributes. + """ + + __reserved_names__ = dir(OrderedDict()) # Also from OrderedDict + __pure_names__ = dir(dict()) + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, key) + except AttributeError: + if key in self: + return self[key] + else: + raise + + def __setattr__(self, key, val): + if key in Dict.__reserved_names__: + # Either let OrderedDict do its work, or disallow + if key not in Dict.__pure_names__: + return OrderedDict.__setattr__(self, key, val) + else: + raise AttributeError( + "Reserved name, this key can only " + + "be set via ``d[%r] = X``" % key + ) + else: + # if isinstance(val, dict): val = Dict(val) -> no, makes a copy! + self[key] = val + + def __dir__(self): + isidentifier = lambda x: bool(re.match(r"[a-z_]\w*$", x, re.I)) + names = [k for k in self.keys() if (isinstance(k, str) and isidentifier(k))] + return Dict.__reserved_names__ + names + + +class BaseProgressIndicator(object): + """ BaseProgressIndicator(name) + + A progress indicator helps display the progres of a task to the + user. Progress can be pending, running, finished or failed. + + Each task has: + * a name - a short description of what needs to be done. + * an action - the current action in performing the task (e.g. a subtask) + * progress - how far the task is completed + * max - max number of progress units. If 0, the progress is indefinite + * unit - the units in which the progress is counted + * status - 0: pending, 1: in progress, 2: finished, 3: failed + + This class defines an abstract interface. Subclasses should implement + _start, _stop, _update_progress(progressText), _write(message). 
+ """ + + def __init__(self, name): + self._name = name + self._action = "" + self._unit = "" + self._max = 0 + self._status = 0 + self._last_progress_update = 0 + + def start(self, action="", unit="", max=0): + """ start(action='', unit='', max=0) + + Start the progress. Optionally specify an action, a unit, + and a maxium progress value. + """ + if self._status == 1: + self.finish() + self._action = action + self._unit = unit + self._max = max + # + self._progress = 0 + self._status = 1 + self._start() + + def status(self): + """ status() + + Get the status of the progress - 0: pending, 1: in progress, + 2: finished, 3: failed + """ + return self._status + + def set_progress(self, progress=0, force=False): + """ set_progress(progress=0, force=False) + + Set the current progress. To avoid unnecessary progress updates + this will only have a visual effect if the time since the last + update is > 0.1 seconds, or if force is True. + """ + self._progress = progress + # Update or not? + if not (force or (time.time() - self._last_progress_update > 0.1)): + return + self._last_progress_update = time.time() + # Compose new string + unit = self._unit or "" + progressText = "" + if unit == "%": + progressText = "%2.1f%%" % progress + elif self._max > 0: + percent = 100 * float(progress) / self._max + progressText = "%i/%i %s (%2.1f%%)" % (progress, self._max, unit, percent) + elif progress > 0: + if isinstance(progress, float): + progressText = "%0.4g %s" % (progress, unit) + else: + progressText = "%i %s" % (progress, unit) + # Update + self._update_progress(progressText) + + def increase_progress(self, extra_progress): + """ increase_progress(extra_progress) + + Increase the progress by a certain amount. + """ + self.set_progress(self._progress + extra_progress) + + def finish(self, message=None): + """ finish(message=None) + + Finish the progress, optionally specifying a message. This will + not set the progress to the maximum. 
+ """ + self.set_progress(self._progress, True) # fore update + self._status = 2 + self._stop() + if message is not None: + self._write(message) + + def fail(self, message=None): + """ fail(message=None) + + Stop the progress with a failure, optionally specifying a message. + """ + self.set_progress(self._progress, True) # fore update + self._status = 3 + self._stop() + message = "FAIL " + (message or "") + self._write(message) + + def write(self, message): + """ write(message) + + Write a message during progress (such as a warning). + """ + if self.__class__ == BaseProgressIndicator: + # When this class is used as a dummy, print explicit message + print(message) + else: + return self._write(message) + + # Implementing classes should implement these + + def _start(self): + pass + + def _stop(self): + pass + + def _update_progress(self, progressText): + pass + + def _write(self, message): + pass + + +class StdoutProgressIndicator(BaseProgressIndicator): + """ StdoutProgressIndicator(name) + + A progress indicator that shows the progress in stdout. It + assumes that the tty can appropriately deal with backspace + characters. 
+ """ + + def _start(self): + self._chars_prefix, self._chars = "", "" + # Write message + if self._action: + self._chars_prefix = "%s (%s): " % (self._name, self._action) + else: + self._chars_prefix = "%s: " % self._name + sys.stdout.write(self._chars_prefix) + sys.stdout.flush() + + def _update_progress(self, progressText): + # If progress is unknown, at least make something move + if not progressText: + i1, i2, i3, i4 = "-\\|/" + M = {i1: i2, i2: i3, i3: i4, i4: i1} + progressText = M.get(self._chars, i1) + # Store new string and write + delChars = "\b" * len(self._chars) + self._chars = progressText + sys.stdout.write(delChars + self._chars) + sys.stdout.flush() + + def _stop(self): + self._chars = self._chars_prefix = "" + sys.stdout.write("\n") + sys.stdout.flush() + + def _write(self, message): + # Write message + delChars = "\b" * len(self._chars_prefix + self._chars) + sys.stdout.write(delChars + " " + message + "\n") + # Reprint progress text + sys.stdout.write(self._chars_prefix + self._chars) + sys.stdout.flush() + + +# From pyzolib/paths.py (https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py) +def appdata_dir(appname=None, roaming=False): + """ appdata_dir(appname=None, roaming=False) + + Get the path to the application directory, where applications are allowed + to write user specific files (e.g. configurations). For non-user specific + data, consider using common_appdata_dir(). + If appname is given, a subdir is appended (and created if necessary). + If roaming is True, will prefer a roaming directory (Windows Vista/7). 
+ """ + + # Define default user directory + userDir = os.getenv("IMAGEIO_USERDIR", None) + if userDir is None: + userDir = os.path.expanduser("~") + if not os.path.isdir(userDir): # pragma: no cover + userDir = "/var/tmp" # issue #54 + + # Get system app data dir + path = None + if sys.platform.startswith("win"): + path1, path2 = os.getenv("LOCALAPPDATA"), os.getenv("APPDATA") + path = (path2 or path1) if roaming else (path1 or path2) + elif sys.platform.startswith("darwin"): + path = os.path.join(userDir, "Library", "Application Support") + # On Linux and as fallback + if not (path and os.path.isdir(path)): + path = userDir + + # Maybe we should store things local to the executable (in case of a + # portable distro or a frozen application that wants to be portable) + prefix = sys.prefix + if getattr(sys, "frozen", None): + prefix = os.path.abspath(os.path.dirname(sys.executable)) + for reldir in ("settings", "../settings"): + localpath = os.path.abspath(os.path.join(prefix, reldir)) + if os.path.isdir(localpath): # pragma: no cover + try: + open(os.path.join(localpath, "test.write"), "wb").close() + os.remove(os.path.join(localpath, "test.write")) + except IOError: + pass # We cannot write in this directory + else: + path = localpath + break + + # Get path specific for this app + if appname: + if path == userDir: + appname = "." + appname.lstrip(".") # Make it a hidden directory + path = os.path.join(path, appname) + if not os.path.isdir(path): # pragma: no cover + os.makedirs(path, exist_ok=True) + + # Done + return path + + +def resource_dirs(): + """ resource_dirs() + + Get a list of directories where imageio resources may be located. + The first directory in this list is the "resources" directory in + the package itself. The second directory is the appdata directory + (~/.imageio on Linux). The list further contains the application + directory (for frozen apps), and may include additional directories + in the future. 
+ """ + dirs = [resource_package_dir()] + # Resource dir baked in the package. + # Appdata directory + try: + dirs.append(appdata_dir("imageio")) + except Exception: # pragma: no cover + pass # The home dir may not be writable + # Directory where the app is located (mainly for frozen apps) + if getattr(sys, "frozen", None): + dirs.append(os.path.abspath(os.path.dirname(sys.executable))) + elif sys.path and sys.path[0]: + dirs.append(os.path.abspath(sys.path[0])) + return dirs + + +def resource_package_dir(): + """ package_dir + + Get the resources directory in the imageio package installation + directory. + + Notes + ----- + This is a convenience method that is used by `resource_dirs` and + imageio entry point scripts. + """ + # Make pkg_resources optional if setuptools is not available + try: + # Avoid importing pkg_resources in the top level due to how slow it is + # https://github.com/pypa/setuptools/issues/510 + import pkg_resources + except ImportError: + pkg_resources = None + + if pkg_resources: + # The directory returned by `pkg_resources.resource_filename` + # also works with eggs. + pdir = pkg_resources.resource_filename("imageio", "resources") + else: + # If setuptools is not available, use fallback + pdir = os.path.abspath(os.path.join(THIS_DIR, "..", "resources")) + return pdir + + +def get_platform(): + """ get_platform() + + Get a string that specifies the platform more specific than + sys.platform does. The result can be: linux32, linux64, win32, + win64, osx32, osx64. Other platforms may be added in the future. 
+ """ + # Get platform + if sys.platform.startswith("linux"): + plat = "linux%i" + elif sys.platform.startswith("win"): + plat = "win%i" + elif sys.platform.startswith("darwin"): + plat = "osx%i" + elif sys.platform.startswith("freebsd"): + plat = "freebsd%i" + else: # pragma: no cover + return None + + return plat % (struct.calcsize("P") * 8) # 32 or 64 bits + + +def has_module(module_name): + """Check to see if a python module is available. + """ + if sys.version_info > (3, 4): + import importlib + + name_parts = module_name.split(".") + for i in range(len(name_parts)): + if importlib.util.find_spec(".".join(name_parts[: i + 1])) is None: + return False + return True + else: # pragma: no cover + import imp + + try: + imp.find_module(module_name) + except ImportError: + return False + return True diff --git a/venv/Lib/site-packages/imageio/freeze.py b/venv/Lib/site-packages/imageio/freeze.py new file mode 100644 index 000000000..3753a29df --- /dev/null +++ b/venv/Lib/site-packages/imageio/freeze.py @@ -0,0 +1,11 @@ +""" +Helper functions for freezing imageio. +""" + + +def get_includes(): + return ["email", "urllib.request", "numpy", "zipfile", "io"] + + +def get_excludes(): + return [] diff --git a/venv/Lib/site-packages/imageio/plugins/__init__.py b/venv/Lib/site-packages/imageio/plugins/__init__.py new file mode 100644 index 000000000..7e2668318 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/__init__.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +# flake8: noqa + +""" + +Imagio is plugin-based. Every supported format is provided with a +plugin. You can write your own plugins to make imageio support +additional formats. And we would be interested in adding such code to the +imageio codebase! + + +What is a plugin +---------------- + +In imageio, a plugin provides one or more :class:`.Format` objects, and +corresponding :class:`.Reader` and :class:`.Writer` classes. 
+Each Format object represents an implementation to read/write a +particular file format. Its Reader and Writer classes do the actual +reading/saving. + +The reader and writer objects have a ``request`` attribute that can be +used to obtain information about the read or write :class:`.Request`, such as +user-provided keyword arguments, as well get access to the raw image +data. + + +Registering +----------- + +Strictly speaking a format can be used stand alone. However, to allow +imageio to automatically select it for a specific file, the format must +be registered using ``imageio.formats.add_format()``. + +Note that a plugin is not required to be part of the imageio package; as +long as a format is registered, imageio can use it. This makes imageio very +easy to extend. + + +What methods to implement +-------------------------- + +Imageio is designed such that plugins only need to implement a few +private methods. The public API is implemented by the base classes. +In effect, the public methods can be given a descent docstring which +does not have to be repeated at the plugins. + +For the Format class, the following needs to be implemented/specified: + + * The format needs a short name, a description, and a list of file + extensions that are common for the file-format in question. + These ase set when instantiation the Format object. + * Use a docstring to provide more detailed information about the + format/plugin, such as parameters for reading and saving that the user + can supply via keyword arguments. + * Implement ``_can_read(request)``, return a bool. + See also the :class:`.Request` class. + * Implement ``_can_write(request)``, dito. + +For the Format.Reader class: + + * Implement ``_open(**kwargs)`` to initialize the reader. Deal with the + user-provided keyword arguments here. + * Implement ``_close()`` to clean up. + * Implement ``_get_length()`` to provide a suitable length based on what + the user expects. Can be ``inf`` for streaming data. 
+ * Implement ``_get_data(index)`` to return an array and a meta-data dict. + * Implement ``_get_meta_data(index)`` to return a meta-data dict. If index + is None, it should return the 'global' meta-data. + +For the Format.Writer class: + + * Implement ``_open(**kwargs)`` to initialize the writer. Deal with the + user-provided keyword arguments here. + * Implement ``_close()`` to clean up. + * Implement ``_append_data(im, meta)`` to add data (and meta-data). + * Implement ``_set_meta_data(meta)`` to set the global meta-data. + +""" + +# First import plugins that we want to take precedence over freeimage +from . import tifffile +from . import pillow +from . import grab + +from . import freeimage +from . import freeimagemulti + +from . import ffmpeg + +from . import bsdf +from . import dicom +from . import npz +from . import swf +from . import feisem # special kind of tiff, uses _tiffile + +from . import fits # depends on astropy +from . import simpleitk # depends on itk or SimpleITK +from . import gdal # depends on gdal + +from . import lytro +from . import spe + +from . import example + +# Sort +import os +from .. 
import formats + +formats.sort(*os.getenv("IMAGEIO_FORMAT_ORDER", "").split(",")) +del os, formats diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/__init__.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 000000000..7b184fbde Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-36.pyc new file mode 100644 index 000000000..2afb329d0 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/_bsdf.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/_dicom.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/_dicom.cpython-36.pyc new file mode 100644 index 000000000..28322999b Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/_dicom.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-36.pyc new file mode 100644 index 000000000..9e6d5725f Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/_freeimage.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/_swf.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/_swf.cpython-36.pyc new file mode 100644 index 000000000..5ca9f008f Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/_swf.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/_tifffile.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/_tifffile.cpython-36.pyc new file mode 100644 index 000000000..bd2cf12f4 Binary files /dev/null and 
b/venv/Lib/site-packages/imageio/plugins/__pycache__/_tifffile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/bsdf.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/bsdf.cpython-36.pyc new file mode 100644 index 000000000..98d25c1a2 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/bsdf.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/dicom.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/dicom.cpython-36.pyc new file mode 100644 index 000000000..75d8458fb Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/dicom.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/example.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/example.cpython-36.pyc new file mode 100644 index 000000000..a4fc1e892 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/example.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/feisem.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/feisem.cpython-36.pyc new file mode 100644 index 000000000..d41fb056b Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/feisem.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/ffmpeg.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/ffmpeg.cpython-36.pyc new file mode 100644 index 000000000..080d01833 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/ffmpeg.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/fits.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/fits.cpython-36.pyc new file mode 100644 index 000000000..324beb6e9 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/fits.cpython-36.pyc differ diff --git 
a/venv/Lib/site-packages/imageio/plugins/__pycache__/freeimage.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/freeimage.cpython-36.pyc new file mode 100644 index 000000000..160e6a6d5 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/freeimage.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-36.pyc new file mode 100644 index 000000000..9eaf8e909 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/freeimagemulti.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/gdal.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/gdal.cpython-36.pyc new file mode 100644 index 000000000..6d08e89aa Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/gdal.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/grab.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/grab.cpython-36.pyc new file mode 100644 index 000000000..3d588665f Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/grab.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/lytro.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/lytro.cpython-36.pyc new file mode 100644 index 000000000..c25f71158 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/lytro.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/npz.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/npz.cpython-36.pyc new file mode 100644 index 000000000..1f74effc4 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/npz.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/pillow.cpython-36.pyc 
b/venv/Lib/site-packages/imageio/plugins/__pycache__/pillow.cpython-36.pyc new file mode 100644 index 000000000..5ae4933d0 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/pillow.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/pillow_info.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/pillow_info.cpython-36.pyc new file mode 100644 index 000000000..59a29f81e Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/pillow_info.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-36.pyc new file mode 100644 index 000000000..e37e6399b Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/pillowmulti.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/simpleitk.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/simpleitk.cpython-36.pyc new file mode 100644 index 000000000..888ae92e8 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/simpleitk.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/spe.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/spe.cpython-36.pyc new file mode 100644 index 000000000..19b2b8894 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/spe.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/swf.cpython-36.pyc b/venv/Lib/site-packages/imageio/plugins/__pycache__/swf.cpython-36.pyc new file mode 100644 index 000000000..c659c99cc Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/swf.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/__pycache__/tifffile.cpython-36.pyc 
b/venv/Lib/site-packages/imageio/plugins/__pycache__/tifffile.cpython-36.pyc new file mode 100644 index 000000000..9bb073092 Binary files /dev/null and b/venv/Lib/site-packages/imageio/plugins/__pycache__/tifffile.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/imageio/plugins/_bsdf.py b/venv/Lib/site-packages/imageio/plugins/_bsdf.py new file mode 100644 index 000000000..be22db1d3 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/_bsdf.py @@ -0,0 +1,940 @@ +#!/usr/bin/env python +# This file is distributed under the terms of the 2-clause BSD License. +# Copyright (c) 2017-2018, Almar Klein + +""" +Python implementation of the Binary Structured Data Format (BSDF). + +BSDF is a binary format for serializing structured (scientific) data. +See http://bsdf.io for more information. + +This is the reference implementation, which is relatively relatively +sophisticated, providing e.g. lazy loading of blobs and streamed +reading/writing. A simpler Python implementation is available as +``bsdf_lite.py``. + +This module has no dependencies and works on Python 2.7 and 3.4+. + +Note: on Legacy Python (Python 2.7), non-Unicode strings are encoded as bytes. +""" + +# todo: in 2020, remove six stuff, __future__ and _isidentifier +# todo: in 2020, remove 'utf-8' args to encode/decode; it's faster + +from __future__ import absolute_import, division, print_function + +import bz2 +import hashlib +import logging +import os +import re +import struct +import sys +import types +import zlib +from io import BytesIO + +logger = logging.getLogger(__name__) + +# Notes on versioning: the major and minor numbers correspond to the +# BSDF format version. The major number if increased when backward +# incompatible changes are introduced. An implementation must raise an +# exception when the file being read has a higher major version. The +# minor number is increased when new backward compatible features are +# introduced. 
An implementation must display a warning when the file +# being read has a higher minor version. The patch version is increased +# for subsequent releases of the implementation. +VERSION = 2, 1, 2 +__version__ = ".".join(str(i) for i in VERSION) + + +# %% The encoder and decoder implementation + +# From six.py +PY3 = sys.version_info[0] >= 3 +if PY3: + text_type = str + string_types = str + unicode_types = str + integer_types = int + classtypes = type +else: # pragma: no cover + logging.basicConfig() # avoid "no handlers found" error + text_type = unicode # noqa + string_types = basestring # noqa + unicode_types = unicode # noqa + integer_types = (int, long) # noqa + classtypes = type, types.ClassType + +# Shorthands +spack = struct.pack +strunpack = struct.unpack + + +def lencode(x): + """ Encode an unsigned integer into a variable sized blob of bytes. + """ + # We could support 16 bit and 32 bit as well, but the gain is low, since + # 9 bytes for collections with over 250 elements is marginal anyway. 
+ if x <= 250: + return spack(" extension + self._extensions_by_cls = {} # cls -> (name, extension.encode) + if extensions is None: + extensions = standard_extensions + for extension in extensions: + self.add_extension(extension) + self._parse_options(**options) + + def _parse_options( + self, + compression=0, + use_checksum=False, + float64=True, + load_streaming=False, + lazy_blob=False, + ): + + # Validate compression + if isinstance(compression, string_types): + m = {"no": 0, "zlib": 1, "bz2": 2} + compression = m.get(compression.lower(), compression) + if compression not in (0, 1, 2): + raise TypeError("Compression must be 0, 1, 2, " '"no", "zlib", or "bz2"') + self._compression = compression + + # Other encoding args + self._use_checksum = bool(use_checksum) + self._float64 = bool(float64) + + # Decoding args + self._load_streaming = bool(load_streaming) + self._lazy_blob = bool(lazy_blob) + + def add_extension(self, extension_class): + """ Add an extension to this serializer instance, which must be + a subclass of Extension. Can be used as a decorator. + """ + # Check class + if not ( + isinstance(extension_class, type) and issubclass(extension_class, Extension) + ): + raise TypeError("add_extension() expects a Extension class.") + extension = extension_class() + + # Get name + name = extension.name + if not isinstance(name, str): + raise TypeError("Extension name must be str.") + if len(name) == 0 or len(name) > 250: + raise NameError( + "Extension names must be nonempty and shorter " "than 251 chars." 
+ ) + if name in self._extensions: + logger.warning( + 'BSDF warning: overwriting extension "%s", ' + "consider removing first" % name + ) + + # Get classes + cls = extension.cls + if not cls: + clss = [] + elif isinstance(cls, (tuple, list)): + clss = cls + else: + clss = [cls] + for cls in clss: + if not isinstance(cls, classtypes): + raise TypeError("Extension classes must be types.") + + # Store + for cls in clss: + self._extensions_by_cls[cls] = name, extension.encode + self._extensions[name] = extension + return extension_class + + def remove_extension(self, name): + """ Remove a converted by its unique name. + """ + if not isinstance(name, str): + raise TypeError("Extension name must be str.") + if name in self._extensions: + self._extensions.pop(name) + for cls in list(self._extensions_by_cls.keys()): + if self._extensions_by_cls[cls][0] == name: + self._extensions_by_cls.pop(cls) + + def _encode(self, f, value, streams, ext_id): + """ Main encoder function. + """ + x = encode_type_id + + if value is None: + f.write(x(b"v", ext_id)) # V for void + elif value is True: + f.write(x(b"y", ext_id)) # Y for yes + elif value is False: + f.write(x(b"n", ext_id)) # N for no + elif isinstance(value, integer_types): + if -32768 <= value <= 32767: + f.write(x(b"h", ext_id) + spack("h", value)) # H for ... + else: + f.write(x(b"i", ext_id) + spack(" 0: + raise ValueError("Can only have one stream per file.") + streams.append(value) + value._activate(f, self._encode, self._decode) # noqa + else: + if ext_id is not None: + raise ValueError( + "Extension %s wronfully encodes object to another " + "extension object (though it may encode to a list/dict " + "that contains other extension objects)." 
% ext_id + ) + # Try if the value is of a type we know + ex = self._extensions_by_cls.get(value.__class__, None) + # Maybe its a subclass of a type we know + if ex is None: + for name, c in self._extensions.items(): + if c.match(self, value): + ex = name, c.encode + break + else: + ex = None + # Success or fail + if ex is not None: + ext_id2, extension_encode = ex + self._encode(f, extension_encode(self, value), streams, ext_id2) + else: + t = ( + "Class %r is not a valid base BSDF type, nor is it " + "handled by an extension." + ) + raise TypeError(t % value.__class__.__name__) + + def _decode(self, f): + """ Main decoder function. + """ + + # Get value + char = f.read(1) + c = char.lower() + + # Conversion (uppercase value identifiers signify converted values) + if not char: + raise EOFError() + elif char != c: + n = strunpack("= 254: + # Streaming + closed = n == 254 + n = strunpack(" 0 + name = f.read(n_name).decode("UTF-8") + value[name] = self._decode(f) + elif c == b"b": + if self._lazy_blob: + value = Blob((f, True)) + else: + blob = Blob((f, False)) + value = blob.get_bytes() + else: + raise RuntimeError("Parse error %r" % char) + + # Convert value if we have an extension for it + if ext_id is not None: + extension = self._extensions.get(ext_id, None) + if extension is not None: + value = extension.decode(self, value) + else: + logger.warning("BSDF warning: no extension found for %r" % ext_id) + + return value + + def encode(self, ob): + """ Save the given object to bytes. + """ + f = BytesIO() + self.save(f, ob) + return f.getvalue() + + def save(self, f, ob): + """ Write the given object to the given file object. + """ + f.write(b"BSDF") + f.write(struct.pack(" 0: + stream = streams[0] + if stream._start_pos != f.tell(): + raise ValueError( + "The stream object must be " "the last object to be encoded." + ) + + def decode(self, bb): + """ Load the data structure that is BSDF-encoded in the given bytes. 
+ """ + f = BytesIO(bb) + return self.load(f) + + def load(self, f): + """ Load a BSDF-encoded object from the given file object. + """ + # Check magic string + f4 = f.read(4) + if f4 != b"BSDF": + raise RuntimeError("This does not look like a BSDF file: %r" % f4) + # Check version + major_version = strunpack(" VERSION[1]: # minor should be < ours + t = ( + "BSDF warning: reading file with higher minor version (%s) " + "than the implementation (%s)." + ) + logger.warning(t % (__version__, file_version)) + + return self._decode(f) + + +# %% Streaming and blob-files + + +class BaseStream(object): + """ Base class for streams. + """ + + def __init__(self, mode="w"): + self._i = 0 + self._count = -1 + if isinstance(mode, int): + self._count = mode + mode = "r" + elif mode == "w": + self._count = 0 + assert mode in ("r", "w") + self._mode = mode + self._f = None + self._start_pos = 0 + + def _activate(self, file, encode_func, decode_func): + if self._f is not None: # Associated with another write + raise IOError("Stream object cannot be activated twice?") + self._f = file + self._start_pos = self._f.tell() + self._encode = encode_func + self._decode = decode_func + + @property + def mode(self): + """ The mode of this stream: 'r' or 'w'. + """ + return self._mode + + +class ListStream(BaseStream): + """ A streamable list object used for writing or reading. + In read mode, it can also be iterated over. + """ + + @property + def count(self): + """ The number of elements in the stream (can be -1 for unclosed + streams in read-mode). + """ + return self._count + + @property + def index(self): + """ The current index of the element to read/write. + """ + return self._i + + def append(self, item): + """ Append an item to the streaming list. The object is immediately + serialized and written to the underlying file. 
+ """ + # if self._mode != 'w': + # raise IOError('This ListStream is not in write mode.') + if self._count != self._i: + raise IOError("Can only append items to the end of the stream.") + if self._f is None: + raise IOError("List stream is not associated with a file yet.") + if self._f.closed: + raise IOError("Cannot stream to a close file.") + self._encode(self._f, item, [self], None) + self._i += 1 + self._count += 1 + + def close(self, unstream=False): + """ Close the stream, marking the number of written elements. New + elements may still be appended, but they won't be read during decoding. + If ``unstream`` is False, the stream is turned into a regular list + (not streaming). + """ + # if self._mode != 'w': + # raise IOError('This ListStream is not in write mode.') + if self._count != self._i: + raise IOError("Can only close when at the end of the stream.") + if self._f is None: + raise IOError("ListStream is not associated with a file yet.") + if self._f.closed: + raise IOError("Cannot close a stream on a close file.") + i = self._f.tell() + self._f.seek(self._start_pos - 8 - 1) + self._f.write(spack("= 0: + if self._i >= self._count: + raise StopIteration() + self._i += 1 + return self._decode(self._f) + else: + # This raises EOFError at some point. + try: + res = self._decode(self._f) + self._i += 1 + return res + except EOFError: + self._count = self._i + raise StopIteration() + + def __iter__(self): + if self._mode != "r": + raise IOError("Cannot iterate: ListStream in not in read mode.") + return self + + def __next__(self): + return self.next() + + +class Blob(object): + """ Object to represent a blob of bytes. When used to write a BSDF file, + it's a wrapper for bytes plus properties such as what compression to apply. + When used to read a BSDF file, it can be used to read the data lazily, and + also modify the data if reading in 'r+' mode and the blob isn't compressed. 
+ """ + + # For now, this does not allow re-sizing blobs (within the allocated size) + # but this can be added later. + + def __init__(self, bb, compression=0, extra_size=0, use_checksum=False): + if isinstance(bb, bytes): + self._f = None + self.compressed = self._from_bytes(bb, compression) + self.compression = compression + self.allocated_size = self.used_size + extra_size + self.use_checksum = use_checksum + elif isinstance(bb, tuple) and len(bb) == 2 and hasattr(bb[0], "read"): + self._f, allow_seek = bb + self.compressed = None + self._from_file(self._f, allow_seek) + self._modified = False + else: + raise TypeError("Wrong argument to create Blob.") + + def _from_bytes(self, value, compression): + """ When used to wrap bytes in a blob. + """ + if compression == 0: + compressed = value + elif compression == 1: + compressed = zlib.compress(value, 9) + elif compression == 2: + compressed = bz2.compress(value, 9) + else: # pragma: no cover + assert False, "Unknown compression identifier" + + self.data_size = len(value) + self.used_size = len(compressed) + return compressed + + def _to_file(self, f): + """ Private friend method called by encoder to write a blob to a file. + """ + # Write sizes - write at least in a size that allows resizing + if self.allocated_size <= 250 and self.compression == 0: + f.write(spack(" self.allocated_size: + raise IOError("Seek beyond blob boundaries.") + self._f.seek(self.start_pos + p) + + def tell(self): + """ Get the current file pointer position (relative to the blob start). + """ + if self._f is None: + raise RuntimeError( + "Cannot tell in a blob " "that is not created by the BSDF decoder." + ) + return self._f.tell() - self.start_pos + + def write(self, bb): + """ Write bytes to the blob. + """ + if self._f is None: + raise RuntimeError( + "Cannot write in a blob " "that is not created by the BSDF decoder." 
+ ) + if self.compression: + raise IOError("Cannot arbitrarily write in compressed blob.") + if self._f.tell() + len(bb) > self.end_pos: + raise IOError("Write beyond blob boundaries.") + self._modified = True + return self._f.write(bb) + + def read(self, n): + """ Read n bytes from the blob. + """ + if self._f is None: + raise RuntimeError( + "Cannot read in a blob " "that is not created by the BSDF decoder." + ) + if self.compression: + raise IOError("Cannot arbitrarily read in compressed blob.") + if self._f.tell() + n > self.end_pos: + raise IOError("Read beyond blob boundaries.") + return self._f.read(n) + + def get_bytes(self): + """ Get the contents of the blob as bytes. + """ + if self.compressed is not None: + compressed = self.compressed + else: + i = self._f.tell() + self.seek(0) + compressed = self._f.read(self.used_size) + self._f.seek(i) + if self.compression == 0: + value = compressed + elif self.compression == 1: + value = zlib.decompress(compressed) + elif self.compression == 2: + value = bz2.decompress(compressed) + else: # pragma: no cover + raise RuntimeError("Invalid compression %i" % self.compression) + return value + + def update_checksum(self): + """ Reset the blob's checksum if present. Call this after modifying + the data. + """ + # or ... should the presence of a checksum mean that data is proteced? + if self.use_checksum and self._modified: + self.seek(0) + compressed = self._f.read(self.used_size) + self._f.seek(self.start_pos - self.alignment - 1 - 16) + self._f.write(hashlib.md5(compressed).digest()) + + +# %% High-level functions + + +def encode(ob, extensions=None, **options): + """ Save (BSDF-encode) the given object to bytes. + See `BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + return s.encode(ob) + + +def save(f, ob, extensions=None, **options): + """ Save (BSDF-encode) the given object to the given filename or + file object. 
See` BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + if isinstance(f, string_types): + with open(f, "wb") as fp: + return s.save(fp, ob) + else: + return s.save(f, ob) + + +def decode(bb, extensions=None, **options): + """ Load a (BSDF-encoded) structure from bytes. + See `BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + return s.decode(bb) + + +def load(f, extensions=None, **options): + """ Load a (BSDF-encoded) structure from the given filename or file object. + See `BSDFSerializer` for details on extensions and options. + """ + s = BsdfSerializer(extensions, **options) + if isinstance(f, string_types): + if f.startswith(("~/", "~\\")): # pragma: no cover + f = os.path.expanduser(f) + with open(f, "rb") as fp: + return s.load(fp) + else: + return s.load(f) + + +# Aliases for json compat +loads = decode +dumps = encode + + +# %% Standard extensions + +# Defining extensions as a dict would be more compact and feel lighter, but +# that would only allow lambdas, which is too limiting, e.g. for ndarray +# extension. + + +class Extension(object): + """ Base class to implement BSDF extensions for special data types. + + Extension classes are provided to the BSDF serializer, which + instantiates the class. That way, the extension can be somewhat dynamic: + e.g. the NDArrayExtension exposes the ndarray class only when numpy + is imported. + + A extension instance must have two attributes. These can be attribiutes of + the class, or of the instance set in ``__init__()``: + + * name (str): the name by which encoded values will be identified. + * cls (type): the type (or list of types) to match values with. + This is optional, but it makes the encoder select extensions faster. + + Further, it needs 3 methods: + + * `match(serializer, value) -> bool`: return whether the extension can + convert the given value. The default is ``isinstance(value, self.cls)``. 
+ * `encode(serializer, value) -> encoded_value`: the function to encode a + value to more basic data types. + * `decode(serializer, encoded_value) -> value`: the function to decode an + encoded value back to its intended representation. + + """ + + name = "" + cls = () + + def __repr__(self): + return "" % (self.name, hex(id(self))) + + def match(self, s, v): + return isinstance(v, self.cls) + + def encode(self, s, v): + raise NotImplementedError() + + def decode(self, s, v): + raise NotImplementedError() + + +class ComplexExtension(Extension): + + name = "c" + cls = complex + + def encode(self, s, v): + return (v.real, v.imag) + + def decode(self, s, v): + return complex(v[0], v[1]) + + +class NDArrayExtension(Extension): + + name = "ndarray" + + def __init__(self): + if "numpy" in sys.modules: + import numpy as np + + self.cls = np.ndarray + + def match(self, s, v): # pragma: no cover - e.g. work for nd arrays in JS + return hasattr(v, "shape") and hasattr(v, "dtype") and hasattr(v, "tobytes") + + def encode(self, s, v): + return dict(shape=v.shape, dtype=text_type(v.dtype), data=v.tobytes()) + + def decode(self, s, v): + try: + import numpy as np + except ImportError: # pragma: no cover + return v + a = np.frombuffer(v["data"], dtype=v["dtype"]) + a.shape = v["shape"] + return a + + +standard_extensions = [ComplexExtension, NDArrayExtension] + + +if __name__ == "__main__": + # Invoke CLI + import bsdf_cli + + bsdf_cli.main() diff --git a/venv/Lib/site-packages/imageio/plugins/_dicom.py b/venv/Lib/site-packages/imageio/plugins/_dicom.py new file mode 100644 index 000000000..e9472245d --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/_dicom.py @@ -0,0 +1,926 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin for reading DICOM files. 
+""" + +# todo: Use pydicom: +# * Note: is not py3k ready yet +# * Allow reading the full meta info +# I think we can more or less replace the SimpleDicomReader with a +# pydicom.Dataset For series, only ned to read the full info from one +# file: speed still high +# * Perhaps allow writing? + +import sys +import os +import struct +import logging + +import numpy as np + + +logger = logging.getLogger(__name__) + +# Determine endianity of system +sys_is_little_endian = sys.byteorder == "little" + +# Define a dictionary that contains the tags that we would like to know +MINIDICT = { + (0x7FE0, 0x0010): ("PixelData", "OB"), + # Date and time + (0x0008, 0x0020): ("StudyDate", "DA"), + (0x0008, 0x0021): ("SeriesDate", "DA"), + (0x0008, 0x0022): ("AcquisitionDate", "DA"), + (0x0008, 0x0023): ("ContentDate", "DA"), + (0x0008, 0x0030): ("StudyTime", "TM"), + (0x0008, 0x0031): ("SeriesTime", "TM"), + (0x0008, 0x0032): ("AcquisitionTime", "TM"), + (0x0008, 0x0033): ("ContentTime", "TM"), + # With what, where, by whom? 
+ (0x0008, 0x0060): ("Modality", "CS"), + (0x0008, 0x0070): ("Manufacturer", "LO"), + (0x0008, 0x0080): ("InstitutionName", "LO"), + # Descriptions + (0x0008, 0x1030): ("StudyDescription", "LO"), + (0x0008, 0x103E): ("SeriesDescription", "LO"), + # UID's + (0x0008, 0x0016): ("SOPClassUID", "UI"), + (0x0008, 0x0018): ("SOPInstanceUID", "UI"), + (0x0020, 0x000D): ("StudyInstanceUID", "UI"), + (0x0020, 0x000E): ("SeriesInstanceUID", "UI"), + (0x0008, 0x0117): ("ContextUID", "UI"), + # Numbers + (0x0020, 0x0011): ("SeriesNumber", "IS"), + (0x0020, 0x0012): ("AcquisitionNumber", "IS"), + (0x0020, 0x0013): ("InstanceNumber", "IS"), + (0x0020, 0x0014): ("IsotopeNumber", "IS"), + (0x0020, 0x0015): ("PhaseNumber", "IS"), + (0x0020, 0x0016): ("IntervalNumber", "IS"), + (0x0020, 0x0017): ("TimeSlotNumber", "IS"), + (0x0020, 0x0018): ("AngleNumber", "IS"), + (0x0020, 0x0019): ("ItemNumber", "IS"), + (0x0020, 0x0020): ("PatientOrientation", "CS"), + (0x0020, 0x0030): ("ImagePosition", "CS"), + (0x0020, 0x0032): ("ImagePositionPatient", "CS"), + (0x0020, 0x0035): ("ImageOrientation", "CS"), + (0x0020, 0x0037): ("ImageOrientationPatient", "CS"), + # Patient information + (0x0010, 0x0010): ("PatientName", "PN"), + (0x0010, 0x0020): ("PatientID", "LO"), + (0x0010, 0x0030): ("PatientBirthDate", "DA"), + (0x0010, 0x0040): ("PatientSex", "CS"), + (0x0010, 0x1010): ("PatientAge", "AS"), + (0x0010, 0x1020): ("PatientSize", "DS"), + (0x0010, 0x1030): ("PatientWeight", "DS"), + # Image specific (required to construct numpy array) + (0x0028, 0x0002): ("SamplesPerPixel", "US"), + (0x0028, 0x0008): ("NumberOfFrames", "IS"), + (0x0028, 0x0100): ("BitsAllocated", "US"), + (0x0028, 0x0101): ("BitsStored", "US"), + (0x0028, 0x0102): ("HighBit", "US"), + (0x0028, 0x0103): ("PixelRepresentation", "US"), + (0x0028, 0x0010): ("Rows", "US"), + (0x0028, 0x0011): ("Columns", "US"), + (0x0028, 0x1052): ("RescaleIntercept", "DS"), + (0x0028, 0x1053): ("RescaleSlope", "DS"), + # Image specific (for the 
user) + (0x0028, 0x0030): ("PixelSpacing", "DS"), + (0x0018, 0x0088): ("SliceSpacing", "DS"), +} + +# Define some special tags: +# See PS 3.5-2008 section 7.5 (p.40) +ItemTag = (0xFFFE, 0xE000) # start of Sequence Item +ItemDelimiterTag = (0xFFFE, 0xE00D) # end of Sequence Item +SequenceDelimiterTag = (0xFFFE, 0xE0DD) # end of Sequence of undefined length + +# Define set of groups that we're interested in (so we can quickly skip others) +GROUPS = set([key[0] for key in MINIDICT.keys()]) +VRS = set([val[1] for val in MINIDICT.values()]) + + +class NotADicomFile(Exception): + pass + + +class CompressedDicom(RuntimeError): + pass + + +class SimpleDicomReader(object): + """ + This class provides reading of pixel data from DICOM files. It is + focussed on getting the pixel data, not the meta info. + + To use, first create an instance of this class (giving it + a file object or filename). Next use the info attribute to + get a dict of the meta data. The loading of pixel data is + deferred until get_numpy_array() is called. + + Comparison with Pydicom + ----------------------- + + This code focusses on getting the pixel data out, which allows some + shortcuts, resulting in the code being much smaller. + + Since the processing of data elements is much cheaper (it skips a lot + of tags), this code is about 3x faster than pydicom (except for the + deflated DICOM files). + + This class does borrow some code (and ideas) from the pydicom + project, and (to the best of our knowledge) has the same limitations + as pydicom with regard to the type of files that it can handle. + + Limitations + ----------- + + For more advanced DICOM processing, please check out pydicom. + + * Only a predefined subset of data elements (meta information) is read. + * This is a reader; it can not write DICOM files. + * (just like pydicom) it can handle none of the compressed DICOM + formats except for "Deflated Explicit VR Little Endian" + (1.2.840.10008.1.2.1.99). 
+ + """ + + def __init__(self, file): + # Open file if filename given + if isinstance(file, str): + self._filename = file + self._file = open(file, "rb") + else: + self._filename = "" + self._file = file + # Init variable to store position and size of pixel data + self._pixel_data_loc = None + # The meta header is always explicit and little endian + self.is_implicit_VR = False + self.is_little_endian = True + self._unpackPrefix = "<" + # Dict to store data elements of interest in + self._info = {} + # VR Conversion + self._converters = { + # Numbers + "US": lambda x: self._unpack("H", x), + "UL": lambda x: self._unpack("L", x), + # Numbers encoded as strings + "DS": lambda x: self._splitValues(x, float, "\\"), + "IS": lambda x: self._splitValues(x, int, "\\"), + # strings + "AS": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "DA": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "TM": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "UI": lambda x: x.decode("ascii", "ignore").strip("\x00"), + "LO": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(), + "CS": lambda x: self._splitValues(x, float, "\\"), + "PN": lambda x: x.decode("utf-8", "ignore").strip("\x00").rstrip(), + } + + # Initiate reading + self._read() + + @property + def info(self): + return self._info + + def _splitValues(self, x, type, splitter): + s = x.decode("ascii").strip("\x00") + try: + if splitter in s: + return tuple([type(v) for v in s.split(splitter) if v.strip()]) + else: + return type(s) + except ValueError: + return s + + def _unpack(self, fmt, value): + return struct.unpack(self._unpackPrefix + fmt, value)[0] + + # Really only so we need minimal changes to _pixel_data_numpy + def __iter__(self): + return iter(self._info.keys()) + + def __getattr__(self, key): + info = object.__getattribute__(self, "_info") + if key in info: + return info[key] + return object.__getattribute__(self, key) # pragma: no cover + + def _read(self): + f = self._file + # Check prefix 
after peamble + f.seek(128) + if f.read(4) != b"DICM": + raise NotADicomFile("Not a valid DICOM file.") + # Read + self._read_header() + self._read_data_elements() + self._get_shape_and_sampling() + # Close if done, reopen if necessary to read pixel data + if os.path.isfile(self._filename): + self._file.close() + self._file = None + + def _readDataElement(self): + f = self._file + # Get group and element + group = self._unpack("H", f.read(2)) + element = self._unpack("H", f.read(2)) + # Get value length + if self.is_implicit_VR: + vl = self._unpack("I", f.read(4)) + else: + vr = f.read(2) + if vr in (b"OB", b"OW", b"SQ", b"UN"): + reserved = f.read(2) # noqa + vl = self._unpack("I", f.read(4)) + else: + vl = self._unpack("H", f.read(2)) + # Get value + if group == 0x7FE0 and element == 0x0010: + here = f.tell() + self._pixel_data_loc = here, vl + f.seek(here + vl) + return group, element, b"Deferred loading of pixel data" + else: + if vl == 0xFFFFFFFF: + value = self._read_undefined_length_value() + else: + value = f.read(vl) + return group, element, value + + def _read_undefined_length_value(self, read_size=128): + """ Copied (in compacted form) from PyDicom + Copyright Darcy Mason. + """ + fp = self._file + # data_start = fp.tell() + search_rewind = 3 + bytes_to_find = struct.pack( + self._unpackPrefix + "HH", SequenceDelimiterTag[0], SequenceDelimiterTag[1] + ) + + found = False + value_chunks = [] + while not found: + chunk_start = fp.tell() + bytes_read = fp.read(read_size) + if len(bytes_read) < read_size: + # try again, + # if still don't get required amount, this is last block + new_bytes = fp.read(read_size - len(bytes_read)) + bytes_read += new_bytes + if len(bytes_read) < read_size: + raise EOFError( + "End of file reached before sequence " "delimiter found." 
+ ) + index = bytes_read.find(bytes_to_find) + if index != -1: + found = True + value_chunks.append(bytes_read[:index]) + fp.seek(chunk_start + index + 4) # rewind to end of delimiter + length = fp.read(4) + if length != b"\0\0\0\0": + logger.warning( + "Expected 4 zero bytes after undefined length " "delimiter" + ) + else: + fp.seek(fp.tell() - search_rewind) # rewind a bit + # accumulate the bytes read (not including the rewind) + value_chunks.append(bytes_read[:-search_rewind]) + + # if get here then have found the byte string + return b"".join(value_chunks) + + def _read_header(self): + f = self._file + TransferSyntaxUID = None + + # Read all elements, store transferSyntax when we encounter it + try: + while True: + fp_save = f.tell() + # Get element + group, element, value = self._readDataElement() + if group == 0x02: + if group == 0x02 and element == 0x10: + TransferSyntaxUID = value.decode("ascii").strip("\x00") + else: + # No more group 2: rewind and break + # (don't trust group length) + f.seek(fp_save) + break + except (EOFError, struct.error): # pragma: no cover + raise RuntimeError("End of file reached while still in header.") + + # Handle transfer syntax + self._info["TransferSyntaxUID"] = TransferSyntaxUID + # + if TransferSyntaxUID is None: + # Assume ExplicitVRLittleEndian + is_implicit_VR, is_little_endian = False, True + elif TransferSyntaxUID == "1.2.840.10008.1.2.1": + # ExplicitVRLittleEndian + is_implicit_VR, is_little_endian = False, True + elif TransferSyntaxUID == "1.2.840.10008.1.2.2": + # ExplicitVRBigEndian + is_implicit_VR, is_little_endian = False, False + elif TransferSyntaxUID == "1.2.840.10008.1.2": + # implicit VR little endian + is_implicit_VR, is_little_endian = True, True + elif TransferSyntaxUID == "1.2.840.10008.1.2.1.99": + # DeflatedExplicitVRLittleEndian: + is_implicit_VR, is_little_endian = False, True + self._inflate() + else: + # http://www.dicomlibrary.com/dicom/transfer-syntax/ + t, extra_info = TransferSyntaxUID, "" + 
if "1.2.840.10008.1.2.4.50" <= t < "1.2.840.10008.1.2.4.99": + extra_info = " (JPEG)" + if "1.2.840.10008.1.2.4.90" <= t < "1.2.840.10008.1.2.4.99": + extra_info = " (JPEG 2000)" + if t == "1.2.840.10008.1.2.5": + extra_info = " (RLE)" + if t == "1.2.840.10008.1.2.6.1": + extra_info = " (RFC 2557)" + raise CompressedDicom( + "The dicom reader can only read files with " + "uncompressed image data - not %r%s. You " + "can try using dcmtk or gdcm to convert the " + "image." % (t, extra_info) + ) + + # From hereon, use implicit/explicit big/little endian + self.is_implicit_VR = is_implicit_VR + self.is_little_endian = is_little_endian + self._unpackPrefix = "><"[is_little_endian] + + def _read_data_elements(self): + info = self._info + try: + while True: + # Get element + group, element, value = self._readDataElement() + # Is it a group we are interested in? + if group in GROUPS: + key = (group, element) + name, vr = MINIDICT.get(key, (None, None)) + # Is it an element we are interested in? + if name: + # Store value + converter = self._converters.get(vr, lambda x: x) + info[name] = converter(value) + except (EOFError, struct.error): + pass # end of file ... + + def get_numpy_array(self): + """ Get numpy arra for this DICOM file, with the correct shape, + and pixel values scaled appropriately. + """ + # Is there pixel data at all? + if "PixelData" not in self: + raise TypeError("No pixel data found in this dataset.") + + # Load it now if it was not already loaded + if self._pixel_data_loc and len(self.PixelData) < 100: + # Reopen file? 
+ close_file = False + if self._file is None: + close_file = True + self._file = open(self._filename, "rb") + # Read data + self._file.seek(self._pixel_data_loc[0]) + if self._pixel_data_loc[1] == 0xFFFFFFFF: + value = self._read_undefined_length_value() + else: + value = self._file.read(self._pixel_data_loc[1]) + # Close file + if close_file: + self._file.close() + self._file = None + # Overwrite + self._info["PixelData"] = value + + # Get data + data = self._pixel_data_numpy() + data = self._apply_slope_and_offset(data) + + # Remove data again to preserve memory + # Note that the data for the original file is loaded twice ... + self._info["PixelData"] = ( + b"Data converted to numpy array, " + b"raw data removed to preserve memory" + ) + return data + + def _get_shape_and_sampling(self): + """ Get shape and sampling without actuall using the pixel data. + In this way, the user can get an idea what's inside without having + to load it. + """ + # Get shape (in the same way that pydicom does) + if "NumberOfFrames" in self and self.NumberOfFrames > 1: + if self.SamplesPerPixel > 1: + shape = ( + self.SamplesPerPixel, + self.NumberOfFrames, + self.Rows, + self.Columns, + ) + else: + shape = self.NumberOfFrames, self.Rows, self.Columns + elif "SamplesPerPixel" in self: + if self.SamplesPerPixel > 1: + if self.BitsAllocated == 8: + shape = self.SamplesPerPixel, self.Rows, self.Columns + else: + raise NotImplementedError( + "DICOM plugin only handles " + "SamplesPerPixel > 1 if Bits " + "Allocated = 8" + ) + else: + shape = self.Rows, self.Columns + else: + raise RuntimeError( + "DICOM file has no SamplesPerPixel " "(perhaps this is a report?)" + ) + + # Try getting sampling between pixels + if "PixelSpacing" in self: + sampling = float(self.PixelSpacing[0]), float(self.PixelSpacing[1]) + else: + sampling = 1.0, 1.0 + if "SliceSpacing" in self: + sampling = (abs(self.SliceSpacing),) + sampling + + # Ensure that sampling has as many elements as shape + sampling = (1.0,) * 
(len(shape) - len(sampling)) + sampling[-len(shape) :] + + # Set shape and sampling + self._info["shape"] = shape + self._info["sampling"] = sampling + + def _pixel_data_numpy(self): + """Return a NumPy array of the pixel data. + """ + # Taken from pydicom + # Copyright (c) 2008-2012 Darcy Mason + + if "PixelData" not in self: + raise TypeError("No pixel data found in this dataset.") + + # determine the type used for the array + need_byteswap = self.is_little_endian != sys_is_little_endian + + # Make NumPy format code, e.g. "uint16", "int32" etc + # from two pieces of info: + # self.PixelRepresentation -- 0 for unsigned, 1 for signed; + # self.BitsAllocated -- 8, 16, or 32 + format_str = "%sint%d" % ( + ("u", "")[self.PixelRepresentation], + self.BitsAllocated, + ) + try: + numpy_format = np.dtype(format_str) + except TypeError: # pragma: no cover + raise TypeError( + "Data type not understood by NumPy: format='%s', " + " PixelRepresentation=%d, BitsAllocated=%d" + % (numpy_format, self.PixelRepresentation, self.BitsAllocated) + ) + + # Have correct Numpy format, so create the NumPy array + arr = np.frombuffer(self.PixelData, numpy_format).copy() + + # XXX byte swap - may later handle this in read_file!!? + if need_byteswap: + arr.byteswap(True) # True means swap in-place, don't make new copy + + # Note the following reshape operations return a new *view* onto arr, + # but don't copy the data + arr = arr.reshape(*self._info["shape"]) + return arr + + def _apply_slope_and_offset(self, data): + """ + If RescaleSlope and RescaleIntercept are present in the data, + apply them. The data type of the data is changed if necessary. 
+ """ + # Obtain slope and offset + slope, offset = 1, 0 + needFloats, needApplySlopeOffset = False, False + if "RescaleSlope" in self: + needApplySlopeOffset = True + slope = self.RescaleSlope + if "RescaleIntercept" in self: + needApplySlopeOffset = True + offset = self.RescaleIntercept + if int(slope) != slope or int(offset) != offset: + needFloats = True + if not needFloats: + slope, offset = int(slope), int(offset) + + # Apply slope and offset + if needApplySlopeOffset: + # Maybe we need to change the datatype? + if data.dtype in [np.float32, np.float64]: + pass + elif needFloats: + data = data.astype(np.float32) + else: + # Determine required range + minReq, maxReq = data.min(), data.max() + minReq = min([minReq, minReq * slope + offset, maxReq * slope + offset]) + maxReq = max([maxReq, minReq * slope + offset, maxReq * slope + offset]) + + # Determine required datatype from that + dtype = None + if minReq < 0: + # Signed integer type + maxReq = max([-minReq, maxReq]) + if maxReq < 2 ** 7: + dtype = np.int8 + elif maxReq < 2 ** 15: + dtype = np.int16 + elif maxReq < 2 ** 31: + dtype = np.int32 + else: + dtype = np.float32 + else: + # Unsigned integer type + if maxReq < 2 ** 8: + dtype = np.int8 + elif maxReq < 2 ** 16: + dtype = np.int16 + elif maxReq < 2 ** 32: + dtype = np.int32 + else: + dtype = np.float32 + # Change datatype + if dtype != data.dtype: + data = data.astype(dtype) + + # Apply slope and offset + data *= slope + data += offset + + # Done + return data + + def _inflate(self): + # Taken from pydicom + # Copyright (c) 2008-2012 Darcy Mason + import zlib + from io import BytesIO + + # See PS3.6-2008 A.5 (p 71) -- when written, the entire dataset + # following the file metadata was prepared the normal way, + # then "deflate" compression applied. 
+ # All that is needed here is to decompress and then + # use as normal in a file-like object + zipped = self._file.read() + # -MAX_WBITS part is from comp.lang.python answer: + # groups.google.com/group/comp.lang.python/msg/e95b3b38a71e6799 + unzipped = zlib.decompress(zipped, -zlib.MAX_WBITS) + self._file = BytesIO(unzipped) # a file-like object + + +class DicomSeries(object): + """ DicomSeries + This class represents a serie of dicom files (SimpleDicomReader + objects) that belong together. If these are multiple files, they + represent the slices of a volume (like for CT or MRI). + """ + + def __init__(self, suid, progressIndicator): + # Init dataset list and the callback + self._entries = [] + + # Init props + self._suid = suid + self._info = {} + self._progressIndicator = progressIndicator + + def __len__(self): + return len(self._entries) + + def __iter__(self): + return iter(self._entries) + + def __getitem__(self, index): + return self._entries[index] + + @property + def suid(self): + return self._suid + + @property + def shape(self): + """ The shape of the data (nz, ny, nx). """ + return self._info["shape"] + + @property + def sampling(self): + """ The sampling (voxel distances) of the data (dz, dy, dx). """ + return self._info["sampling"] + + @property + def info(self): + """ A dictionary containing the information as present in the + first dicomfile of this serie. None if there are no entries. """ + return self._info + + @property + def description(self): + """ A description of the dicom series. Used fields are + PatientName, shape of the data, SeriesDescription, and + ImageComments. 
+ """ + info = self.info + + # If no info available, return simple description + if not info: # pragma: no cover + return "DicomSeries containing %i images" % len(self) + + fields = [] + # Give patient name + if "PatientName" in info: + fields.append("" + info["PatientName"]) + # Also add dimensions + if self.shape: + tmp = [str(d) for d in self.shape] + fields.append("x".join(tmp)) + # Try adding more fields + if "SeriesDescription" in info: + fields.append("'" + info["SeriesDescription"] + "'") + if "ImageComments" in info: + fields.append("'" + info["ImageComments"] + "'") + + # Combine + return " ".join(fields) + + def __repr__(self): + adr = hex(id(self)).upper() + return "" % (len(self), adr) + + def get_numpy_array(self): + """ Get (load) the data that this DicomSeries represents, and return + it as a numpy array. If this serie contains multiple images, the + resulting array is 3D, otherwise it's 2D. + """ + + # It's easy if no file or if just a single file + if len(self) == 0: + raise ValueError("Serie does not contain any files.") + elif len(self) == 1: + return self[0].get_numpy_array() + + # Check info + if self.info is None: + raise RuntimeError("Cannot return volume if series not finished.") + + # Init data (using what the dicom packaged produces as a reference) + slice = self[0].get_numpy_array() + vol = np.zeros(self.shape, dtype=slice.dtype) + vol[0] = slice + + # Fill volume + self._progressIndicator.start("loading data", "", len(self)) + for z in range(1, len(self)): + vol[z] = self[z].get_numpy_array() + self._progressIndicator.set_progress(z + 1) + self._progressIndicator.finish() + + # Done + import gc + + gc.collect() + return vol + + def _append(self, dcm): + self._entries.append(dcm) + + def _sort(self): + self._entries.sort(key=lambda k: k.InstanceNumber) + + def _finish(self): + """ + Evaluate the series of dicom files. Together they should make up + a volumetric dataset. This means the files should meet certain + conditions. 
Also some additional information has to be calculated, + such as the distance between the slices. This method sets the + attributes for "shape", "sampling" and "info". + + This method checks: + * that there are no missing files + * that the dimensions of all images match + * that the pixel spacing of all images match + """ + + # The datasets list should be sorted by instance number + L = self._entries + if len(L) == 0: + return + elif len(L) == 1: + self._info = L[0].info + return + + # Get previous + ds1 = L[0] + # Init measures to calculate average of + distance_sum = 0.0 + # Init measures to check (these are in 2D) + dimensions = ds1.Rows, ds1.Columns + # sampling = float(ds1.PixelSpacing[0]), float(ds1.PixelSpacing[1]) + sampling = ds1.info["sampling"][:2] # row, column + + for index in range(len(L)): + # The first round ds1 and ds2 will be the same, for the + # distance calculation this does not matter + # Get current + ds2 = L[index] + # Get positions + pos1 = float(ds1.ImagePositionPatient[2]) + pos2 = float(ds2.ImagePositionPatient[2]) + # Update distance_sum to calculate distance later + distance_sum += abs(pos1 - pos2) + # Test measures + dimensions2 = ds2.Rows, ds2.Columns + # sampling2 = float(ds2.PixelSpacing[0]), float(ds2.PixelSpacing[1]) + sampling2 = ds2.info["sampling"][:2] # row, column + if dimensions != dimensions2: + # We cannot produce a volume if the dimensions match + raise ValueError("Dimensions of slices does not match.") + if sampling != sampling2: + # We can still produce a volume, but we should notify the user + self._progressIndicator.write("Warn: sampling does not match.") + # Store previous + ds1 = ds2 + + # Finish calculating average distance + # (Note that there are len(L)-1 distances) + distance_mean = distance_sum / (len(L) - 1) + + # Set info dict + self._info = L[0].info.copy() + + # Store information that is specific for the serie + self._info["shape"] = (len(L),) + ds2.info["shape"] + self._info["sampling"] = 
(distance_mean,) + ds2.info["sampling"] + + +def list_files(files, path): + """List all files in the directory, recursively. """ + for item in os.listdir(path): + item = os.path.join(path, item) + if os.path.isdir(item): + list_files(files, item) + elif os.path.isfile(item): + files.append(item) + + +def process_directory(request, progressIndicator, readPixelData=False): + """ + Reads dicom files and returns a list of DicomSeries objects, which + contain information about the data, and can be used to load the + image or volume data. + + if readPixelData is True, the pixel data of all series is read. By + default the loading of pixeldata is deferred until it is requested + using the DicomSeries.get_pixel_array() method. In general, both + methods should be equally fast. + """ + # Get directory to examine + if os.path.isdir(request.filename): + path = request.filename + elif os.path.isfile(request.filename): + path = os.path.dirname(request.filename) + else: # pragma: no cover - tested earlier + raise ValueError( + "Dicom plugin needs a valid filename to examine " "the directory" + ) + + # Check files + files = [] + list_files(files, path) # Find files recursively + + # Gather file data and put in DicomSeries + series = {} + count = 0 + progressIndicator.start("examining files", "files", len(files)) + for filename in files: + # Show progress (note that we always start with a 0.0) + count += 1 + progressIndicator.set_progress(count) + # Skip DICOMDIR files + if filename.count("DICOMDIR"): # pragma: no cover + continue + # Try loading dicom ... 
+ try: + dcm = SimpleDicomReader(filename) + except NotADicomFile: + continue # skip non-dicom file + except Exception as why: # pragma: no cover + progressIndicator.write(str(why)) + continue + # Get SUID and register the file with an existing or new series object + try: + suid = dcm.SeriesInstanceUID + except AttributeError: # pragma: no cover + continue # some other kind of dicom file + if suid not in series: + series[suid] = DicomSeries(suid, progressIndicator) + series[suid]._append(dcm) + + # Finish progress + # progressIndicator.finish('Found %i series.' % len(series)) + + # Make a list and sort, so that the order is deterministic + series = list(series.values()) + series.sort(key=lambda x: x.suid) + + # Split series if necessary + for serie in reversed([serie for serie in series]): + splitSerieIfRequired(serie, series, progressIndicator) + + # Finish all series + # progressIndicator.start('analyse series', '', len(series)) + series_ = [] + for i in range(len(series)): + try: + series[i]._finish() + series_.append(series[i]) + except Exception as err: # pragma: no cover + progressIndicator.write(str(err)) + pass # Skip serie (probably report-like file without pixels) + # progressIndicator.set_progress(i+1) + progressIndicator.finish("Found %i correct series." % len(series_)) + + # Done + return series_ + + +def splitSerieIfRequired(serie, series, progressIndicator): + """ + Split the serie in multiple series if this is required. The choice + is based on examing the image position relative to the previous + image. If it differs too much, it is assumed that there is a new + dataset. This can happen for example in unspitted gated CT data. 
+ """ + + # Sort the original list and get local name + serie._sort() + L = serie._entries + # Init previous slice + ds1 = L[0] + # Check whether we can do this + if "ImagePositionPatient" not in ds1: + return + # Initialize a list of new lists + L2 = [[ds1]] + # Init slice distance estimate + distance = 0 + + for index in range(1, len(L)): + # Get current slice + ds2 = L[index] + # Get positions + pos1 = float(ds1.ImagePositionPatient[2]) + pos2 = float(ds2.ImagePositionPatient[2]) + # Get distances + newDist = abs(pos1 - pos2) + # deltaDist = abs(firstPos-pos2) + # If the distance deviates more than 2x from what we've seen, + # we can agree it's a new dataset. + if distance and newDist > 2.1 * distance: + L2.append([]) + distance = 0 + else: + # Test missing file + if distance and newDist > 1.5 * distance: + progressIndicator.write( + "Warning: missing file after %r" % ds1._filename + ) + distance = newDist + # Add to last list + L2[-1].append(ds2) + # Store previous + ds1 = ds2 + + # Split if we should + if len(L2) > 1: + # At what position are we now? + i = series.index(serie) + # Create new series + series2insert = [] + for L in L2: + newSerie = DicomSeries(serie.suid, progressIndicator) + newSerie._entries = L + series2insert.append(newSerie) + # Insert series and remove self + for newSerie in reversed(series2insert): + series.insert(i, newSerie) + series.remove(serie) diff --git a/venv/Lib/site-packages/imageio/plugins/_freeimage.py b/venv/Lib/site-packages/imageio/plugins/_freeimage.py new file mode 100644 index 000000000..1f6d47072 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/_freeimage.py @@ -0,0 +1,1332 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +# styletest: ignore E261 + +""" Module imageio/freeimage.py + +This module contains the wrapper code for the freeimage library. 
+The functions defined in this module are relatively thin; just thin +enough so that arguments and results are native Python/numpy data +types. + +""" + +import os +import sys +import ctypes +import threading +import logging +import numpy + +from ..core import ( + get_remote_file, + load_lib, + Dict, + resource_dirs, + IS_PYPY, + get_platform, + InternetNotAllowedError, + NeedDownloadError, +) + +logger = logging.getLogger(__name__) + +TEST_NUMPY_NO_STRIDES = False # To test pypy fallback + +FNAME_PER_PLATFORM = { + "osx32": "libfreeimage-3.16.0-osx10.6.dylib", # universal library + "osx64": "libfreeimage-3.16.0-osx10.6.dylib", + "win32": "FreeImage-3.15.4-win32.dll", + "win64": "FreeImage-3.15.1-win64.dll", + "linux32": "libfreeimage-3.16.0-linux32.so", + "linux64": "libfreeimage-3.16.0-linux64.so", +} + + +def download(directory=None, force_download=False): + """ Download the FreeImage library to your computer. + + Parameters + ---------- + directory : str | None + The directory where the file will be cached if a download was + required to obtain the file. By default, the appdata directory + is used. This is also the first directory that is checked for + a local version of the file. + force_download : bool | str + If True, the file will be downloaded even if a local copy exists + (and this copy will be overwritten). Can also be a YYYY-MM-DD date + to ensure a file is up-to-date (modified date of a file on disk, + if present, is checked). + """ + plat = get_platform() + if plat and plat in FNAME_PER_PLATFORM: + fname = "freeimage/" + FNAME_PER_PLATFORM[plat] + get_remote_file(fname=fname, directory=directory, force_download=force_download) + fi._lib = None # allow trying again (needed to make tests work) + + +def get_freeimage_lib(): + """ Ensure we have our version of the binary freeimage lib. 
+ """ + + lib = os.getenv("IMAGEIO_FREEIMAGE_LIB", None) + if lib: # pragma: no cover + return lib + + # Get filename to load + # If we do not provide a binary, the system may still do ... + plat = get_platform() + if plat and plat in FNAME_PER_PLATFORM: + try: + return get_remote_file("freeimage/" + FNAME_PER_PLATFORM[plat], auto=False) + except InternetNotAllowedError: + pass + except NeedDownloadError: + raise NeedDownloadError( + "Need FreeImage library. " + "You can obtain it with either:\n" + " - download using the command: " + "imageio_download_bin freeimage\n" + " - download by calling (in Python): " + "imageio.plugins.freeimage.download()\n" + ) + except RuntimeError as e: # pragma: no cover + logger.warning(str(e)) + + +# Define function to encode a filename to bytes (for the current system) +efn = lambda x: x.encode(sys.getfilesystemencoding()) + +# 4-byte quads of 0,v,v,v from 0,0,0,0 to 0,255,255,255 +GREY_PALETTE = numpy.arange(0, 0x01000000, 0x00010101, dtype=numpy.uint32) + + +class FI_TYPES(object): + FIT_UNKNOWN = 0 + FIT_BITMAP = 1 + FIT_UINT16 = 2 + FIT_INT16 = 3 + FIT_UINT32 = 4 + FIT_INT32 = 5 + FIT_FLOAT = 6 + FIT_DOUBLE = 7 + FIT_COMPLEX = 8 + FIT_RGB16 = 9 + FIT_RGBA16 = 10 + FIT_RGBF = 11 + FIT_RGBAF = 12 + + dtypes = { + FIT_BITMAP: numpy.uint8, + FIT_UINT16: numpy.uint16, + FIT_INT16: numpy.int16, + FIT_UINT32: numpy.uint32, + FIT_INT32: numpy.int32, + FIT_FLOAT: numpy.float32, + FIT_DOUBLE: numpy.float64, + FIT_COMPLEX: numpy.complex128, + FIT_RGB16: numpy.uint16, + FIT_RGBA16: numpy.uint16, + FIT_RGBF: numpy.float32, + FIT_RGBAF: numpy.float32, + } + + fi_types = { + (numpy.uint8, 1): FIT_BITMAP, + (numpy.uint8, 3): FIT_BITMAP, + (numpy.uint8, 4): FIT_BITMAP, + (numpy.uint16, 1): FIT_UINT16, + (numpy.int16, 1): FIT_INT16, + (numpy.uint32, 1): FIT_UINT32, + (numpy.int32, 1): FIT_INT32, + (numpy.float32, 1): FIT_FLOAT, + (numpy.float64, 1): FIT_DOUBLE, + (numpy.complex128, 1): FIT_COMPLEX, + (numpy.uint16, 3): FIT_RGB16, + (numpy.uint16, 
4): FIT_RGBA16, + (numpy.float32, 3): FIT_RGBF, + (numpy.float32, 4): FIT_RGBAF, + } + + extra_dims = { + FIT_UINT16: [], + FIT_INT16: [], + FIT_UINT32: [], + FIT_INT32: [], + FIT_FLOAT: [], + FIT_DOUBLE: [], + FIT_COMPLEX: [], + FIT_RGB16: [3], + FIT_RGBA16: [4], + FIT_RGBF: [3], + FIT_RGBAF: [4], + } + + +class IO_FLAGS(object): + FIF_LOAD_NOPIXELS = 0x8000 # loading: load the image header only + # # (not supported by all plugins) + BMP_DEFAULT = 0 + BMP_SAVE_RLE = 1 + CUT_DEFAULT = 0 + DDS_DEFAULT = 0 + EXR_DEFAULT = 0 # save data as half with piz-based wavelet compression + EXR_FLOAT = 0x0001 # save data as float instead of half (not recommended) + EXR_NONE = 0x0002 # save with no compression + EXR_ZIP = 0x0004 # save with zlib compression, in blocks of 16 scan lines + EXR_PIZ = 0x0008 # save with piz-based wavelet compression + EXR_PXR24 = 0x0010 # save with lossy 24-bit float compression + EXR_B44 = 0x0020 # save with lossy 44% float compression + # # - goes to 22% when combined with EXR_LC + EXR_LC = 0x0040 # save images with one luminance and two chroma channels, + # # rather than as RGB (lossy compression) + FAXG3_DEFAULT = 0 + GIF_DEFAULT = 0 + GIF_LOAD256 = 1 # Load the image as a 256 color image with ununsed + # # palette entries, if it's 16 or 2 color + GIF_PLAYBACK = 2 # 'Play' the GIF to generate each frame (as 32bpp) + # # instead of returning raw frame data when loading + HDR_DEFAULT = 0 + ICO_DEFAULT = 0 + ICO_MAKEALPHA = 1 # convert to 32bpp and create an alpha channel from the + # # AND-mask when loading + IFF_DEFAULT = 0 + J2K_DEFAULT = 0 # save with a 16:1 rate + JP2_DEFAULT = 0 # save with a 16:1 rate + JPEG_DEFAULT = 0 # loading (see JPEG_FAST); + # # saving (see JPEG_QUALITYGOOD|JPEG_SUBSAMPLING_420) + JPEG_FAST = 0x0001 # load the file as fast as possible, + # # sacrificing some quality + JPEG_ACCURATE = 0x0002 # load the file with the best quality, + # # sacrificing some speed + JPEG_CMYK = 0x0004 # load separated CMYK "as is" + # # (use 
| to combine with other load flags) + JPEG_EXIFROTATE = 0x0008 # load and rotate according to + # # Exif 'Orientation' tag if available + JPEG_QUALITYSUPERB = 0x80 # save with superb quality (100:1) + JPEG_QUALITYGOOD = 0x0100 # save with good quality (75:1) + JPEG_QUALITYNORMAL = 0x0200 # save with normal quality (50:1) + JPEG_QUALITYAVERAGE = 0x0400 # save with average quality (25:1) + JPEG_QUALITYBAD = 0x0800 # save with bad quality (10:1) + JPEG_PROGRESSIVE = 0x2000 # save as a progressive-JPEG + # # (use | to combine with other save flags) + JPEG_SUBSAMPLING_411 = 0x1000 # save with high 4x1 chroma + # # subsampling (4:1:1) + JPEG_SUBSAMPLING_420 = 0x4000 # save with medium 2x2 medium chroma + # # subsampling (4:2:0) - default value + JPEG_SUBSAMPLING_422 = 0x8000 # save /w low 2x1 chroma subsampling (4:2:2) + JPEG_SUBSAMPLING_444 = 0x10000 # save with no chroma subsampling (4:4:4) + JPEG_OPTIMIZE = 0x20000 # on saving, compute optimal Huffman coding tables + # # (can reduce a few percent of file size) + JPEG_BASELINE = 0x40000 # save basic JPEG, without metadata or any markers + KOALA_DEFAULT = 0 + LBM_DEFAULT = 0 + MNG_DEFAULT = 0 + PCD_DEFAULT = 0 + PCD_BASE = 1 # load the bitmap sized 768 x 512 + PCD_BASEDIV4 = 2 # load the bitmap sized 384 x 256 + PCD_BASEDIV16 = 3 # load the bitmap sized 192 x 128 + PCX_DEFAULT = 0 + PFM_DEFAULT = 0 + PICT_DEFAULT = 0 + PNG_DEFAULT = 0 + PNG_IGNOREGAMMA = 1 # loading: avoid gamma correction + PNG_Z_BEST_SPEED = 0x0001 # save using ZLib level 1 compression flag + # # (default value is 6) + PNG_Z_DEFAULT_COMPRESSION = 0x0006 # save using ZLib level 6 compression + # # flag (default recommended value) + PNG_Z_BEST_COMPRESSION = 0x0009 # save using ZLib level 9 compression flag + # # (default value is 6) + PNG_Z_NO_COMPRESSION = 0x0100 # save without ZLib compression + PNG_INTERLACED = 0x0200 # save using Adam7 interlacing (use | to combine + # # with other save flags) + PNM_DEFAULT = 0 + PNM_SAVE_RAW = 0 # Writer saves in 
RAW format (i.e. P4, P5 or P6) + PNM_SAVE_ASCII = 1 # Writer saves in ASCII format (i.e. P1, P2 or P3) + PSD_DEFAULT = 0 + PSD_CMYK = 1 # reads tags for separated CMYK (default is conversion to RGB) + PSD_LAB = 2 # reads tags for CIELab (default is conversion to RGB) + RAS_DEFAULT = 0 + RAW_DEFAULT = 0 # load the file as linear RGB 48-bit + RAW_PREVIEW = 1 # try to load the embedded JPEG preview with included + # # Exif Data or default to RGB 24-bit + RAW_DISPLAY = 2 # load the file as RGB 24-bit + SGI_DEFAULT = 0 + TARGA_DEFAULT = 0 + TARGA_LOAD_RGB888 = 1 # Convert RGB555 and ARGB8888 -> RGB888. + TARGA_SAVE_RLE = 2 # Save with RLE compression + TIFF_DEFAULT = 0 + TIFF_CMYK = 0x0001 # reads/stores tags for separated CMYK + # # (use | to combine with compression flags) + TIFF_PACKBITS = 0x0100 # save using PACKBITS compression + TIFF_DEFLATE = 0x0200 # save using DEFLATE (a.k.a. ZLIB) compression + TIFF_ADOBE_DEFLATE = 0x0400 # save using ADOBE DEFLATE compression + TIFF_NONE = 0x0800 # save without any compression + TIFF_CCITTFAX3 = 0x1000 # save using CCITT Group 3 fax encoding + TIFF_CCITTFAX4 = 0x2000 # save using CCITT Group 4 fax encoding + TIFF_LZW = 0x4000 # save using LZW compression + TIFF_JPEG = 0x8000 # save using JPEG compression + TIFF_LOGLUV = 0x10000 # save using LogLuv compression + WBMP_DEFAULT = 0 + XBM_DEFAULT = 0 + XPM_DEFAULT = 0 + + +class METADATA_MODELS(object): + FIMD_COMMENTS = 0 + FIMD_EXIF_MAIN = 1 + FIMD_EXIF_EXIF = 2 + FIMD_EXIF_GPS = 3 + FIMD_EXIF_MAKERNOTE = 4 + FIMD_EXIF_INTEROP = 5 + FIMD_IPTC = 6 + FIMD_XMP = 7 + FIMD_GEOTIFF = 8 + FIMD_ANIMATION = 9 + + +class METADATA_DATATYPE(object): + FIDT_BYTE = 1 # 8-bit unsigned integer + FIDT_ASCII = 2 # 8-bit bytes w/ last byte null + FIDT_SHORT = 3 # 16-bit unsigned integer + FIDT_LONG = 4 # 32-bit unsigned integer + FIDT_RATIONAL = 5 # 64-bit unsigned fraction + FIDT_SBYTE = 6 # 8-bit signed integer + FIDT_UNDEFINED = 7 # 8-bit untyped data + FIDT_SSHORT = 8 # 16-bit signed integer + 
FIDT_SLONG = 9 # 32-bit signed integer + FIDT_SRATIONAL = 10 # 64-bit signed fraction + FIDT_FLOAT = 11 # 32-bit IEEE floating point + FIDT_DOUBLE = 12 # 64-bit IEEE floating point + FIDT_IFD = 13 # 32-bit unsigned integer (offset) + FIDT_PALETTE = 14 # 32-bit RGBQUAD + FIDT_LONG8 = 16 # 64-bit unsigned integer + FIDT_SLONG8 = 17 # 64-bit signed integer + FIDT_IFD8 = 18 # 64-bit unsigned integer (offset) + + dtypes = { + FIDT_BYTE: numpy.uint8, + FIDT_SHORT: numpy.uint16, + FIDT_LONG: numpy.uint32, + FIDT_RATIONAL: [("numerator", numpy.uint32), ("denominator", numpy.uint32)], + FIDT_LONG8: numpy.uint64, + FIDT_SLONG8: numpy.int64, + FIDT_IFD8: numpy.uint64, + FIDT_SBYTE: numpy.int8, + FIDT_UNDEFINED: numpy.uint8, + FIDT_SSHORT: numpy.int16, + FIDT_SLONG: numpy.int32, + FIDT_SRATIONAL: [("numerator", numpy.int32), ("denominator", numpy.int32)], + FIDT_FLOAT: numpy.float32, + FIDT_DOUBLE: numpy.float64, + FIDT_IFD: numpy.uint32, + FIDT_PALETTE: [ + ("R", numpy.uint8), + ("G", numpy.uint8), + ("B", numpy.uint8), + ("A", numpy.uint8), + ], + } + + +class Freeimage(object): + """ Class to represent an interface to the FreeImage library. + This class is relatively thin. It provides a Pythonic API that converts + Freeimage objects to Python objects, but that's about it. + The actual implementation should be provided by the plugins. + + The recommended way to call into the Freeimage library (so that + errors and warnings show up in the right moment) is to use this + object as a context manager: + with imageio.fi as lib: + lib.FreeImage_GetPalette() + + """ + + _API = { + # All we're doing here is telling ctypes that some of the + # FreeImage functions return pointers instead of integers. (On + # 64-bit systems, without this information the pointers get + # truncated and crashes result). There's no need to list + # functions that return ints, or the types of the parameters + # to these or other functions -- that's fine to do implicitly. 
+ # Note that the ctypes immediately converts the returned void_p + # back to a python int again! This is really not helpful, + # because then passing it back to another library call will + # cause truncation-to-32-bits on 64-bit systems. Thanks, ctypes! + # So after these calls one must immediately re-wrap the int as + # a c_void_p if it is to be passed back into FreeImage. + "FreeImage_AllocateT": (ctypes.c_void_p, None), + "FreeImage_FindFirstMetadata": (ctypes.c_void_p, None), + "FreeImage_GetBits": (ctypes.c_void_p, None), + "FreeImage_GetPalette": (ctypes.c_void_p, None), + "FreeImage_GetTagKey": (ctypes.c_char_p, None), + "FreeImage_GetTagValue": (ctypes.c_void_p, None), + "FreeImage_CreateTag": (ctypes.c_void_p, None), + "FreeImage_Save": (ctypes.c_void_p, None), + "FreeImage_Load": (ctypes.c_void_p, None), + "FreeImage_LoadFromMemory": (ctypes.c_void_p, None), + "FreeImage_OpenMultiBitmap": (ctypes.c_void_p, None), + "FreeImage_LoadMultiBitmapFromMemory": (ctypes.c_void_p, None), + "FreeImage_LockPage": (ctypes.c_void_p, None), + "FreeImage_OpenMemory": (ctypes.c_void_p, None), + # 'FreeImage_ReadMemory': (ctypes.c_void_p, None), + # 'FreeImage_CloseMemory': (ctypes.c_void_p, None), + "FreeImage_GetVersion": (ctypes.c_char_p, None), + "FreeImage_GetFIFExtensionList": (ctypes.c_char_p, None), + "FreeImage_GetFormatFromFIF": (ctypes.c_char_p, None), + "FreeImage_GetFIFDescription": (ctypes.c_char_p, None), + "FreeImage_ColorQuantizeEx": (ctypes.c_void_p, None), + # Pypy wants some extra definitions, so here we go ... 
+ "FreeImage_IsLittleEndian": (ctypes.c_int, None), + "FreeImage_SetOutputMessage": (ctypes.c_void_p, None), + "FreeImage_GetFIFCount": (ctypes.c_int, None), + "FreeImage_IsPluginEnabled": (ctypes.c_int, None), + "FreeImage_GetFileType": (ctypes.c_int, None), + # + "FreeImage_GetTagType": (ctypes.c_int, None), + "FreeImage_GetTagLength": (ctypes.c_int, None), + "FreeImage_FindNextMetadata": (ctypes.c_int, None), + "FreeImage_FindCloseMetadata": (ctypes.c_void_p, None), + # + "FreeImage_GetFIFFromFilename": (ctypes.c_int, None), + "FreeImage_FIFSupportsReading": (ctypes.c_int, None), + "FreeImage_FIFSupportsWriting": (ctypes.c_int, None), + "FreeImage_FIFSupportsExportType": (ctypes.c_int, None), + "FreeImage_FIFSupportsExportBPP": (ctypes.c_int, None), + "FreeImage_GetHeight": (ctypes.c_int, None), + "FreeImage_GetWidth": (ctypes.c_int, None), + "FreeImage_GetImageType": (ctypes.c_int, None), + "FreeImage_GetBPP": (ctypes.c_int, None), + "FreeImage_GetColorsUsed": (ctypes.c_int, None), + "FreeImage_ConvertTo32Bits": (ctypes.c_void_p, None), + "FreeImage_GetPitch": (ctypes.c_int, None), + "FreeImage_Unload": (ctypes.c_void_p, None), + } + + def __init__(self): + + # Initialize freeimage lib as None + self._lib = None + + # A lock to create thread-safety + self._lock = threading.RLock() + + # Init log messages lists + self._messages = [] + + # Select functype for error handler + if sys.platform.startswith("win"): + functype = ctypes.WINFUNCTYPE + else: + functype = ctypes.CFUNCTYPE + + # Create output message handler + @functype(None, ctypes.c_int, ctypes.c_char_p) + def error_handler(fif, message): + message = message.decode("utf-8") + self._messages.append(message) + while (len(self._messages)) > 256: + self._messages.pop(0) + + # Make sure to keep a ref to function + self._error_handler = error_handler + + @property + def lib(self): + if self._lib is None: + try: + self.load_freeimage() + except OSError as err: + self._lib = "The freeimage library could not be 
loaded: " + self._lib += str(err) + if isinstance(self._lib, str): + raise RuntimeError(self._lib) + return self._lib + + def has_lib(self): + try: + self.lib + except Exception: + return False + return True + + def load_freeimage(self): + """ Try to load the freeimage lib from the system. If not successful, + try to download the imageio version and try again. + """ + # Load library and register API + success = False + try: + # Try without forcing a download, but giving preference + # to the imageio-provided lib (if previously downloaded) + self._load_freeimage() + self._register_api() + if self.lib.FreeImage_GetVersion().decode("utf-8") >= "3.15": + success = True + except OSError: + pass + + if not success: + # Ensure we have our own lib, try again + get_freeimage_lib() + self._load_freeimage() + self._register_api() + + # Wrap up + self.lib.FreeImage_SetOutputMessage(self._error_handler) + self.lib_version = self.lib.FreeImage_GetVersion().decode("utf-8") + + def _load_freeimage(self): + + # Define names + lib_names = ["freeimage", "libfreeimage"] + exact_lib_names = [ + "FreeImage", + "libfreeimage.dylib", + "libfreeimage.so", + "libfreeimage.so.3", + ] + # Add names of libraries that we provide (that file may not exist) + res_dirs = resource_dirs() + plat = get_platform() + if plat: # Can be None on e.g. FreeBSD + fname = FNAME_PER_PLATFORM[plat] + for dir in res_dirs: + exact_lib_names.insert(0, os.path.join(dir, "freeimage", fname)) + + # Add the path specified with IMAGEIO_FREEIMAGE_LIB: + lib = os.getenv("IMAGEIO_FREEIMAGE_LIB", None) + if lib is not None: + exact_lib_names.insert(0, lib) + + # Load + try: + lib, fname = load_lib(exact_lib_names, lib_names, res_dirs) + except OSError as err: # pragma: no cover + err_msg = str(err) + "\nPlease install the FreeImage library." 
+ raise OSError(err_msg) + + # Store + self._lib = lib + self.lib_fname = fname + + def _register_api(self): + # Albert's ctypes pattern + for f, (restype, argtypes) in self._API.items(): + func = getattr(self.lib, f) + func.restype = restype + func.argtypes = argtypes + + ## Handling of output messages + + def __enter__(self): + self._lock.acquire() + return self.lib + + def __exit__(self, *args): + self._show_any_warnings() + self._lock.release() + + def _reset_log(self): + """ Reset the list of output messages. Call this before + loading or saving an image with the FreeImage API. + """ + self._messages = [] + + def _get_error_message(self): + """ Get the output messages produced since the last reset as + one string. Returns 'No known reason.' if there are no messages. + Also resets the log. + """ + if self._messages: + res = " ".join(self._messages) + self._reset_log() + return res + else: + return "No known reason." + + def _show_any_warnings(self): + """ If there were any messages since the last reset, show them + as a warning. Otherwise do nothing. Also resets the messages. + """ + if self._messages: + logger.warning("imageio.freeimage warning: " + self._get_error_message()) + self._reset_log() + + def get_output_log(self): + """ Return a list of the last 256 output messages + (warnings and errors) produced by the FreeImage library. + """ + # This message log is not cleared/reset, but kept to 256 elements. + return [m for m in self._messages] + + def getFIF(self, filename, mode, bb=None): + """ Get the freeimage Format (FIF) from a given filename. + If mode is 'r', will try to determine the format by reading + the file, otherwise only the filename is used. + + This function also tests whether the format supports reading/writing. + """ + with self as lib: + + # Init + ftype = -1 + if mode not in "rw": + raise ValueError('Invalid mode (must be "r" or "w").') + + # Try getting format from the content. 
Note that some files + # do not have a header that allows reading the format from + # the file. + if mode == "r": + if bb is not None: + fimemory = lib.FreeImage_OpenMemory(ctypes.c_char_p(bb), len(bb)) + ftype = lib.FreeImage_GetFileTypeFromMemory( + ctypes.c_void_p(fimemory), len(bb) + ) + lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + if (ftype == -1) and os.path.isfile(filename): + ftype = lib.FreeImage_GetFileType(efn(filename), 0) + # Try getting the format from the extension + if ftype == -1: + ftype = lib.FreeImage_GetFIFFromFilename(efn(filename)) + + # Test if ok + if ftype == -1: + raise ValueError('Cannot determine format of file "%s"' % filename) + elif mode == "w" and not lib.FreeImage_FIFSupportsWriting(ftype): + raise ValueError('Cannot write the format of file "%s"' % filename) + elif mode == "r" and not lib.FreeImage_FIFSupportsReading(ftype): + raise ValueError('Cannot read the format of file "%s"' % filename) + return ftype + + def create_bitmap(self, filename, ftype, flags=0): + """ create_bitmap(filename, ftype, flags=0) + Create a wrapped bitmap object. + """ + return FIBitmap(self, filename, ftype, flags) + + def create_multipage_bitmap(self, filename, ftype, flags=0): + """ create_multipage_bitmap(filename, ftype, flags=0) + Create a wrapped multipage bitmap object. 
+ """ + return FIMultipageBitmap(self, filename, ftype, flags) + + +class FIBaseBitmap(object): + def __init__(self, fi, filename, ftype, flags): + self._fi = fi + self._filename = filename + self._ftype = ftype + self._flags = flags + self._bitmap = None + self._close_funcs = [] + + def __del__(self): + self.close() + + def close(self): + if (self._bitmap is not None) and self._close_funcs: + for close_func in self._close_funcs: + try: + with self._fi: + fun = close_func[0] + fun(*close_func[1:]) + except Exception: # pragma: no cover + pass + self._close_funcs = [] + self._bitmap = None + + def _set_bitmap(self, bitmap, close_func=None): + """ Function to set the bitmap and specify the function to unload it. + """ + if self._bitmap is not None: + pass # bitmap is converted + if close_func is None: + close_func = self._fi.lib.FreeImage_Unload, bitmap + + self._bitmap = bitmap + if close_func: + self._close_funcs.append(close_func) + + def get_meta_data(self): + + # todo: there is also FreeImage_TagToString, is that useful? + # and would that work well when reading and then saving? 
+ + # Create a list of (model_name, number) tuples + models = [ + (name[5:], number) + for name, number in METADATA_MODELS.__dict__.items() + if name.startswith("FIMD_") + ] + + # Prepare + metadata = Dict() + tag = ctypes.c_void_p() + + with self._fi as lib: + + # Iterate over all FreeImage meta models + for model_name, number in models: + + # Find beginning, get search handle + mdhandle = lib.FreeImage_FindFirstMetadata( + number, self._bitmap, ctypes.byref(tag) + ) + mdhandle = ctypes.c_void_p(mdhandle) + if mdhandle: + + # Iterate over all tags in this model + more = True + while more: + # Get info about tag + tag_name = lib.FreeImage_GetTagKey(tag).decode("utf-8") + tag_type = lib.FreeImage_GetTagType(tag) + byte_size = lib.FreeImage_GetTagLength(tag) + char_ptr = ctypes.c_char * byte_size + data = char_ptr.from_address(lib.FreeImage_GetTagValue(tag)) + # Convert in a way compatible with Pypy + tag_bytes = bytes(bytearray(data)) + # The default value is the raw bytes + tag_val = tag_bytes + # Convert to a Python value in the metadata dict + if tag_type == METADATA_DATATYPE.FIDT_ASCII: + tag_val = tag_bytes.decode("utf-8", "replace") + elif tag_type in METADATA_DATATYPE.dtypes: + dtype = METADATA_DATATYPE.dtypes[tag_type] + if IS_PYPY and isinstance(dtype, (list, tuple)): + pass # pragma: no cover - or we get a segfault + else: + try: + tag_val = numpy.frombuffer( + tag_bytes, dtype=dtype + ).copy() + if len(tag_val) == 1: + tag_val = tag_val[0] + except Exception: # pragma: no cover + pass + # Store data in dict + subdict = metadata.setdefault(model_name, Dict()) + subdict[tag_name] = tag_val + # Next + more = lib.FreeImage_FindNextMetadata( + mdhandle, ctypes.byref(tag) + ) + + # Close search handle for current meta model + lib.FreeImage_FindCloseMetadata(mdhandle) + + # Done + return metadata + + def set_meta_data(self, metadata): + + # Create a dict mapping model_name to number + models = {} + for name, number in METADATA_MODELS.__dict__.items(): + if 
name.startswith("FIMD_"): + models[name[5:]] = number + + # Create a mapping from numpy.dtype to METADATA_DATATYPE + def get_tag_type_number(dtype): + for number, numpy_dtype in METADATA_DATATYPE.dtypes.items(): + if dtype == numpy_dtype: + return number + else: + return None + + with self._fi as lib: + + for model_name, subdict in metadata.items(): + + # Get model number + number = models.get(model_name, None) + if number is None: + continue # Unknown model, silent ignore + + for tag_name, tag_val in subdict.items(): + + # Create new tag + tag = lib.FreeImage_CreateTag() + tag = ctypes.c_void_p(tag) + + try: + # Convert Python value to FI type, val + is_ascii = False + if isinstance(tag_val, str): + try: + tag_bytes = tag_val.encode("ascii") + is_ascii = True + except UnicodeError: + pass + if is_ascii: + tag_type = METADATA_DATATYPE.FIDT_ASCII + tag_count = len(tag_bytes) + else: + if not hasattr(tag_val, "dtype"): + tag_val = numpy.array([tag_val]) + tag_type = get_tag_type_number(tag_val.dtype) + if tag_type is None: + logger.warning( + "imageio.freeimage warning: Could not " + "determine tag type of %r." % tag_name + ) + continue + tag_bytes = tag_val.tostring() + tag_count = tag_val.size + # Set properties + lib.FreeImage_SetTagKey(tag, tag_name.encode("utf-8")) + lib.FreeImage_SetTagType(tag, tag_type) + lib.FreeImage_SetTagCount(tag, tag_count) + lib.FreeImage_SetTagLength(tag, len(tag_bytes)) + lib.FreeImage_SetTagValue(tag, tag_bytes) + # Store tag + tag_key = lib.FreeImage_GetTagKey(tag) + lib.FreeImage_SetMetadata(number, self._bitmap, tag_key, tag) + + except Exception as err: # pragma: no cover + logger.warning( + "imagio.freeimage warning: Could not set tag " + "%r: %s, %s" + % (tag_name, self._fi._get_error_message(), str(err)) + ) + finally: + lib.FreeImage_DeleteTag(tag) + + +class FIBitmap(FIBaseBitmap): + """ Wrapper for the FI bitmap object. 
+ """ + + def allocate(self, array): + + # Prepare array + assert isinstance(array, numpy.ndarray) + shape = array.shape + dtype = array.dtype + + # Get shape and channel info + r, c = shape[:2] + if len(shape) == 2: + n_channels = 1 + elif len(shape) == 3: + n_channels = shape[2] + else: + n_channels = shape[0] + + # Get fi_type + try: + fi_type = FI_TYPES.fi_types[(dtype.type, n_channels)] + self._fi_type = fi_type + except KeyError: + raise ValueError("Cannot write arrays of given type and shape.") + + # Allocate bitmap + with self._fi as lib: + bpp = 8 * dtype.itemsize * n_channels + bitmap = lib.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0) + bitmap = ctypes.c_void_p(bitmap) + + # Check and store + if not bitmap: # pragma: no cover + raise RuntimeError( + "Could not allocate bitmap for storage: %s" + % self._fi._get_error_message() + ) + self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + + def load_from_filename(self, filename=None): + if filename is None: + filename = self._filename + + with self._fi as lib: + # Create bitmap + bitmap = lib.FreeImage_Load(self._ftype, efn(filename), self._flags) + bitmap = ctypes.c_void_p(bitmap) + + # Check and store + if not bitmap: # pragma: no cover + raise ValueError( + 'Could not load bitmap "%s": %s' + % (self._filename, self._fi._get_error_message()) + ) + self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + + # def load_from_bytes(self, bb): + # with self._fi as lib: + # # Create bitmap + # fimemory = lib.FreeImage_OpenMemory( + # ctypes.c_char_p(bb), len(bb)) + # bitmap = lib.FreeImage_LoadFromMemory( + # self._ftype, ctypes.c_void_p(fimemory), self._flags) + # bitmap = ctypes.c_void_p(bitmap) + # lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + # + # # Check + # if not bitmap: + # raise ValueError('Could not load bitmap "%s": %s' + # % (self._filename, self._fi._get_error_message())) + # else: + # self._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + + def save_to_filename(self, 
filename=None): + if filename is None: + filename = self._filename + + ftype = self._ftype + bitmap = self._bitmap + fi_type = self._fi_type # element type + + with self._fi as lib: + # Check if can write + if fi_type == FI_TYPES.FIT_BITMAP: + can_write = lib.FreeImage_FIFSupportsExportBPP( + ftype, lib.FreeImage_GetBPP(bitmap) + ) + else: + can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type) + if not can_write: + raise TypeError("Cannot save image of this format " "to this file type") + + # Save to file + res = lib.FreeImage_Save(ftype, bitmap, efn(filename), self._flags) + # Check + if not res: # pragma: no cover, we do so many checks, this is rare + raise RuntimeError( + 'Could not save file "%s": %s' + % (self._filename, self._fi._get_error_message()) + ) + + # def save_to_bytes(self): + # ftype = self._ftype + # bitmap = self._bitmap + # fi_type = self._fi_type # element type + # + # with self._fi as lib: + # # Check if can write + # if fi_type == FI_TYPES.FIT_BITMAP: + # can_write = lib.FreeImage_FIFSupportsExportBPP(ftype, + # lib.FreeImage_GetBPP(bitmap)) + # else: + # can_write = lib.FreeImage_FIFSupportsExportType(ftype, fi_type) + # if not can_write: + # raise TypeError('Cannot save image of this format ' + # 'to this file type') + # + # # Extract the bytes + # fimemory = lib.FreeImage_OpenMemory(0, 0) + # res = lib.FreeImage_SaveToMemory(ftype, bitmap, + # ctypes.c_void_p(fimemory), + # self._flags) + # if res: + # N = lib.FreeImage_TellMemory(ctypes.c_void_p(fimemory)) + # result = ctypes.create_string_buffer(N) + # lib.FreeImage_SeekMemory(ctypes.c_void_p(fimemory), 0) + # lib.FreeImage_ReadMemory(result, 1, N, ctypes.c_void_p(fimemory)) + # result = result.raw + # lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + # + # # Check + # if not res: + # raise RuntimeError('Could not save file "%s": %s' + # % (self._filename, self._fi._get_error_message())) + # + # # Done + # return result + + def get_image_data(self): + dtype, shape, bpp = 
self._get_type_and_shape() + array = self._wrap_bitmap_bits_in_array(shape, dtype, False) + with self._fi as lib: + isle = lib.FreeImage_IsLittleEndian() + + # swizzle the color components and flip the scanlines to go from + # FreeImage's BGR[A] and upside-down internal memory format to + # something more normal + def n(arr): + # return arr[..., ::-1].T # Does not work on numpypy yet + if arr.ndim == 1: # pragma: no cover + return arr[::-1].T + elif arr.ndim == 2: # Always the case here ... + return arr[:, ::-1].T + elif arr.ndim == 3: # pragma: no cover + return arr[:, :, ::-1].T + elif arr.ndim == 4: # pragma: no cover + return arr[:, :, :, ::-1].T + + if len(shape) == 3 and isle and dtype.type == numpy.uint8: + b = n(array[0]) + g = n(array[1]) + r = n(array[2]) + if shape[0] == 3: + return numpy.dstack((r, g, b)) + elif shape[0] == 4: + a = n(array[3]) + return numpy.dstack((r, g, b, a)) + else: # pragma: no cover - we check this earlier + raise ValueError("Cannot handle images of shape %s" % shape) + + # We need to copy because array does *not* own its memory + # after bitmap is freed. + a = n(array).copy() + return a + + def set_image_data(self, array): + + # Prepare array + assert isinstance(array, numpy.ndarray) + shape = array.shape + dtype = array.dtype + with self._fi as lib: + isle = lib.FreeImage_IsLittleEndian() + + # Calculate shape and channels + r, c = shape[:2] + if len(shape) == 2: + n_channels = 1 + w_shape = (c, r) + elif len(shape) == 3: + n_channels = shape[2] + w_shape = (n_channels, c, r) + else: + n_channels = shape[0] + + def n(arr): # normalise to freeimage's in-memory format + return arr[::-1].T + + wrapped_array = self._wrap_bitmap_bits_in_array(w_shape, dtype, True) + # swizzle the color components and flip the scanlines to go to + # FreeImage's BGR[A] and upside-down internal memory format + # The BGR[A] order is only used for 8bits per channel images + # on little endian machines. For everything else RGB[A] is + # used. 
+ if len(shape) == 3 and isle and dtype.type == numpy.uint8: + R = array[:, :, 0] + G = array[:, :, 1] + B = array[:, :, 2] + wrapped_array[0] = n(B) + wrapped_array[1] = n(G) + wrapped_array[2] = n(R) + if shape[2] == 4: + A = array[:, :, 3] + wrapped_array[3] = n(A) + else: + wrapped_array[:] = n(array) + if self._need_finish: + self._finish_wrapped_array(wrapped_array) + + if len(shape) == 2 and dtype.type == numpy.uint8: + with self._fi as lib: + palette = lib.FreeImage_GetPalette(self._bitmap) + palette = ctypes.c_void_p(palette) + if not palette: + raise RuntimeError("Could not get image palette") + try: + palette_data = GREY_PALETTE.ctypes.data + except Exception: # pragma: no cover - IS_PYPY + palette_data = GREY_PALETTE.__array_interface__["data"][0] + ctypes.memmove(palette, palette_data, 1024) + + def _wrap_bitmap_bits_in_array(self, shape, dtype, save): + """Return an ndarray view on the data in a FreeImage bitmap. Only + valid for as long as the bitmap is loaded (if single page) / locked + in memory (if multipage). This is used in loading data, but + also during saving, to prepare a strided numpy array buffer. + + """ + # Get bitmap info + with self._fi as lib: + pitch = lib.FreeImage_GetPitch(self._bitmap) + bits = lib.FreeImage_GetBits(self._bitmap) + + # Get more info + height = shape[-1] + byte_size = height * pitch + itemsize = dtype.itemsize + + # Get strides + if len(shape) == 3: + strides = (itemsize, shape[0] * itemsize, pitch) + else: + strides = (itemsize, pitch) + + # Create numpy array and return + data = (ctypes.c_char * byte_size).from_address(bits) + try: + self._need_finish = False + if TEST_NUMPY_NO_STRIDES: + raise NotImplementedError() + return numpy.ndarray(shape, dtype=dtype, buffer=data, strides=strides) + except NotImplementedError: + # IS_PYPY - not very efficient. We create a C-contiguous + # numpy array (because pypy does not support Fortran-order) + # and shape it such that the rest of the code can remain. 
+ if save: + self._need_finish = True # Flag to use _finish_wrapped_array + return numpy.zeros(shape, dtype=dtype) + else: + bb = bytes(bytearray(data)) + array = numpy.frombuffer(bb, dtype=dtype).copy() + # Deal with strides + if len(shape) == 3: + array.shape = shape[2], strides[-1] // shape[0], shape[0] + array2 = array[: shape[2], : shape[1], : shape[0]] + array = numpy.zeros(shape, dtype=array.dtype) + for i in range(shape[0]): + array[i] = array2[:, :, i].T + else: + array.shape = shape[1], strides[-1] + array = array[: shape[1], : shape[0]].T + return array + + def _finish_wrapped_array(self, array): # IS_PYPY + """ Hardcore way to inject numpy array in bitmap. + """ + # Get bitmap info + with self._fi as lib: + pitch = lib.FreeImage_GetPitch(self._bitmap) + bits = lib.FreeImage_GetBits(self._bitmap) + bpp = lib.FreeImage_GetBPP(self._bitmap) + # Get channels and realwidth + nchannels = bpp // 8 // array.itemsize + realwidth = pitch // nchannels + # Apply padding for pitch if necessary + extra = realwidth - array.shape[-2] + assert 0 <= extra < 10 + # Make sort of Fortran, also take padding (i.e. 
pitch) into account + newshape = array.shape[-1], realwidth, nchannels + array2 = numpy.zeros(newshape, array.dtype) + if nchannels == 1: + array2[:, : array.shape[-2], 0] = array.T + else: + for i in range(nchannels): + array2[:, : array.shape[-2], i] = array[i, :, :].T + # copy data + data_ptr = array2.__array_interface__["data"][0] + ctypes.memmove(bits, data_ptr, array2.nbytes) + del array2 + + def _get_type_and_shape(self): + bitmap = self._bitmap + + # Get info on bitmap + with self._fi as lib: + w = lib.FreeImage_GetWidth(bitmap) + h = lib.FreeImage_GetHeight(bitmap) + self._fi_type = fi_type = lib.FreeImage_GetImageType(bitmap) + if not fi_type: + raise ValueError("Unknown image pixel type") + + # Determine required props for numpy array + bpp = None + dtype = FI_TYPES.dtypes[fi_type] + + if fi_type == FI_TYPES.FIT_BITMAP: + with self._fi as lib: + bpp = lib.FreeImage_GetBPP(bitmap) + has_pallette = lib.FreeImage_GetColorsUsed(bitmap) + if has_pallette: + # Examine the palette. If it is grayscale, we return as such + if has_pallette == 256: + palette = lib.FreeImage_GetPalette(bitmap) + palette = ctypes.c_void_p(palette) + p = (ctypes.c_uint8 * (256 * 4)).from_address(palette.value) + p = numpy.frombuffer(p, numpy.uint32).copy() + if (GREY_PALETTE == p).all(): + extra_dims = [] + return numpy.dtype(dtype), extra_dims + [w, h], bpp + # Convert bitmap and call this method again + newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap) + newbitmap = ctypes.c_void_p(newbitmap) + self._set_bitmap(newbitmap) + return self._get_type_and_shape() + elif bpp == 8: + extra_dims = [] + elif bpp == 24: + extra_dims = [3] + elif bpp == 32: + extra_dims = [4] + else: # pragma: no cover + # raise ValueError('Cannot convert %d BPP bitmap' % bpp) + # Convert bitmap and call this method again + newbitmap = lib.FreeImage_ConvertTo32Bits(bitmap) + newbitmap = ctypes.c_void_p(newbitmap) + self._set_bitmap(newbitmap) + return self._get_type_and_shape() + else: + extra_dims = 
FI_TYPES.extra_dims[fi_type] + + # Return dtype and shape + return numpy.dtype(dtype), extra_dims + [w, h], bpp + + def quantize(self, quantizer=0, palettesize=256): + """ Quantize the bitmap to make it 8-bit (paletted). Returns a new + FIBitmap object. + Only for 24 bit images. + """ + with self._fi as lib: + # New bitmap + bitmap = lib.FreeImage_ColorQuantizeEx( + self._bitmap, quantizer, palettesize, 0, None + ) + bitmap = ctypes.c_void_p(bitmap) + + # Check and return + if not bitmap: + raise ValueError( + 'Could not quantize bitmap "%s": %s' + % (self._filename, self._fi._get_error_message()) + ) + + new = FIBitmap(self._fi, self._filename, self._ftype, self._flags) + new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) + new._fi_type = self._fi_type + return new + + +# def convert_to_32bit(self): +# """ Convert to 32bit image. +# """ +# with self._fi as lib: +# # New bitmap +# bitmap = lib.FreeImage_ConvertTo32Bits(self._bitmap) +# bitmap = ctypes.c_void_p(bitmap) +# +# # Check and return +# if not bitmap: +# raise ValueError('Could not convert bitmap to 32bit "%s": %s' % +# (self._filename, +# self._fi._get_error_message())) +# else: +# new = FIBitmap(self._fi, self._filename, self._ftype, +# self._flags) +# new._set_bitmap(bitmap, (lib.FreeImage_Unload, bitmap)) +# new._fi_type = self._fi_type +# return new + + +class FIMultipageBitmap(FIBaseBitmap): + """ Wrapper for the multipage FI bitmap object. 
+ """ + + def load_from_filename(self, filename=None): + if filename is None: # pragma: no cover + filename = self._filename + + # Prepare + create_new = False + read_only = True + keep_cache_in_memory = False + + # Try opening + with self._fi as lib: + + # Create bitmap + multibitmap = lib.FreeImage_OpenMultiBitmap( + self._ftype, + efn(filename), + create_new, + read_only, + keep_cache_in_memory, + self._flags, + ) + multibitmap = ctypes.c_void_p(multibitmap) + + # Check + if not multibitmap: # pragma: no cover + err = self._fi._get_error_message() + raise ValueError( + 'Could not open file "%s" as multi-image: %s' + % (self._filename, err) + ) + self._set_bitmap(multibitmap, (lib.FreeImage_CloseMultiBitmap, multibitmap)) + + # def load_from_bytes(self, bb): + # with self._fi as lib: + # # Create bitmap + # fimemory = lib.FreeImage_OpenMemory( + # ctypes.c_char_p(bb), len(bb)) + # multibitmap = lib.FreeImage_LoadMultiBitmapFromMemory( + # self._ftype, ctypes.c_void_p(fimemory), self._flags) + # multibitmap = ctypes.c_void_p(multibitmap) + # #lib.FreeImage_CloseMemory(ctypes.c_void_p(fimemory)) + # self._mem = fimemory + # self._bytes = bb + # # Check + # if not multibitmap: + # raise ValueError('Could not load multibitmap "%s": %s' + # % (self._filename, self._fi._get_error_message())) + # else: + # self._set_bitmap(multibitmap, + # (lib.FreeImage_CloseMultiBitmap, multibitmap)) + + def save_to_filename(self, filename=None): + if filename is None: # pragma: no cover + filename = self._filename + + # Prepare + create_new = True + read_only = False + keep_cache_in_memory = False + + # Open the file + # todo: Set flags at close func + with self._fi as lib: + multibitmap = lib.FreeImage_OpenMultiBitmap( + self._ftype, + efn(filename), + create_new, + read_only, + keep_cache_in_memory, + 0, + ) + multibitmap = ctypes.c_void_p(multibitmap) + + # Check + if not multibitmap: # pragma: no cover + msg = 'Could not open file "%s" for writing multi-image: %s' % ( + 
self._filename, + self._fi._get_error_message(), + ) + raise ValueError(msg) + self._set_bitmap(multibitmap, (lib.FreeImage_CloseMultiBitmap, multibitmap)) + + def __len__(self): + with self._fi as lib: + return lib.FreeImage_GetPageCount(self._bitmap) + + def get_page(self, index): + """ Return the sub-bitmap for the given page index. + Please close the returned bitmap when done. + """ + with self._fi as lib: + + # Create low-level bitmap in freeimage + bitmap = lib.FreeImage_LockPage(self._bitmap, index) + bitmap = ctypes.c_void_p(bitmap) + if not bitmap: # pragma: no cover + raise ValueError( + "Could not open sub-image %i in %r: %s" + % (index, self._filename, self._fi._get_error_message()) + ) + + # Get bitmap object to wrap this bitmap + bm = FIBitmap(self._fi, self._filename, self._ftype, self._flags) + bm._set_bitmap( + bitmap, (lib.FreeImage_UnlockPage, self._bitmap, bitmap, False) + ) + return bm + + def append_bitmap(self, bitmap): + """ Add a sub-bitmap to the multi-page bitmap. + """ + with self._fi as lib: + # no return value + lib.FreeImage_AppendPage(self._bitmap, bitmap._bitmap) + + +# Create instance +fi = Freeimage() diff --git a/venv/Lib/site-packages/imageio/plugins/_swf.py b/venv/Lib/site-packages/imageio/plugins/_swf.py new file mode 100644 index 000000000..859757078 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/_swf.py @@ -0,0 +1,902 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. +# This code was taken from visvis/vvmovy/images2swf.py + +# styletest: ignore E261 + +""" +Provides a function (write_swf) to store a series of numpy arrays in an +SWF movie, that can be played on a wide range of OS's. + +In desperation of wanting to share animated images, and then lacking a good +writer for animated gif or .avi, I decided to look into SWF. This format +is very well documented. + +This is a pure python module to create an SWF file that shows a series +of images. 
The images are stored using the DEFLATE algorithm (same as +PNG and ZIP and which is included in the standard Python distribution). +As this compression algorithm is much more effective than that used in +GIF images, we obtain better quality (24 bit colors + alpha channel) +while still producesing smaller files (a test showed ~75%). Although +SWF also allows for JPEG compression, doing so would probably require +a third party library for the JPEG encoding/decoding, we could +perhaps do this via Pillow or freeimage. + +sources and tools: + +- SWF on wikipedia +- Adobes "SWF File Format Specification" version 10 + (http://www.adobe.com/devnet/swf/pdf/swf_file_format_spec_v10.pdf) +- swftools (swfdump in specific) for debugging +- iwisoft swf2avi can be used to convert swf to avi/mpg/flv with really + good quality, while file size is reduced with factors 20-100. + A good program in my opinion. The free version has the limitation + of a watermark in the upper left corner. + +""" + +import os +import zlib +import time # noqa +import logging + +import numpy as np + + +logger = logging.getLogger(__name__) + +# todo: use Pillow to support reading JPEG images from SWF? + + +## Base functions and classes + + +class BitArray: + """ Dynamic array of bits that automatically resizes + with factors of two. + Append bits using .append() or += + You can reverse bits using .reverse() + """ + + def __init__(self, initvalue=None): + self.data = np.zeros((16,), dtype=np.uint8) + self._len = 0 + if initvalue is not None: + self.append(initvalue) + + def __len__(self): + return self._len # self.data.shape[0] + + def __repr__(self): + return self.data[: self._len].tostring().decode("ascii") + + def _checkSize(self): + # check length... 
grow if necessary + arraylen = self.data.shape[0] + if self._len >= arraylen: + tmp = np.zeros((arraylen * 2,), dtype=np.uint8) + tmp[: self._len] = self.data[: self._len] + self.data = tmp + + def __add__(self, value): + self.append(value) + return self + + def append(self, bits): + + # check input + if isinstance(bits, BitArray): + bits = str(bits) + if isinstance(bits, int): # pragma: no cover - we dont use it + bits = str(bits) + if not isinstance(bits, str): # pragma: no cover + raise ValueError("Append bits as strings or integers!") + + # add bits + for bit in bits: + self.data[self._len] = ord(bit) + self._len += 1 + self._checkSize() + + def reverse(self): + """ In-place reverse. """ + tmp = self.data[: self._len].copy() + self.data[: self._len] = tmp[::-1] + + def tobytes(self): + """ Convert to bytes. If necessary, + zeros are padded to the end (right side). + """ + bits = str(self) + + # determine number of bytes + nbytes = 0 + while nbytes * 8 < len(bits): + nbytes += 1 + # pad + bits = bits.ljust(nbytes * 8, "0") + + # go from bits to bytes + bb = bytes() + for i in range(nbytes): + tmp = int(bits[i * 8 : (i + 1) * 8], 2) + bb += int2uint8(tmp) + + # done + return bb + + +def int2uint32(i): + return int(i).to_bytes(4, "little") + + +def int2uint16(i): + return int(i).to_bytes(2, "little") + + +def int2uint8(i): + return int(i).to_bytes(1, "little") + + +def int2bits(i, n=None): + """ convert int to a string of bits (0's and 1's in a string), + pad to n elements. Convert back using int(ss,2). 
""" + ii = i + + # make bits + bb = BitArray() + while ii > 0: + bb += str(ii % 2) + ii = ii >> 1 + bb.reverse() + + # justify + if n is not None: + if len(bb) > n: # pragma: no cover + raise ValueError("int2bits fail: len larger than padlength.") + bb = str(bb).rjust(n, "0") + + # done + return BitArray(bb) + + +def bits2int(bb, n=8): + # Init + value = "" + + # Get value in bits + for i in range(len(bb)): + b = bb[i : i + 1] + tmp = bin(ord(b))[2:] + # value += tmp.rjust(8,'0') + value = tmp.rjust(8, "0") + value + + # Make decimal + return int(value[:n], 2) + + +def get_type_and_len(bb): + """ bb should be 6 bytes at least + Return (type, length, length_of_full_tag) + """ + # Init + value = "" + + # Get first 16 bits + for i in range(2): + b = bb[i : i + 1] + tmp = bin(ord(b))[2:] + # value += tmp.rjust(8,'0') + value = tmp.rjust(8, "0") + value + + # Get type and length + type = int(value[:10], 2) + L = int(value[10:], 2) + L2 = L + 2 + + # Long tag header? + if L == 63: # '111111' + value = "" + for i in range(2, 6): + b = bb[i : i + 1] # becomes a single-byte bytes() + tmp = bin(ord(b))[2:] + # value += tmp.rjust(8,'0') + value = tmp.rjust(8, "0") + value + L = int(value, 2) + L2 = L + 6 + + # Done + return type, L, L2 + + +def signedint2bits(i, n=None): + """ convert signed int to a string of bits (0's and 1's in a string), + pad to n elements. Negative numbers are stored in 2's complement bit + patterns, thus positive numbers always start with a 0. + """ + + # negative number? + ii = i + if i < 0: + # A negative number, -n, is represented as the bitwise opposite of + ii = abs(ii) - 1 # the positive-zero number n-1. + + # make bits + bb = BitArray() + while ii > 0: + bb += str(ii % 2) + ii = ii >> 1 + bb.reverse() + + # justify + bb = "0" + str(bb) # always need the sign bit in front + if n is not None: + if len(bb) > n: # pragma: no cover + raise ValueError("signedint2bits fail: len larger than padlength.") + bb = bb.rjust(n, "0") + + # was it negative? 
(then opposite bits) + if i < 0: + bb = bb.replace("0", "x").replace("1", "0").replace("x", "1") + + # done + return BitArray(bb) + + +def twits2bits(arr): + """ Given a few (signed) numbers, store them + as compactly as possible in the wat specifief by the swf format. + The numbers are multiplied by 20, assuming they + are twits. + Can be used to make the RECT record. + """ + + # first determine length using non justified bit strings + maxlen = 1 + for i in arr: + tmp = len(signedint2bits(i * 20)) + if tmp > maxlen: + maxlen = tmp + + # build array + bits = int2bits(maxlen, 5) + for i in arr: + bits += signedint2bits(i * 20, maxlen) + + return bits + + +def floats2bits(arr): + """ Given a few (signed) numbers, convert them to bits, + stored as FB (float bit values). We always use 16.16. + Negative numbers are not (yet) possible, because I don't + know how the're implemented (ambiguity). + """ + bits = int2bits(31, 5) # 32 does not fit in 5 bits! + for i in arr: + if i < 0: # pragma: no cover + raise ValueError("Dit not implement negative floats!") + i1 = int(i) + i2 = i - i1 + bits += int2bits(i1, 15) + bits += int2bits(i2 * 2 ** 16, 16) + return bits + + +## Base Tag + + +class Tag: + def __init__(self): + self.bytes = bytes() + self.tagtype = -1 + + def process_tag(self): + """ Implement this to create the tag. """ + raise NotImplementedError() + + def get_tag(self): + """ Calls processTag and attaches the header. """ + self.process_tag() + + # tag to binary + bits = int2bits(self.tagtype, 10) + + # complete header uint16 thing + bits += "1" * 6 # = 63 = 0x3f + # make uint16 + bb = int2uint16(int(str(bits), 2)) + + # now add 32bit length descriptor + bb += int2uint32(len(self.bytes)) + + # done, attach and return + bb += self.bytes + return bb + + def make_rect_record(self, xmin, xmax, ymin, ymax): + """ Simply uses makeCompactArray to produce + a RECT Record. 
""" + return twits2bits([xmin, xmax, ymin, ymax]) + + def make_matrix_record(self, scale_xy=None, rot_xy=None, trans_xy=None): + + # empty matrix? + if scale_xy is None and rot_xy is None and trans_xy is None: + return "0" * 8 + + # init + bits = BitArray() + + # scale + if scale_xy: + bits += "1" + bits += floats2bits([scale_xy[0], scale_xy[1]]) + else: + bits += "0" + + # rotation + if rot_xy: + bits += "1" + bits += floats2bits([rot_xy[0], rot_xy[1]]) + else: + bits += "0" + + # translation (no flag here) + if trans_xy: + bits += twits2bits([trans_xy[0], trans_xy[1]]) + else: + bits += twits2bits([0, 0]) + + # done + return bits + + +## Control tags + + +class ControlTag(Tag): + def __init__(self): + Tag.__init__(self) + + +class FileAttributesTag(ControlTag): + def __init__(self): + ControlTag.__init__(self) + self.tagtype = 69 + + def process_tag(self): + self.bytes = "\x00".encode("ascii") * (1 + 3) + + +class ShowFrameTag(ControlTag): + def __init__(self): + ControlTag.__init__(self) + self.tagtype = 1 + + def process_tag(self): + self.bytes = bytes() + + +class SetBackgroundTag(ControlTag): + """ Set the color in 0-255, or 0-1 (if floats given). 
""" + + def __init__(self, *rgb): + self.tagtype = 9 + if len(rgb) == 1: + rgb = rgb[0] + self.rgb = rgb + + def process_tag(self): + bb = bytes() + for i in range(3): + clr = self.rgb[i] + if isinstance(clr, float): # pragma: no cover - not used + clr = clr * 255 + bb += int2uint8(clr) + self.bytes = bb + + +class DoActionTag(Tag): + def __init__(self, action="stop"): + Tag.__init__(self) + self.tagtype = 12 + self.actions = [action] + + def append(self, action): # pragma: no cover - not used + self.actions.append(action) + + def process_tag(self): + bb = bytes() + + for action in self.actions: + action = action.lower() + if action == "stop": + bb += "\x07".encode("ascii") + elif action == "play": # pragma: no cover - not used + bb += "\x06".encode("ascii") + else: # pragma: no cover + logger.warning("unkown action: %s" % action) + + bb += int2uint8(0) + self.bytes = bb + + +## Definition tags +class DefinitionTag(Tag): + counter = 0 # to give automatically id's + + def __init__(self): + Tag.__init__(self) + DefinitionTag.counter += 1 + self.id = DefinitionTag.counter # id in dictionary + + +class BitmapTag(DefinitionTag): + def __init__(self, im): + DefinitionTag.__init__(self) + self.tagtype = 36 # DefineBitsLossless2 + + # convert image (note that format is ARGB) + # even a grayscale image is stored in ARGB, nevertheless, + # the fabilous deflate compression will make it that not much + # more data is required for storing (25% or so, and less than 10% + # when storing RGB as ARGB). 
+ + if len(im.shape) == 3: + if im.shape[2] in [3, 4]: + tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255 + for i in range(3): + tmp[:, :, i + 1] = im[:, :, i] + if im.shape[2] == 4: + tmp[:, :, 0] = im[:, :, 3] # swap channel where alpha is + else: # pragma: no cover + raise ValueError("Invalid shape to be an image.") + + elif len(im.shape) == 2: + tmp = np.ones((im.shape[0], im.shape[1], 4), dtype=np.uint8) * 255 + for i in range(3): + tmp[:, :, i + 1] = im[:, :] + else: # pragma: no cover + raise ValueError("Invalid shape to be an image.") + + # we changed the image to uint8 4 channels. + # now compress! + self._data = zlib.compress(tmp.tostring(), zlib.DEFLATED) + self.imshape = im.shape + + def process_tag(self): + + # build tag + bb = bytes() + bb += int2uint16(self.id) # CharacterID + bb += int2uint8(5) # BitmapFormat + bb += int2uint16(self.imshape[1]) # BitmapWidth + bb += int2uint16(self.imshape[0]) # BitmapHeight + bb += self._data # ZlibBitmapData + + self.bytes = bb + + +class PlaceObjectTag(ControlTag): + def __init__(self, depth, idToPlace=None, xy=(0, 0), move=False): + ControlTag.__init__(self) + self.tagtype = 26 + self.depth = depth + self.idToPlace = idToPlace + self.xy = xy + self.move = move + + def process_tag(self): + # retrieve stuff + depth = self.depth + xy = self.xy + id = self.idToPlace + + # build PlaceObject2 + bb = bytes() + if self.move: + bb += "\x07".encode("ascii") + else: + # (8 bit flags): 4:matrix, 2:character, 1:move + bb += "\x06".encode("ascii") + bb += int2uint16(depth) # Depth + bb += int2uint16(id) # character id + bb += self.make_matrix_record(trans_xy=xy).tobytes() # MATRIX record + self.bytes = bb + + +class ShapeTag(DefinitionTag): + def __init__(self, bitmapId, xy, wh): + DefinitionTag.__init__(self) + self.tagtype = 2 + self.bitmapId = bitmapId + self.xy = xy + self.wh = wh + + def process_tag(self): + """ Returns a defineshape tag. 
with a bitmap fill """ + + bb = bytes() + bb += int2uint16(self.id) + xy, wh = self.xy, self.wh + tmp = self.make_rect_record(xy[0], wh[0], xy[1], wh[1]) # ShapeBounds + bb += tmp.tobytes() + + # make SHAPEWITHSTYLE structure + + # first entry: FILLSTYLEARRAY with in it a single fill style + bb += int2uint8(1) # FillStyleCount + bb += "\x41".encode("ascii") # FillStyleType (0x41 or 0x43 unsmoothed) + bb += int2uint16(self.bitmapId) # BitmapId + # bb += '\x00' # BitmapMatrix (empty matrix with leftover bits filled) + bb += self.make_matrix_record(scale_xy=(20, 20)).tobytes() + + # # first entry: FILLSTYLEARRAY with in it a single fill style + # bb += int2uint8(1) # FillStyleCount + # bb += '\x00' # solid fill + # bb += '\x00\x00\xff' # color + + # second entry: LINESTYLEARRAY with a single line style + bb += int2uint8(0) # LineStyleCount + # bb += int2uint16(0*20) # Width + # bb += '\x00\xff\x00' # Color + + # third and fourth entry: NumFillBits and NumLineBits (4 bits each) + # I each give them four bits, so 16 styles possible. + bb += "\x44".encode("ascii") + + self.bytes = bb + + # last entries: SHAPERECORDs ... (individual shape records not aligned) + # STYLECHANGERECORD + bits = BitArray() + bits += self.make_style_change_record(0, 1, moveTo=(self.wh[0], self.wh[1])) + # STRAIGHTEDGERECORD 4x + bits += self.make_straight_edge_record(-self.wh[0], 0) + bits += self.make_straight_edge_record(0, -self.wh[1]) + bits += self.make_straight_edge_record(self.wh[0], 0) + bits += self.make_straight_edge_record(0, self.wh[1]) + + # ENDSHAPRECORD + bits += self.make_end_shape_record() + + self.bytes += bits.tobytes() + + # done + # self.bytes = bb + + def make_style_change_record(self, lineStyle=None, fillStyle=None, moveTo=None): + + # first 6 flags + # Note that we use FillStyle1. If we don't flash (at least 8) does not + # recognize the frames properly when importing to library. 
+ + bits = BitArray() + bits += "0" # TypeFlag (not an edge record) + bits += "0" # StateNewStyles (only for DefineShape2 and Defineshape3) + if lineStyle: + bits += "1" # StateLineStyle + else: + bits += "0" + if fillStyle: + bits += "1" # StateFillStyle1 + else: + bits += "0" + bits += "0" # StateFillStyle0 + if moveTo: + bits += "1" # StateMoveTo + else: + bits += "0" + + # give information + # todo: nbits for fillStyle and lineStyle is hard coded. + + if moveTo: + bits += twits2bits([moveTo[0], moveTo[1]]) + if fillStyle: + bits += int2bits(fillStyle, 4) + if lineStyle: + bits += int2bits(lineStyle, 4) + + return bits + + def make_straight_edge_record(self, *dxdy): + if len(dxdy) == 1: + dxdy = dxdy[0] + + # determine required number of bits + xbits = signedint2bits(dxdy[0] * 20) + ybits = signedint2bits(dxdy[1] * 20) + nbits = max([len(xbits), len(ybits)]) + + bits = BitArray() + bits += "11" # TypeFlag and StraightFlag + bits += int2bits(nbits - 2, 4) + bits += "1" # GeneralLineFlag + bits += signedint2bits(dxdy[0] * 20, nbits) + bits += signedint2bits(dxdy[1] * 20, nbits) + + # note: I do not make use of vertical/horizontal only lines... + + return bits + + def make_end_shape_record(self): + bits = BitArray() + bits += "0" # TypeFlag: no edge + bits += "0" * 5 # EndOfShape + return bits + + +def read_pixels(bb, i, tagType, L1): + """ With pf's seed after the recordheader, reads the pixeldata. 
+ """ + + # Get info + charId = bb[i : i + 2] # noqa + i += 2 + format = ord(bb[i : i + 1]) + i += 1 + width = bits2int(bb[i : i + 2], 16) + i += 2 + height = bits2int(bb[i : i + 2], 16) + i += 2 + + # If we can, get pixeldata and make numpy array + if format != 5: + logger.warning("Can only read 24bit or 32bit RGB(A) lossless images.") + else: + # Read byte data + offset = 2 + 1 + 2 + 2 # all the info bits + bb2 = bb[i : i + (L1 - offset)] + + # Decompress and make numpy array + data = zlib.decompress(bb2) + a = np.frombuffer(data, dtype=np.uint8) + + # Set shape + if tagType == 20: + # DefineBitsLossless - RGB data + try: + a.shape = height, width, 3 + except Exception: + # Byte align stuff might cause troubles + logger.warning("Cannot read image due to byte alignment") + if tagType == 36: + # DefineBitsLossless2 - ARGB data + a.shape = height, width, 4 + # Swap alpha channel to make RGBA + b = a + a = np.zeros_like(a) + a[:, :, 0] = b[:, :, 1] + a[:, :, 1] = b[:, :, 2] + a[:, :, 2] = b[:, :, 3] + a[:, :, 3] = b[:, :, 0] + + return a + + +## Last few functions + + +# These are the original public functions, we don't use them, but we +# keep it so that in principle this module can be used stand-alone. + + +def checkImages(images): # pragma: no cover + """ checkImages(images) + Check numpy images and correct intensity range etc. + The same for all movie formats. 
+ """ + # Init results + images2 = [] + + for im in images: + if isinstance(im, np.ndarray): + # Check and convert dtype + if im.dtype == np.uint8: + images2.append(im) # Ok + elif im.dtype in [np.float32, np.float64]: + theMax = im.max() + if 128 < theMax < 300: + pass # assume 0:255 + else: + im = im.copy() + im[im < 0] = 0 + im[im > 1] = 1 + im *= 255 + images2.append(im.astype(np.uint8)) + else: + im = im.astype(np.uint8) + images2.append(im) + # Check size + if im.ndim == 2: + pass # ok + elif im.ndim == 3: + if im.shape[2] not in [3, 4]: + raise ValueError("This array can not represent an image.") + else: + raise ValueError("This array can not represent an image.") + else: + raise ValueError("Invalid image type: " + str(type(im))) + + # Done + return images2 + + +def build_file( + fp, taglist, nframes=1, framesize=(500, 500), fps=10, version=8 +): # pragma: no cover + """ Give the given file (as bytes) a header. """ + + # compose header + bb = bytes() + bb += "F".encode("ascii") # uncompressed + bb += "WS".encode("ascii") # signature bytes + bb += int2uint8(version) # version + bb += "0000".encode("ascii") # FileLength (leave open for now) + bb += Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes() + bb += int2uint8(0) + int2uint8(fps) # FrameRate + bb += int2uint16(nframes) + fp.write(bb) + + # produce all tags + for tag in taglist: + fp.write(tag.get_tag()) + + # finish with end tag + fp.write("\x00\x00".encode("ascii")) + + # set size + sze = fp.tell() + fp.seek(4) + fp.write(int2uint32(sze)) + + +def write_swf(filename, images, duration=0.1, repeat=True): # pragma: no cover + """Write an swf-file from the specified images. If repeat is False, + the movie is finished with a stop action. Duration may also + be a list with durations for each frame (note that the duration + for each frame is always an integer amount of the minimum duration.) 
+ + Images should be a list consisting numpy arrays with values between + 0 and 255 for integer types, and between 0 and 1 for float types. + + """ + + # Check images + images2 = checkImages(images) + + # Init + taglist = [FileAttributesTag(), SetBackgroundTag(0, 0, 0)] + + # Check duration + if hasattr(duration, "__len__"): + if len(duration) == len(images2): + duration = [d for d in duration] + else: + raise ValueError("len(duration) doesn't match amount of images.") + else: + duration = [duration for im in images2] + + # Build delays list + minDuration = float(min(duration)) + delays = [round(d / minDuration) for d in duration] + delays = [max(1, int(d)) for d in delays] + + # Get FPS + fps = 1.0 / minDuration + + # Produce series of tags for each image + # t0 = time.time() + nframes = 0 + for im in images2: + bm = BitmapTag(im) + wh = (im.shape[1], im.shape[0]) + sh = ShapeTag(bm.id, (0, 0), wh) + po = PlaceObjectTag(1, sh.id, move=nframes > 0) + taglist.extend([bm, sh, po]) + for i in range(delays[nframes]): + taglist.append(ShowFrameTag()) + nframes += 1 + + if not repeat: + taglist.append(DoActionTag("stop")) + + # Build file + # t1 = time.time() + fp = open(filename, "wb") + try: + build_file(fp, taglist, nframes=nframes, framesize=wh, fps=fps) + except Exception: + raise + finally: + fp.close() + # t2 = time.time() + + # logger.warning("Writing SWF took %1.2f and %1.2f seconds" % (t1-t0, t2-t1) ) + + +def read_swf(filename): # pragma: no cover + """Read all images from an SWF (shockwave flash) file. Returns a list + of numpy arrays. + + Limitation: only read the PNG encoded images (not the JPG encoded ones). 
+ """ + + # Check whether it exists + if not os.path.isfile(filename): + raise IOError("File not found: " + str(filename)) + + # Init images + images = [] + + # Open file and read all + fp = open(filename, "rb") + bb = fp.read() + + try: + # Check opening tag + tmp = bb[0:3].decode("ascii", "ignore") + if tmp.upper() == "FWS": + pass # ok + elif tmp.upper() == "CWS": + # Decompress movie + bb = bb[:8] + zlib.decompress(bb[8:]) + else: + raise IOError("Not a valid SWF file: " + str(filename)) + + # Set filepointer at first tag (skipping framesize RECT and two uin16's + i = 8 + nbits = bits2int(bb[i : i + 1], 5) # skip FrameSize + nbits = 5 + nbits * 4 + Lrect = nbits / 8.0 + if Lrect % 1: + Lrect += 1 + Lrect = int(Lrect) + i += Lrect + 4 + + # Iterate over the tags + counter = 0 + while True: + counter += 1 + + # Get tag header + head = bb[i : i + 6] + if not head: + break # Done (we missed end tag) + + # Determine type and length + T, L1, L2 = get_type_and_len(head) + if not L2: + logger.warning("Invalid tag length, could not proceed") + break + # logger.warning(T, L2) + + # Read image if we can + if T in [20, 36]: + im = read_pixels(bb, i + 6, T, L1) + if im is not None: + images.append(im) + elif T in [6, 21, 35, 90]: + logger.warning("Ignoring JPEG image: cannot read JPEG.") + else: + pass # Not an image tag + + # Detect end tag + if T == 0: + break + + # Next tag! + i += L2 + + finally: + fp.close() + + # Done + return images + + +# Backward compatibility; same public names as when this was images2swf. +writeSwf = write_swf +readSwf = read_swf diff --git a/venv/Lib/site-packages/imageio/plugins/_tifffile.py b/venv/Lib/site-packages/imageio/plugins/_tifffile.py new file mode 100644 index 000000000..027bcfbaf --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/_tifffile.py @@ -0,0 +1,10182 @@ + + +#! 
/usr/bin/env python3 +# -*- coding: utf-8 -*- +# tifffile.py + +# Copyright (c) 2008-2018, Christoph Gohlke +# Copyright (c) 2008-2018, The Regents of the University of California +# Produced at the Laboratory for Fluorescence Dynamics +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the copyright holders nor the names of any +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +"""Read image and meta data from (bio) TIFF(R) files. Save numpy arrays as TIFF. + +Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH, +SGI, ImageJ, MicroManager, FluoView, ScanImage, SEQ, GEL, and GeoTIFF files. 
+ +Tifffile is not a general-purpose TIFF library. +Only a subset of the TIFF specification is supported, mainly uncompressed and +losslessly compressed 1, 8, 16, 32 and 64 bit integer, 16, 32 and 64-bit float, +grayscale and RGB(A) images, which are commonly used in scientific imaging. +Specifically, reading slices of image data, image trees defined via SubIFDs, +CCITT and OJPEG compression, chroma subsampling without JPEG compression, +or IPTC and XMP metadata are not implemented. + +TIFF(R), the tagged Image File Format, is a trademark and under control of +Adobe Systems Incorporated. BigTIFF allows for files greater than 4 GB. +STK, LSM, FluoView, SGI, SEQ, GEL, and OME-TIFF, are custom extensions +defined by Molecular Devices (Universal Imaging Corporation), Carl Zeiss +MicroImaging, Olympus, Silicon Graphics International, Media Cybernetics, +Molecular Dynamics, and the Open Microscopy Environment consortium +respectively. + +For command line usage run C{python -m tifffile --help} + +:Author: + `Christoph Gohlke `_ + +:Organization: + Laboratory for Fluorescence Dynamics, University of California, Irvine + +:Version: 2018.06.15 + +Requirements +------------ +* `CPython 3.6 64-bit `_ +* `Numpy 1.14 `_ +* `Matplotlib 2.2 `_ (optional for plotting) +* `Tifffile.c 2018.02.10 `_ + (recommended for faster decoding of PackBits and LZW encoded strings) +* `Tifffile_geodb.py 2018.02.10 `_ + (optional enums for GeoTIFF metadata) +* Python 2 requires 'futures', 'enum34', 'pathlib'. + +Revisions +--------- +2018.06.15 + Pass 2680 tests. + Towards reading JPEG and other compressions via imagecodecs package (WIP). + Add function to validate TIFF using 'jhove -m TIFF-hul'. + Save bool arrays as bilevel TIFF. + Accept pathlib.Path as filenames. + Move 'software' argument from TiffWriter __init__ to save. + Raise DOS limit to 16 TB. + Lazy load lzma and zstd compressors and decompressors. + Add option to save IJMetadata tags. 
+ Return correct number of pages for truncated series (bug fix). + Move EXIF tags to TIFF.TAG as per TIFF/EP standard. +2018.02.18 + Pass 2293 tests. + Always save RowsPerStrip and Resolution tags as required by TIFF standard. + Do not use badly typed ImageDescription. + Coherce bad ASCII string tags to bytes. + Tuning of __str__ functions. + Fix reading 'undefined' tag values (bug fix). + Read and write ZSTD compressed data. + Use hexdump to print byte strings. + Determine TIFF byte order from data dtype in imsave. + Add option to specify RowsPerStrip for compressed strips. + Allow memory map of arrays with non-native byte order. + Attempt to handle ScanImage <= 5.1 files. + Restore TiffPageSeries.pages sequence interface. + Use numpy.frombuffer instead of fromstring to read from binary data. + Parse GeoTIFF metadata. + Add option to apply horizontal differencing before compression. + Towards reading PerkinElmer QPTIFF (no test files). + Do not index out of bounds data in tifffile.c unpackbits and decodelzw. +2017.09.29 (tentative) + Many backwards incompatible changes improving speed and resource usage: + Pass 2268 tests. + Add detail argument to __str__ function. Remove info functions. + Fix potential issue correcting offsets of large LSM files with positions. + Remove TiffFile sequence interface; use TiffFile.pages instead. + Do not make tag values available as TiffPage attributes. + Use str (not bytes) type for tag and metadata strings (WIP). + Use documented standard tag and value names (WIP). + Use enums for some documented TIFF tag values. + Remove 'memmap' and 'tmpfile' options; use out='memmap' instead. + Add option to specify output in asarray functions. + Add option to concurrently decode image strips or tiles using threads. + Add TiffPage.asrgb function (WIP). + Do not apply colormap in asarray. + Remove 'colormapped', 'rgbonly', and 'scale_mdgel' options from asarray. + Consolidate metadata in TiffFile _metadata functions. 
+ Remove non-tag metadata properties from TiffPage. + Add function to convert LSM to tiled BIN files. + Align image data in file. + Make TiffPage.dtype a numpy.dtype. + Add 'ndim' and 'size' properties to TiffPage and TiffPageSeries. + Allow imsave to write non-BigTIFF files up to ~4 GB. + Only read one page for shaped series if possible. + Add memmap function to create memory-mapped array stored in TIFF file. + Add option to save empty arrays to TIFF files. + Add option to save truncated TIFF files. + Allow single tile images to be saved contiguously. + Add optional movie mode for files with uniform pages. + Lazy load pages. + Use lightweight TiffFrame for IFDs sharing properties with key TiffPage. + Move module constants to 'TIFF' namespace (speed up module import). + Remove 'fastij' option from TiffFile. + Remove 'pages' parameter from TiffFile. + Remove TIFFfile alias. + Deprecate Python 2. + Require enum34 and futures packages on Python 2.7. + Remove Record class and return all metadata as dict instead. + Add functions to parse STK, MetaSeries, ScanImage, SVS, Pilatus metadata. + Read tags from EXIF and GPS IFDs. + Use pformat for tag and metadata values. + Fix reading some UIC tags (bug fix). + Do not modify input array in imshow (bug fix). + Fix Python implementation of unpack_ints. +2017.05.23 + Pass 1961 tests. + Write correct number of SampleFormat values (bug fix). + Use Adobe deflate code to write ZIP compressed files. + Add option to pass tag values as packed binary data for writing. + Defer tag validation to attribute access. + Use property instead of lazyattr decorator for simple expressions. +2017.03.17 + Write IFDs and tag values on word boundaries. + Read ScanImage metadata. + Remove is_rgb and is_indexed attributes from TiffFile. + Create files used by doctests. +2017.01.12 + Read Zeiss SEM metadata. + Read OME-TIFF with invalid references to external files. + Rewrite C LZW decoder (5x faster). 
+ Read corrupted LSM files missing EOI code in LZW stream. +2017.01.01 + Add option to append images to existing TIFF files. + Read files without pages. + Read S-FEG and Helios NanoLab tags created by FEI software. + Allow saving Color Filter Array (CFA) images. + Add info functions returning more information about TiffFile and TiffPage. + Add option to read specific pages only. + Remove maxpages argument (backwards incompatible). + Remove test_tifffile function. +2016.10.28 + Pass 1944 tests. + Improve detection of ImageJ hyperstacks. + Read TVIPS metadata created by EM-MENU (by Marco Oster). + Add option to disable using OME-XML metadata. + Allow non-integer range attributes in modulo tags (by Stuart Berg). +2016.06.21 + Do not always memmap contiguous data in page series. +2016.05.13 + Add option to specify resolution unit. + Write grayscale images with extra samples when planarconfig is specified. + Do not write RGB color images with 2 samples. + Reorder TiffWriter.save keyword arguments (backwards incompatible). +2016.04.18 + Pass 1932 tests. + TiffWriter, imread, and imsave accept open binary file streams. +2016.04.13 + Correctly handle reversed fill order in 2 and 4 bps images (bug fix). + Implement reverse_bitorder in C. +2016.03.18 + Fix saving additional ImageJ metadata. +2016.02.22 + Pass 1920 tests. + Write 8 bytes double tag values using offset if necessary (bug fix). + Add option to disable writing second image description tag. + Detect tags with incorrect counts. + Disable color mapping for LSM. +2015.11.13 + Read LSM 6 mosaics. + Add option to specify directory of memory-mapped files. + Add command line options to specify vmin and vmax values for colormapping. +2015.10.06 + New helper function to apply colormaps. + Renamed is_palette attributes to is_indexed (backwards incompatible). + Color-mapped samples are now contiguous (backwards incompatible). + Do not color-map ImageJ hyperstacks (backwards incompatible). + Towards reading Leica SCN. 
+2015.09.25 + Read images with reversed bit order (FillOrder is LSB2MSB). +2015.09.21 + Read RGB OME-TIFF. + Warn about malformed OME-XML. +2015.09.16 + Detect some corrupted ImageJ metadata. + Better axes labels for 'shaped' files. + Do not create TiffTag for default values. + Chroma subsampling is not supported. + Memory-map data in TiffPageSeries if possible (optional). +2015.08.17 + Pass 1906 tests. + Write ImageJ hyperstacks (optional). + Read and write LZMA compressed data. + Specify datetime when saving (optional). + Save tiled and color-mapped images (optional). + Ignore void bytecounts and offsets if possible. + Ignore bogus image_depth tag created by ISS Vista software. + Decode floating point horizontal differencing (not tiled). + Save image data contiguously if possible. + Only read first IFD from ImageJ files if possible. + Read ImageJ 'raw' format (files larger than 4 GB). + TiffPageSeries class for pages with compatible shape and data type. + Try to read incomplete tiles. + Open file dialog if no filename is passed on command line. + Ignore errors when decoding OME-XML. + Rename decoder functions (backwards incompatible). +2014.08.24 + TiffWriter class for incremental writing images. + Simplify examples. +2014.08.19 + Add memmap function to FileHandle. + Add function to determine if image data in TiffPage is memory-mappable. + Do not close files if multifile_close parameter is False. +2014.08.10 + Pass 1730 tests. + Return all extrasamples by default (backwards incompatible). + Read data from series of pages into memory-mapped array (optional). + Squeeze OME dimensions (backwards incompatible). + Workaround missing EOI code in strips. + Support image and tile depth tags (SGI extension). + Better handling of STK/UIC tags (backwards incompatible). + Disable color mapping for STK. + Julian to datetime converter. + TIFF ASCII type may be NULL separated. + Unwrap strip offsets for LSM files greater than 4 GB. 
+ Correct strip byte counts in compressed LSM files. + Skip missing files in OME series. + Read embedded TIFF files. +2014.02.05 + Save rational numbers as type 5 (bug fix). +2013.12.20 + Keep other files in OME multi-file series closed. + FileHandle class to abstract binary file handle. + Disable color mapping for bad OME-TIFF produced by bio-formats. + Read bad OME-XML produced by ImageJ when cropping. +2013.11.03 + Allow zlib compress data in imsave function (optional). + Memory-map contiguous image data (optional). +2013.10.28 + Read MicroManager metadata and little-endian ImageJ tag. + Save extra tags in imsave function. + Save tags in ascending order by code (bug fix). +2012.10.18 + Accept file like objects (read from OIB files). +2012.08.21 + Rename TIFFfile to TiffFile and TIFFpage to TiffPage. + TiffSequence class for reading sequence of TIFF files. + Read UltraQuant tags. + Allow float numbers as resolution in imsave function. +2012.08.03 + Read MD GEL tags and NIH Image header. +2012.07.25 + Read ImageJ tags. + ... + +Notes +----- +The API is not stable yet and might change between revisions. + +Tested on little-endian platforms only. + +Other Python packages and modules for reading (bio) scientific TIFF files: + +* `python-bioformats `_ +* `Imread `_ +* `PyLibTiff `_ +* `ITK `_ +* `PyLSM `_ +* `PyMca.TiffIO.py `_ (same as fabio.TiffIO) +* `BioImageXD.Readers `_ +* `Cellcognition.io `_ +* `pymimage `_ +* `pytiff `_ + +Acknowledgements +---------------- +* Egor Zindy, University of Manchester, for lsm_scan_info specifics. +* Wim Lewis for a bug fix and some LSM functions. +* Hadrien Mary for help on reading MicroManager files. +* Christian Kliche for help writing tiled and color-mapped files. + +References +---------- +1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated. + http://partners.adobe.com/public/developer/tiff/ +2) TIFF File Format FAQ. 
http://www.awaresystems.be/imaging/tiff/faq.html +3) MetaMorph Stack (STK) Image File Format. + http://support.meta.moleculardevices.com/docs/t10243.pdf +4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010). + Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011 +5) The OME-TIFF format. + http://www.openmicroscopy.org/site/support/file-formats/ome-tiff +6) UltraQuant(r) Version 6.0 for Windows Start-Up Guide. + http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf +7) Micro-Manager File Formats. + http://www.micro-manager.org/wiki/Micro-Manager_File_Formats +8) Tags for TIFF and Related Specifications. Digital Preservation. + http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml +9) ScanImage BigTiff Specification - ScanImage 2016. + http://scanimage.vidriotechnologies.com/display/SI2016/ + ScanImage+BigTiff+Specification +10) CIPA DC-008-2016: Exchangeable image file format for digital still cameras: + Exif Version 2.31. + http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf + +Examples +-------- +>>> # write numpy array to TIFF file +>>> data = numpy.random.rand(4, 301, 219) +>>> imsave('temp.tif', data, photometric='minisblack') + +>>> # read numpy array from TIFF file +>>> image = imread('temp.tif') +>>> numpy.testing.assert_array_equal(image, data) + +>>> # iterate over pages and tags in TIFF file +>>> with TiffFile('temp.tif') as tif: +... images = tif.asarray() +... for page in tif.pages: +... for tag in page.tags.values(): +... _ = tag.name, tag.value +... 
image = page.asarray() + +""" + +from __future__ import division, print_function + +import sys +import os +import io +import re +import glob +import math +import zlib +import time +import json +import enum +import struct +import pathlib +import warnings +import binascii +import tempfile +import datetime +import threading +import collections +import multiprocessing +import concurrent.futures + +import numpy + +# delay imports: mmap, pprint, fractions, xml, tkinter, matplotlib, lzma, zstd, +# subprocess + +__version__ = '2018.06.15' +__docformat__ = 'restructuredtext en' +__all__ = ( + 'imsave', 'imread', 'imshow', 'memmap', + 'TiffFile', 'TiffWriter', 'TiffSequence', + # utility functions used by oiffile or czifile + 'FileHandle', 'lazyattr', 'natural_sorted', 'decode_lzw', 'stripnull', + 'create_output', 'repeat_nd', 'format_size', 'product', 'xml2dict') + + +def imread(files, **kwargs): + """Return image data from TIFF file(s) as numpy array. + + Refer to the TiffFile class and member functions for documentation. + + Parameters + ---------- + files : str, binary stream, or sequence + File name, seekable binary stream, glob pattern, or sequence of + file names. + kwargs : dict + Parameters 'multifile' and 'is_ome' are passed to the TiffFile class. + The 'pattern' parameter is passed to the TiffSequence class. + Other parameters are passed to the asarray functions. + The first image series is returned if no arguments are provided. 
+ + Examples + -------- + >>> # get image from first page + >>> imsave('temp.tif', numpy.random.rand(3, 4, 301, 219)) + >>> im = imread('temp.tif', key=0) + >>> im.shape + (4, 301, 219) + + >>> # get images from sequence of files + >>> ims = imread(['temp.tif', 'temp.tif']) + >>> ims.shape + (2, 3, 4, 301, 219) + + """ + kwargs_file = parse_kwargs(kwargs, 'multifile', 'is_ome') + kwargs_seq = parse_kwargs(kwargs, 'pattern') + + if isinstance(files, basestring) and any(i in files for i in '?*'): + files = glob.glob(files) + if not files: + raise ValueError('no files found') + if not hasattr(files, 'seek') and len(files) == 1: + files = files[0] + + if isinstance(files, basestring) or hasattr(files, 'seek'): + with TiffFile(files, **kwargs_file) as tif: + return tif.asarray(**kwargs) + else: + with TiffSequence(files, **kwargs_seq) as imseq: + return imseq.asarray(**kwargs) + + +def imsave(file, data=None, shape=None, dtype=None, bigsize=2**32-2**25, + **kwargs): + """Write numpy array to TIFF file. + + Refer to the TiffWriter class and member functions for documentation. + + Parameters + ---------- + file : str or binary stream + File name or writable binary stream, such as an open file or BytesIO. + data : array_like + Input image. The last dimensions are assumed to be image depth, + height, width, and samples. + If None, an empty array of the specified shape and dtype is + saved to file. + Unless 'byteorder' is specified in 'kwargs', the TIFF file byte order + is determined from the data's dtype or the dtype argument. + shape : tuple + If 'data' is None, shape of an empty array to save to the file. + dtype : numpy.dtype + If 'data' is None, data-type of an empty array to save to the file. + bigsize : int + Create a BigTIFF file if the size of data in bytes is larger than + this threshold and 'imagej' or 'truncate' are not enabled. + By default, the threshold is 4 GB minus 32 MB reserved for metadata. 
+ Use the 'bigtiff' parameter to explicitly specify the type of + file created. + kwargs : dict + Parameters 'append', 'byteorder', 'bigtiff', and 'imagej', are passed + to TiffWriter(). Other parameters are passed to TiffWriter.save(). + + Returns + ------- + If the image data are written contiguously, return offset and bytecount + of image data in the file. + + Examples + -------- + >>> # save a RGB image + >>> data = numpy.random.randint(0, 255, (256, 256, 3), 'uint8') + >>> imsave('temp.tif', data, photometric='rgb') + + >>> # save a random array and metadata, using compression + >>> data = numpy.random.rand(2, 5, 3, 301, 219) + >>> imsave('temp.tif', data, compress=6, metadata={'axes': 'TZCYX'}) + + """ + tifargs = parse_kwargs(kwargs, 'append', 'bigtiff', 'byteorder', 'imagej') + if data is None: + size = product(shape) * numpy.dtype(dtype).itemsize + byteorder = numpy.dtype(dtype).byteorder + else: + try: + size = data.nbytes + byteorder = data.dtype.byteorder + except Exception: + size = 0 + byteorder = None + if size > bigsize and 'bigtiff' not in tifargs and not ( + tifargs.get('imagej', False) or tifargs.get('truncate', False)): + tifargs['bigtiff'] = True + if 'byteorder' not in tifargs: + tifargs['byteorder'] = byteorder + + with TiffWriter(file, **tifargs) as tif: + return tif.save(data, shape, dtype, **kwargs) + + +def memmap(filename, shape=None, dtype=None, page=None, series=0, mode='r+', + **kwargs): + """Return memory-mapped numpy array stored in TIFF file. + + Memory-mapping requires data stored in native byte order, without tiling, + compression, predictors, etc. + If 'shape' and 'dtype' are provided, existing files will be overwritten or + appended to depending on the 'append' parameter. + Otherwise the image data of a specified page or series in an existing + file will be memory-mapped. By default, the image data of the first page + series is memory-mapped. + Call flush() to write any changes in the array to the file. 
+ Raise ValueError if the image data in the file is not memory-mappable. + + Parameters + ---------- + filename : str + Name of the TIFF file which stores the array. + shape : tuple + Shape of the empty array. + dtype : numpy.dtype + Data-type of the empty array. + page : int + Index of the page which image data to memory-map. + series : int + Index of the page series which image data to memory-map. + mode : {'r+', 'r', 'c'}, optional + The file open mode. Default is to open existing file for reading and + writing ('r+'). + kwargs : dict + Additional parameters passed to imsave() or TiffFile(). + + Examples + -------- + >>> # create an empty TIFF file and write to memory-mapped image + >>> im = memmap('temp.tif', shape=(256, 256), dtype='float32') + >>> im[255, 255] = 1.0 + >>> im.flush() + >>> im.shape, im.dtype + ((256, 256), dtype('float32')) + >>> del im + + >>> # memory-map image data in a TIFF file + >>> im = memmap('temp.tif', page=0) + >>> im[255, 255] + 1.0 + + """ + if shape is not None and dtype is not None: + # create a new, empty array + kwargs.update(data=None, shape=shape, dtype=dtype, returnoffset=True, + align=TIFF.ALLOCATIONGRANULARITY) + result = imsave(filename, **kwargs) + if result is None: + # TODO: fail before creating file or writing data + raise ValueError('image data are not memory-mappable') + offset = result[0] + else: + # use existing file + with TiffFile(filename, **kwargs) as tif: + if page is not None: + page = tif.pages[page] + if not page.is_memmappable: + raise ValueError('image data are not memory-mappable') + offset, _ = page.is_contiguous + shape = page.shape + dtype = page.dtype + else: + series = tif.series[series] + if series.offset is None: + raise ValueError('image data are not memory-mappable') + shape = series.shape + dtype = series.dtype + offset = series.offset + dtype = tif.byteorder + dtype.char + return numpy.memmap(filename, dtype, mode, offset, shape, 'C') + + +class lazyattr(object): + """Attribute whose value 
is computed on first access.""" + # TODO: help() doesn't work + __slots__ = ('func',) + + def __init__(self, func): + self.func = func + # self.__name__ = func.__name__ + # self.__doc__ = func.__doc__ + # self.lock = threading.RLock() + + def __get__(self, instance, owner): + # with self.lock: + if instance is None: + return self + try: + value = self.func(instance) + except AttributeError as e: + raise RuntimeError(e) + if value is NotImplemented: + return getattr(super(owner, instance), self.func.__name__) + setattr(instance, self.func.__name__, value) + return value + + +class TiffWriter(object): + """Write numpy arrays to TIFF file. + + TiffWriter instances must be closed using the 'close' method, which is + automatically called when using the 'with' context manager. + + TiffWriter's main purpose is saving nD numpy array's as TIFF, + not to create any possible TIFF format. Specifically, JPEG compression, + SubIFDs, ExifIFD, or GPSIFD tags are not supported. + + Examples + -------- + >>> # successively append images to BigTIFF file + >>> data = numpy.random.rand(2, 5, 3, 301, 219) + >>> with TiffWriter('temp.tif', bigtiff=True) as tif: + ... for i in range(data.shape[0]): + ... tif.save(data[i], compress=6, photometric='minisblack') + + """ + def __init__(self, file, bigtiff=False, byteorder=None, append=False, + imagej=False): + """Open a TIFF file for writing. + + An empty TIFF file is created if the file does not exist, else the + file is overwritten with an empty TIFF file unless 'append' + is true. Use bigtiff=True when creating files larger than 4 GB. + + Parameters + ---------- + file : str, binary stream, or FileHandle + File name or writable binary stream, such as an open file + or BytesIO. + bigtiff : bool + If True, the BigTIFF format is used. + byteorder : {'<', '>', '=', '|'} + The endianness of the data in the file. + By default, this is the system's native byte order. 
+ append : bool + If True and 'file' is an existing standard TIFF file, image data + and tags are appended to the file. + Appending data may corrupt specifically formatted TIFF files + such as LSM, STK, ImageJ, NIH, or FluoView. + imagej : bool + If True, write an ImageJ hyperstack compatible file. + This format can handle data types uint8, uint16, or float32 and + data shapes up to 6 dimensions in TZCYXS order. + RGB images (S=3 or S=4) must be uint8. + ImageJ's default byte order is big-endian but this implementation + uses the system's native byte order by default. + ImageJ does not support BigTIFF format or LZMA compression. + The ImageJ file format is undocumented. + + """ + if append: + # determine if file is an existing TIFF file that can be extended + try: + with FileHandle(file, mode='rb', size=0) as fh: + pos = fh.tell() + try: + with TiffFile(fh) as tif: + if (append != 'force' and + any(getattr(tif, 'is_'+a) for a in ( + 'lsm', 'stk', 'imagej', 'nih', + 'fluoview', 'micromanager'))): + raise ValueError('file contains metadata') + byteorder = tif.byteorder + bigtiff = tif.is_bigtiff + self._ifdoffset = tif.pages.next_page_offset + except Exception as e: + raise ValueError('cannot append to file: %s' % str(e)) + finally: + fh.seek(pos) + except (IOError, FileNotFoundError): + append = False + + if byteorder in (None, '=', '|'): + byteorder = '<' if sys.byteorder == 'little' else '>' + elif byteorder not in ('<', '>'): + raise ValueError('invalid byteorder %s' % byteorder) + if imagej and bigtiff: + warnings.warn('writing incompatible BigTIFF ImageJ') + + self._byteorder = byteorder + self._imagej = bool(imagej) + self._truncate = False + self._metadata = None + self._colormap = None + + self._descriptionoffset = 0 + self._descriptionlen = 0 + self._descriptionlenoffset = 0 + self._tags = None + self._shape = None # normalized shape of data in consecutive pages + self._datashape = None # shape of data in consecutive pages + self._datadtype = None # data 
type + self._dataoffset = None # offset to data + self._databytecounts = None # byte counts per plane + self._tagoffsets = None # strip or tile offset tag code + + if bigtiff: + self._bigtiff = True + self._offsetsize = 8 + self._tagsize = 20 + self._tagnoformat = 'Q' + self._offsetformat = 'Q' + self._valueformat = '8s' + else: + self._bigtiff = False + self._offsetsize = 4 + self._tagsize = 12 + self._tagnoformat = 'H' + self._offsetformat = 'I' + self._valueformat = '4s' + + if append: + self._fh = FileHandle(file, mode='r+b', size=0) + self._fh.seek(0, 2) + else: + self._fh = FileHandle(file, mode='wb', size=0) + self._fh.write({'<': b'II', '>': b'MM'}[byteorder]) + if bigtiff: + self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0)) + else: + self._fh.write(struct.pack(byteorder+'H', 42)) + # first IFD + self._ifdoffset = self._fh.tell() + self._fh.write(struct.pack(byteorder+self._offsetformat, 0)) + + def save(self, data=None, shape=None, dtype=None, returnoffset=False, + photometric=None, planarconfig=None, tile=None, contiguous=True, + align=16, truncate=False, compress=0, rowsperstrip=None, + predictor=False, colormap=None, description=None, + datetime=None, resolution=None, software='tifffile.py', + metadata={}, ijmetadata=None, extratags=()): + """Write numpy array and tags to TIFF file. + + The data shape's last dimensions are assumed to be image depth, + height (length), width, and samples. + If a colormap is provided, the data's dtype must be uint8 or uint16 + and the data values are indices into the last dimension of the + colormap. + If 'shape' and 'dtype' are specified, an empty array is saved. + This option cannot be used with compression or multiple tiles. + Image data are written uncompressed in one strip per plane by default. + Dimensions larger than 2 to 4 (depending on photometric mode, planar + configuration, and SGI mode) are flattened and saved as separate pages. + The SampleFormat and BitsPerSample tags are derived from the data type. 
+ + Parameters + ---------- + data : numpy.ndarray or None + Input image array. + shape : tuple or None + Shape of the empty array to save. Used only if 'data' is None. + dtype : numpy.dtype or None + Data-type of the empty array to save. Used only if 'data' is None. + returnoffset : bool + If True and the image data in the file is memory-mappable, return + the offset and number of bytes of the image data in the file. + photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'} + The color space of the image data. + By default, this setting is inferred from the data shape and the + value of colormap. + For CFA images, DNG tags must be specified in 'extratags'. + planarconfig : {'CONTIG', 'SEPARATE'} + Specifies if samples are stored contiguous or in separate planes. + By default, this setting is inferred from the data shape. + If this parameter is set, extra samples are used to store grayscale + images. + 'CONTIG': last dimension contains samples. + 'SEPARATE': third last dimension contains samples. + tile : tuple of int + The shape (depth, length, width) of image tiles to write. + If None (default), image data are written in strips. + The tile length and width must be a multiple of 16. + If the tile depth is provided, the SGI ImageDepth and TileDepth + tags are used to save volume data. + Unless a single tile is used, tiles cannot be used to write + contiguous files. + Few software can read the SGI format, e.g. MeVisLab. + contiguous : bool + If True (default) and the data and parameters are compatible with + previous ones, if any, the image data are stored contiguously after + the previous one. Parameters 'photometric' and 'planarconfig' + are ignored. Parameters 'description', datetime', and 'extratags' + are written to the first page of a contiguous series only. + align : int + Byte boundary on which to align the image data in the file. + Default 16. Use mmap.ALLOCATIONGRANULARITY for memory-mapped data. + Following contiguous writes are not aligned. 
+ truncate : bool + If True, only write the first page including shape metadata if + possible (uncompressed, contiguous, not tiled). + Other TIFF readers will only be able to read part of the data. + compress : int or 'LZMA', 'ZSTD' + Values from 0 to 9 controlling the level of zlib compression. + If 0 (default), data are written uncompressed. + Compression cannot be used to write contiguous files. + If 'LZMA' or 'ZSTD', LZMA or ZSTD compression is used, which is + not available on all platforms. + rowsperstrip : int + The number of rows per strip used for compression. + Uncompressed data are written in one strip per plane. + predictor : bool + If True, apply horizontal differencing to integer type images + before compression. + colormap : numpy.ndarray + RGB color values for the corresponding data value. + Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16. + description : str + The subject of the image. Must be 7-bit ASCII. Cannot be used with + the ImageJ format. Saved with the first page only. + datetime : datetime + Date and time of image creation in '%Y:%m:%d %H:%M:%S' format. + If None (default), the current date and time is used. + Saved with the first page only. + resolution : (float, float[, str]) or ((int, int), (int, int)[, str]) + X and Y resolutions in pixels per resolution unit as float or + rational numbers. A third, optional parameter specifies the + resolution unit, which must be None (default for ImageJ), + 'INCH' (default), or 'CENTIMETER'. + software : str + Name of the software used to create the file. Must be 7-bit ASCII. + Saved with the first page only. + metadata : dict + Additional meta data to be saved along with shape information + in JSON or ImageJ formats in an ImageDescription tag. + If None, do not write a second ImageDescription tag. + Strings must be 7-bit ASCII. Saved with the first page only. + ijmetadata : dict + Additional meta data to be saved in application specific + IJMetadata and IJMetadataByteCounts tags. 
Refer to the + imagej_metadata_tags function for valid keys and values. + Saved with the first page only. + extratags : sequence of tuples + Additional tags as [(code, dtype, count, value, writeonce)]. + + code : int + The TIFF tag Id. + dtype : str + Data type of items in 'value' in Python struct format. + One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q. + count : int + Number of data values. Not used for string or byte string + values. + value : sequence + 'Count' values compatible with 'dtype'. + Byte strings must contain count values of dtype packed as + binary data. + writeonce : bool + If True, the tag is written to the first page only. + + """ + # TODO: refactor this function + fh = self._fh + byteorder = self._byteorder + + if data is None: + if compress: + raise ValueError('cannot save compressed empty file') + datashape = shape + datadtype = numpy.dtype(dtype).newbyteorder(byteorder) + datadtypechar = datadtype.char + else: + data = numpy.asarray(data, byteorder+data.dtype.char, 'C') + if data.size == 0: + raise ValueError('cannot save empty array') + datashape = data.shape + datadtype = data.dtype + datadtypechar = data.dtype.char + + returnoffset = returnoffset and datadtype.isnative + bilevel = datadtypechar == '?' 
+ if bilevel: + index = -1 if datashape[-1] > 1 else -2 + datasize = product(datashape[:index]) + if datashape[index] % 8: + datasize *= datashape[index] // 8 + 1 + else: + datasize *= datashape[index] // 8 + else: + datasize = product(datashape) * datadtype.itemsize + + # just append contiguous data if possible + self._truncate = bool(truncate) + if self._datashape: + if (not contiguous + or self._datashape[1:] != datashape + or self._datadtype != datadtype + or (compress and self._tags) + or tile + or not numpy.array_equal(colormap, self._colormap)): + # incompatible shape, dtype, compression mode, or colormap + self._write_remaining_pages() + self._write_image_description() + self._truncate = False + self._descriptionoffset = 0 + self._descriptionlenoffset = 0 + self._datashape = None + self._colormap = None + if self._imagej: + raise ValueError( + 'ImageJ does not support non-contiguous data') + else: + # consecutive mode + self._datashape = (self._datashape[0] + 1,) + datashape + if not compress: + # write contiguous data, write IFDs/tags later + offset = fh.tell() + if data is None: + fh.write_empty(datasize) + else: + fh.write_array(data) + if returnoffset: + return offset, datasize + return + + input_shape = datashape + tagnoformat = self._tagnoformat + valueformat = self._valueformat + offsetformat = self._offsetformat + offsetsize = self._offsetsize + tagsize = self._tagsize + + MINISBLACK = TIFF.PHOTOMETRIC.MINISBLACK + RGB = TIFF.PHOTOMETRIC.RGB + CFA = TIFF.PHOTOMETRIC.CFA + PALETTE = TIFF.PHOTOMETRIC.PALETTE + CONTIG = TIFF.PLANARCONFIG.CONTIG + SEPARATE = TIFF.PLANARCONFIG.SEPARATE + + # parse input + if photometric is not None: + photometric = enumarg(TIFF.PHOTOMETRIC, photometric) + if planarconfig: + planarconfig = enumarg(TIFF.PLANARCONFIG, planarconfig) + if not compress: + compress = False + compresstag = 1 + predictor = False + else: + if isinstance(compress, (tuple, list)): + compress, compresslevel = compress + elif isinstance(compress, 
int): + compress, compresslevel = 'ADOBE_DEFLATE', int(compress) + if not 0 <= compresslevel <= 9: + raise ValueError('invalid compression level %s' % compress) + else: + compresslevel = None + compress = compress.upper() + compresstag = enumarg(TIFF.COMPRESSION, compress) + + # prepare ImageJ format + if self._imagej: + if compress in ('LZMA', 'ZSTD'): + raise ValueError( + 'ImageJ cannot handle LZMA or ZSTD compression') + if description: + warnings.warn('not writing description to ImageJ file') + description = None + volume = False + if datadtypechar not in 'BHhf': + raise ValueError( + 'ImageJ does not support data type %s' % datadtypechar) + ijrgb = photometric == RGB if photometric else None + if datadtypechar not in 'B': + ijrgb = False + ijshape = imagej_shape(datashape, ijrgb) + if ijshape[-1] in (3, 4): + photometric = RGB + if datadtypechar not in 'B': + raise ValueError('ImageJ does not support data type %s ' + 'for RGB' % datadtypechar) + elif photometric is None: + photometric = MINISBLACK + planarconfig = None + if planarconfig == SEPARATE: + raise ValueError('ImageJ does not support planar images') + else: + planarconfig = CONTIG if ijrgb else None + + # define compress function + if compress: + if compresslevel is None: + compressor, compresslevel = TIFF.COMPESSORS[compresstag] + else: + compressor, _ = TIFF.COMPESSORS[compresstag] + compresslevel = int(compresslevel) + if predictor: + if datadtype.kind not in 'iu': + raise ValueError( + 'prediction not implemented for %s' % datadtype) + + def compress(data, level=compresslevel): + # horizontal differencing + diff = numpy.diff(data, axis=-2) + data = numpy.insert(diff, 0, data[..., 0, :], axis=-2) + return compressor(data, level) + else: + def compress(data, level=compresslevel): + return compressor(data, level) + + # verify colormap and indices + if colormap is not None: + if datadtypechar not in 'BH': + raise ValueError('invalid data dtype for palette mode') + colormap = numpy.asarray(colormap, 
dtype=byteorder+'H') + if colormap.shape != (3, 2**(datadtype.itemsize * 8)): + raise ValueError('invalid color map shape') + self._colormap = colormap + + # verify tile shape + if tile: + tile = tuple(int(i) for i in tile[:3]) + volume = len(tile) == 3 + if (len(tile) < 2 or tile[-1] % 16 or tile[-2] % 16 or + any(i < 1 for i in tile)): + raise ValueError('invalid tile shape') + else: + tile = () + volume = False + + # normalize data shape to 5D or 6D, depending on volume: + # (pages, planar_samples, [depth,] height, width, contig_samples) + datashape = reshape_nd(datashape, 3 if photometric == RGB else 2) + shape = datashape + ndim = len(datashape) + + samplesperpixel = 1 + extrasamples = 0 + if volume and ndim < 3: + volume = False + if colormap is not None: + photometric = PALETTE + planarconfig = None + if photometric is None: + photometric = MINISBLACK + if bilevel: + photometric = TIFF.PHOTOMETRIC.MINISWHITE + elif planarconfig == CONTIG: + if ndim > 2 and shape[-1] in (3, 4): + photometric = RGB + elif planarconfig == SEPARATE: + if volume and ndim > 3 and shape[-4] in (3, 4): + photometric = RGB + elif ndim > 2 and shape[-3] in (3, 4): + photometric = RGB + elif ndim > 2 and shape[-1] in (3, 4): + photometric = RGB + elif self._imagej: + photometric = MINISBLACK + elif volume and ndim > 3 and shape[-4] in (3, 4): + photometric = RGB + elif ndim > 2 and shape[-3] in (3, 4): + photometric = RGB + if planarconfig and len(shape) <= (3 if volume else 2): + planarconfig = None + photometric = MINISBLACK + if photometric == RGB: + if len(shape) < 3: + raise ValueError('not a RGB(A) image') + if len(shape) < 4: + volume = False + if planarconfig is None: + if shape[-1] in (3, 4): + planarconfig = CONTIG + elif shape[-4 if volume else -3] in (3, 4): + planarconfig = SEPARATE + elif shape[-1] > shape[-4 if volume else -3]: + planarconfig = SEPARATE + else: + planarconfig = CONTIG + if planarconfig == CONTIG: + datashape = (-1, 1) + shape[(-4 if volume else -3):] + 
samplesperpixel = datashape[-1] + else: + datashape = (-1,) + shape[(-4 if volume else -3):] + (1,) + samplesperpixel = datashape[1] + if samplesperpixel > 3: + extrasamples = samplesperpixel - 3 + elif photometric == CFA: + if len(shape) != 2: + raise ValueError('invalid CFA image') + volume = False + planarconfig = None + datashape = (-1, 1) + shape[-2:] + (1,) + if 50706 not in (et[0] for et in extratags): + raise ValueError('must specify DNG tags for CFA image') + elif planarconfig and len(shape) > (3 if volume else 2): + if planarconfig == CONTIG: + datashape = (-1, 1) + shape[(-4 if volume else -3):] + samplesperpixel = datashape[-1] + else: + datashape = (-1,) + shape[(-4 if volume else -3):] + (1,) + samplesperpixel = datashape[1] + extrasamples = samplesperpixel - 1 + else: + planarconfig = None + # remove trailing 1s + while len(shape) > 2 and shape[-1] == 1: + shape = shape[:-1] + if len(shape) < 3: + volume = False + datashape = (-1, 1) + shape[(-3 if volume else -2):] + (1,) + + # normalize shape to 6D + assert len(datashape) in (5, 6) + if len(datashape) == 5: + datashape = datashape[:2] + (1,) + datashape[2:] + if datashape[0] == -1: + s0 = product(input_shape) // product(datashape[1:]) + datashape = (s0,) + datashape[1:] + shape = datashape + if data is not None: + data = data.reshape(shape) + + if tile and not volume: + tile = (1, tile[-2], tile[-1]) + + if photometric == PALETTE: + if (samplesperpixel != 1 or extrasamples or + shape[1] != 1 or shape[-1] != 1): + raise ValueError('invalid data shape for palette mode') + + if photometric == RGB and samplesperpixel == 2: + raise ValueError('not a RGB image (samplesperpixel=2)') + + if bilevel: + if compress: + raise ValueError('cannot save compressed bilevel image') + if tile: + raise ValueError('cannot save tiled bilevel image') + if photometric not in (0, 1): + raise ValueError('cannot save bilevel image as %s' % + str(photometric)) + datashape = list(datashape) + if datashape[-2] % 8: + 
datashape[-2] = datashape[-2] // 8 + 1 + else: + datashape[-2] = datashape[-2] // 8 + datashape = tuple(datashape) + assert datasize == product(datashape) + if data is not None: + data = numpy.packbits(data, axis=-2) + assert datashape[-2] == data.shape[-2] + + bytestr = bytes if sys.version[0] == '2' else ( + lambda x: bytes(x, 'ascii') if isinstance(x, str) else x) + tags = [] # list of (code, ifdentry, ifdvalue, writeonce) + + strip_or_tile = 'Tile' if tile else 'Strip' + tagbytecounts = TIFF.TAG_NAMES[strip_or_tile + 'ByteCounts'] + tag_offsets = TIFF.TAG_NAMES[strip_or_tile + 'Offsets'] + self._tagoffsets = tag_offsets + + def pack(fmt, *val): + return struct.pack(byteorder+fmt, *val) + + def addtag(code, dtype, count, value, writeonce=False): + # Compute ifdentry & ifdvalue bytes from code, dtype, count, value + # Append (code, ifdentry, ifdvalue, writeonce) to tags list + code = int(TIFF.TAG_NAMES.get(code, code)) + try: + tifftype = TIFF.DATA_DTYPES[dtype] + except KeyError: + raise ValueError('unknown dtype %s' % dtype) + rawcount = count + + if dtype == 's': + # strings + value = bytestr(value) + b'\0' + count = rawcount = len(value) + rawcount = value.find(b'\0\0') + if rawcount < 0: + rawcount = count + else: + rawcount += 1 # length of string without buffer + value = (value,) + elif isinstance(value, bytes): + # packed binary data + dtsize = struct.calcsize(dtype) + if len(value) % dtsize: + raise ValueError('invalid packed binary data') + count = len(value) // dtsize + if len(dtype) > 1: + count *= int(dtype[:-1]) + dtype = dtype[-1] + ifdentry = [pack('HH', code, tifftype), + pack(offsetformat, rawcount)] + ifdvalue = None + if struct.calcsize(dtype) * count <= offsetsize: + # value(s) can be written directly + if isinstance(value, bytes): + ifdentry.append(pack(valueformat, value)) + elif count == 1: + if isinstance(value, (tuple, list, numpy.ndarray)): + value = value[0] + ifdentry.append(pack(valueformat, pack(dtype, value))) + else: + 
ifdentry.append(pack(valueformat, + pack(str(count)+dtype, *value))) + else: + # use offset to value(s) + ifdentry.append(pack(offsetformat, 0)) + if isinstance(value, bytes): + ifdvalue = value + elif isinstance(value, numpy.ndarray): + assert value.size == count + assert value.dtype.char == dtype + ifdvalue = value.tostring() + elif isinstance(value, (tuple, list)): + ifdvalue = pack(str(count)+dtype, *value) + else: + ifdvalue = pack(dtype, value) + tags.append((code, b''.join(ifdentry), ifdvalue, writeonce)) + + def rational(arg, max_denominator=1000000): + """"Return nominator and denominator from float or two integers.""" + from fractions import Fraction # delayed import + try: + f = Fraction.from_float(arg) + except TypeError: + f = Fraction(arg[0], arg[1]) + f = f.limit_denominator(max_denominator) + return f.numerator, f.denominator + + if description: + # user provided description + addtag('ImageDescription', 's', 0, description, writeonce=True) + + # write shape and metadata to ImageDescription + self._metadata = {} if not metadata else metadata.copy() + if self._imagej: + description = imagej_description( + input_shape, shape[-1] in (3, 4), self._colormap is not None, + **self._metadata) + elif metadata or metadata == {}: + if self._truncate: + self._metadata.update(truncated=True) + description = json_description(input_shape, **self._metadata) + else: + description = None + if description: + # add 64 bytes buffer + # the image description might be updated later with the final shape + description = str2bytes(description, 'ascii') + description += b'\0'*64 + self._descriptionlen = len(description) + addtag('ImageDescription', 's', 0, description, writeonce=True) + + if software: + addtag('Software', 's', 0, software, writeonce=True) + if datetime is None: + datetime = self._now() + addtag('DateTime', 's', 0, datetime.strftime('%Y:%m:%d %H:%M:%S'), + writeonce=True) + addtag('Compression', 'H', 1, compresstag) + if predictor: + addtag('Predictor', 'H', 1, 
2) + addtag('ImageWidth', 'I', 1, shape[-2]) + addtag('ImageLength', 'I', 1, shape[-3]) + if tile: + addtag('TileWidth', 'I', 1, tile[-1]) + addtag('TileLength', 'I', 1, tile[-2]) + if tile[0] > 1: + addtag('ImageDepth', 'I', 1, shape[-4]) + addtag('TileDepth', 'I', 1, tile[0]) + addtag('NewSubfileType', 'I', 1, 0) + if not bilevel: + sampleformat = {'u': 1, 'i': 2, 'f': 3, 'c': 6}[datadtype.kind] + addtag('SampleFormat', 'H', samplesperpixel, + (sampleformat,) * samplesperpixel) + addtag('PhotometricInterpretation', 'H', 1, photometric.value) + if colormap is not None: + addtag('ColorMap', 'H', colormap.size, colormap) + addtag('SamplesPerPixel', 'H', 1, samplesperpixel) + if bilevel: + pass + elif planarconfig and samplesperpixel > 1: + addtag('PlanarConfiguration', 'H', 1, planarconfig.value) + addtag('BitsPerSample', 'H', samplesperpixel, + (datadtype.itemsize * 8,) * samplesperpixel) + else: + addtag('BitsPerSample', 'H', 1, datadtype.itemsize * 8) + if extrasamples: + if photometric == RGB and extrasamples == 1: + addtag('ExtraSamples', 'H', 1, 1) # associated alpha channel + else: + addtag('ExtraSamples', 'H', extrasamples, (0,) * extrasamples) + if resolution is not None: + addtag('XResolution', '2I', 1, rational(resolution[0])) + addtag('YResolution', '2I', 1, rational(resolution[1])) + if len(resolution) > 2: + unit = resolution[2] + unit = 1 if unit is None else enumarg(TIFF.RESUNIT, unit) + elif self._imagej: + unit = 1 + else: + unit = 2 + addtag('ResolutionUnit', 'H', 1, unit) + elif not self._imagej: + addtag('XResolution', '2I', 1, (1, 1)) + addtag('YResolution', '2I', 1, (1, 1)) + addtag('ResolutionUnit', 'H', 1, 1) + if ijmetadata: + for t in imagej_metadata_tags(ijmetadata, byteorder): + addtag(*t) + + contiguous = not compress + if tile: + # one chunk per tile per plane + tiles = ((shape[2] + tile[0] - 1) // tile[0], + (shape[3] + tile[1] - 1) // tile[1], + (shape[4] + tile[2] - 1) // tile[2]) + numtiles = product(tiles) * shape[1] + 
stripbytecounts = [ + product(tile) * shape[-1] * datadtype.itemsize] * numtiles + addtag(tagbytecounts, offsetformat, numtiles, stripbytecounts) + addtag(tag_offsets, offsetformat, numtiles, [0] * numtiles) + contiguous = contiguous and product(tiles) == 1 + if not contiguous: + # allocate tile buffer + chunk = numpy.empty(tile + (shape[-1],), dtype=datadtype) + elif contiguous: + # one strip per plane + if bilevel: + stripbytecounts = [product(datashape[2:])] * shape[1] + else: + stripbytecounts = [ + product(datashape[2:]) * datadtype.itemsize] * shape[1] + addtag(tagbytecounts, offsetformat, shape[1], stripbytecounts) + addtag(tag_offsets, offsetformat, shape[1], [0] * shape[1]) + addtag('RowsPerStrip', 'I', 1, shape[-3]) + else: + # compress rowsperstrip or ~64 KB chunks + rowsize = product(shape[-2:]) * datadtype.itemsize + if rowsperstrip is None: + rowsperstrip = 65536 // rowsize + if rowsperstrip < 1: + rowsperstrip = 1 + elif rowsperstrip > shape[-3]: + rowsperstrip = shape[-3] + addtag('RowsPerStrip', 'I', 1, rowsperstrip) + + numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip + numstrips *= shape[1] + stripbytecounts = [0] * numstrips + addtag(tagbytecounts, offsetformat, numstrips, [0] * numstrips) + addtag(tag_offsets, offsetformat, numstrips, [0] * numstrips) + + if data is None and not contiguous: + raise ValueError('cannot write non-contiguous empty file') + + # add extra tags from user + for t in extratags: + addtag(*t) + + # TODO: check TIFFReadDirectoryCheckOrder warning in files containing + # multiple tags of same code + # the entries in an IFD must be sorted in ascending order by tag code + tags = sorted(tags, key=lambda x: x[0]) + + if not (self._bigtiff or self._imagej) and ( + fh.tell() + datasize > 2**31-1): + raise ValueError('data too large for standard TIFF file') + + # if not compressed or multi-tiled, write the first IFD and then + # all data contiguously; else, write all IFDs and data interleaved + for pageindex in range(1 
if contiguous else shape[0]): + # update pointer at ifd_offset + pos = fh.tell() + if pos % 2: + # location of IFD must begin on a word boundary + fh.write(b'\0') + pos += 1 + fh.seek(self._ifdoffset) + fh.write(pack(offsetformat, pos)) + fh.seek(pos) + + # write ifdentries + fh.write(pack(tagnoformat, len(tags))) + tag_offset = fh.tell() + fh.write(b''.join(t[1] for t in tags)) + self._ifdoffset = fh.tell() + fh.write(pack(offsetformat, 0)) # offset to next IFD + + # write tag values and patch offsets in ifdentries, if necessary + for tagindex, tag in enumerate(tags): + if tag[2]: + pos = fh.tell() + if pos % 2: + # tag value is expected to begin on word boundary + fh.write(b'\0') + pos += 1 + fh.seek(tag_offset + tagindex*tagsize + offsetsize + 4) + fh.write(pack(offsetformat, pos)) + fh.seek(pos) + if tag[0] == tag_offsets: + stripoffsetsoffset = pos + elif tag[0] == tagbytecounts: + strip_bytecounts_offset = pos + elif tag[0] == 270 and tag[2].endswith(b'\0\0\0\0'): + # image description buffer + self._descriptionoffset = pos + self._descriptionlenoffset = ( + tag_offset + tagindex * tagsize + 4) + fh.write(tag[2]) + + # write image data + data_offset = fh.tell() + skip = align - data_offset % align + fh.seek(skip, 1) + data_offset += skip + if contiguous: + if data is None: + fh.write_empty(datasize) + else: + fh.write_array(data) + elif tile: + if data is None: + fh.write_empty(numtiles * stripbytecounts[0]) + else: + stripindex = 0 + for plane in data[pageindex]: + for tz in range(tiles[0]): + for ty in range(tiles[1]): + for tx in range(tiles[2]): + c0 = min(tile[0], shape[2] - tz*tile[0]) + c1 = min(tile[1], shape[3] - ty*tile[1]) + c2 = min(tile[2], shape[4] - tx*tile[2]) + chunk[c0:, c1:, c2:] = 0 + chunk[:c0, :c1, :c2] = plane[ + tz*tile[0]:tz*tile[0]+c0, + ty*tile[1]:ty*tile[1]+c1, + tx*tile[2]:tx*tile[2]+c2] + if compress: + t = compress(chunk) + fh.write(t) + stripbytecounts[stripindex] = len(t) + stripindex += 1 + else: + fh.write_array(chunk) + 
fh.flush() + elif compress: + # write one strip per rowsperstrip + assert data.shape[2] == 1 # not handling depth + numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip + stripindex = 0 + for plane in data[pageindex]: + for i in range(numstrips): + strip = plane[0, i*rowsperstrip: (i+1)*rowsperstrip] + strip = compress(strip) + fh.write(strip) + stripbytecounts[stripindex] = len(strip) + stripindex += 1 + + # update strip/tile offsets and bytecounts if necessary + pos = fh.tell() + for tagindex, tag in enumerate(tags): + if tag[0] == tag_offsets: # strip/tile offsets + if tag[2]: + fh.seek(stripoffsetsoffset) + strip_offset = data_offset + for size in stripbytecounts: + fh.write(pack(offsetformat, strip_offset)) + strip_offset += size + else: + fh.seek(tag_offset + tagindex*tagsize + offsetsize + 4) + fh.write(pack(offsetformat, data_offset)) + elif tag[0] == tagbytecounts: # strip/tile bytecounts + if compress: + if tag[2]: + fh.seek(strip_bytecounts_offset) + for size in stripbytecounts: + fh.write(pack(offsetformat, size)) + else: + fh.seek(tag_offset + tagindex*tagsize + + offsetsize + 4) + fh.write(pack(offsetformat, stripbytecounts[0])) + break + fh.seek(pos) + fh.flush() + + # remove tags that should be written only once + if pageindex == 0: + tags = [tag for tag in tags if not tag[-1]] + + self._shape = shape + self._datashape = (1,) + input_shape + self._datadtype = datadtype + self._dataoffset = data_offset + self._databytecounts = stripbytecounts + + if contiguous: + # write remaining IFDs/tags later + self._tags = tags + # return offset and size of image data + if returnoffset: + return data_offset, sum(stripbytecounts) + + def _write_remaining_pages(self): + """Write outstanding IFDs and tags to file.""" + if not self._tags or self._truncate: + return + + fh = self._fh + fhpos = fh.tell() + if fhpos % 2: + fh.write(b'\0') + fhpos += 1 + byteorder = self._byteorder + offsetformat = self._offsetformat + offsetsize = self._offsetsize + tagnoformat = 
self._tagnoformat + tagsize = self._tagsize + dataoffset = self._dataoffset + pagedatasize = sum(self._databytecounts) + pageno = self._shape[0] * self._datashape[0] - 1 + + def pack(fmt, *val): + return struct.pack(byteorder+fmt, *val) + + # construct template IFD in memory + # need to patch offsets to next IFD and data before writing to disk + ifd = io.BytesIO() + ifd.write(pack(tagnoformat, len(self._tags))) + tagoffset = ifd.tell() + ifd.write(b''.join(t[1] for t in self._tags)) + ifdoffset = ifd.tell() + ifd.write(pack(offsetformat, 0)) # offset to next IFD + # tag values + for tagindex, tag in enumerate(self._tags): + offset2value = tagoffset + tagindex*tagsize + offsetsize + 4 + if tag[2]: + pos = ifd.tell() + if pos % 2: # tag value is expected to begin on word boundary + ifd.write(b'\0') + pos += 1 + ifd.seek(offset2value) + try: + ifd.write(pack(offsetformat, pos + fhpos)) + except Exception: # struct.error + if self._imagej: + warnings.warn('truncating ImageJ file') + self._truncate = True + return + raise ValueError('data too large for non-BigTIFF file') + ifd.seek(pos) + ifd.write(tag[2]) + if tag[0] == self._tagoffsets: + # save strip/tile offsets for later updates + stripoffset2offset = offset2value + stripoffset2value = pos + elif tag[0] == self._tagoffsets: + # save strip/tile offsets for later updates + stripoffset2offset = None + stripoffset2value = offset2value + # size to word boundary + if ifd.tell() % 2: + ifd.write(b'\0') + + # check if all IFDs fit in file + pos = fh.tell() + if not self._bigtiff and pos + ifd.tell() * pageno > 2**32 - 256: + if self._imagej: + warnings.warn('truncating ImageJ file') + self._truncate = True + return + raise ValueError('data too large for non-BigTIFF file') + + # TODO: assemble IFD chain in memory + for _ in range(pageno): + # update pointer at IFD offset + pos = fh.tell() + fh.seek(self._ifdoffset) + fh.write(pack(offsetformat, pos)) + fh.seek(pos) + self._ifdoffset = pos + ifdoffset + # update strip/tile 
offsets in IFD + dataoffset += pagedatasize # offset to image data + if stripoffset2offset is None: + ifd.seek(stripoffset2value) + ifd.write(pack(offsetformat, dataoffset)) + else: + ifd.seek(stripoffset2offset) + ifd.write(pack(offsetformat, pos + stripoffset2value)) + ifd.seek(stripoffset2value) + stripoffset = dataoffset + for size in self._databytecounts: + ifd.write(pack(offsetformat, stripoffset)) + stripoffset += size + # write IFD entry + fh.write(ifd.getvalue()) + + self._tags = None + self._datadtype = None + self._dataoffset = None + self._databytecounts = None + # do not reset _shape or _data_shape + + def _write_image_description(self): + """Write meta data to ImageDescription tag.""" + if (not self._datashape or self._datashape[0] == 1 or + self._descriptionoffset <= 0): + return + + colormapped = self._colormap is not None + if self._imagej: + isrgb = self._shape[-1] in (3, 4) + description = imagej_description( + self._datashape, isrgb, colormapped, **self._metadata) + else: + description = json_description(self._datashape, **self._metadata) + + # rewrite description and its length to file + description = description.encode('utf-8') + description = description[:self._descriptionlen-1] + pos = self._fh.tell() + self._fh.seek(self._descriptionoffset) + self._fh.write(description) + self._fh.seek(self._descriptionlenoffset) + self._fh.write(struct.pack(self._byteorder+self._offsetformat, + len(description)+1)) + self._fh.seek(pos) + + self._descriptionoffset = 0 + self._descriptionlenoffset = 0 + self._descriptionlen = 0 + + def _now(self): + """Return current date and time.""" + return datetime.datetime.now() + + def close(self): + """Write remaining pages and close file handle.""" + if not self._truncate: + self._write_remaining_pages() + self._write_image_description() + self._fh.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + +class TiffFile(object): + """Read image and 
metadata from TIFF file. + + TiffFile instances must be closed using the 'close' method, which is + automatically called when using the 'with' context manager. + + Attributes + ---------- + pages : TiffPages + Sequence of TIFF pages in file. + series : list of TiffPageSeries + Sequences of closely related TIFF pages. These are computed + from OME, LSM, ImageJ, etc. metadata or based on similarity + of page properties such as shape, dtype, and compression. + byteorder : '>', '<' + The endianness of data in the file. + '>': big-endian (Motorola). + '>': little-endian (Intel). + is_flag : bool + If True, file is of a certain format. + Flags are: bigtiff, movie, shaped, ome, imagej, stk, lsm, fluoview, + nih, vista, 'micromanager, metaseries, mdgel, mediacy, tvips, fei, + sem, scn, svs, scanimage, andor, epics, pilatus, qptiff. + + All attributes are read-only. + + Examples + -------- + >>> # read image array from TIFF file + >>> imsave('temp.tif', numpy.random.rand(5, 301, 219)) + >>> with TiffFile('temp.tif') as tif: + ... data = tif.asarray() + >>> data.shape + (5, 301, 219) + + """ + def __init__(self, arg, name=None, offset=None, size=None, + multifile=True, movie=None, **kwargs): + """Initialize instance from file. + + Parameters + ---------- + arg : str or open file + Name of file or open file object. + The file objects are closed in TiffFile.close(). + name : str + Optional name of file in case 'arg' is a file handle. + offset : int + Optional start position of embedded file. By default, this is + the current file position. + size : int + Optional size of embedded file. By default, this is the number + of bytes from the 'offset' to the end of the file. + multifile : bool + If True (default), series may include pages from multiple files. + Currently applies to OME-TIFF only. + movie : bool + If True, assume that later pages differ from first page only by + data offsets and byte counts. 
Significantly increases speed and + reduces memory usage when reading movies with thousands of pages. + Enabling this for non-movie files will result in data corruption + or crashes. Python 3 only. + kwargs : bool + 'is_ome': If False, disable processing of OME-XML metadata. + + """ + if 'fastij' in kwargs: + del kwargs['fastij'] + raise DeprecationWarning('the fastij option will be removed') + for key, value in kwargs.items(): + if key[:3] == 'is_' and key[3:] in TIFF.FILE_FLAGS: + if value is not None and not value: + setattr(self, key, bool(value)) + else: + raise TypeError('unexpected keyword argument: %s' % key) + + fh = FileHandle(arg, mode='rb', name=name, offset=offset, size=size) + self._fh = fh + self._multifile = bool(multifile) + self._files = {fh.name: self} # cache of TiffFiles + try: + fh.seek(0) + try: + byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)] + except KeyError: + raise ValueError('not a TIFF file') + sys_byteorder = {'big': '>', 'little': '<'}[sys.byteorder] + self.isnative = byteorder == sys_byteorder + + version = struct.unpack(byteorder+'H', fh.read(2))[0] + if version == 43: + # BigTiff + self.is_bigtiff = True + offsetsize, zero = struct.unpack(byteorder+'HH', fh.read(4)) + if zero or offsetsize != 8: + raise ValueError('invalid BigTIFF file') + self.byteorder = byteorder + self.offsetsize = 8 + self.offsetformat = byteorder+'Q' + self.tagnosize = 8 + self.tagnoformat = byteorder+'Q' + self.tagsize = 20 + self.tagformat1 = byteorder+'HH' + self.tagformat2 = byteorder+'Q8s' + elif version == 42: + self.is_bigtiff = False + self.byteorder = byteorder + self.offsetsize = 4 + self.offsetformat = byteorder+'I' + self.tagnosize = 2 + self.tagnoformat = byteorder+'H' + self.tagsize = 12 + self.tagformat1 = byteorder+'HH' + self.tagformat2 = byteorder+'I4s' + else: + raise ValueError('invalid TIFF file') + + # file handle is at offset to offset to first page + self.pages = TiffPages(self) + + if self.is_lsm and (self.filehandle.size >= 2**32 
or + self.pages[0].compression != 1 or + self.pages[1].compression != 1): + self._lsm_load_pages() + self._lsm_fix_strip_offsets() + self._lsm_fix_strip_bytecounts() + elif movie: + self.pages.useframes = True + + except Exception: + fh.close() + raise + + @property + def filehandle(self): + """Return file handle.""" + return self._fh + + @property + def filename(self): + """Return name of file handle.""" + return self._fh.name + + @lazyattr + def fstat(self): + """Return status of file handle as stat_result object.""" + try: + return os.fstat(self._fh.fileno()) + except Exception: # io.UnsupportedOperation + return None + + def close(self): + """Close open file handle(s).""" + for tif in self._files.values(): + tif.filehandle.close() + self._files = {} + + def asarray(self, key=None, series=None, out=None, validate=True, + maxworkers=1): + """Return image data from multiple TIFF pages as numpy array. + + By default, the data from the first series is returned. + + Parameters + ---------- + key : int, slice, or sequence of page indices + Defines which pages to return as array. + series : int or TiffPageSeries + Defines which series of pages to return as array. + out : numpy.ndarray, str, or file-like object; optional + Buffer where image data will be saved. + If None (default), a new array will be created. + If numpy.ndarray, a writable array of compatible dtype and shape. + If 'memmap', directly memory-map the image data in the TIFF file + if possible; else create a memory-mapped array in a temporary file. + If str or open file, the file name or file object used to + create a memory-map to an array stored in a binary file on disk. + validate : bool + If True (default), validate various tags. + Passed to TiffPage.asarray(). + maxworkers : int + Maximum number of threads to concurrently get data from pages. + Default is 1. If None, up to half the CPU cores are used. + Reading data from file is limited to a single thread. 
+ Using multiple threads can significantly speed up this function + if the bottleneck is decoding compressed data, e.g. in case of + large LZW compressed LSM files. + If the bottleneck is I/O or pure Python code, using multiple + threads might be detrimental. + + """ + if not self.pages: + return numpy.array([]) + if key is None and series is None: + series = 0 + if series is not None: + try: + series = self.series[series] + except (KeyError, TypeError): + pass + pages = series._pages + else: + pages = self.pages + + if key is None: + pass + elif isinstance(key, inttypes): + pages = [pages[key]] + elif isinstance(key, slice): + pages = pages[key] + elif isinstance(key, collections.Iterable): + pages = [pages[k] for k in key] + else: + raise TypeError('key must be an int, slice, or sequence') + + if not pages: + raise ValueError('no pages selected') + + if self.is_nih: + result = stack_pages(pages, out=out, maxworkers=maxworkers, + squeeze=False) + elif key is None and series and series.offset: + typecode = self.byteorder + series.dtype.char + if out == 'memmap' and pages[0].is_memmappable: + result = self.filehandle.memmap_array( + typecode, series.shape, series.offset) + else: + if out is not None: + out = create_output(out, series.shape, series.dtype) + self.filehandle.seek(series.offset) + result = self.filehandle.read_array( + typecode, product(series.shape), out=out, native=True) + elif len(pages) == 1: + result = pages[0].asarray(out=out, validate=validate) + else: + result = stack_pages(pages, out=out, maxworkers=maxworkers) + + if result is None: + return + + if key is None: + try: + result.shape = series.shape + except ValueError: + try: + warnings.warn('failed to reshape %s to %s' % ( + result.shape, series.shape)) + # try series of expected shapes + result.shape = (-1,) + series.shape + except ValueError: + # revert to generic shape + result.shape = (-1,) + pages[0].shape + elif len(pages) == 1: + result.shape = pages[0].shape + else: + result.shape = 
(-1,) + pages[0].shape + return result + + @lazyattr + def series(self): + """Return related pages as TiffPageSeries. + + Side effect: after calling this function, TiffFile.pages might contain + TiffPage and TiffFrame instances. + + """ + if not self.pages: + return [] + + useframes = self.pages.useframes + keyframe = self.pages.keyframe + series = [] + for name in 'ome imagej lsm fluoview nih mdgel shaped'.split(): + if getattr(self, 'is_' + name, False): + series = getattr(self, '_%s_series' % name)() + break + self.pages.useframes = useframes + self.pages.keyframe = keyframe + if not series: + series = self._generic_series() + + # remove empty series, e.g. in MD Gel files + series = [s for s in series if sum(s.shape) > 0] + + for i, s in enumerate(series): + s.index = i + return series + + def _generic_series(self): + """Return image series in file.""" + if self.pages.useframes: + # movie mode + page = self.pages[0] + shape = page.shape + axes = page.axes + if len(self.pages) > 1: + shape = (len(self.pages),) + shape + axes = 'I' + axes + return [TiffPageSeries(self.pages[:], shape, page.dtype, axes, + stype='movie')] + + self.pages.clear(False) + self.pages.load() + result = [] + keys = [] + series = {} + compressions = TIFF.DECOMPESSORS + for page in self.pages: + if not page.shape: + continue + key = page.shape + (page.axes, page.compression in compressions) + if key in series: + series[key].append(page) + else: + keys.append(key) + series[key] = [page] + for key in keys: + pages = series[key] + page = pages[0] + shape = page.shape + axes = page.axes + if len(pages) > 1: + shape = (len(pages),) + shape + axes = 'I' + axes + result.append(TiffPageSeries(pages, shape, page.dtype, axes, + stype='Generic')) + + return result + + def _shaped_series(self): + """Return image series in "shaped" file.""" + pages = self.pages + pages.useframes = True + lenpages = len(pages) + + def append_series(series, pages, axes, shape, reshape, name, + truncated): + page = pages[0] 
+ if not axes: + shape = page.shape + axes = page.axes + if len(pages) > 1: + shape = (len(pages),) + shape + axes = 'Q' + axes + size = product(shape) + resize = product(reshape) + if page.is_contiguous and resize > size and resize % size == 0: + if truncated is None: + truncated = True + axes = 'Q' + axes + shape = (resize // size,) + shape + try: + axes = reshape_axes(axes, shape, reshape) + shape = reshape + except ValueError as e: + warnings.warn(str(e)) + series.append( + TiffPageSeries(pages, shape, page.dtype, axes, name=name, + stype='Shaped', truncated=truncated)) + + keyframe = axes = shape = reshape = name = None + series = [] + index = 0 + while True: + if index >= lenpages: + break + # new keyframe; start of new series + pages.keyframe = index + keyframe = pages[index] + if not keyframe.is_shaped: + warnings.warn('invalid shape metadata or corrupted file') + return + # read metadata + axes = None + shape = None + metadata = json_description_metadata(keyframe.is_shaped) + name = metadata.get('name', '') + reshape = metadata['shape'] + truncated = metadata.get('truncated', None) + if 'axes' in metadata: + axes = metadata['axes'] + if len(axes) == len(reshape): + shape = reshape + else: + axes = '' + warnings.warn('axes do not match shape') + # skip pages if possible + spages = [keyframe] + size = product(reshape) + npages, mod = divmod(size, product(keyframe.shape)) + if mod: + warnings.warn('series shape does not match page shape') + return + if 1 < npages <= lenpages - index: + size *= keyframe._dtype.itemsize + if truncated: + npages = 1 + elif (keyframe.is_final and + keyframe.offset + size < pages[index+1].offset): + truncated = False + else: + # need to read all pages for series + truncated = False + for j in range(index+1, index+npages): + page = pages[j] + page.keyframe = keyframe + spages.append(page) + append_series(series, spages, axes, shape, reshape, name, + truncated) + index += npages + + return series + + def _imagej_series(self): + 
"""Return image series in ImageJ file.""" + # ImageJ's dimension order is always TZCYXS + # TODO: fix loading of color, composite, or palette images + self.pages.useframes = True + self.pages.keyframe = 0 + + ij = self.imagej_metadata + pages = self.pages + page = pages[0] + + def is_hyperstack(): + # ImageJ hyperstack store all image metadata in the first page and + # image data are stored contiguously before the second page, if any + if not page.is_final: + return False + images = ij.get('images', 0) + if images <= 1: + return False + offset, count = page.is_contiguous + if (count != product(page.shape) * page.bitspersample // 8 + or offset + count*images > self.filehandle.size): + raise ValueError() + # check that next page is stored after data + if len(pages) > 1 and offset + count*images > pages[1].offset: + return False + return True + + try: + hyperstack = is_hyperstack() + except ValueError: + warnings.warn('invalid ImageJ metadata or corrupted file') + return + if hyperstack: + # no need to read other pages + pages = [page] + else: + self.pages.load() + + shape = [] + axes = [] + if 'frames' in ij: + shape.append(ij['frames']) + axes.append('T') + if 'slices' in ij: + shape.append(ij['slices']) + axes.append('Z') + if 'channels' in ij and not (page.photometric == 2 and not + ij.get('hyperstack', False)): + shape.append(ij['channels']) + axes.append('C') + remain = ij.get('images', len(pages))//(product(shape) if shape else 1) + if remain > 1: + shape.append(remain) + axes.append('I') + if page.axes[0] == 'I': + # contiguous multiple images + shape.extend(page.shape[1:]) + axes.extend(page.axes[1:]) + elif page.axes[:2] == 'SI': + # color-mapped contiguous multiple images + shape = page.shape[0:1] + tuple(shape) + page.shape[2:] + axes = list(page.axes[0]) + axes + list(page.axes[2:]) + else: + shape.extend(page.shape) + axes.extend(page.axes) + + truncated = ( + hyperstack and len(self.pages) == 1 and + page.is_contiguous[1] != product(shape) * 
page.bitspersample // 8)
+
+        return [TiffPageSeries(pages, shape, page.dtype, axes, stype='ImageJ',
+                               truncated=truncated)]
+
+    def _fluoview_series(self):
+        """Return image series in FluoView file."""
+        self.pages.useframes = True
+        self.pages.keyframe = 0
+        self.pages.load()
+        mm = self.fluoview_metadata
+        mmhd = list(reversed(mm['Dimensions']))
+        axes = ''.join(TIFF.MM_DIMENSIONS.get(i[0].upper(), 'Q')
+                       for i in mmhd if i[1] > 1)
+        shape = tuple(int(i[1]) for i in mmhd if i[1] > 1)
+        return [TiffPageSeries(self.pages, shape, self.pages[0].dtype, axes,
+                               name=mm['ImageName'], stype='FluoView')]
+
+    def _mdgel_series(self):
+        """Return image series in MD Gel file."""
+        # only a single page, scaled according to metadata in second page
+        self.pages.useframes = False
+        self.pages.keyframe = 0
+        self.pages.load()
+        md = self.mdgel_metadata
+        if md['FileTag'] in (2, 128):
+            dtype = numpy.dtype('float32')
+            scale = md['ScalePixel']
+            scale = scale[0] / scale[1]  # rational
+            if md['FileTag'] == 2:
+                # square root data format
+                def transform(a):
+                    return a.astype('float32')**2 * scale
+            else:
+                def transform(a):
+                    return a.astype('float32') * scale
+        else:
+            transform = None
+        page = self.pages[0]
+        return [TiffPageSeries([page], page.shape, dtype, page.axes,
+                               transform=transform, stype='MDGel')]
+
+    def _nih_series(self):
+        """Return image series in NIH file."""
+        self.pages.useframes = True
+        self.pages.keyframe = 0
+        self.pages.load()
+        page0 = self.pages[0]
+        if len(self.pages) == 1:
+            shape = page0.shape
+            axes = page0.axes
+        else:
+            shape = (len(self.pages),) + page0.shape
+            axes = 'I' + page0.axes
+        return [
+            TiffPageSeries(self.pages, shape, page0.dtype, axes, stype='NIH')]
+
+    def _ome_series(self):
+        """Return image series in OME-TIFF file(s)."""
+        from xml.etree import cElementTree as etree  # delayed import
+        omexml = self.pages[0].description
+        try:
+            root = etree.fromstring(omexml)
+        except etree.ParseError as e:
+            # TODO: test badly encoded OME-XML
+            
warnings.warn('ome-xml: %s' % e) + try: + # might work on Python 2 + omexml = omexml.decode('utf-8', 'ignore').encode('utf-8') + root = etree.fromstring(omexml) + except Exception: + return + + self.pages.useframes = True + self.pages.keyframe = 0 + self.pages.load() + + uuid = root.attrib.get('UUID', None) + self._files = {uuid: self} + dirname = self._fh.dirname + modulo = {} + series = [] + for element in root: + if element.tag.endswith('BinaryOnly'): + # TODO: load OME-XML from master or companion file + warnings.warn('ome-xml: not an ome-tiff master file') + break + if element.tag.endswith('StructuredAnnotations'): + for annot in element: + if not annot.attrib.get('Namespace', + '').endswith('modulo'): + continue + for value in annot: + for modul in value: + for along in modul: + if not along.tag[:-1].endswith('Along'): + continue + axis = along.tag[-1] + newaxis = along.attrib.get('Type', 'other') + newaxis = TIFF.AXES_LABELS[newaxis] + if 'Start' in along.attrib: + step = float(along.attrib.get('Step', 1)) + start = float(along.attrib['Start']) + stop = float(along.attrib['End']) + step + labels = numpy.arange(start, stop, step) + else: + labels = [label.text for label in along + if label.tag.endswith('Label')] + modulo[axis] = (newaxis, labels) + + if not element.tag.endswith('Image'): + continue + + attr = element.attrib + name = attr.get('Name', None) + + for pixels in element: + if not pixels.tag.endswith('Pixels'): + continue + attr = pixels.attrib + dtype = attr.get('PixelType', None) + axes = ''.join(reversed(attr['DimensionOrder'])) + shape = list(int(attr['Size'+ax]) for ax in axes) + size = product(shape[:-2]) + ifds = None + spp = 1 # samples per pixel + # FIXME: this implementation assumes the last two + # dimensions are stored in tiff pages (shape[:-2]). + # Apparently that is not always the case. 
+ for data in pixels: + if data.tag.endswith('Channel'): + attr = data.attrib + if ifds is None: + spp = int(attr.get('SamplesPerPixel', spp)) + ifds = [None] * (size // spp) + elif int(attr.get('SamplesPerPixel', 1)) != spp: + raise ValueError( + "cannot handle differing SamplesPerPixel") + continue + if ifds is None: + ifds = [None] * (size // spp) + if not data.tag.endswith('TiffData'): + continue + attr = data.attrib + ifd = int(attr.get('IFD', 0)) + num = int(attr.get('NumPlanes', 1 if 'IFD' in attr else 0)) + num = int(attr.get('PlaneCount', num)) + idx = [int(attr.get('First'+ax, 0)) for ax in axes[:-2]] + try: + idx = numpy.ravel_multi_index(idx, shape[:-2]) + except ValueError: + # ImageJ produces invalid ome-xml when cropping + warnings.warn('ome-xml: invalid TiffData index') + continue + for uuid in data: + if not uuid.tag.endswith('UUID'): + continue + if uuid.text not in self._files: + if not self._multifile: + # abort reading multifile OME series + # and fall back to generic series + return [] + fname = uuid.attrib['FileName'] + try: + tif = TiffFile(os.path.join(dirname, fname)) + tif.pages.useframes = True + tif.pages.keyframe = 0 + tif.pages.load() + except (IOError, FileNotFoundError, ValueError): + warnings.warn( + "ome-xml: failed to read '%s'" % fname) + break + self._files[uuid.text] = tif + tif.close() + pages = self._files[uuid.text].pages + try: + for i in range(num if num else len(pages)): + ifds[idx + i] = pages[ifd + i] + except IndexError: + warnings.warn('ome-xml: index out of range') + # only process first UUID + break + else: + pages = self.pages + try: + for i in range(num if num else len(pages)): + ifds[idx + i] = pages[ifd + i] + except IndexError: + warnings.warn('ome-xml: index out of range') + + if all(i is None for i in ifds): + # skip images without data + continue + + # set a keyframe on all IFDs + keyframe = None + for i in ifds: + # try find a TiffPage + if i and i == i.keyframe: + keyframe = i + break + if not keyframe: + 
# reload a TiffPage from file + for i, keyframe in enumerate(ifds): + if keyframe: + keyframe.parent.pages.keyframe = keyframe.index + keyframe = keyframe.parent.pages[keyframe.index] + ifds[i] = keyframe + break + for i in ifds: + if i is not None: + i.keyframe = keyframe + + dtype = keyframe.dtype + series.append( + TiffPageSeries(ifds, shape, dtype, axes, parent=self, + name=name, stype='OME')) + for serie in series: + shape = list(serie.shape) + for axis, (newaxis, labels) in modulo.items(): + i = serie.axes.index(axis) + size = len(labels) + if shape[i] == size: + serie.axes = serie.axes.replace(axis, newaxis, 1) + else: + shape[i] //= size + shape.insert(i+1, size) + serie.axes = serie.axes.replace(axis, axis+newaxis, 1) + serie.shape = tuple(shape) + # squeeze dimensions + for serie in series: + serie.shape, serie.axes = squeeze_axes(serie.shape, serie.axes) + return series + + def _lsm_series(self): + """Return main image series in LSM file. Skip thumbnails.""" + lsmi = self.lsm_metadata + axes = TIFF.CZ_LSMINFO_SCANTYPE[lsmi['ScanType']] + if self.pages[0].photometric == 2: # RGB; more than one channel + axes = axes.replace('C', '').replace('XY', 'XYC') + if lsmi.get('DimensionP', 0) > 1: + axes += 'P' + if lsmi.get('DimensionM', 0) > 1: + axes += 'M' + axes = axes[::-1] + shape = tuple(int(lsmi[TIFF.CZ_LSMINFO_DIMENSIONS[i]]) for i in axes) + name = lsmi.get('Name', '') + self.pages.keyframe = 0 + pages = self.pages[::2] + dtype = pages[0].dtype + series = [TiffPageSeries(pages, shape, dtype, axes, name=name, + stype='LSM')] + + if self.pages[1].is_reduced: + self.pages.keyframe = 1 + pages = self.pages[1::2] + dtype = pages[0].dtype + cp, i = 1, 0 + while cp < len(pages) and i < len(shape)-2: + cp *= shape[i] + i += 1 + shape = shape[:i] + pages[0].shape + axes = axes[:i] + 'CYX' + series.append(TiffPageSeries(pages, shape, dtype, axes, name=name, + stype='LSMreduced')) + + return series + + def _lsm_load_pages(self): + """Load all pages from LSM 
file.""" + self.pages.cache = True + self.pages.useframes = True + # second series: thumbnails + self.pages.keyframe = 1 + keyframe = self.pages[1] + for page in self.pages[1::2]: + page.keyframe = keyframe + # first series: data + self.pages.keyframe = 0 + keyframe = self.pages[0] + for page in self.pages[::2]: + page.keyframe = keyframe + + def _lsm_fix_strip_offsets(self): + """Unwrap strip offsets for LSM files greater than 4 GB. + + Each series and position require separate unwrapping (undocumented). + + """ + if self.filehandle.size < 2**32: + return + + pages = self.pages + npages = len(pages) + series = self.series[0] + axes = series.axes + + # find positions + positions = 1 + for i in 0, 1: + if series.axes[i] in 'PM': + positions *= series.shape[i] + + # make time axis first + if positions > 1: + ntimes = 0 + for i in 1, 2: + if axes[i] == 'T': + ntimes = series.shape[i] + break + if ntimes: + div, mod = divmod(npages, 2*positions*ntimes) + assert mod == 0 + shape = (positions, ntimes, div, 2) + indices = numpy.arange(product(shape)).reshape(shape) + indices = numpy.moveaxis(indices, 1, 0) + else: + indices = numpy.arange(npages).reshape(-1, 2) + + # images of reduced page might be stored first + if pages[0].dataoffsets[0] > pages[1].dataoffsets[0]: + indices = indices[..., ::-1] + + # unwrap offsets + wrap = 0 + previousoffset = 0 + for i in indices.flat: + page = pages[i] + dataoffsets = [] + for currentoffset in page.dataoffsets: + if currentoffset < previousoffset: + wrap += 2**32 + dataoffsets.append(currentoffset + wrap) + previousoffset = currentoffset + page.dataoffsets = tuple(dataoffsets) + + def _lsm_fix_strip_bytecounts(self): + """Set databytecounts to size of compressed data. + + The StripByteCounts tag in LSM files contains the number of bytes + for the uncompressed data. 
+ + """ + pages = self.pages + if pages[0].compression == 1: + return + # sort pages by first strip offset + pages = sorted(pages, key=lambda p: p.dataoffsets[0]) + npages = len(pages) - 1 + for i, page in enumerate(pages): + if page.index % 2: + continue + offsets = page.dataoffsets + bytecounts = page.databytecounts + if i < npages: + lastoffset = pages[i+1].dataoffsets[0] + else: + # LZW compressed strips might be longer than uncompressed + lastoffset = min(offsets[-1] + 2*bytecounts[-1], self._fh.size) + offsets = offsets + (lastoffset,) + page.databytecounts = tuple(offsets[j+1] - offsets[j] + for j in range(len(bytecounts))) + + def __getattr__(self, name): + """Return 'is_flag' attributes from first page.""" + if name[3:] in TIFF.FILE_FLAGS: + if not self.pages: + return False + value = bool(getattr(self.pages[0], name)) + setattr(self, name, value) + return value + raise AttributeError("'%s' object has no attribute '%s'" % + (self.__class__.__name__, name)) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def __str__(self, detail=0, width=79): + """Return string containing information about file. + + The detail parameter specifies the level of detail returned: + + 0: file only. + 1: all series, first page of series and its tags. + 2: large tag values and file metadata. + 3: all pages. 
+ + """ + info = [ + "TiffFile '%s'", + format_size(self._fh.size), + {'<': 'LittleEndian', '>': 'BigEndian'}[self.byteorder]] + if self.is_bigtiff: + info.append('BigTiff') + info.append('|'.join(f.upper() for f in self.flags)) + if len(self.pages) > 1: + info.append('%i Pages' % len(self.pages)) + if len(self.series) > 1: + info.append('%i Series' % len(self.series)) + if len(self._files) > 1: + info.append('%i Files' % (len(self._files))) + info = ' '.join(info) + info = info.replace(' ', ' ').replace(' ', ' ') + info = info % snipstr(self._fh.name, max(12, width+2-len(info))) + if detail <= 0: + return info + info = [info] + info.append('\n'.join(str(s) for s in self.series)) + if detail >= 3: + info.extend((TiffPage.__str__(p, detail=detail, width=width) + for p in self.pages + if p is not None)) + else: + info.extend((TiffPage.__str__(s.pages[0], detail=detail, + width=width) + for s in self.series + if s.pages[0] is not None)) + if detail >= 2: + for name in sorted(self.flags): + if hasattr(self, name + '_metadata'): + m = getattr(self, name + '_metadata') + if m: + info.append( + '%s_METADATA\n%s' % (name.upper(), + pformat(m, width=width, + height=detail*12))) + return '\n\n'.join(info).replace('\n\n\n', '\n\n') + + @lazyattr + def flags(self): + """Return set of file flags.""" + return set(name.lower() for name in sorted(TIFF.FILE_FLAGS) + if getattr(self, 'is_' + name)) + + @lazyattr + def is_mdgel(self): + """File has MD Gel format.""" + try: + return self.pages[0].is_mdgel or self.pages[1].is_mdgel + except IndexError: + return False + + @property + def is_movie(self): + """Return if file is a movie.""" + return self.pages.useframes + + @lazyattr + def shaped_metadata(self): + """Return Tifffile metadata from JSON descriptions as dicts.""" + if not self.is_shaped: + return + return tuple(json_description_metadata(s.pages[0].is_shaped) + for s in self.series if s.stype.lower() == 'shaped') + + @lazyattr + def ome_metadata(self): + """Return OME XML as 
dict.""" + # TODO: remove this or return XML? + if not self.is_ome: + return + return xml2dict(self.pages[0].description)['OME'] + + @lazyattr + def qptiff_metadata(self): + """Return PerkinElmer-QPI-ImageDescription XML element as dict.""" + if not self.is_qptiff: + return + root = 'PerkinElmer-QPI-ImageDescription' + xml = self.pages[0].description.replace(' ' + root + ' ', root) + return xml2dict(xml)[root] + + @lazyattr + def lsm_metadata(self): + """Return LSM metadata from CZ_LSMINFO tag as dict.""" + if not self.is_lsm: + return + return self.pages[0].tags['CZ_LSMINFO'].value + + @lazyattr + def stk_metadata(self): + """Return STK metadata from UIC tags as dict.""" + if not self.is_stk: + return + page = self.pages[0] + tags = page.tags + result = {} + result['NumberPlanes'] = tags['UIC2tag'].count + if page.description: + result['PlaneDescriptions'] = page.description.split('\0') + # result['plane_descriptions'] = stk_description_metadata( + # page.image_description) + if 'UIC1tag' in tags: + result.update(tags['UIC1tag'].value) + if 'UIC3tag' in tags: + result.update(tags['UIC3tag'].value) # wavelengths + if 'UIC4tag' in tags: + result.update(tags['UIC4tag'].value) # override uic1 tags + uic2tag = tags['UIC2tag'].value + result['ZDistance'] = uic2tag['ZDistance'] + result['TimeCreated'] = uic2tag['TimeCreated'] + result['TimeModified'] = uic2tag['TimeModified'] + try: + result['DatetimeCreated'] = numpy.array( + [julian_datetime(*dt) for dt in + zip(uic2tag['DateCreated'], uic2tag['TimeCreated'])], + dtype='datetime64[ns]') + result['DatetimeModified'] = numpy.array( + [julian_datetime(*dt) for dt in + zip(uic2tag['DateModified'], uic2tag['TimeModified'])], + dtype='datetime64[ns]') + except ValueError as e: + warnings.warn('stk_metadata: %s' % e) + return result + + @lazyattr + def imagej_metadata(self): + """Return consolidated ImageJ metadata as dict.""" + if not self.is_imagej: + return + page = self.pages[0] + result = 
imagej_description_metadata(page.is_imagej) + if 'IJMetadata' in page.tags: + try: + result.update(page.tags['IJMetadata'].value) + except Exception: + pass + return result + + @lazyattr + def fluoview_metadata(self): + """Return consolidated FluoView metadata as dict.""" + if not self.is_fluoview: + return + result = {} + page = self.pages[0] + result.update(page.tags['MM_Header'].value) + # TODO: read stamps from all pages + result['Stamp'] = page.tags['MM_Stamp'].value + # skip parsing image description; not reliable + # try: + # t = fluoview_description_metadata(page.image_description) + # if t is not None: + # result['ImageDescription'] = t + # except Exception as e: + # warnings.warn( + # "failed to read FluoView image description: %s" % e) + return result + + @lazyattr + def nih_metadata(self): + """Return NIH Image metadata from NIHImageHeader tag as dict.""" + if not self.is_nih: + return + return self.pages[0].tags['NIHImageHeader'].value + + @lazyattr + def fei_metadata(self): + """Return FEI metadata from SFEG or HELIOS tags as dict.""" + if not self.is_fei: + return + tags = self.pages[0].tags + if 'FEI_SFEG' in tags: + return tags['FEI_SFEG'].value + if 'FEI_HELIOS' in tags: + return tags['FEI_HELIOS'].value + + @lazyattr + def sem_metadata(self): + """Return SEM metadata from CZ_SEM tag as dict.""" + if not self.is_sem: + return + return self.pages[0].tags['CZ_SEM'].value + + @lazyattr + def mdgel_metadata(self): + """Return consolidated metadata from MD GEL tags as dict.""" + for page in self.pages[:2]: + if 'MDFileTag' in page.tags: + tags = page.tags + break + else: + return + result = {} + for code in range(33445, 33453): + name = TIFF.TAGS[code] + if name not in tags: + continue + result[name[2:]] = tags[name].value + return result + + @lazyattr + def andor_metadata(self): + """Return Andor tags as dict.""" + return self.pages[0].andor_tags + + @lazyattr + def epics_metadata(self): + """Return EPICS areaDetector tags as dict.""" + return 
self.pages[0].epics_tags + + @lazyattr + def tvips_metadata(self): + """Return TVIPS tag as dict.""" + if not self.is_tvips: + return + return self.pages[0].tags['TVIPS'].value + + @lazyattr + def metaseries_metadata(self): + """Return MetaSeries metadata from image description as dict.""" + if not self.is_metaseries: + return + return metaseries_description_metadata(self.pages[0].description) + + @lazyattr + def pilatus_metadata(self): + """Return Pilatus metadata from image description as dict.""" + if not self.is_pilatus: + return + return pilatus_description_metadata(self.pages[0].description) + + @lazyattr + def micromanager_metadata(self): + """Return consolidated MicroManager metadata as dict.""" + if not self.is_micromanager: + return + # from file header + result = read_micromanager_metadata(self._fh) + # from tag + result.update(self.pages[0].tags['MicroManagerMetadata'].value) + return result + + @lazyattr + def scanimage_metadata(self): + """Return ScanImage non-varying frame and ROI metadata as dict.""" + if not self.is_scanimage: + return + result = {} + try: + framedata, roidata = read_scanimage_metadata(self._fh) + result['FrameData'] = framedata + result.update(roidata) + except ValueError: + pass + # TODO: scanimage_artist_metadata + try: + result['Description'] = scanimage_description_metadata( + self.pages[0].description) + except Exception as e: + warnings.warn('scanimage_description_metadata failed: %s' % e) + return result + + @property + def geotiff_metadata(self): + """Return GeoTIFF metadata from first page as dict.""" + if not self.is_geotiff: + return + return self.pages[0].geotiff_tags + + +class TiffPages(object): + """Sequence of TIFF image file directories.""" + def __init__(self, parent): + """Initialize instance from file. Read first TiffPage from file. + + The file position must be at an offset to an offset to a TiffPage. 
+
+        """
+        self.parent = parent
+        self.pages = []  # cache of TiffPages, TiffFrames, or their offsets
+        self.complete = False  # True if offsets to all pages were read
+        self._tiffpage = TiffPage  # class for reading tiff pages
+        self._keyframe = None
+        self._cache = True
+
+        # read offset to first page
+        fh = parent.filehandle
+        self._nextpageoffset = fh.tell()
+        offset = struct.unpack(parent.offsetformat,
+                               fh.read(parent.offsetsize))[0]
+
+        if offset == 0:
+            # warnings.warn('file contains no pages')
+            self.complete = True
+            return
+        if offset >= fh.size:
+            warnings.warn('invalid page offset (%i)' % offset)
+            self.complete = True
+            return
+
+        # always read and cache first page
+        fh.seek(offset)
+        page = TiffPage(parent, index=0)
+        self.pages.append(page)
+        self._keyframe = page
+
+    @property
+    def cache(self):
+        """Return if pages/frames are currently being cached."""
+        return self._cache
+
+    @cache.setter
+    def cache(self, value):
+        """Enable or disable caching of pages/frames. Clear cache if False."""
+        value = bool(value)
+        if self._cache and not value:
+            self.clear()
+        self._cache = value
+
+    @property
+    def useframes(self):
+        """Return if currently using TiffFrame (True) or TiffPage (False)."""
+        return self._tiffpage == TiffFrame and TiffFrame is not TiffPage
+
+    @useframes.setter
+    def useframes(self, value):
+        """Set to use TiffFrame (True) or TiffPage (False)."""
+        self._tiffpage = TiffFrame if value else TiffPage
+
+    @property
+    def keyframe(self):
+        """Return index of current keyframe."""
+        return self._keyframe.index
+
+    @keyframe.setter
+    def keyframe(self, index):
+        """Set current keyframe.
Load TiffPage from file if necessary."""
        if self._keyframe.index == index:
            return  # already the current keyframe
        if self.complete or 0 <= index < len(self.pages):
            page = self.pages[index]
            if isinstance(page, TiffPage):
                # a fully parsed page is cached; use it directly
                self._keyframe = page
                return
            elif isinstance(page, TiffFrame):
                # remove existing frame
                self.pages[index] = page.offset
        # load TiffPage from file
        useframes = self.useframes
        self._tiffpage = TiffPage  # force reading a full page, not a frame
        self._keyframe = self[index]
        self.useframes = useframes  # restore previous frame/page mode

    @property
    def next_page_offset(self):
        """Return offset where offset to a new page can be stored."""
        if not self.complete:
            self._seek(-1)  # walk to last page so the offset is known
        return self._nextpageoffset

    def load(self):
        """Read all remaining pages from file."""
        fh = self.parent.filehandle
        keyframe = self._keyframe
        pages = self.pages
        if not self.complete:
            self._seek(-1)  # collect offsets of all remaining pages first
        for i, page in enumerate(pages):
            if isinstance(page, inttypes):
                # entry is still a bare file offset; read the page/frame
                fh.seek(page)
                page = self._tiffpage(self.parent, index=i, keyframe=keyframe)
                pages[i] = page

    def clear(self, fully=True):
        """Delete all but first page from cache.
Set keyframe to first page.""" + pages = self.pages + if not self._cache or len(pages) < 1: + return + self._keyframe = pages[0] + if fully: + # delete all but first TiffPage/TiffFrame + for i, page in enumerate(pages[1:]): + if not isinstance(page, inttypes): + pages[i+1] = page.offset + elif TiffFrame is not TiffPage: + # delete only TiffFrames + for i, page in enumerate(pages): + if isinstance(page, TiffFrame): + pages[i] = page.offset + + def _seek(self, index, maxpages=2**22): + """Seek file to offset of specified page.""" + pages = self.pages + if not pages: + return + + fh = self.parent.filehandle + if fh.closed: + raise RuntimeError('FileHandle is closed') + + if self.complete or 0 <= index < len(pages): + page = pages[index] + offset = page if isinstance(page, inttypes) else page.offset + fh.seek(offset) + return + + offsetformat = self.parent.offsetformat + offsetsize = self.parent.offsetsize + tagnoformat = self.parent.tagnoformat + tagnosize = self.parent.tagnosize + tagsize = self.parent.tagsize + unpack = struct.unpack + + page = pages[-1] + offset = page if isinstance(page, inttypes) else page.offset + + while len(pages) < maxpages: + # read offsets to pages from file until index is reached + fh.seek(offset) + # skip tags + try: + tagno = unpack(tagnoformat, fh.read(tagnosize))[0] + if tagno > 4096: + raise ValueError('suspicious number of tags') + except Exception: + warnings.warn('corrupted tag list at offset %i' % offset) + del pages[-1] + self.complete = True + break + self._nextpageoffset = offset + tagnosize + tagno * tagsize + fh.seek(self._nextpageoffset) + + # read offset to next page + offset = unpack(offsetformat, fh.read(offsetsize))[0] + if offset == 0: + self.complete = True + break + if offset >= fh.size: + warnings.warn('invalid page offset (%i)' % offset) + self.complete = True + break + + pages.append(offset) + if 0 <= index < len(pages): + break + + if index >= len(pages): + raise IndexError('list index out of range') + + page = 
pages[index] + fh.seek(page if isinstance(page, inttypes) else page.offset) + + def __bool__(self): + """Return True if file contains any pages.""" + return len(self.pages) > 0 + + def __len__(self): + """Return number of pages in file.""" + if not self.complete: + self._seek(-1) + return len(self.pages) + + def __getitem__(self, key): + """Return specified page(s) from cache or file.""" + pages = self.pages + if not pages: + raise IndexError('list index out of range') + if key == 0: + return pages[key] + + if isinstance(key, slice): + start, stop, _ = key.indices(2**31-1) + if not self.complete and max(stop, start) > len(pages): + self._seek(-1) + return [self[i] for i in range(*key.indices(len(pages)))] + + if self.complete and key >= len(pages): + raise IndexError('list index out of range') + + try: + page = pages[key] + except IndexError: + page = 0 + if not isinstance(page, inttypes): + return page + + self._seek(key) + page = self._tiffpage(self.parent, index=key, keyframe=self._keyframe) + if self._cache: + pages[key] = page + return page + + def __iter__(self): + """Return iterator over all pages.""" + i = 0 + while True: + try: + yield self[i] + i += 1 + except IndexError: + break + + +class TiffPage(object): + """TIFF image file directory (IFD). + + Attributes + ---------- + index : int + Index of page in file. + dtype : numpy.dtype or None + Data type (native byte order) of the image in IFD. + shape : tuple + Dimensions of the image in IFD. + axes : str + Axes label codes: + 'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane, + 'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda, + 'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime, + 'L' exposure, 'V' event, 'Q' unknown, '_' missing + tags : dict + Dictionary of tags in IFD. {tag.name: TiffTag} + colormap : numpy.ndarray + Color look up table, if exists. + + All attributes are read-only. 
+ + Notes + ----- + The internal, normalized '_shape' attribute is 6 dimensional: + + 0 : number planes/images (stk, ij). + 1 : planar samplesperpixel. + 2 : imagedepth Z (sgi). + 3 : imagelength Y. + 4 : imagewidth X. + 5 : contig samplesperpixel. + + """ + # default properties; will be updated from tags + imagewidth = 0 + imagelength = 0 + imagedepth = 1 + tilewidth = 0 + tilelength = 0 + tiledepth = 1 + bitspersample = 1 + samplesperpixel = 1 + sampleformat = 1 + rowsperstrip = 2**32-1 + compression = 1 + planarconfig = 1 + fillorder = 1 + photometric = 0 + predictor = 1 + extrasamples = 1 + colormap = None + software = '' + description = '' + description1 = '' + + def __init__(self, parent, index, keyframe=None): + """Initialize instance from file. + + The file handle position must be at offset to a valid IFD. + + """ + self.parent = parent + self.index = index + self.shape = () + self._shape = () + self.dtype = None + self._dtype = None + self.axes = '' + self.tags = {} + + self.dataoffsets = () + self.databytecounts = () + + # read TIFF IFD structure and its tags from file + fh = parent.filehandle + self.offset = fh.tell() # offset to this IFD + try: + tagno = struct.unpack(parent.tagnoformat, + fh.read(parent.tagnosize))[0] + if tagno > 4096: + raise ValueError('suspicious number of tags') + except Exception: + raise ValueError('corrupted tag list at offset %i' % self.offset) + + tagsize = parent.tagsize + data = fh.read(tagsize * tagno) + tags = self.tags + index = -tagsize + for _ in range(tagno): + index += tagsize + try: + tag = TiffTag(self.parent, data[index:index+tagsize]) + except TiffTag.Error as e: + warnings.warn(str(e)) + continue + tagname = tag.name + if tagname not in tags: + name = tagname + tags[name] = tag + else: + # some files contain multiple tags with same code + # e.g. 
MicroManager files contain two ImageDescription tags + i = 1 + while True: + name = '%s%i' % (tagname, i) + if name not in tags: + tags[name] = tag + break + name = TIFF.TAG_ATTRIBUTES.get(name, '') + if name: + if (name[:3] in 'sof des' and not isinstance(tag.value, str)): + pass # wrong string type for software, description + else: + setattr(self, name, tag.value) + + if not tags: + return # found in FIBICS + + # consolidate private tags; remove them from self.tags + if self.is_andor: + self.andor_tags + elif self.is_epics: + self.epics_tags + + if self.is_lsm or (self.index and self.parent.is_lsm): + # correct non standard LSM bitspersample tags + self.tags['BitsPerSample']._fix_lsm_bitspersample(self) + + if self.is_vista or (self.index and self.parent.is_vista): + # ISS Vista writes wrong ImageDepth tag + self.imagedepth = 1 + + if self.is_stk and 'UIC1tag' in tags and not tags['UIC1tag'].value: + # read UIC1tag now that plane count is known + uic1tag = tags['UIC1tag'] + fh.seek(uic1tag.valueoffset) + tags['UIC1tag'].value = read_uic1tag( + fh, self.parent.byteorder, uic1tag.dtype, + uic1tag.count, None, tags['UIC2tag'].count) + + if 'IJMetadata' in tags: + # decode IJMetadata tag + try: + tags['IJMetadata'].value = imagej_metadata( + tags['IJMetadata'].value, + tags['IJMetadataByteCounts'].value, + self.parent.byteorder) + except Exception as e: + warnings.warn(str(e)) + + if 'BitsPerSample' in tags: + tag = tags['BitsPerSample'] + if tag.count == 1: + self.bitspersample = tag.value + else: + # LSM might list more items than samplesperpixel + value = tag.value[:self.samplesperpixel] + if any((v-value[0] for v in value)): + self.bitspersample = value + else: + self.bitspersample = value[0] + + if 'SampleFormat' in tags: + tag = tags['SampleFormat'] + if tag.count == 1: + self.sampleformat = tag.value + else: + value = tag.value[:self.samplesperpixel] + if any((v-value[0] for v in value)): + self.sampleformat = value + else: + self.sampleformat = value[0] + + 
if 'ImageLength' in tags: + if 'RowsPerStrip' not in tags or tags['RowsPerStrip'].count > 1: + self.rowsperstrip = self.imagelength + # self.stripsperimage = int(math.floor( + # float(self.imagelength + self.rowsperstrip - 1) / + # self.rowsperstrip)) + + # determine dtype + dtype = self.sampleformat, self.bitspersample + dtype = TIFF.SAMPLE_DTYPES.get(dtype, None) + if dtype is not None: + dtype = numpy.dtype(dtype) + self.dtype = self._dtype = dtype + + # determine shape of data + imagelength = self.imagelength + imagewidth = self.imagewidth + imagedepth = self.imagedepth + samplesperpixel = self.samplesperpixel + + if self.is_stk: + assert self.imagedepth == 1 + uictag = tags['UIC2tag'].value + planes = tags['UIC2tag'].count + if self.planarconfig == 1: + self._shape = ( + planes, 1, 1, imagelength, imagewidth, samplesperpixel) + if samplesperpixel == 1: + self.shape = (planes, imagelength, imagewidth) + self.axes = 'YX' + else: + self.shape = ( + planes, imagelength, imagewidth, samplesperpixel) + self.axes = 'YXS' + else: + self._shape = ( + planes, samplesperpixel, 1, imagelength, imagewidth, 1) + if samplesperpixel == 1: + self.shape = (planes, imagelength, imagewidth) + self.axes = 'YX' + else: + self.shape = ( + planes, samplesperpixel, imagelength, imagewidth) + self.axes = 'SYX' + # detect type of series + if planes == 1: + self.shape = self.shape[1:] + elif numpy.all(uictag['ZDistance'] != 0): + self.axes = 'Z' + self.axes + elif numpy.all(numpy.diff(uictag['TimeCreated']) != 0): + self.axes = 'T' + self.axes + else: + self.axes = 'I' + self.axes + elif self.photometric == 2 or samplesperpixel > 1: # PHOTOMETRIC.RGB + if self.planarconfig == 1: + self._shape = ( + 1, 1, imagedepth, imagelength, imagewidth, samplesperpixel) + if imagedepth == 1: + self.shape = (imagelength, imagewidth, samplesperpixel) + self.axes = 'YXS' + else: + self.shape = ( + imagedepth, imagelength, imagewidth, samplesperpixel) + self.axes = 'ZYXS' + else: + self._shape = (1, 
samplesperpixel, imagedepth, + imagelength, imagewidth, 1) + if imagedepth == 1: + self.shape = (samplesperpixel, imagelength, imagewidth) + self.axes = 'SYX' + else: + self.shape = ( + samplesperpixel, imagedepth, imagelength, imagewidth) + self.axes = 'SZYX' + else: + self._shape = (1, 1, imagedepth, imagelength, imagewidth, 1) + if imagedepth == 1: + self.shape = (imagelength, imagewidth) + self.axes = 'YX' + else: + self.shape = (imagedepth, imagelength, imagewidth) + self.axes = 'ZYX' + + # dataoffsets and databytecounts + if 'TileOffsets' in tags: + self.dataoffsets = tags['TileOffsets'].value + elif 'StripOffsets' in tags: + self.dataoffsets = tags['StripOffsets'].value + else: + self.dataoffsets = (0,) + + if 'TileByteCounts' in tags: + self.databytecounts = tags['TileByteCounts'].value + elif 'StripByteCounts' in tags: + self.databytecounts = tags['StripByteCounts'].value + else: + self.databytecounts = ( + product(self.shape) * (self.bitspersample // 8),) + if self.compression != 1: + warnings.warn('required ByteCounts tag is missing') + + assert len(self.shape) == len(self.axes) + + def asarray(self, out=None, squeeze=True, lock=None, reopen=True, + maxsize=2**44, validate=True): + """Read image data from file and return as numpy array. + + Raise ValueError if format is unsupported. + + Parameters + ---------- + out : numpy.ndarray, str, or file-like object; optional + Buffer where image data will be saved. + If None (default), a new array will be created. + If numpy.ndarray, a writable array of compatible dtype and shape. + If 'memmap', directly memory-map the image data in the TIFF file + if possible; else create a memory-mapped array in a temporary file. + If str or open file, the file name or file object used to + create a memory-map to an array stored in a binary file on disk. + squeeze : bool + If True, all length-1 dimensions (except X and Y) are + squeezed out from the array. 
+ If False, the shape of the returned array might be different from + the page.shape. + lock : {RLock, NullContext} + A reentrant lock used to syncronize reads from file. + If None (default), the lock of the parent's filehandle is used. + reopen : bool + If True (default) and the parent file handle is closed, the file + is temporarily re-opened and closed if no exception occurs. + maxsize: int or None + Maximum size of data before a ValueError is raised. + Can be used to catch DOS. Default: 16 TB. + validate : bool + If True (default), validate various parameters. + If None, only validate parameters and return None. + + """ + self_ = self + self = self.keyframe # self or keyframe + + if not self._shape or product(self._shape) == 0: + return + + tags = self.tags + + if validate or validate is None: + if maxsize and product(self._shape) > maxsize: + raise ValueError('data are too large %s' % str(self._shape)) + if self.dtype is None: + raise ValueError('data type not supported: %s%i' % ( + self.sampleformat, self.bitspersample)) + if self.compression not in TIFF.DECOMPESSORS: + raise ValueError( + 'cannot decompress %s' % self.compression.name) + if 'SampleFormat' in tags: + tag = tags['SampleFormat'] + if tag.count != 1 and any((i-tag.value[0] for i in tag.value)): + raise ValueError( + 'sample formats do not match %s' % tag.value) + if self.is_chroma_subsampled and (self.compression != 7 or + self.planarconfig == 2): + raise NotImplementedError('chroma subsampling not supported') + if validate is None: + return + + fh = self_.parent.filehandle + lock = fh.lock if lock is None else lock + with lock: + closed = fh.closed + if closed: + if reopen: + fh.open() + else: + raise IOError('file handle is closed') + + dtype = self._dtype + shape = self._shape + imagewidth = self.imagewidth + imagelength = self.imagelength + imagedepth = self.imagedepth + bitspersample = self.bitspersample + typecode = self.parent.byteorder + dtype.char + lsb2msb = self.fillorder == 2 + 
offsets, bytecounts = self_.offsets_bytecounts + istiled = self.is_tiled + + if istiled: + tilewidth = self.tilewidth + tilelength = self.tilelength + tiledepth = self.tiledepth + tw = (imagewidth + tilewidth - 1) // tilewidth + tl = (imagelength + tilelength - 1) // tilelength + td = (imagedepth + tiledepth - 1) // tiledepth + shape = (shape[0], shape[1], + td*tiledepth, tl*tilelength, tw*tilewidth, shape[-1]) + tileshape = (tiledepth, tilelength, tilewidth, shape[-1]) + runlen = tilewidth + else: + runlen = imagewidth + + if self.planarconfig == 1: + runlen *= self.samplesperpixel + + if out == 'memmap' and self.is_memmappable: + with lock: + result = fh.memmap_array(typecode, shape, offset=offsets[0]) + elif self.is_contiguous: + if out is not None: + out = create_output(out, shape, dtype) + with lock: + fh.seek(offsets[0]) + result = fh.read_array(typecode, product(shape), out=out) + if out is None and not result.dtype.isnative: + # swap byte order and dtype without copy + result.byteswap(True) + result = result.newbyteorder() + if lsb2msb: + reverse_bitorder(result) + else: + result = create_output(out, shape, dtype) + + decompress = TIFF.DECOMPESSORS[self.compression] + + if self.compression == 7: # COMPRESSION.JPEG + if bitspersample not in (8, 12): + raise ValueError( + 'unsupported JPEG precision %i' % bitspersample) + if 'JPEGTables' in tags: + table = tags['JPEGTables'].value + else: + table = b'' + unpack = identityfunc + colorspace = TIFF.PHOTOMETRIC(self.photometric).name + + def decompress(x, func=decompress, table=table, + bitspersample=bitspersample, + colorspace=colorspace): + return func(x, table, bitspersample, + colorspace).reshape(-1) + + elif bitspersample in (8, 16, 32, 64, 128): + if (bitspersample * runlen) % 8: + raise ValueError('data and sample size mismatch') + + def unpack(x, typecode=typecode): + if self.predictor == 3: # PREDICTOR.FLOATINGPOINT + # the floating point horizontal differencing decoder + # needs the raw byte order + 
typecode = dtype.char + try: + # read only numpy array + return numpy.frombuffer(x, typecode) + except ValueError: + # strips may be missing EOI + # warnings.warn('unpack: %s' % e) + xlen = ((len(x) // (bitspersample // 8)) * + (bitspersample // 8)) + return numpy.frombuffer(x[:xlen], typecode) + + elif isinstance(bitspersample, tuple): + def unpack(x, typecode=typecode, bitspersample=bitspersample): + return unpack_rgb(x, typecode, bitspersample) + else: + def unpack(x, typecode=typecode, bitspersample=bitspersample, + runlen=runlen): + return unpack_ints(x, typecode, bitspersample, runlen) + + if istiled: + writable = None + tw, tl, td, pl = 0, 0, 0, 0 + for tile in buffered_read(fh, lock, offsets, bytecounts): + if lsb2msb: + tile = reverse_bitorder(tile) + tile = decompress(tile) + tile = unpack(tile) + try: + tile.shape = tileshape + except ValueError: + # incomplete tiles; see gdal issue #1179 + warnings.warn('invalid tile data') + t = numpy.zeros(tileshape, dtype).reshape(-1) + s = min(tile.size, t.size) + t[:s] = tile[:s] + tile = t.reshape(tileshape) + if self.predictor == 2: # PREDICTOR.HORIZONTAL + if writable is None: + writable = tile.flags['WRITEABLE'] + if writable: + numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile) + else: + tile = numpy.cumsum(tile, axis=-2, dtype=dtype) + elif self.predictor == 3: # PREDICTOR.FLOATINGPOINT + raise NotImplementedError() + result[0, pl, td:td+tiledepth, + tl:tl+tilelength, tw:tw+tilewidth, :] = tile + del tile + tw += tilewidth + if tw >= shape[4]: + tw, tl = 0, tl + tilelength + if tl >= shape[3]: + tl, td = 0, td + tiledepth + if td >= shape[2]: + td, pl = 0, pl + 1 + result = result[..., :imagedepth, :imagelength, :imagewidth, :] + else: + strip_size = self.rowsperstrip * self.imagewidth + if self.planarconfig == 1: + strip_size *= self.samplesperpixel + result = result.reshape(-1) + index = 0 + for strip in buffered_read(fh, lock, offsets, bytecounts): + if lsb2msb: + strip = reverse_bitorder(strip) + strip = 
decompress(strip) + strip = unpack(strip) + size = min(result.size, strip.size, strip_size, + result.size - index) + result[index:index+size] = strip[:size] + del strip + index += size + + result.shape = self._shape + + if self.predictor != 1 and not (istiled and not self.is_contiguous): + if self.parent.is_lsm and self.compression == 1: + pass # work around bug in LSM510 software + elif self.predictor == 2: # PREDICTOR.HORIZONTAL + numpy.cumsum(result, axis=-2, dtype=dtype, out=result) + elif self.predictor == 3: # PREDICTOR.FLOATINGPOINT + result = decode_floats(result) + + if squeeze: + try: + result.shape = self.shape + except ValueError: + warnings.warn('failed to reshape from %s to %s' % ( + str(result.shape), str(self.shape))) + + if closed: + # TODO: file should remain open if an exception occurred above + fh.close() + return result + + def asrgb(self, uint8=False, alpha=None, colormap=None, + dmin=None, dmax=None, *args, **kwargs): + """Return image data as RGB(A). + + Work in progress. 
+ + """ + data = self.asarray(*args, **kwargs) + self = self.keyframe # self or keyframe + photometric = self.photometric + PHOTOMETRIC = TIFF.PHOTOMETRIC + + if photometric == PHOTOMETRIC.PALETTE: + colormap = self.colormap + if (colormap.shape[1] < 2**self.bitspersample or + self.dtype.char not in 'BH'): + raise ValueError('cannot apply colormap') + if uint8: + if colormap.max() > 255: + colormap >>= 8 + colormap = colormap.astype('uint8') + if 'S' in self.axes: + data = data[..., 0] if self.planarconfig == 1 else data[0] + data = apply_colormap(data, colormap) + + elif photometric == PHOTOMETRIC.RGB: + if 'ExtraSamples' in self.tags: + if alpha is None: + alpha = TIFF.EXTRASAMPLE + extrasamples = self.extrasamples + if self.tags['ExtraSamples'].count == 1: + extrasamples = (extrasamples,) + for i, exs in enumerate(extrasamples): + if exs in alpha: + if self.planarconfig == 1: + data = data[..., [0, 1, 2, 3+i]] + else: + data = data[:, [0, 1, 2, 3+i]] + break + else: + if self.planarconfig == 1: + data = data[..., :3] + else: + data = data[:, :3] + # TODO: convert to uint8? + + elif photometric == PHOTOMETRIC.MINISBLACK: + raise NotImplementedError() + elif photometric == PHOTOMETRIC.MINISWHITE: + raise NotImplementedError() + elif photometric == PHOTOMETRIC.SEPARATED: + raise NotImplementedError() + else: + raise NotImplementedError() + return data + + def aspage(self): + return self + + @property + def keyframe(self): + return self + + @keyframe.setter + def keyframe(self, index): + return + + @lazyattr + def offsets_bytecounts(self): + """Return simplified offsets and bytecounts.""" + if self.is_contiguous: + offset, byte_count = self.is_contiguous + return [offset], [byte_count] + return clean_offsets_counts(self.dataoffsets, self.databytecounts) + + @lazyattr + def is_contiguous(self): + """Return offset and size of contiguous data, else None. + + Excludes prediction and fill_order. 
        """
        # contiguity requires no compression and whole-byte sample sizes
        if (self.compression != 1
                or self.bitspersample not in (8, 16, 32, 64)):
            return
        if 'TileWidth' in self.tags:
            # tiled image: contiguous only if tiles exactly tile the image
            # and tile dimensions are multiples of 16
            if (self.imagewidth != self.tilewidth or
                    self.imagelength % self.tilelength or
                    self.tilewidth % 16 or self.tilelength % 16):
                return
            if ('ImageDepth' in self.tags and 'TileDepth' in self.tags and
                    (self.imagelength != self.tilelength or
                     self.imagedepth % self.tiledepth)):
                return

        offsets = self.dataoffsets
        bytecounts = self.databytecounts
        if len(offsets) == 1:
            # a single strip/tile is trivially contiguous
            return offsets[0], bytecounts[0]
        # contiguous if every segment ends exactly where the next one starts
        if self.is_stk or all((offsets[i] + bytecounts[i] == offsets[i+1] or
                               bytecounts[i+1] == 0)  # no data/ignore offset
                              for i in range(len(offsets)-1)):
            return offsets[0], sum(bytecounts)

    @lazyattr
    def is_final(self):
        """Return if page's image data are stored in final form.

        Excludes byte-swapping.

        """
        # final: contiguous with default fill order and no prediction or
        # chroma subsampling to undo
        return (self.is_contiguous and self.fillorder == 1 and
                self.predictor == 1 and not self.is_chroma_subsampled)

    @lazyattr
    def is_memmappable(self):
        """Return if page's image data in file can be memory-mapped."""
        return (self.parent.filehandle.is_file and self.is_final and
                # (self.bitspersample == 8 or self.parent.isnative) and
                self.is_contiguous[0] % self.dtype.itemsize == 0)  # aligned?
+ + def __str__(self, detail=0, width=79): + """Return string containing information about page.""" + if self.keyframe != self: + return TiffFrame.__str__(self, detail) + attr = '' + for name in ('memmappable', 'final', 'contiguous'): + attr = getattr(self, 'is_'+name) + if attr: + attr = name.upper() + break + info = ' '.join(s for s in ( + 'x'.join(str(i) for i in self.shape), + '%s%s' % (TIFF.SAMPLEFORMAT(self.sampleformat).name, + self.bitspersample), + '|'.join(i for i in ( + TIFF.PHOTOMETRIC(self.photometric).name, + 'TILED' if self.is_tiled else '', + self.compression.name if self.compression != 1 else '', + self.planarconfig.name if self.planarconfig != 1 else '', + self.predictor.name if self.predictor != 1 else '', + self.fillorder.name if self.fillorder != 1 else '') + if i), + attr, + '|'.join((f.upper() for f in self.flags)) + ) if s) + info = 'TiffPage %i @%i %s' % (self.index, self.offset, info) + if detail <= 0: + return info + info = [info] + tags = self.tags + tlines = [] + vlines = [] + for tag in sorted(tags.values(), key=lambda x: x.code): + value = tag.__str__(width=width+1) + tlines.append(value[:width].strip()) + if detail > 1 and len(value) > width: + name = tag.name.upper() + if detail <= 2 and ('COUNTS' in name or 'OFFSETS' in name): + value = pformat(tag.value, width=width, height=detail*4) + else: + value = pformat(tag.value, width=width, height=detail*12) + vlines.append('%s\n%s' % (tag.name, value)) + info.append('\n'.join(tlines)) + if detail > 1: + info.append('\n\n'.join(vlines)) + if detail > 3: + try: + info.append('DATA\n%s' % pformat( + self.asarray(), width=width, height=detail*8)) + except Exception: + pass + return '\n\n'.join(info) + + @lazyattr + def flags(self): + """Return set of flags.""" + return set((name.lower() for name in sorted(TIFF.FILE_FLAGS) + if getattr(self, 'is_' + name))) + + @property + def ndim(self): + """Return number of array dimensions.""" + return len(self.shape) + + @property + def size(self): + 
"""Return number of elements in array.""" + return product(self.shape) + + @lazyattr + def andor_tags(self): + """Return consolidated metadata from Andor tags as dict. + + Remove Andor tags from self.tags. + + """ + if not self.is_andor: + return + tags = self.tags + result = {'Id': tags['AndorId'].value} + for tag in list(self.tags.values()): + code = tag.code + if not 4864 < code < 5031: + continue + value = tag.value + name = tag.name[5:] if len(tag.name) > 5 else tag.name + result[name] = value + del tags[tag.name] + return result + + @lazyattr + def epics_tags(self): + """Return consolidated metadata from EPICS areaDetector tags as dict. + + Remove areaDetector tags from self.tags. + + """ + if not self.is_epics: + return + result = {} + tags = self.tags + for tag in list(self.tags.values()): + code = tag.code + if not 65000 <= code < 65500: + continue + value = tag.value + if code == 65000: + result['timeStamp'] = datetime.datetime.fromtimestamp( + float(value)) + elif code == 65001: + result['uniqueID'] = int(value) + elif code == 65002: + result['epicsTSSec'] = int(value) + elif code == 65003: + result['epicsTSNsec'] = int(value) + else: + key, value = value.split(':', 1) + result[key] = astype(value) + del tags[tag.name] + return result + + @lazyattr + def geotiff_tags(self): + """Return consolidated metadata from GeoTIFF tags as dict.""" + if not self.is_geotiff: + return + tags = self.tags + + gkd = tags['GeoKeyDirectoryTag'].value + if gkd[0] != 1: + warnings.warn('invalid GeoKeyDirectoryTag') + return {} + + result = { + 'KeyDirectoryVersion': gkd[0], + 'KeyRevision': gkd[1], + 'KeyRevisionMinor': gkd[2], + # 'NumberOfKeys': gkd[3], + } + # deltags = ['GeoKeyDirectoryTag'] + geokeys = TIFF.GEO_KEYS + geocodes = TIFF.GEO_CODES + for index in range(gkd[3]): + keyid, tagid, count, offset = gkd[4 + index * 4: index * 4 + 8] + keyid = geokeys.get(keyid, keyid) + if tagid == 0: + value = offset + else: + tagname = TIFF.TAGS[tagid] + # deltags.append(tagname) 
+ value = tags[tagname].value[offset: offset + count] + if tagid == 34737 and count > 1 and value[-1] == '|': + value = value[:-1] + value = value if count > 1 else value[0] + if keyid in geocodes: + try: + value = geocodes[keyid](value) + except Exception: + pass + result[keyid] = value + + if 'IntergraphMatrixTag' in tags: + value = tags['IntergraphMatrixTag'].value + value = numpy.array(value) + if len(value) == 16: + value = value.reshape((4, 4)).tolist() + result['IntergraphMatrix'] = value + if 'ModelPixelScaleTag' in tags: + value = numpy.array(tags['ModelPixelScaleTag'].value).tolist() + result['ModelPixelScale'] = value + if 'ModelTiepointTag' in tags: + value = tags['ModelTiepointTag'].value + value = numpy.array(value).reshape((-1, 6)).squeeze().tolist() + result['ModelTiepoint'] = value + if 'ModelTransformationTag' in tags: + value = tags['ModelTransformationTag'].value + value = numpy.array(value).reshape((4, 4)).tolist() + result['ModelTransformation'] = value + elif False: + # if 'ModelPixelScaleTag' in tags and 'ModelTiepointTag' in tags: + sx, sy, sz = tags['ModelPixelScaleTag'].value + tiepoints = tags['ModelTiepointTag'].value + transforms = [] + for tp in range(0, len(tiepoints), 6): + i, j, k, x, y, z = tiepoints[tp:tp+6] + transforms.append([ + [sx, 0.0, 0.0, x - i * sx], + [0.0, -sy, 0.0, y + j * sy], + [0.0, 0.0, sz, z - k * sz], + [0.0, 0.0, 0.0, 1.0]]) + if len(tiepoints) == 6: + transforms = transforms[0] + result['ModelTransformation'] = transforms + + if 'RPCCoefficientTag' in tags: + rpcc = tags['RPCCoefficientTag'].value + result['RPCCoefficient'] = { + 'ERR_BIAS': rpcc[0], + 'ERR_RAND': rpcc[1], + 'LINE_OFF': rpcc[2], + 'SAMP_OFF': rpcc[3], + 'LAT_OFF': rpcc[4], + 'LONG_OFF': rpcc[5], + 'HEIGHT_OFF': rpcc[6], + 'LINE_SCALE': rpcc[7], + 'SAMP_SCALE': rpcc[8], + 'LAT_SCALE': rpcc[9], + 'LONG_SCALE': rpcc[10], + 'HEIGHT_SCALE': rpcc[11], + 'LINE_NUM_COEFF': rpcc[12:33], + 'LINE_DEN_COEFF ': rpcc[33:53], + 'SAMP_NUM_COEFF': rpcc[53:73], 
+ 'SAMP_DEN_COEFF': rpcc[73:]} + + return result + + @property + def is_tiled(self): + """Page contains tiled image.""" + return 'TileWidth' in self.tags + + @property + def is_reduced(self): + """Page is reduced image of another image.""" + return ('NewSubfileType' in self.tags and + self.tags['NewSubfileType'].value & 1) + + @property + def is_chroma_subsampled(self): + """Page contains chroma subsampled image.""" + return ('YCbCrSubSampling' in self.tags and + self.tags['YCbCrSubSampling'].value != (1, 1)) + + @lazyattr + def is_imagej(self): + """Return ImageJ description if exists, else None.""" + for description in (self.description, self.description1): + if not description: + return + if description[:7] == 'ImageJ=': + return description + + @lazyattr + def is_shaped(self): + """Return description containing array shape if exists, else None.""" + for description in (self.description, self.description1): + if not description: + return + if description[:1] == '{' and '"shape":' in description: + return description + if description[:6] == 'shape=': + return description + + @property + def is_mdgel(self): + """Page contains MDFileTag tag.""" + return 'MDFileTag' in self.tags + + @property + def is_mediacy(self): + """Page contains Media Cybernetics Id tag.""" + return ('MC_Id' in self.tags and + self.tags['MC_Id'].value[:7] == b'MC TIFF') + + @property + def is_stk(self): + """Page contains UIC2Tag tag.""" + return 'UIC2tag' in self.tags + + @property + def is_lsm(self): + """Page contains CZ_LSMINFO tag.""" + return 'CZ_LSMINFO' in self.tags + + @property + def is_fluoview(self): + """Page contains FluoView MM_STAMP tag.""" + return 'MM_Stamp' in self.tags + + @property + def is_nih(self): + """Page contains NIH image header.""" + return 'NIHImageHeader' in self.tags + + @property + def is_sgi(self): + """Page contains SGI image and tile depth tags.""" + return 'ImageDepth' in self.tags and 'TileDepth' in self.tags + + @property + def is_vista(self): + 
"""Software tag is 'ISS Vista'.""" + return self.software == 'ISS Vista' + + @property + def is_metaseries(self): + """Page contains MDS MetaSeries metadata in ImageDescription tag.""" + if self.index > 1 or self.software != 'MetaSeries': + return False + d = self.description + return d.startswith('') and d.endswith('') + + @property + def is_ome(self): + """Page contains OME-XML in ImageDescription tag.""" + if self.index > 1 or not self.description: + return False + d = self.description + return d[:14] == '' + + @property + def is_scn(self): + """Page contains Leica SCN XML in ImageDescription tag.""" + if self.index > 1 or not self.description: + return False + d = self.description + return d[:14] == '' + + @property + def is_micromanager(self): + """Page contains Micro-Manager metadata.""" + return 'MicroManagerMetadata' in self.tags + + @property + def is_andor(self): + """Page contains Andor Technology tags.""" + return 'AndorId' in self.tags + + @property + def is_pilatus(self): + """Page contains Pilatus tags.""" + return (self.software[:8] == 'TVX TIFF' and + self.description[:2] == '# ') + + @property + def is_epics(self): + """Page contains EPICS areaDetector tags.""" + return (self.description == 'EPICS areaDetector' or + self.software == 'EPICS areaDetector') + + @property + def is_tvips(self): + """Page contains TVIPS metadata.""" + return 'TVIPS' in self.tags + + @property + def is_fei(self): + """Page contains SFEG or HELIOS metadata.""" + return 'FEI_SFEG' in self.tags or 'FEI_HELIOS' in self.tags + + @property + def is_sem(self): + """Page contains Zeiss SEM metadata.""" + return 'CZ_SEM' in self.tags + + @property + def is_svs(self): + """Page contains Aperio metadata.""" + return self.description[:20] == 'Aperio Image Library' + + @property + def is_scanimage(self): + """Page contains ScanImage metadata.""" + return (self.description[:12] == 'state.config' or + self.software[:22] == 'SI.LINE_FORMAT_VERSION' or + 'scanimage.SI.' 
in self.description[-256:]) + + @property + def is_qptiff(self): + """Page contains PerkinElmer tissue images metadata.""" + # The ImageDescription tag contains XML with a top-level + # element + return self.software[:15] == 'PerkinElmer-QPI' + + @property + def is_geotiff(self): + """Page contains GeoTIFF metadata.""" + return 'GeoKeyDirectoryTag' in self.tags + + +class TiffFrame(object): + """Lightweight TIFF image file directory (IFD). + + Only a limited number of tag values are read from file, e.g. StripOffsets, + and StripByteCounts. Other tag values are assumed to be identical with a + specified TiffPage instance, the keyframe. + + TiffFrame is intended to reduce resource usage and speed up reading data + from file, not for introspection of metadata. + + Not compatible with Python 2. + + """ + __slots__ = ('keyframe', 'parent', 'index', 'offset', + 'dataoffsets', 'databytecounts') + + is_mdgel = False + tags = {} + + def __init__(self, parent, index, keyframe): + """Read specified tags from file. + + The file handle position must be at the offset to a valid IFD. 
+ + """ + self.keyframe = keyframe + self.parent = parent + self.index = index + self.dataoffsets = None + self.databytecounts = None + + unpack = struct.unpack + fh = parent.filehandle + self.offset = fh.tell() + try: + tagno = unpack(parent.tagnoformat, fh.read(parent.tagnosize))[0] + if tagno > 4096: + raise ValueError('suspicious number of tags') + except Exception: + raise ValueError('corrupted page list at offset %i' % self.offset) + + # tags = {} + tagcodes = {273, 279, 324, 325} # TIFF.FRAME_TAGS + tagsize = parent.tagsize + codeformat = parent.tagformat1[:2] + + data = fh.read(tagsize * tagno) + index = -tagsize + for _ in range(tagno): + index += tagsize + code = unpack(codeformat, data[index:index+2])[0] + if code not in tagcodes: + continue + try: + tag = TiffTag(parent, data[index:index+tagsize]) + except TiffTag.Error as e: + warnings.warn(str(e)) + continue + if code == 273 or code == 324: + setattr(self, 'dataoffsets', tag.value) + elif code == 279 or code == 325: + setattr(self, 'databytecounts', tag.value) + # elif code == 270: + # tagname = tag.name + # if tagname not in tags: + # tags[tagname] = bytes2str(tag.value) + # elif 'ImageDescription1' not in tags: + # tags['ImageDescription1'] = bytes2str(tag.value) + # else: + # tags[tag.name] = tag.value + + def aspage(self): + """Return TiffPage from file.""" + self.parent.filehandle.seek(self.offset) + return TiffPage(self.parent, index=self.index, keyframe=None) + + def asarray(self, *args, **kwargs): + """Read image data from file and return as numpy array.""" + # TODO: fix TypeError on Python 2 + # "TypeError: unbound method asarray() must be called with TiffPage + # instance as first argument (got TiffFrame instance instead)" + kwargs['validate'] = False + return TiffPage.asarray(self, *args, **kwargs) + + def asrgb(self, *args, **kwargs): + """Read image data from file and return RGB image as numpy array.""" + kwargs['validate'] = False + return TiffPage.asrgb(self, *args, **kwargs) + + 
@property + def offsets_bytecounts(self): + """Return simplified offsets and bytecounts.""" + if self.keyframe.is_contiguous: + return self.dataoffsets[:1], self.keyframe.is_contiguous[1:] + return clean_offsets_counts(self.dataoffsets, self.databytecounts) + + @property + def is_contiguous(self): + """Return offset and size of contiguous data, else None.""" + if self.keyframe.is_contiguous: + return self.dataoffsets[0], self.keyframe.is_contiguous[1] + + @property + def is_memmappable(self): + """Return if page's image data in file can be memory-mapped.""" + return self.keyframe.is_memmappable + + def __getattr__(self, name): + """Return attribute from keyframe.""" + if name in TIFF.FRAME_ATTRS: + return getattr(self.keyframe, name) + # this error could be raised because an AttributeError was + # raised inside a @property function + raise AttributeError("'%s' object has no attribute '%s'" % + (self.__class__.__name__, name)) + + def __str__(self, detail=0): + """Return string containing information about frame.""" + info = ' '.join(s for s in ( + 'x'.join(str(i) for i in self.shape), + str(self.dtype))) + return 'TiffFrame %i @%i %s' % (self.index, self.offset, info) + + +class TiffTag(object): + """TIFF tag structure. + + Attributes + ---------- + name : string + Name of tag. + code : int + Decimal code of tag. + dtype : str + Datatype of tag data. One of TIFF DATA_FORMATS. + count : int + Number of values. + value : various types + Tag data as Python object. + ImageSourceData : int + Location of value in file. + + All attributes are read-only. 
+ + """ + __slots__ = ('code', 'count', 'dtype', 'value', 'valueoffset') + + class Error(Exception): + pass + + def __init__(self, parent, tagheader, **kwargs): + """Initialize instance from tag header.""" + fh = parent.filehandle + byteorder = parent.byteorder + unpack = struct.unpack + offsetsize = parent.offsetsize + + self.valueoffset = fh.tell() + offsetsize + 4 + code, type_ = unpack(parent.tagformat1, tagheader[:4]) + count, value = unpack(parent.tagformat2, tagheader[4:]) + + try: + dtype = TIFF.DATA_FORMATS[type_] + except KeyError: + raise TiffTag.Error('unknown tag data type %i' % type_) + + fmt = '%s%i%s' % (byteorder, count * int(dtype[0]), dtype[1]) + size = struct.calcsize(fmt) + if size > offsetsize or code in TIFF.TAG_READERS: + self.valueoffset = offset = unpack(parent.offsetformat, value)[0] + if offset < 8 or offset > fh.size - size: + raise TiffTag.Error('invalid tag value offset') + # if offset % 2: + # warnings.warn('tag value does not begin on word boundary') + fh.seek(offset) + if code in TIFF.TAG_READERS: + readfunc = TIFF.TAG_READERS[code] + value = readfunc(fh, byteorder, dtype, count, offsetsize) + elif type_ == 7 or (count > 1 and dtype[-1] == 'B'): + value = read_bytes(fh, byteorder, dtype, count, offsetsize) + elif code in TIFF.TAGS or dtype[-1] == 's': + value = unpack(fmt, fh.read(size)) + else: + value = read_numpy(fh, byteorder, dtype, count, offsetsize) + elif dtype[-1] == 'B' or type_ == 7: + value = value[:size] + else: + value = unpack(fmt, value[:size]) + + process = (code not in TIFF.TAG_READERS and code not in TIFF.TAG_TUPLE + and type_ != 7) + if process and dtype[-1] == 's' and isinstance(value[0], bytes): + # TIFF ASCII fields can contain multiple strings, + # each terminated with a NUL + value = value[0] + try: + value = bytes2str(stripascii(value).strip()) + except UnicodeDecodeError: + warnings.warn('tag %i: coercing invalid ASCII to bytes' % code) + dtype = '1B' + else: + if code in TIFF.TAG_ENUM: + t = 
TIFF.TAG_ENUM[code] + try: + value = tuple(t(v) for v in value) + except ValueError as e: + warnings.warn(str(e)) + if process: + if len(value) == 1: + value = value[0] + + self.code = code + self.dtype = dtype + self.count = count + self.value = value + + @property + def name(self): + return TIFF.TAGS.get(self.code, str(self.code)) + + def _fix_lsm_bitspersample(self, parent): + """Correct LSM bitspersample tag. + + Old LSM writers may use a separate region for two 16-bit values, + although they fit into the tag value element of the tag. + + """ + if self.code == 258 and self.count == 2: + # TODO: test this case; need example file + warnings.warn('correcting LSM bitspersample tag') + tof = parent.offsetformat[parent.offsetsize] + self.valueoffset = struct.unpack(tof, self._value)[0] + parent.filehandle.seek(self.valueoffset) + self.value = struct.unpack('>> # read image stack from sequence of TIFF files + >>> imsave('temp_C001T001.tif', numpy.random.rand(64, 64)) + >>> imsave('temp_C001T002.tif', numpy.random.rand(64, 64)) + >>> tifs = TiffSequence('temp_C001*.tif') + >>> tifs.shape + (1, 2) + >>> tifs.axes + 'CT' + >>> data = tifs.asarray() + >>> data.shape + (1, 2, 64, 64) + + """ + _patterns = { + 'axes': r""" + # matches Olympus OIF and Leica TIFF series + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4})) + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))? + """} + + class ParseError(Exception): + pass + + def __init__(self, files, imread=TiffFile, pattern='axes', + *args, **kwargs): + """Initialize instance from multiple files. + + Parameters + ---------- + files : str, pathlib.Path, or sequence thereof + Glob pattern or sequence of file names. + Binary streams are not supported. 
+ imread : function or class + Image read function or class with asarray function returning numpy + array from single file. + pattern : str + Regular expression pattern that matches axes names and sequence + indices in file names. + By default, the pattern matches Olympus OIF and Leica TIFF series. + + """ + if isinstance(files, pathlib.Path): + files = str(files) + if isinstance(files, basestring): + files = natural_sorted(glob.glob(files)) + files = list(files) + if not files: + raise ValueError('no files found') + if isinstance(files[0], pathlib.Path): + files = [str(pathlib.Path(f)) for f in files] + elif not isinstance(files[0], basestring): + raise ValueError('not a file name') + self.files = files + + if hasattr(imread, 'asarray'): + # redefine imread + _imread = imread + + def imread(fname, *args, **kwargs): + with _imread(fname) as im: + return im.asarray(*args, **kwargs) + + self.imread = imread + + self.pattern = self._patterns.get(pattern, pattern) + try: + self._parse() + if not self.axes: + self.axes = 'I' + except self.ParseError: + self.axes = 'I' + self.shape = (len(files),) + self._startindex = (0,) + self._indices = tuple((i,) for i in range(len(files))) + + def __str__(self): + """Return string with information about image sequence.""" + return '\n'.join([ + self.files[0], + ' size: %i' % len(self.files), + ' axes: %s' % self.axes, + ' shape: %s' % str(self.shape)]) + + def __len__(self): + return len(self.files) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + pass + + def asarray(self, out=None, *args, **kwargs): + """Read image data from all files and return as numpy array. + + The args and kwargs parameters are passed to the imread function. + + Raise IndexError or ValueError if image shapes do not match. 
+ + """ + im = self.imread(self.files[0], *args, **kwargs) + shape = self.shape + im.shape + result = create_output(out, shape, dtype=im.dtype) + result = result.reshape(-1, *im.shape) + for index, fname in zip(self._indices, self.files): + index = [i-j for i, j in zip(index, self._startindex)] + index = numpy.ravel_multi_index(index, self.shape) + im = self.imread(fname, *args, **kwargs) + result[index] = im + result.shape = shape + return result + + def _parse(self): + """Get axes and shape from file names.""" + if not self.pattern: + raise self.ParseError('invalid pattern') + pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE) + matches = pattern.findall(self.files[0]) + if not matches: + raise self.ParseError('pattern does not match file names') + matches = matches[-1] + if len(matches) % 2: + raise self.ParseError('pattern does not match axis name and index') + axes = ''.join(m for m in matches[::2] if m) + if not axes: + raise self.ParseError('pattern does not match file names') + + indices = [] + for fname in self.files: + matches = pattern.findall(fname)[-1] + if axes != ''.join(m for m in matches[::2] if m): + raise ValueError('axes do not match within the image sequence') + indices.append([int(m) for m in matches[1::2] if m]) + shape = tuple(numpy.max(indices, axis=0)) + startindex = tuple(numpy.min(indices, axis=0)) + shape = tuple(i-j+1 for i, j in zip(shape, startindex)) + if product(shape) != len(self.files): + warnings.warn('files are missing. Missing data are zeroed') + + self.axes = axes.upper() + self.shape = shape + self._indices = indices + self._startindex = startindex + + +class FileHandle(object): + """Binary file handle. + + A limited, special purpose file handler that can: + + * handle embedded files (for CZI within CZI files) + * re-open closed files (for multi-file formats, such as OME-TIFF) + * read and write numpy arrays and records from file like objects + + Only 'rb' and 'wb' modes are supported. 
Concurrently reading and writing + of the same stream is untested. + + When initialized from another file handle, do not use it unless this + FileHandle is closed. + + Attributes + ---------- + name : str + Name of the file. + path : str + Absolute path to file. + size : int + Size of file in bytes. + is_file : bool + If True, file has a filno and can be memory-mapped. + + All attributes are read-only. + + """ + __slots__ = ('_fh', '_file', '_mode', '_name', '_dir', '_lock', + '_offset', '_size', '_close', 'is_file') + + def __init__(self, file, mode='rb', name=None, offset=None, size=None): + """Initialize file handle from file name or another file handle. + + Parameters + ---------- + file : str, pathlib.Path, binary stream, or FileHandle + File name or seekable binary stream, such as an open file + or BytesIO. + mode : str + File open mode in case 'file' is a file name. Must be 'rb' or 'wb'. + name : str + Optional name of file in case 'file' is a binary stream. + offset : int + Optional start position of embedded file. By default, this is + the current file position. + size : int + Optional size of embedded file. By default, this is the number + of bytes from the 'offset' to the end of the file. 
+ + """ + self._file = file + self._fh = None + self._mode = mode + self._name = name + self._dir = '' + self._offset = offset + self._size = size + self._close = True + self.is_file = False + self._lock = NullContext() + self.open() + + def open(self): + """Open or re-open file.""" + if self._fh: + return # file is open + + if isinstance(self._file, pathlib.Path): + self._file = str(self._file) + if isinstance(self._file, basestring): + # file name + self._file = os.path.realpath(self._file) + self._dir, self._name = os.path.split(self._file) + self._fh = open(self._file, self._mode) + self._close = True + if self._offset is None: + self._offset = 0 + elif isinstance(self._file, FileHandle): + # FileHandle + self._fh = self._file._fh + if self._offset is None: + self._offset = 0 + self._offset += self._file._offset + self._close = False + if not self._name: + if self._offset: + name, ext = os.path.splitext(self._file._name) + self._name = '%s@%i%s' % (name, self._offset, ext) + else: + self._name = self._file._name + if self._mode and self._mode != self._file._mode: + raise ValueError('FileHandle has wrong mode') + self._mode = self._file._mode + self._dir = self._file._dir + elif hasattr(self._file, 'seek'): + # binary stream: open file, BytesIO + try: + self._file.tell() + except Exception: + raise ValueError('binary stream is not seekable') + self._fh = self._file + if self._offset is None: + self._offset = self._file.tell() + self._close = False + if not self._name: + try: + self._dir, self._name = os.path.split(self._fh.name) + except AttributeError: + self._name = 'Unnamed binary stream' + try: + self._mode = self._fh.mode + except AttributeError: + pass + else: + raise ValueError('The first parameter must be a file name, ' + 'seekable binary stream, or FileHandle') + + if self._offset: + self._fh.seek(self._offset) + + if self._size is None: + pos = self._fh.tell() + self._fh.seek(self._offset, 2) + self._size = self._fh.tell() + self._fh.seek(pos) + + try: 
+ self._fh.fileno() + self.is_file = True + except Exception: + self.is_file = False + + def read(self, size=-1): + """Read 'size' bytes from file, or until EOF is reached.""" + if size < 0 and self._offset: + size = self._size + return self._fh.read(size) + + def write(self, bytestring): + """Write bytestring to file.""" + return self._fh.write(bytestring) + + def flush(self): + """Flush write buffers if applicable.""" + return self._fh.flush() + + def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'): + """Return numpy.memmap of data stored in file.""" + if not self.is_file: + raise ValueError('Cannot memory-map file without fileno') + return numpy.memmap(self._fh, dtype=dtype, mode=mode, + offset=self._offset + offset, + shape=shape, order=order) + + def read_array(self, dtype, count=-1, sep='', chunksize=2**25, out=None, + native=False): + """Return numpy array from file. + + Work around numpy issue #2230, "numpy.fromfile does not accept + StringIO object" https://github.com/numpy/numpy/issues/2230. 
+ + """ + fh = self._fh + dtype = numpy.dtype(dtype) + size = self._size if count < 0 else count * dtype.itemsize + + if out is None: + try: + result = numpy.fromfile(fh, dtype, count, sep) + except IOError: + # ByteIO + data = fh.read(size) + result = numpy.frombuffer(data, dtype, count).copy() + if native and not result.dtype.isnative: + # swap byte order and dtype without copy + result.byteswap(True) + result = result.newbyteorder() + return result + + # Read data from file in chunks and copy to output array + shape = out.shape + size = min(out.nbytes, size) + out = out.reshape(-1) + index = 0 + while size > 0: + data = fh.read(min(chunksize, size)) + datasize = len(data) + if datasize == 0: + break + size -= datasize + data = numpy.frombuffer(data, dtype) + out[index:index+data.size] = data + index += data.size + + if hasattr(out, 'flush'): + out.flush() + return out.reshape(shape) + + def read_record(self, dtype, shape=1, byteorder=None): + """Return numpy record from file.""" + rec = numpy.rec + try: + record = rec.fromfile(self._fh, dtype, shape, byteorder=byteorder) + except Exception: + dtype = numpy.dtype(dtype) + if shape is None: + shape = self._size // dtype.itemsize + size = product(sequence(shape)) * dtype.itemsize + data = self._fh.read(size) + record = rec.fromstring(data, dtype, shape, byteorder=byteorder) + return record[0] if shape == 1 else record + + def write_empty(self, size): + """Append size bytes to file. 
Position must be at end of file.""" + if size < 1: + return + self._fh.seek(size-1, 1) + self._fh.write(b'\x00') + + def write_array(self, data): + """Write numpy array to binary file.""" + try: + data.tofile(self._fh) + except Exception: + # BytesIO + self._fh.write(data.tostring()) + + def tell(self): + """Return file's current position.""" + return self._fh.tell() - self._offset + + def seek(self, offset, whence=0): + """Set file's current position.""" + if self._offset: + if whence == 0: + self._fh.seek(self._offset + offset, whence) + return + elif whence == 2 and self._size > 0: + self._fh.seek(self._offset + self._size + offset, 0) + return + self._fh.seek(offset, whence) + + def close(self): + """Close file.""" + if self._close and self._fh: + self._fh.close() + self._fh = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def __getattr__(self, name): + """Return attribute from underlying file object.""" + if self._offset: + warnings.warn( + "FileHandle: '%s' not implemented for embedded files" % name) + return getattr(self._fh, name) + + @property + def name(self): + return self._name + + @property + def dirname(self): + return self._dir + + @property + def path(self): + return os.path.join(self._dir, self._name) + + @property + def size(self): + return self._size + + @property + def closed(self): + return self._fh is None + + @property + def lock(self): + return self._lock + + @lock.setter + def lock(self, value): + self._lock = threading.RLock() if value else NullContext() + + +class NullContext(object): + """Null context manager. + + >>> with NullContext(): + ... 
pass + + """ + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + pass + + +class OpenFileCache(object): + """Keep files open.""" + + __slots__ = ('files', 'past', 'lock', 'size') + + def __init__(self, size, lock=None): + """Initialize open file cache.""" + self.past = [] # FIFO of opened files + self.files = {} # refcounts of opened files + self.lock = NullContext() if lock is None else lock + self.size = int(size) + + def open(self, filehandle): + """Re-open file if necessary.""" + with self.lock: + if filehandle in self.files: + self.files[filehandle] += 1 + elif filehandle.closed: + filehandle.open() + self.files[filehandle] = 1 + self.past.append(filehandle) + + def close(self, filehandle): + """Close openend file if no longer used.""" + with self.lock: + if filehandle in self.files: + self.files[filehandle] -= 1 + # trim the file cache + index = 0 + size = len(self.past) + while size > self.size and index < size: + filehandle = self.past[index] + if self.files[filehandle] == 0: + filehandle.close() + del self.files[filehandle] + del self.past[index] + size -= 1 + else: + index += 1 + + def clear(self): + """Close all opened files if not in use.""" + with self.lock: + for filehandle, refcount in list(self.files.items()): + if refcount == 0: + filehandle.close() + del self.files[filehandle] + del self.past[self.past.index(filehandle)] + + +class LazyConst(object): + """Class whose attributes are computed on first access from its methods.""" + def __init__(self, cls): + self._cls = cls + self.__doc__ = getattr(cls, '__doc__') + + def __getattr__(self, name): + func = getattr(self._cls, name) + if not callable(func): + return func + try: + value = func() + except TypeError: + # Python 2 unbound method + value = func.__func__() + setattr(self, name, value) + return value + + +@LazyConst +class TIFF(object): + """Namespace for module constants.""" + + def TAGS(): + # TIFF tag codes and names from TIFF6, TIFF/EP, EXIF, and 
other specs + return { + 11: 'ProcessingSoftware', + 254: 'NewSubfileType', + 255: 'SubfileType', + 256: 'ImageWidth', + 257: 'ImageLength', + 258: 'BitsPerSample', + 259: 'Compression', + 262: 'PhotometricInterpretation', + 263: 'Thresholding', + 264: 'CellWidth', + 265: 'CellLength', + 266: 'FillOrder', + 269: 'DocumentName', + 270: 'ImageDescription', + 271: 'Make', + 272: 'Model', + 273: 'StripOffsets', + 274: 'Orientation', + 277: 'SamplesPerPixel', + 278: 'RowsPerStrip', + 279: 'StripByteCounts', + 280: 'MinSampleValue', + 281: 'MaxSampleValue', + 282: 'XResolution', + 283: 'YResolution', + 284: 'PlanarConfiguration', + 285: 'PageName', + 286: 'XPosition', + 287: 'YPosition', + 288: 'FreeOffsets', + 289: 'FreeByteCounts', + 290: 'GrayResponseUnit', + 291: 'GrayResponseCurve', + 292: 'T4Options', + 293: 'T6Options', + 296: 'ResolutionUnit', + 297: 'PageNumber', + 300: 'ColorResponseUnit', + 301: 'TransferFunction', + 305: 'Software', + 306: 'DateTime', + 315: 'Artist', + 316: 'HostComputer', + 317: 'Predictor', + 318: 'WhitePoint', + 319: 'PrimaryChromaticities', + 320: 'ColorMap', + 321: 'HalftoneHints', + 322: 'TileWidth', + 323: 'TileLength', + 324: 'TileOffsets', + 325: 'TileByteCounts', + 326: 'BadFaxLines', + 327: 'CleanFaxData', + 328: 'ConsecutiveBadFaxLines', + 330: 'SubIFDs', + 332: 'InkSet', + 333: 'InkNames', + 334: 'NumberOfInks', + 336: 'DotRange', + 337: 'TargetPrinter', + 338: 'ExtraSamples', + 339: 'SampleFormat', + 340: 'SMinSampleValue', + 341: 'SMaxSampleValue', + 342: 'TransferRange', + 343: 'ClipPath', + 344: 'XClipPathUnits', + 345: 'YClipPathUnits', + 346: 'Indexed', + 347: 'JPEGTables', + 351: 'OPIProxy', + 400: 'GlobalParametersIFD', + 401: 'ProfileType', + 402: 'FaxProfile', + 403: 'CodingMethods', + 404: 'VersionYear', + 405: 'ModeNumber', + 433: 'Decode', + 434: 'DefaultImageColor', + 435: 'T82Options', + 437: 'JPEGTables_', # 347 + 512: 'JPEGProc', + 513: 'JPEGInterchangeFormat', + 514: 'JPEGInterchangeFormatLength', + 515: 
'JPEGRestartInterval', + 517: 'JPEGLosslessPredictors', + 518: 'JPEGPointTransforms', + 519: 'JPEGQTables', + 520: 'JPEGDCTables', + 521: 'JPEGACTables', + 529: 'YCbCrCoefficients', + 530: 'YCbCrSubSampling', + 531: 'YCbCrPositioning', + 532: 'ReferenceBlackWhite', + 559: 'StripRowCounts', + 700: 'XMP', # XMLPacket + 769: 'GDIGamma', # GDI+ + 770: 'ICCProfileDescriptor', # GDI+ + 771: 'SRGBRenderingIntent', # GDI+ + 800: 'ImageTitle', # GDI+ + 999: 'USPTO_Miscellaneous', + 4864: 'AndorId', # TODO: Andor Technology 4864 - 5030 + 4869: 'AndorTemperature', + 4876: 'AndorExposureTime', + 4878: 'AndorKineticCycleTime', + 4879: 'AndorAccumulations', + 4881: 'AndorAcquisitionCycleTime', + 4882: 'AndorReadoutTime', + 4884: 'AndorPhotonCounting', + 4885: 'AndorEmDacLevel', + 4890: 'AndorFrames', + 4896: 'AndorHorizontalFlip', + 4897: 'AndorVerticalFlip', + 4898: 'AndorClockwise', + 4899: 'AndorCounterClockwise', + 4904: 'AndorVerticalClockVoltage', + 4905: 'AndorVerticalShiftSpeed', + 4907: 'AndorPreAmpSetting', + 4908: 'AndorCameraSerial', + 4911: 'AndorActualTemperature', + 4912: 'AndorBaselineClamp', + 4913: 'AndorPrescans', + 4914: 'AndorModel', + 4915: 'AndorChipSizeX', + 4916: 'AndorChipSizeY', + 4944: 'AndorBaselineOffset', + 4966: 'AndorSoftwareVersion', + 18246: 'Rating', + 18247: 'XP_DIP_XML', + 18248: 'StitchInfo', + 18249: 'RatingPercent', + 20481: 'ResolutionXUnit', # GDI+ + 20482: 'ResolutionYUnit', # GDI+ + 20483: 'ResolutionXLengthUnit', # GDI+ + 20484: 'ResolutionYLengthUnit', # GDI+ + 20485: 'PrintFlags', # GDI+ + 20486: 'PrintFlagsVersion', # GDI+ + 20487: 'PrintFlagsCrop', # GDI+ + 20488: 'PrintFlagsBleedWidth', # GDI+ + 20489: 'PrintFlagsBleedWidthScale', # GDI+ + 20490: 'HalftoneLPI', # GDI+ + 20491: 'HalftoneLPIUnit', # GDI+ + 20492: 'HalftoneDegree', # GDI+ + 20493: 'HalftoneShape', # GDI+ + 20494: 'HalftoneMisc', # GDI+ + 20495: 'HalftoneScreen', # GDI+ + 20496: 'JPEGQuality', # GDI+ + 20497: 'GridSize', # GDI+ + 20498: 'ThumbnailFormat', # GDI+ + 
20499: 'ThumbnailWidth', # GDI+ + 20500: 'ThumbnailHeight', # GDI+ + 20501: 'ThumbnailColorDepth', # GDI+ + 20502: 'ThumbnailPlanes', # GDI+ + 20503: 'ThumbnailRawBytes', # GDI+ + 20504: 'ThumbnailSize', # GDI+ + 20505: 'ThumbnailCompressedSize', # GDI+ + 20506: 'ColorTransferFunction', # GDI+ + 20507: 'ThumbnailData', + 20512: 'ThumbnailImageWidth', # GDI+ + 20513: 'ThumbnailImageHeight', # GDI+ + 20514: 'ThumbnailBitsPerSample', # GDI+ + 20515: 'ThumbnailCompression', + 20516: 'ThumbnailPhotometricInterp', # GDI+ + 20517: 'ThumbnailImageDescription', # GDI+ + 20518: 'ThumbnailEquipMake', # GDI+ + 20519: 'ThumbnailEquipModel', # GDI+ + 20520: 'ThumbnailStripOffsets', # GDI+ + 20521: 'ThumbnailOrientation', # GDI+ + 20522: 'ThumbnailSamplesPerPixel', # GDI+ + 20523: 'ThumbnailRowsPerStrip', # GDI+ + 20524: 'ThumbnailStripBytesCount', # GDI+ + 20525: 'ThumbnailResolutionX', + 20526: 'ThumbnailResolutionY', + 20527: 'ThumbnailPlanarConfig', # GDI+ + 20528: 'ThumbnailResolutionUnit', + 20529: 'ThumbnailTransferFunction', + 20530: 'ThumbnailSoftwareUsed', # GDI+ + 20531: 'ThumbnailDateTime', # GDI+ + 20532: 'ThumbnailArtist', # GDI+ + 20533: 'ThumbnailWhitePoint', # GDI+ + 20534: 'ThumbnailPrimaryChromaticities', # GDI+ + 20535: 'ThumbnailYCbCrCoefficients', # GDI+ + 20536: 'ThumbnailYCbCrSubsampling', # GDI+ + 20537: 'ThumbnailYCbCrPositioning', + 20538: 'ThumbnailRefBlackWhite', # GDI+ + 20539: 'ThumbnailCopyRight', # GDI+ + 20545: 'InteroperabilityIndex', + 20546: 'InteroperabilityVersion', + 20624: 'LuminanceTable', + 20625: 'ChrominanceTable', + 20736: 'FrameDelay', # GDI+ + 20737: 'LoopCount', # GDI+ + 20738: 'GlobalPalette', # GDI+ + 20739: 'IndexBackground', # GDI+ + 20740: 'IndexTransparent', # GDI+ + 20752: 'PixelUnit', # GDI+ + 20753: 'PixelPerUnitX', # GDI+ + 20754: 'PixelPerUnitY', # GDI+ + 20755: 'PaletteHistogram', # GDI+ + 28672: 'SonyRawFileType', # Sony ARW + 28722: 'VignettingCorrParams', # Sony ARW + 28725: 'ChromaticAberrationCorrParams', # Sony 
ARW + 28727: 'DistortionCorrParams', # Sony ARW + # Private tags >= 32768 + 32781: 'ImageID', + 32931: 'WangTag1', + 32932: 'WangAnnotation', + 32933: 'WangTag3', + 32934: 'WangTag4', + 32953: 'ImageReferencePoints', + 32954: 'RegionXformTackPoint', + 32955: 'WarpQuadrilateral', + 32956: 'AffineTransformMat', + 32995: 'Matteing', + 32996: 'DataType', + 32997: 'ImageDepth', + 32998: 'TileDepth', + 33300: 'ImageFullWidth', + 33301: 'ImageFullLength', + 33302: 'TextureFormat', + 33303: 'TextureWrapModes', + 33304: 'FieldOfViewCotangent', + 33305: 'MatrixWorldToScreen', + 33306: 'MatrixWorldToCamera', + 33405: 'Model2', + 33421: 'CFARepeatPatternDim', + 33422: 'CFAPattern', + 33423: 'BatteryLevel', + 33424: 'KodakIFD', + 33434: 'ExposureTime', + 33437: 'FNumber', + 33432: 'Copyright', + 33445: 'MDFileTag', + 33446: 'MDScalePixel', + 33447: 'MDColorTable', + 33448: 'MDLabName', + 33449: 'MDSampleInfo', + 33450: 'MDPrepDate', + 33451: 'MDPrepTime', + 33452: 'MDFileUnits', + 33550: 'ModelPixelScaleTag', + 33589: 'AdventScale', + 33590: 'AdventRevision', + 33628: 'UIC1tag', # Metamorph Universal Imaging Corp STK + 33629: 'UIC2tag', + 33630: 'UIC3tag', + 33631: 'UIC4tag', + 33723: 'IPTCNAA', + 33858: 'ExtendedTagsOffset', # DEFF points IFD with private tags + 33918: 'IntergraphPacketData', # INGRPacketDataTag + 33919: 'IntergraphFlagRegisters', # INGRFlagRegisters + 33920: 'IntergraphMatrixTag', # IrasBTransformationMatrix + 33921: 'INGRReserved', + 33922: 'ModelTiepointTag', + 33923: 'LeicaMagic', + 34016: 'Site', + 34017: 'ColorSequence', + 34018: 'IT8Header', + 34019: 'RasterPadding', + 34020: 'BitsPerRunLength', + 34021: 'BitsPerExtendedRunLength', + 34022: 'ColorTable', + 34023: 'ImageColorIndicator', + 34024: 'BackgroundColorIndicator', + 34025: 'ImageColorValue', + 34026: 'BackgroundColorValue', + 34027: 'PixelIntensityRange', + 34028: 'TransparencyIndicator', + 34029: 'ColorCharacterization', + 34030: 'HCUsage', + 34031: 'TrapIndicator', + 34032: 'CMYKEquivalent', + 
34118: 'CZ_SEM', # Zeiss SEM + 34152: 'AFCP_IPTC', + 34232: 'PixelMagicJBIGOptions', + 34263: 'JPLCartoIFD', + 34122: 'IPLAB', # number of images + 34264: 'ModelTransformationTag', + 34306: 'WB_GRGBLevels', # Leaf MOS + 34310: 'LeafData', + 34361: 'MM_Header', + 34362: 'MM_Stamp', + 34363: 'MM_Unknown', + 34377: 'ImageResources', # Photoshop + 34386: 'MM_UserBlock', + 34412: 'CZ_LSMINFO', + 34665: 'ExifTag', + 34675: 'InterColorProfile', # ICCProfile + 34680: 'FEI_SFEG', # + 34682: 'FEI_HELIOS', # + 34683: 'FEI_TITAN', # + 34687: 'FXExtensions', + 34688: 'MultiProfiles', + 34689: 'SharedData', + 34690: 'T88Options', + 34710: 'MarCCD', # offset to MarCCD header + 34732: 'ImageLayer', + 34735: 'GeoKeyDirectoryTag', + 34736: 'GeoDoubleParamsTag', + 34737: 'GeoAsciiParamsTag', + 34750: 'JBIGOptions', + 34821: 'PIXTIFF', # ? Pixel Translations Inc + 34850: 'ExposureProgram', + 34852: 'SpectralSensitivity', + 34853: 'GPSTag', # GPSIFD + 34855: 'ISOSpeedRatings', + 34856: 'OECF', + 34857: 'Interlace', + 34858: 'TimeZoneOffset', + 34859: 'SelfTimerMode', + 34864: 'SensitivityType', + 34865: 'StandardOutputSensitivity', + 34866: 'RecommendedExposureIndex', + 34867: 'ISOSpeed', + 34868: 'ISOSpeedLatitudeyyy', + 34869: 'ISOSpeedLatitudezzz', + 34908: 'HylaFAXFaxRecvParams', + 34909: 'HylaFAXFaxSubAddress', + 34910: 'HylaFAXFaxRecvTime', + 34911: 'FaxDcs', + 34929: 'FedexEDR', + 34954: 'LeafSubIFD', + 34959: 'Aphelion1', + 34960: 'Aphelion2', + 34961: 'AphelionInternal', # ADCIS + 36864: 'ExifVersion', + 36867: 'DateTimeOriginal', + 36868: 'DateTimeDigitized', + 36873: 'GooglePlusUploadCode', + 36880: 'OffsetTime', + 36881: 'OffsetTimeOriginal', + 36882: 'OffsetTimeDigitized', + # TODO: Pilatus/CHESS/TV6 36864..37120 conflicting with Exif tags + # 36864: 'TVX ?', + # 36865: 'TVX_NumExposure', + # 36866: 'TVX_NumBackground', + # 36867: 'TVX_ExposureTime', + # 36868: 'TVX_BackgroundTime', + # 36870: 'TVX ?', + # 36873: 'TVX_SubBpp', + # 36874: 'TVX_SubWide', + # 36875: 
'TVX_SubHigh', + # 36876: 'TVX_BlackLevel', + # 36877: 'TVX_DarkCurrent', + # 36878: 'TVX_ReadNoise', + # 36879: 'TVX_DarkCurrentNoise', + # 36880: 'TVX_BeamMonitor', + # 37120: 'TVX_UserVariables', # A/D values + 37121: 'ComponentsConfiguration', + 37122: 'CompressedBitsPerPixel', + 37377: 'ShutterSpeedValue', + 37378: 'ApertureValue', + 37379: 'BrightnessValue', + 37380: 'ExposureBiasValue', + 37381: 'MaxApertureValue', + 37382: 'SubjectDistance', + 37383: 'MeteringMode', + 37384: 'LightSource', + 37385: 'Flash', + 37386: 'FocalLength', + 37387: 'FlashEnergy_', # 37387 + 37388: 'SpatialFrequencyResponse_', # 37388 + 37389: 'Noise', + 37390: 'FocalPlaneXResolution', + 37391: 'FocalPlaneYResolution', + 37392: 'FocalPlaneResolutionUnit', + 37393: 'ImageNumber', + 37394: 'SecurityClassification', + 37395: 'ImageHistory', + 37396: 'SubjectLocation', + 37397: 'ExposureIndex', + 37398: 'TIFFEPStandardID', + 37399: 'SensingMethod', + 37434: 'CIP3DataFile', + 37435: 'CIP3Sheet', + 37436: 'CIP3Side', + 37439: 'StoNits', + 37500: 'MakerNote', + 37510: 'UserComment', + 37520: 'SubsecTime', + 37521: 'SubsecTimeOriginal', + 37522: 'SubsecTimeDigitized', + 37679: 'MODIText', # Microsoft Office Document Imaging + 37680: 'MODIOLEPropertySetStorage', + 37681: 'MODIPositioning', + 37706: 'TVIPS', # offset to TemData structure + 37707: 'TVIPS1', + 37708: 'TVIPS2', # same TemData structure as undefined + 37724: 'ImageSourceData', # Photoshop + 37888: 'Temperature', + 37889: 'Humidity', + 37890: 'Pressure', + 37891: 'WaterDepth', + 37892: 'Acceleration', + 37893: 'CameraElevationAngle', + 40001: 'MC_IpWinScal', # Media Cybernetics + 40100: 'MC_IdOld', + 40965: 'InteroperabilityTag', # InteropOffset + 40091: 'XPTitle', + 40092: 'XPComment', + 40093: 'XPAuthor', + 40094: 'XPKeywords', + 40095: 'XPSubject', + 40960: 'FlashpixVersion', + 40961: 'ColorSpace', + 40962: 'PixelXDimension', + 40963: 'PixelYDimension', + 40964: 'RelatedSoundFile', + 40976: 'SamsungRawPointersOffset', + 40977: 
'SamsungRawPointersLength', + 41217: 'SamsungRawByteOrder', + 41218: 'SamsungRawUnknown', + 41483: 'FlashEnergy', + 41484: 'SpatialFrequencyResponse', + 41485: 'Noise_', # 37389 + 41486: 'FocalPlaneXResolution_', # 37390 + 41487: 'FocalPlaneYResolution_', # 37391 + 41488: 'FocalPlaneResolutionUnit_', # 37392 + 41489: 'ImageNumber_', # 37393 + 41490: 'SecurityClassification_', # 37394 + 41491: 'ImageHistory_', # 37395 + 41492: 'SubjectLocation_', # 37395 + 41493: 'ExposureIndex_ ', # 37397 + 41494: 'TIFF-EPStandardID', + 41495: 'SensingMethod_', # 37399 + 41728: 'FileSource', + 41729: 'SceneType', + 41730: 'CFAPattern_', # 33422 + 41985: 'CustomRendered', + 41986: 'ExposureMode', + 41987: 'WhiteBalance', + 41988: 'DigitalZoomRatio', + 41989: 'FocalLengthIn35mmFilm', + 41990: 'SceneCaptureType', + 41991: 'GainControl', + 41992: 'Contrast', + 41993: 'Saturation', + 41994: 'Sharpness', + 41995: 'DeviceSettingDescription', + 41996: 'SubjectDistanceRange', + 42016: 'ImageUniqueID', + 42032: 'CameraOwnerName', + 42033: 'BodySerialNumber', + 42034: 'LensSpecification', + 42035: 'LensMake', + 42036: 'LensModel', + 42037: 'LensSerialNumber', + 42112: 'GDAL_METADATA', + 42113: 'GDAL_NODATA', + 42240: 'Gamma', + 43314: 'NIHImageHeader', + 44992: 'ExpandSoftware', + 44993: 'ExpandLens', + 44994: 'ExpandFilm', + 44995: 'ExpandFilterLens', + 44996: 'ExpandScanner', + 44997: 'ExpandFlashLamp', + 48129: 'PixelFormat', # HDP and WDP + 48130: 'Transformation', + 48131: 'Uncompressed', + 48132: 'ImageType', + 48256: 'ImageWidth_', # 256 + 48257: 'ImageHeight_', + 48258: 'WidthResolution', + 48259: 'HeightResolution', + 48320: 'ImageOffset', + 48321: 'ImageByteCount', + 48322: 'AlphaOffset', + 48323: 'AlphaByteCount', + 48324: 'ImageDataDiscard', + 48325: 'AlphaDataDiscard', + 50215: 'OceScanjobDescription', + 50216: 'OceApplicationSelector', + 50217: 'OceIdentificationNumber', + 50218: 'OceImageLogicCharacteristics', + 50255: 'Annotations', + 50288: 'MC_Id', # Media Cybernetics + 
50289: 'MC_XYPosition', + 50290: 'MC_ZPosition', + 50291: 'MC_XYCalibration', + 50292: 'MC_LensCharacteristics', + 50293: 'MC_ChannelName', + 50294: 'MC_ExcitationWavelength', + 50295: 'MC_TimeStamp', + 50296: 'MC_FrameProperties', + 50341: 'PrintImageMatching', + 50495: 'PCO_RAW', # TODO: PCO CamWare + 50547: 'OriginalFileName', + 50560: 'USPTO_OriginalContentType', # US Patent Office + 50561: 'USPTO_RotationCode', + 50656: 'CR2CFAPattern', + 50706: 'DNGVersion', # DNG 50706 .. 51112 + 50707: 'DNGBackwardVersion', + 50708: 'UniqueCameraModel', + 50709: 'LocalizedCameraModel', + 50710: 'CFAPlaneColor', + 50711: 'CFALayout', + 50712: 'LinearizationTable', + 50713: 'BlackLevelRepeatDim', + 50714: 'BlackLevel', + 50715: 'BlackLevelDeltaH', + 50716: 'BlackLevelDeltaV', + 50717: 'WhiteLevel', + 50718: 'DefaultScale', + 50719: 'DefaultCropOrigin', + 50720: 'DefaultCropSize', + 50721: 'ColorMatrix1', + 50722: 'ColorMatrix2', + 50723: 'CameraCalibration1', + 50724: 'CameraCalibration2', + 50725: 'ReductionMatrix1', + 50726: 'ReductionMatrix2', + 50727: 'AnalogBalance', + 50728: 'AsShotNeutral', + 50729: 'AsShotWhiteXY', + 50730: 'BaselineExposure', + 50731: 'BaselineNoise', + 50732: 'BaselineSharpness', + 50733: 'BayerGreenSplit', + 50734: 'LinearResponseLimit', + 50735: 'CameraSerialNumber', + 50736: 'LensInfo', + 50737: 'ChromaBlurRadius', + 50738: 'AntiAliasStrength', + 50739: 'ShadowScale', + 50740: 'DNGPrivateData', + 50741: 'MakerNoteSafety', + 50752: 'RawImageSegmentation', + 50778: 'CalibrationIlluminant1', + 50779: 'CalibrationIlluminant2', + 50780: 'BestQualityScale', + 50781: 'RawDataUniqueID', + 50784: 'AliasLayerMetadata', + 50827: 'OriginalRawFileName', + 50828: 'OriginalRawFileData', + 50829: 'ActiveArea', + 50830: 'MaskedAreas', + 50831: 'AsShotICCProfile', + 50832: 'AsShotPreProfileMatrix', + 50833: 'CurrentICCProfile', + 50834: 'CurrentPreProfileMatrix', + 50838: 'IJMetadataByteCounts', + 50839: 'IJMetadata', + 50844: 'RPCCoefficientTag', + 50879: 
'ColorimetricReference', + 50885: 'SRawType', + 50898: 'PanasonicTitle', + 50899: 'PanasonicTitle2', + 50931: 'CameraCalibrationSignature', + 50932: 'ProfileCalibrationSignature', + 50933: 'ProfileIFD', + 50934: 'AsShotProfileName', + 50935: 'NoiseReductionApplied', + 50936: 'ProfileName', + 50937: 'ProfileHueSatMapDims', + 50938: 'ProfileHueSatMapData1', + 50939: 'ProfileHueSatMapData2', + 50940: 'ProfileToneCurve', + 50941: 'ProfileEmbedPolicy', + 50942: 'ProfileCopyright', + 50964: 'ForwardMatrix1', + 50965: 'ForwardMatrix2', + 50966: 'PreviewApplicationName', + 50967: 'PreviewApplicationVersion', + 50968: 'PreviewSettingsName', + 50969: 'PreviewSettingsDigest', + 50970: 'PreviewColorSpace', + 50971: 'PreviewDateTime', + 50972: 'RawImageDigest', + 50973: 'OriginalRawFileDigest', + 50974: 'SubTileBlockSize', + 50975: 'RowInterleaveFactor', + 50981: 'ProfileLookTableDims', + 50982: 'ProfileLookTableData', + 51008: 'OpcodeList1', + 51009: 'OpcodeList2', + 51022: 'OpcodeList3', + 51023: 'FibicsXML', # + 51041: 'NoiseProfile', + 51043: 'TimeCodes', + 51044: 'FrameRate', + 51058: 'TStop', + 51081: 'ReelName', + 51089: 'OriginalDefaultFinalSize', + 51090: 'OriginalBestQualitySize', + 51091: 'OriginalDefaultCropSize', + 51105: 'CameraLabel', + 51107: 'ProfileHueSatMapEncoding', + 51108: 'ProfileLookTableEncoding', + 51109: 'BaselineExposureOffset', + 51110: 'DefaultBlackRender', + 51111: 'NewRawImageDigest', + 51112: 'RawToPreviewGain', + 51125: 'DefaultUserCrop', + 51123: 'MicroManagerMetadata', + 59932: 'Padding', + 59933: 'OffsetSchema', + # Reusable Tags 65000-65535 + # 65000: Dimap_Document XML + # 65000-65112: Photoshop Camera RAW EXIF tags + # 65000: 'OwnerName', + # 65001: 'SerialNumber', + # 65002: 'Lens', + # 65024: 'KDC_IFD', + # 65100: 'RawFile', + # 65101: 'Converter', + # 65102: 'WhiteBalance', + # 65105: 'Exposure', + # 65106: 'Shadows', + # 65107: 'Brightness', + # 65108: 'Contrast', + # 65109: 'Saturation', + # 65110: 'Sharpness', + # 65111: 
'Smoothness', + # 65112: 'MoireFilter', + 65200: 'FlexXML', # + 65563: 'PerSample', + } + + def TAG_NAMES(): + return {v: c for c, v in TIFF.TAGS.items()} + + def TAG_READERS(): + # Map TIFF tag codes to import functions + return { + 320: read_colormap, + # 700: read_bytes, # read_utf8, + # 34377: read_bytes, + 33723: read_bytes, + # 34675: read_bytes, + 33628: read_uic1tag, # Universal Imaging Corp STK + 33629: read_uic2tag, + 33630: read_uic3tag, + 33631: read_uic4tag, + 34118: read_cz_sem, # Carl Zeiss SEM + 34361: read_mm_header, # Olympus FluoView + 34362: read_mm_stamp, + 34363: read_numpy, # MM_Unknown + 34386: read_numpy, # MM_UserBlock + 34412: read_cz_lsminfo, # Carl Zeiss LSM + 34680: read_fei_metadata, # S-FEG + 34682: read_fei_metadata, # Helios NanoLab + 37706: read_tvips_header, # TVIPS EMMENU + 37724: read_bytes, # ImageSourceData + 33923: read_bytes, # read_leica_magic + 43314: read_nih_image_header, + # 40001: read_bytes, + 40100: read_bytes, + 50288: read_bytes, + 50296: read_bytes, + 50839: read_bytes, + 51123: read_json, + 34665: read_exif_ifd, + 34853: read_gps_ifd, + 40965: read_interoperability_ifd, + } + + def TAG_TUPLE(): + # Tags whose values must be stored as tuples + return frozenset((273, 279, 324, 325, 530, 531, 34736)) + + def TAG_ATTRIBUTES(): + # Map tag codes to TiffPage attribute names + return { + 'ImageWidth': 'imagewidth', + 'ImageLength': 'imagelength', + 'BitsPerSample': 'bitspersample', + 'Compression': 'compression', + 'PlanarConfiguration': 'planarconfig', + 'FillOrder': 'fillorder', + 'PhotometricInterpretation': 'photometric', + 'ColorMap': 'colormap', + 'ImageDescription': 'description', + 'ImageDescription1': 'description1', + 'SamplesPerPixel': 'samplesperpixel', + 'RowsPerStrip': 'rowsperstrip', + 'Software': 'software', + 'Predictor': 'predictor', + 'TileWidth': 'tilewidth', + 'TileLength': 'tilelength', + 'ExtraSamples': 'extrasamples', + 'SampleFormat': 'sampleformat', + 'ImageDepth': 'imagedepth', + 'TileDepth': 
'tiledepth', + } + + def TAG_ENUM(): + return { + # 254: TIFF.FILETYPE, + 255: TIFF.OFILETYPE, + 259: TIFF.COMPRESSION, + 262: TIFF.PHOTOMETRIC, + 263: TIFF.THRESHHOLD, + 266: TIFF.FILLORDER, + 274: TIFF.ORIENTATION, + 284: TIFF.PLANARCONFIG, + 290: TIFF.GRAYRESPONSEUNIT, + # 292: TIFF.GROUP3OPT, + # 293: TIFF.GROUP4OPT, + 296: TIFF.RESUNIT, + 300: TIFF.COLORRESPONSEUNIT, + 317: TIFF.PREDICTOR, + 338: TIFF.EXTRASAMPLE, + 339: TIFF.SAMPLEFORMAT, + # 512: TIFF.JPEGPROC, + # 531: TIFF.YCBCRPOSITION, + } + + def FILETYPE(): + class FILETYPE(enum.IntFlag): + # Python 3.6 only + UNDEFINED = 0 + REDUCEDIMAGE = 1 + PAGE = 2 + MASK = 4 + return FILETYPE + + def OFILETYPE(): + class OFILETYPE(enum.IntEnum): + UNDEFINED = 0 + IMAGE = 1 + REDUCEDIMAGE = 2 + PAGE = 3 + return OFILETYPE + + def COMPRESSION(): + class COMPRESSION(enum.IntEnum): + NONE = 1 # Uncompressed + CCITTRLE = 2 # CCITT 1D + CCITT_T4 = 3 # 'T4/Group 3 Fax', + CCITT_T6 = 4 # 'T6/Group 4 Fax', + LZW = 5 + OJPEG = 6 # old-style JPEG + JPEG = 7 + ADOBE_DEFLATE = 8 + JBIG_BW = 9 + JBIG_COLOR = 10 + JPEG_99 = 99 + KODAK_262 = 262 + NEXT = 32766 + SONY_ARW = 32767 + PACKED_RAW = 32769 + SAMSUNG_SRW = 32770 + CCIRLEW = 32771 + SAMSUNG_SRW2 = 32772 + PACKBITS = 32773 + THUNDERSCAN = 32809 + IT8CTPAD = 32895 + IT8LW = 32896 + IT8MP = 32897 + IT8BL = 32898 + PIXARFILM = 32908 + PIXARLOG = 32909 + DEFLATE = 32946 + DCS = 32947 + APERIO_JP2000_YCBC = 33003 # Leica Aperio + APERIO_JP2000_RGB = 33005 # Leica Aperio + JBIG = 34661 + SGILOG = 34676 + SGILOG24 = 34677 + JPEG2000 = 34712 + NIKON_NEF = 34713 + JBIG2 = 34715 + MDI_BINARY = 34718 # 'Microsoft Document Imaging + MDI_PROGRESSIVE = 34719 # 'Microsoft Document Imaging + MDI_VECTOR = 34720 # 'Microsoft Document Imaging + JPEG_LOSSY = 34892 + LZMA = 34925 + ZSTD = 34926 + OPS_PNG = 34933 # Objective Pathology Services + OPS_JPEGXR = 34934 # Objective Pathology Services + PIXTIFF = 50013 + KODAK_DCR = 65000 + PENTAX_PEF = 65535 + # def __bool__(self): return self != 1 
# Python 3.6 only + return COMPRESSION + + def PHOTOMETRIC(): + class PHOTOMETRIC(enum.IntEnum): + MINISWHITE = 0 + MINISBLACK = 1 + RGB = 2 + PALETTE = 3 + MASK = 4 + SEPARATED = 5 # CMYK + YCBCR = 6 + CIELAB = 8 + ICCLAB = 9 + ITULAB = 10 + CFA = 32803 # Color Filter Array + LOGL = 32844 + LOGLUV = 32845 + LINEAR_RAW = 34892 + return PHOTOMETRIC + + def THRESHHOLD(): + class THRESHHOLD(enum.IntEnum): + BILEVEL = 1 + HALFTONE = 2 + ERRORDIFFUSE = 3 + return THRESHHOLD + + def FILLORDER(): + class FILLORDER(enum.IntEnum): + MSB2LSB = 1 + LSB2MSB = 2 + return FILLORDER + + def ORIENTATION(): + class ORIENTATION(enum.IntEnum): + TOPLEFT = 1 + TOPRIGHT = 2 + BOTRIGHT = 3 + BOTLEFT = 4 + LEFTTOP = 5 + RIGHTTOP = 6 + RIGHTBOT = 7 + LEFTBOT = 8 + return ORIENTATION + + def PLANARCONFIG(): + class PLANARCONFIG(enum.IntEnum): + CONTIG = 1 + SEPARATE = 2 + return PLANARCONFIG + + def GRAYRESPONSEUNIT(): + class GRAYRESPONSEUNIT(enum.IntEnum): + _10S = 1 + _100S = 2 + _1000S = 3 + _10000S = 4 + _100000S = 5 + return GRAYRESPONSEUNIT + + def GROUP4OPT(): + class GROUP4OPT(enum.IntEnum): + UNCOMPRESSED = 2 + return GROUP4OPT + + def RESUNIT(): + class RESUNIT(enum.IntEnum): + NONE = 1 + INCH = 2 + CENTIMETER = 3 + # def __bool__(self): return self != 1 # Python 3.6 only + return RESUNIT + + def COLORRESPONSEUNIT(): + class COLORRESPONSEUNIT(enum.IntEnum): + _10S = 1 + _100S = 2 + _1000S = 3 + _10000S = 4 + _100000S = 5 + return COLORRESPONSEUNIT + + def PREDICTOR(): + class PREDICTOR(enum.IntEnum): + NONE = 1 + HORIZONTAL = 2 + FLOATINGPOINT = 3 + # def __bool__(self): return self != 1 # Python 3.6 only + return PREDICTOR + + def EXTRASAMPLE(): + class EXTRASAMPLE(enum.IntEnum): + UNSPECIFIED = 0 + ASSOCALPHA = 1 + UNASSALPHA = 2 + return EXTRASAMPLE + + def SAMPLEFORMAT(): + class SAMPLEFORMAT(enum.IntEnum): + UINT = 1 + INT = 2 + IEEEFP = 3 + VOID = 4 + COMPLEXINT = 5 + COMPLEXIEEEFP = 6 + return SAMPLEFORMAT + + def DATATYPES(): + class DATATYPES(enum.IntEnum): + NOTYPE = 0 
+ BYTE = 1 + ASCII = 2 + SHORT = 3 + LONG = 4 + RATIONAL = 5 + SBYTE = 6 + UNDEFINED = 7 + SSHORT = 8 + SLONG = 9 + SRATIONAL = 10 + FLOAT = 11 + DOUBLE = 12 + IFD = 13 + UNICODE = 14 + COMPLEX = 15 + LONG8 = 16 + SLONG8 = 17 + IFD8 = 18 + return DATATYPES + + def DATA_FORMATS(): + # Map TIFF DATATYPES to Python struct formats + return { + 1: '1B', # BYTE 8-bit unsigned integer. + 2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code; + # the last byte must be NULL (binary zero). + 3: '1H', # SHORT 16-bit (2-byte) unsigned integer + 4: '1I', # LONG 32-bit (4-byte) unsigned integer. + 5: '2I', # RATIONAL Two LONGs: the first represents the numerator + # of a fraction; the second, the denominator. + 6: '1b', # SBYTE An 8-bit signed (twos-complement) integer. + 7: '1B', # UNDEFINED An 8-bit byte that may contain anything, + # depending on the definition of the field. + 8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) + # integer. + 9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) + # integer. + 10: '2i', # SRATIONAL Two SLONGs: the first represents the + # numerator of a fraction, the second the denominator. + 11: '1f', # FLOAT Single precision (4-byte) IEEE format. + 12: '1d', # DOUBLE Double precision (8-byte) IEEE format. + 13: '1I', # IFD unsigned 4 byte IFD offset. 
+ # 14: '', # UNICODE + # 15: '', # COMPLEX + 16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff) + 17: '1q', # SLONG8 signed 8 byte integer (BigTiff) + 18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff) + } + + def DATA_DTYPES(): + # Map numpy dtypes to TIFF DATATYPES + return {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6, + 'h': 8, 'i': 9, '2i': 10, 'f': 11, 'd': 12, 'Q': 16, 'q': 17} + + def SAMPLE_DTYPES(): + # Map TIFF SampleFormats and BitsPerSample to numpy dtype + return { + (1, 1): '?', # bitmap + (1, 2): 'B', + (1, 3): 'B', + (1, 4): 'B', + (1, 5): 'B', + (1, 6): 'B', + (1, 7): 'B', + (1, 8): 'B', + (1, 9): 'H', + (1, 10): 'H', + (1, 11): 'H', + (1, 12): 'H', + (1, 13): 'H', + (1, 14): 'H', + (1, 15): 'H', + (1, 16): 'H', + (1, 17): 'I', + (1, 18): 'I', + (1, 19): 'I', + (1, 20): 'I', + (1, 21): 'I', + (1, 22): 'I', + (1, 23): 'I', + (1, 24): 'I', + (1, 25): 'I', + (1, 26): 'I', + (1, 27): 'I', + (1, 28): 'I', + (1, 29): 'I', + (1, 30): 'I', + (1, 31): 'I', + (1, 32): 'I', + (1, 64): 'Q', + (2, 8): 'b', + (2, 16): 'h', + (2, 32): 'i', + (2, 64): 'q', + (3, 16): 'e', + (3, 32): 'f', + (3, 64): 'd', + (6, 64): 'F', + (6, 128): 'D', + (1, (5, 6, 5)): 'B', + } + + def COMPESSORS(): + # Map COMPRESSION to compress functions and default compression levels + + class Compressors(object): + """Delay import compressor functions.""" + def __init__(self): + self._compressors = {8: (zlib.compress, 6), + 32946: (zlib.compress, 6)} + + def __getitem__(self, key): + if key in self._compressors: + return self._compressors[key] + + if key == 34925: + try: + import lzma # delayed import + except ImportError: + try: + import backports.lzma as lzma # delayed import + except ImportError: + raise KeyError + + def lzma_compress(x, level): + return lzma.compress(x) + + self._compressors[key] = lzma_compress, 0 + return lzma_compress, 0 + + if key == 34926: + try: + import zstd # delayed import + except ImportError: + raise KeyError + self._compressors[key] = zstd.compress, 
9 + return zstd.compress, 9 + + raise KeyError + + def __contains__(self, key): + try: + self[key] + return True + except KeyError: + return False + + return Compressors() + + def DECOMPESSORS(): + # Map COMPRESSION to decompress functions + + class Decompressors(object): + """Delay import decompressor functions.""" + def __init__(self): + self._decompressors = {None: identityfunc, + 1: identityfunc, + 5: decode_lzw, + 8: zlib.decompress, + 32773: decode_packbits, + 32946: zlib.decompress} + + def __getitem__(self, key): + if key in self._decompressors: + return self._decompressors[key] + + if key == 7: + try: + from imagecodecs import jpeg, jpeg_12 + except ImportError: + raise KeyError + + def decode_jpeg(x, table, bps, colorspace=None): + if bps == 8: + return jpeg.decode_jpeg(x, table, colorspace) + elif bps == 12: + return jpeg_12.decode_jpeg_12(x, table, colorspace) + else: + raise ValueError('bitspersample not supported') + + self._decompressors[key] = decode_jpeg + return decode_jpeg + + if key == 34925: + try: + import lzma # delayed import + except ImportError: + try: + import backports.lzma as lzma # delayed import + except ImportError: + raise KeyError + self._decompressors[key] = lzma.decompress + return lzma.decompress + + if key == 34926: + try: + import zstd # delayed import + except ImportError: + raise KeyError + self._decompressors[key] = zstd.decompress + return zstd.decompress + raise KeyError + + def __contains__(self, item): + try: + self[item] + return True + except KeyError: + return False + + return Decompressors() + + def FRAME_ATTRS(): + # Attributes that a TiffFrame shares with its keyframe + return set('shape ndim size dtype axes is_final'.split()) + + def FILE_FLAGS(): + # TiffFile and TiffPage 'is_\*' attributes + exclude = set('reduced final memmappable contiguous tiled ' + 'chroma_subsampled'.split()) + return set(a[3:] for a in dir(TiffPage) + if a[:3] == 'is_' and a[3:] not in exclude) + + def FILE_EXTENSIONS(): + # TIFF file 
extensions + return tuple('tif tiff ome.tif lsm stk qptiff pcoraw ' + 'gel seq svs bif tf8 tf2 btf'.split()) + + def FILEOPEN_FILTER(): + # String for use in Windows File Open box + return [('%s files' % ext.upper(), '*.%s' % ext) + for ext in TIFF.FILE_EXTENSIONS] + [('allfiles', '*')] + + def AXES_LABELS(): + # TODO: is there a standard for character axes labels? + axes = { + 'X': 'width', + 'Y': 'height', + 'Z': 'depth', + 'S': 'sample', # rgb(a) + 'I': 'series', # general sequence, plane, page, IFD + 'T': 'time', + 'C': 'channel', # color, emission wavelength + 'A': 'angle', + 'P': 'phase', # formerly F # P is Position in LSM! + 'R': 'tile', # region, point, mosaic + 'H': 'lifetime', # histogram + 'E': 'lambda', # excitation wavelength + 'L': 'exposure', # lux + 'V': 'event', + 'Q': 'other', + 'M': 'mosaic', # LSM 6 + } + axes.update(dict((v, k) for k, v in axes.items())) + return axes + + def ANDOR_TAGS(): + # Andor Technology tags #4864 - 5030 + return set(range(4864, 5030)) + + def EXIF_TAGS(): + tags = { + # 65000 - 65112 Photoshop Camera RAW EXIF tags + 65000: 'OwnerName', + 65001: 'SerialNumber', + 65002: 'Lens', + 65100: 'RawFile', + 65101: 'Converter', + 65102: 'WhiteBalance', + 65105: 'Exposure', + 65106: 'Shadows', + 65107: 'Brightness', + 65108: 'Contrast', + 65109: 'Saturation', + 65110: 'Sharpness', + 65111: 'Smoothness', + 65112: 'MoireFilter', + } + tags.update(TIFF.TAGS) + return tags + + def GPS_TAGS(): + return { + 0: 'GPSVersionID', + 1: 'GPSLatitudeRef', + 2: 'GPSLatitude', + 3: 'GPSLongitudeRef', + 4: 'GPSLongitude', + 5: 'GPSAltitudeRef', + 6: 'GPSAltitude', + 7: 'GPSTimeStamp', + 8: 'GPSSatellites', + 9: 'GPSStatus', + 10: 'GPSMeasureMode', + 11: 'GPSDOP', + 12: 'GPSSpeedRef', + 13: 'GPSSpeed', + 14: 'GPSTrackRef', + 15: 'GPSTrack', + 16: 'GPSImgDirectionRef', + 17: 'GPSImgDirection', + 18: 'GPSMapDatum', + 19: 'GPSDestLatitudeRef', + 20: 'GPSDestLatitude', + 21: 'GPSDestLongitudeRef', + 22: 'GPSDestLongitude', + 23: 'GPSDestBearingRef', 
+ 24: 'GPSDestBearing', + 25: 'GPSDestDistanceRef', + 26: 'GPSDestDistance', + 27: 'GPSProcessingMethod', + 28: 'GPSAreaInformation', + 29: 'GPSDateStamp', + 30: 'GPSDifferential', + 31: 'GPSHPositioningError', + } + + def IOP_TAGS(): + return { + 1: 'InteroperabilityIndex', + 2: 'InteroperabilityVersion', + 4096: 'RelatedImageFileFormat', + 4097: 'RelatedImageWidth', + 4098: 'RelatedImageLength', + } + + def GEO_KEYS(): + return { + 1024: 'GTModelTypeGeoKey', + 1025: 'GTRasterTypeGeoKey', + 1026: 'GTCitationGeoKey', + 2048: 'GeographicTypeGeoKey', + 2049: 'GeogCitationGeoKey', + 2050: 'GeogGeodeticDatumGeoKey', + 2051: 'GeogPrimeMeridianGeoKey', + 2052: 'GeogLinearUnitsGeoKey', + 2053: 'GeogLinearUnitSizeGeoKey', + 2054: 'GeogAngularUnitsGeoKey', + 2055: 'GeogAngularUnitsSizeGeoKey', + 2056: 'GeogEllipsoidGeoKey', + 2057: 'GeogSemiMajorAxisGeoKey', + 2058: 'GeogSemiMinorAxisGeoKey', + 2059: 'GeogInvFlatteningGeoKey', + 2060: 'GeogAzimuthUnitsGeoKey', + 2061: 'GeogPrimeMeridianLongGeoKey', + 2062: 'GeogTOWGS84GeoKey', + 3059: 'ProjLinearUnitsInterpCorrectGeoKey', # GDAL + 3072: 'ProjectedCSTypeGeoKey', + 3073: 'PCSCitationGeoKey', + 3074: 'ProjectionGeoKey', + 3075: 'ProjCoordTransGeoKey', + 3076: 'ProjLinearUnitsGeoKey', + 3077: 'ProjLinearUnitSizeGeoKey', + 3078: 'ProjStdParallel1GeoKey', + 3079: 'ProjStdParallel2GeoKey', + 3080: 'ProjNatOriginLongGeoKey', + 3081: 'ProjNatOriginLatGeoKey', + 3082: 'ProjFalseEastingGeoKey', + 3083: 'ProjFalseNorthingGeoKey', + 3084: 'ProjFalseOriginLongGeoKey', + 3085: 'ProjFalseOriginLatGeoKey', + 3086: 'ProjFalseOriginEastingGeoKey', + 3087: 'ProjFalseOriginNorthingGeoKey', + 3088: 'ProjCenterLongGeoKey', + 3089: 'ProjCenterLatGeoKey', + 3090: 'ProjCenterEastingGeoKey', + 3091: 'ProjFalseOriginNorthingGeoKey', + 3092: 'ProjScaleAtNatOriginGeoKey', + 3093: 'ProjScaleAtCenterGeoKey', + 3094: 'ProjAzimuthAngleGeoKey', + 3095: 'ProjStraightVertPoleLongGeoKey', + 3096: 'ProjRectifiedGridAngleGeoKey', + 4096: 'VerticalCSTypeGeoKey', + 
4097: 'VerticalCitationGeoKey', + 4098: 'VerticalDatumGeoKey', + 4099: 'VerticalUnitsGeoKey', + } + + def GEO_CODES(): + try: + from .tifffile_geodb import GEO_CODES # delayed import + except (ImportError, ValueError): + try: + from tifffile_geodb import GEO_CODES # delayed import + except (ImportError, ValueError): + GEO_CODES = {} + return GEO_CODES + + def CZ_LSMINFO(): + return [ + ('MagicNumber', 'u4'), + ('StructureSize', 'i4'), + ('DimensionX', 'i4'), + ('DimensionY', 'i4'), + ('DimensionZ', 'i4'), + ('DimensionChannels', 'i4'), + ('DimensionTime', 'i4'), + ('DataType', 'i4'), # DATATYPES + ('ThumbnailX', 'i4'), + ('ThumbnailY', 'i4'), + ('VoxelSizeX', 'f8'), + ('VoxelSizeY', 'f8'), + ('VoxelSizeZ', 'f8'), + ('OriginX', 'f8'), + ('OriginY', 'f8'), + ('OriginZ', 'f8'), + ('ScanType', 'u2'), + ('SpectralScan', 'u2'), + ('TypeOfData', 'u4'), # TYPEOFDATA + ('OffsetVectorOverlay', 'u4'), + ('OffsetInputLut', 'u4'), + ('OffsetOutputLut', 'u4'), + ('OffsetChannelColors', 'u4'), + ('TimeIntervall', 'f8'), + ('OffsetChannelDataTypes', 'u4'), + ('OffsetScanInformation', 'u4'), # SCANINFO + ('OffsetKsData', 'u4'), + ('OffsetTimeStamps', 'u4'), + ('OffsetEventList', 'u4'), + ('OffsetRoi', 'u4'), + ('OffsetBleachRoi', 'u4'), + ('OffsetNextRecording', 'u4'), + # LSM 2.0 ends here + ('DisplayAspectX', 'f8'), + ('DisplayAspectY', 'f8'), + ('DisplayAspectZ', 'f8'), + ('DisplayAspectTime', 'f8'), + ('OffsetMeanOfRoisOverlay', 'u4'), + ('OffsetTopoIsolineOverlay', 'u4'), + ('OffsetTopoProfileOverlay', 'u4'), + ('OffsetLinescanOverlay', 'u4'), + ('ToolbarFlags', 'u4'), + ('OffsetChannelWavelength', 'u4'), + ('OffsetChannelFactors', 'u4'), + ('ObjectiveSphereCorrection', 'f8'), + ('OffsetUnmixParameters', 'u4'), + # LSM 3.2, 4.0 end here + ('OffsetAcquisitionParameters', 'u4'), + ('OffsetCharacteristics', 'u4'), + ('OffsetPalette', 'u4'), + ('TimeDifferenceX', 'f8'), + ('TimeDifferenceY', 'f8'), + ('TimeDifferenceZ', 'f8'), + ('InternalUse1', 'u4'), + ('DimensionP', 'i4'), + 
('DimensionM', 'i4'), + ('DimensionsReserved', '16i4'), + ('OffsetTilePositions', 'u4'), + ('', '9u4'), # Reserved + ('OffsetPositions', 'u4'), + # ('', '21u4'), # must be 0 + ] + + def CZ_LSMINFO_READERS(): + # Import functions for CZ_LSMINFO sub-records + # TODO: read more CZ_LSMINFO sub-records + return { + 'ScanInformation': read_lsm_scaninfo, + 'TimeStamps': read_lsm_timestamps, + 'EventList': read_lsm_eventlist, + 'ChannelColors': read_lsm_channelcolors, + 'Positions': read_lsm_floatpairs, + 'TilePositions': read_lsm_floatpairs, + 'VectorOverlay': None, + 'InputLut': None, + 'OutputLut': None, + 'TimeIntervall': None, + 'ChannelDataTypes': None, + 'KsData': None, + 'Roi': None, + 'BleachRoi': None, + 'NextRecording': None, + 'MeanOfRoisOverlay': None, + 'TopoIsolineOverlay': None, + 'TopoProfileOverlay': None, + 'ChannelWavelength': None, + 'SphereCorrection': None, + 'ChannelFactors': None, + 'UnmixParameters': None, + 'AcquisitionParameters': None, + 'Characteristics': None, + } + + def CZ_LSMINFO_SCANTYPE(): + # Map CZ_LSMINFO.ScanType to dimension order + return { + 0: 'XYZCT', # 'Stack' normal x-y-z-scan + 1: 'XYZCT', # 'Z-Scan' x-z-plane Y=1 + 2: 'XYZCT', # 'Line' + 3: 'XYTCZ', # 'Time Series Plane' time series x-y XYCTZ ? 
Z=1 + 4: 'XYZTC', # 'Time Series z-Scan' time series x-z + 5: 'XYTCZ', # 'Time Series Mean-of-ROIs' + 6: 'XYZTC', # 'Time Series Stack' time series x-y-z + 7: 'XYCTZ', # Spline Scan + 8: 'XYCZT', # Spline Plane x-z + 9: 'XYTCZ', # Time Series Spline Plane x-z + 10: 'XYZCT', # 'Time Series Point' point mode + } + + def CZ_LSMINFO_DIMENSIONS(): + # Map dimension codes to CZ_LSMINFO attribute + return { + 'X': 'DimensionX', + 'Y': 'DimensionY', + 'Z': 'DimensionZ', + 'C': 'DimensionChannels', + 'T': 'DimensionTime', + 'P': 'DimensionP', + 'M': 'DimensionM', + } + + def CZ_LSMINFO_DATATYPES(): + # Description of CZ_LSMINFO.DataType + return { + 0: 'varying data types', + 1: '8 bit unsigned integer', + 2: '12 bit unsigned integer', + 5: '32 bit float', + } + + def CZ_LSMINFO_TYPEOFDATA(): + # Description of CZ_LSMINFO.TypeOfData + return { + 0: 'Original scan data', + 1: 'Calculated data', + 2: '3D reconstruction', + 3: 'Topography height map', + } + + def CZ_LSMINFO_SCANINFO_ARRAYS(): + return { + 0x20000000: 'Tracks', + 0x30000000: 'Lasers', + 0x60000000: 'DetectionChannels', + 0x80000000: 'IlluminationChannels', + 0xa0000000: 'BeamSplitters', + 0xc0000000: 'DataChannels', + 0x11000000: 'Timers', + 0x13000000: 'Markers', + } + + def CZ_LSMINFO_SCANINFO_STRUCTS(): + return { + # 0x10000000: 'Recording', + 0x40000000: 'Track', + 0x50000000: 'Laser', + 0x70000000: 'DetectionChannel', + 0x90000000: 'IlluminationChannel', + 0xb0000000: 'BeamSplitter', + 0xd0000000: 'DataChannel', + 0x12000000: 'Timer', + 0x14000000: 'Marker', + } + + def CZ_LSMINFO_SCANINFO_ATTRIBUTES(): + return { + # Recording + 0x10000001: 'Name', + 0x10000002: 'Description', + 0x10000003: 'Notes', + 0x10000004: 'Objective', + 0x10000005: 'ProcessingSummary', + 0x10000006: 'SpecialScanMode', + 0x10000007: 'ScanType', + 0x10000008: 'ScanMode', + 0x10000009: 'NumberOfStacks', + 0x1000000a: 'LinesPerPlane', + 0x1000000b: 'SamplesPerLine', + 0x1000000c: 'PlanesPerVolume', + 0x1000000d: 'ImagesWidth', + 
0x1000000e: 'ImagesHeight', + 0x1000000f: 'ImagesNumberPlanes', + 0x10000010: 'ImagesNumberStacks', + 0x10000011: 'ImagesNumberChannels', + 0x10000012: 'LinscanXySize', + 0x10000013: 'ScanDirection', + 0x10000014: 'TimeSeries', + 0x10000015: 'OriginalScanData', + 0x10000016: 'ZoomX', + 0x10000017: 'ZoomY', + 0x10000018: 'ZoomZ', + 0x10000019: 'Sample0X', + 0x1000001a: 'Sample0Y', + 0x1000001b: 'Sample0Z', + 0x1000001c: 'SampleSpacing', + 0x1000001d: 'LineSpacing', + 0x1000001e: 'PlaneSpacing', + 0x1000001f: 'PlaneWidth', + 0x10000020: 'PlaneHeight', + 0x10000021: 'VolumeDepth', + 0x10000023: 'Nutation', + 0x10000034: 'Rotation', + 0x10000035: 'Precession', + 0x10000036: 'Sample0time', + 0x10000037: 'StartScanTriggerIn', + 0x10000038: 'StartScanTriggerOut', + 0x10000039: 'StartScanEvent', + 0x10000040: 'StartScanTime', + 0x10000041: 'StopScanTriggerIn', + 0x10000042: 'StopScanTriggerOut', + 0x10000043: 'StopScanEvent', + 0x10000044: 'StopScanTime', + 0x10000045: 'UseRois', + 0x10000046: 'UseReducedMemoryRois', + 0x10000047: 'User', + 0x10000048: 'UseBcCorrection', + 0x10000049: 'PositionBcCorrection1', + 0x10000050: 'PositionBcCorrection2', + 0x10000051: 'InterpolationY', + 0x10000052: 'CameraBinning', + 0x10000053: 'CameraSupersampling', + 0x10000054: 'CameraFrameWidth', + 0x10000055: 'CameraFrameHeight', + 0x10000056: 'CameraOffsetX', + 0x10000057: 'CameraOffsetY', + 0x10000059: 'RtBinning', + 0x1000005a: 'RtFrameWidth', + 0x1000005b: 'RtFrameHeight', + 0x1000005c: 'RtRegionWidth', + 0x1000005d: 'RtRegionHeight', + 0x1000005e: 'RtOffsetX', + 0x1000005f: 'RtOffsetY', + 0x10000060: 'RtZoom', + 0x10000061: 'RtLinePeriod', + 0x10000062: 'Prescan', + 0x10000063: 'ScanDirectionZ', + # Track + 0x40000001: 'MultiplexType', # 0 After Line; 1 After Frame + 0x40000002: 'MultiplexOrder', + 0x40000003: 'SamplingMode', # 0 Sample; 1 Line Avg; 2 Frame Avg + 0x40000004: 'SamplingMethod', # 1 Mean; 2 Sum + 0x40000005: 'SamplingNumber', + 0x40000006: 'Acquire', + 0x40000007: 
'SampleObservationTime', + 0x4000000b: 'TimeBetweenStacks', + 0x4000000c: 'Name', + 0x4000000d: 'Collimator1Name', + 0x4000000e: 'Collimator1Position', + 0x4000000f: 'Collimator2Name', + 0x40000010: 'Collimator2Position', + 0x40000011: 'IsBleachTrack', + 0x40000012: 'IsBleachAfterScanNumber', + 0x40000013: 'BleachScanNumber', + 0x40000014: 'TriggerIn', + 0x40000015: 'TriggerOut', + 0x40000016: 'IsRatioTrack', + 0x40000017: 'BleachCount', + 0x40000018: 'SpiCenterWavelength', + 0x40000019: 'PixelTime', + 0x40000021: 'CondensorFrontlens', + 0x40000023: 'FieldStopValue', + 0x40000024: 'IdCondensorAperture', + 0x40000025: 'CondensorAperture', + 0x40000026: 'IdCondensorRevolver', + 0x40000027: 'CondensorFilter', + 0x40000028: 'IdTransmissionFilter1', + 0x40000029: 'IdTransmission1', + 0x40000030: 'IdTransmissionFilter2', + 0x40000031: 'IdTransmission2', + 0x40000032: 'RepeatBleach', + 0x40000033: 'EnableSpotBleachPos', + 0x40000034: 'SpotBleachPosx', + 0x40000035: 'SpotBleachPosy', + 0x40000036: 'SpotBleachPosz', + 0x40000037: 'IdTubelens', + 0x40000038: 'IdTubelensPosition', + 0x40000039: 'TransmittedLight', + 0x4000003a: 'ReflectedLight', + 0x4000003b: 'SimultanGrabAndBleach', + 0x4000003c: 'BleachPixelTime', + # Laser + 0x50000001: 'Name', + 0x50000002: 'Acquire', + 0x50000003: 'Power', + # DetectionChannel + 0x70000001: 'IntegrationMode', + 0x70000002: 'SpecialMode', + 0x70000003: 'DetectorGainFirst', + 0x70000004: 'DetectorGainLast', + 0x70000005: 'AmplifierGainFirst', + 0x70000006: 'AmplifierGainLast', + 0x70000007: 'AmplifierOffsFirst', + 0x70000008: 'AmplifierOffsLast', + 0x70000009: 'PinholeDiameter', + 0x7000000a: 'CountingTrigger', + 0x7000000b: 'Acquire', + 0x7000000c: 'PointDetectorName', + 0x7000000d: 'AmplifierName', + 0x7000000e: 'PinholeName', + 0x7000000f: 'FilterSetName', + 0x70000010: 'FilterName', + 0x70000013: 'IntegratorName', + 0x70000014: 'ChannelName', + 0x70000015: 'DetectorGainBc1', + 0x70000016: 'DetectorGainBc2', + 0x70000017: 
            'AmplifierGainBc1',
            0x70000018: 'AmplifierGainBc2',
            0x70000019: 'AmplifierOffsetBc1',
            0x70000020: 'AmplifierOffsetBc2',
            0x70000021: 'SpectralScanChannels',
            0x70000022: 'SpiWavelengthStart',
            0x70000023: 'SpiWavelengthStop',
            0x70000026: 'DyeName',
            0x70000027: 'DyeFolder',
            # IlluminationChannel
            0x90000001: 'Name',
            0x90000002: 'Power',
            0x90000003: 'Wavelength',
            0x90000004: 'Aquire',
            0x90000005: 'DetchannelName',
            0x90000006: 'PowerBc1',
            0x90000007: 'PowerBc2',
            # BeamSplitter
            0xb0000001: 'FilterSet',
            0xb0000002: 'Filter',
            0xb0000003: 'Name',
            # DataChannel
            0xd0000001: 'Name',
            0xd0000003: 'Acquire',
            0xd0000004: 'Color',
            0xd0000005: 'SampleType',
            0xd0000006: 'BitsPerSample',
            0xd0000007: 'RatioType',
            0xd0000008: 'RatioTrack1',
            0xd0000009: 'RatioTrack2',
            0xd000000a: 'RatioChannel1',
            0xd000000b: 'RatioChannel2',
            0xd000000c: 'RatioConst1',
            0xd000000d: 'RatioConst2',
            0xd000000e: 'RatioConst3',
            0xd000000f: 'RatioConst4',
            0xd0000010: 'RatioConst5',
            0xd0000011: 'RatioConst6',
            0xd0000012: 'RatioFirstImages1',
            0xd0000013: 'RatioFirstImages2',
            0xd0000014: 'DyeName',
            0xd0000015: 'DyeFolder',
            0xd0000016: 'Spectrum',
            0xd0000017: 'Acquire',
            # Timer
            0x12000001: 'Name',
            0x12000002: 'Description',
            0x12000003: 'Interval',
            0x12000004: 'TriggerIn',
            0x12000005: 'TriggerOut',
            0x12000006: 'ActivationTime',
            0x12000007: 'ActivationNumber',
            # Marker
            0x14000001: 'Name',
            0x14000002: 'Description',
            0x14000003: 'TriggerIn',
            0x14000004: 'TriggerOut',
        }

    def NIH_IMAGE_HEADER():
        # Structured dtype field list for the NIH Image file header.
        return [
            ('FileID', 'a8'),
            ('nLines', 'i2'),
            ('PixelsPerLine', 'i2'),
            ('Version', 'i2'),
            ('OldLutMode', 'i2'),
            ('OldnColors', 'i2'),
            ('Colors', 'u1', (3, 32)),
            ('OldColorStart', 'i2'),
            ('ColorWidth', 'i2'),
            ('ExtraColors', 'u2', (6, 3)),
            ('nExtraColors', 'i2'),
            ('ForegroundIndex', 'i2'),
            ('BackgroundIndex', 'i2'),
            ('XScale', 'f8'),
            ('Unused2', 'i2'),
            ('Unused3', 'i2'),
            ('UnitsID', 'i2'),  # NIH_UNITS_TYPE
            ('p1', [('x', 'i2'), ('y', 'i2')]),
            ('p2', [('x', 'i2'), ('y', 'i2')]),
            ('CurveFitType', 'i2'),  # NIH_CURVEFIT_TYPE
            ('nCoefficients', 'i2'),
            ('Coeff', 'f8', 6),
            ('UMsize', 'u1'),
            ('UM', 'a15'),
            ('UnusedBoolean', 'u1'),
            ('BinaryPic', 'b1'),
            ('SliceStart', 'i2'),
            ('SliceEnd', 'i2'),
            ('ScaleMagnification', 'f4'),
            ('nSlices', 'i2'),
            ('SliceSpacing', 'f4'),
            ('CurrentSlice', 'i2'),
            ('FrameInterval', 'f4'),
            ('PixelAspectRatio', 'f4'),
            ('ColorStart', 'i2'),
            ('ColorEnd', 'i2'),
            ('nColors', 'i2'),
            ('Fill1', '3u2'),
            ('Fill2', '3u2'),
            ('Table', 'u1'),  # NIH_COLORTABLE_TYPE
            ('LutMode', 'u1'),  # NIH_LUTMODE_TYPE
            ('InvertedTable', 'b1'),
            ('ZeroClip', 'b1'),
            ('XUnitSize', 'u1'),
            ('XUnit', 'a11'),
            ('StackType', 'i2'),  # NIH_STACKTYPE_TYPE
            # ('UnusedBytes', 'u1', 200)
        ]

    def NIH_COLORTABLE_TYPE():
        # Enumerated values for the NIH 'Table' header field.
        return ('CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32',
                'Rainbow', 'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')

    def NIH_LUTMODE_TYPE():
        # Enumerated values for the NIH 'LutMode' header field.
        return ('PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
                'ColorLut', 'CustomGrayscale')

    def NIH_CURVEFIT_TYPE():
        # Enumerated values for the NIH 'CurveFitType' header field.
        return ('StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
                'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1',
                'Uncalibrated', 'UncalibratedOD')

    def NIH_UNITS_TYPE():
        # Enumerated values for the NIH 'UnitsID' header field.
        return ('Nanometers', 'Micrometers', 'Millimeters', 'Centimeters',
                'Meters', 'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels',
                'OtherUnits')

    def NIH_STACKTYPE_TYPE():
        # Enumerated values for the NIH 'StackType' header field.
        return ('VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')

    def TVIPS_HEADER_V1():
        # TVIPS TemData structure from EMMENU Help file
        return [
            ('Version', 'i4'),
            ('CommentV1', 'a80'),
            ('HighTension', 'i4'),
            ('SphericalAberration', 'i4'),
            ('IlluminationAperture', 'i4'),
            ('Magnification', 'i4'),
            ('PostMagnification', 'i4'),
            ('FocalLength', 'i4'),
            ('Defocus', 'i4'),
            ('Astigmatism', 'i4'),
            ('AstigmatismDirection', 'i4'),
            ('BiprismVoltage', 'i4'),
            ('SpecimenTiltAngle', 'i4'),
            ('SpecimenTiltDirection', 'i4'),
            ('IlluminationTiltDirection', 'i4'),
            ('IlluminationTiltAngle', 'i4'),
            ('ImageMode', 'i4'),
            ('EnergySpread', 'i4'),
            ('ChromaticAberration', 'i4'),
            ('ShutterType', 'i4'),
            ('DefocusSpread', 'i4'),
            ('CcdNumber', 'i4'),
            ('CcdSize', 'i4'),
            ('OffsetXV1', 'i4'),
            ('OffsetYV1', 'i4'),
            ('PhysicalPixelSize', 'i4'),
            ('Binning', 'i4'),
            ('ReadoutSpeed', 'i4'),
            ('GainV1', 'i4'),
            ('SensitivityV1', 'i4'),
            ('ExposureTimeV1', 'i4'),
            ('FlatCorrected', 'i4'),
            ('DeadPxCorrected', 'i4'),
            ('ImageMean', 'i4'),
            ('ImageStd', 'i4'),
            ('DisplacementX', 'i4'),
            ('DisplacementY', 'i4'),
            ('DateV1', 'i4'),
            ('TimeV1', 'i4'),
            ('ImageMin', 'i4'),
            ('ImageMax', 'i4'),
            ('ImageStatisticsQuality', 'i4'),
        ]

    def TVIPS_HEADER_V2():
        # TVIPS EMMENU v2 record fields.
        return [
            ('ImageName', 'V160'),  # utf16
            ('ImageFolder', 'V160'),
            ('ImageSizeX', 'i4'),
            ('ImageSizeY', 'i4'),
            ('ImageSizeZ', 'i4'),
            ('ImageSizeE', 'i4'),
            ('ImageDataType', 'i4'),
            ('Date', 'i4'),
            ('Time', 'i4'),
            ('Comment', 'V1024'),
            ('ImageHistory', 'V1024'),
            ('Scaling', '16f4'),
            ('ImageStatistics', '16c16'),
            ('ImageType', 'i4'),
            ('ImageDisplaType', 'i4'),
            ('PixelSizeX', 'f4'),  # distance between two px in x, [nm]
            ('PixelSizeY', 'f4'),  # distance between two px in y, [nm]
            ('ImageDistanceZ', 'f4'),
            ('ImageDistanceE', 'f4'),
            ('ImageMisc', '32f4'),
            ('TemType', 'V160'),
            ('TemHighTension', 'f4'),
            ('TemAberrations', '32f4'),
            ('TemEnergy', '32f4'),
            ('TemMode', 'i4'),
            ('TemMagnification', 'f4'),
            ('TemMagnificationCorrection', 'f4'),
            ('PostMagnification', 'f4'),
            ('TemStageType', 'i4'),
            ('TemStagePosition', '5f4'),  # x, y, z, a, b
            ('TemImageShift', '2f4'),
            ('TemBeamShift', '2f4'),
            ('TemBeamTilt', '2f4'),
            ('TilingParameters', '7f4'),  # 0: tiling?
            #                                    1:x 2:y 3: max x
            #                                    4: max y 5: overlap x
            #                                    6: overlap y
            ('TemIllumination', '3f4'),  # 0: spotsize 1: intensity
            ('TemShutter', 'i4'),
            ('TemMisc', '32f4'),
            ('CameraType', 'V160'),
            ('PhysicalPixelSizeX', 'f4'),
            ('PhysicalPixelSizeY', 'f4'),
            ('OffsetX', 'i4'),
            ('OffsetY', 'i4'),
            ('BinningX', 'i4'),
            ('BinningY', 'i4'),
            ('ExposureTime', 'f4'),
            ('Gain', 'f4'),
            ('ReadoutRate', 'f4'),
            ('FlatfieldDescription', 'V160'),
            ('Sensitivity', 'f4'),
            ('Dose', 'f4'),
            ('CamMisc', '32f4'),
            ('FeiMicroscopeInformation', 'V1024'),
            ('FeiSpecimenInformation', 'V1024'),
            ('Magic', 'u4'),
        ]

    def MM_HEADER():
        # Olympus FluoView MM_Header
        MM_DIMENSION = [
            ('Name', 'a16'),
            ('Size', 'i4'),
            ('Origin', 'f8'),
            ('Resolution', 'f8'),
            ('Unit', 'a64')]
        return [
            ('HeaderFlag', 'i2'),
            ('ImageType', 'u1'),
            ('ImageName', 'a257'),
            ('OffsetData', 'u4'),
            ('PaletteSize', 'i4'),
            ('OffsetPalette0', 'u4'),
            ('OffsetPalette1', 'u4'),
            ('CommentSize', 'i4'),
            ('OffsetComment', 'u4'),
            ('Dimensions', MM_DIMENSION, 10),
            ('OffsetPosition', 'u4'),
            ('MapType', 'i2'),
            ('MapMin', 'f8'),
            ('MapMax', 'f8'),
            ('MinValue', 'f8'),
            ('MaxValue', 'f8'),
            ('OffsetMap', 'u4'),
            ('Gamma', 'f8'),
            ('Offset', 'f8'),
            ('GrayChannel', MM_DIMENSION),
            ('OffsetThumbnail', 'u4'),
            ('VoiceField', 'i4'),
            ('OffsetVoiceField', 'u4'),
        ]

    def MM_DIMENSIONS():
        # Map FluoView MM_Header.Dimensions to axes characters
        return {
            'X': 'X',
            'Y': 'Y',
            'Z': 'Z',
            'T': 'T',
            'CH': 'C',
            'WAVELENGTH': 'C',
            'TIME': 'T',
            'XY': 'R',
            'EVENT': 'V',
            'EXPOSURE': 'L',
        }

    def UIC_TAGS():
        # Map Universal Imaging Corporation MetaMorph internal tag ids to
        # name and type
        from fractions import Fraction  # delayed import

        return [
            ('AutoScale', int),
            ('MinScale', int),
            ('MaxScale', int),
            ('SpatialCalibration', int),
            ('XCalibration', Fraction),
            ('YCalibration', Fraction),
            ('CalibrationUnits', str),
            ('Name', str),
            ('ThreshState', int),
            ('ThreshStateRed', int),
            ('tagid_10', None),  # undefined
            ('ThreshStateGreen', int),
            ('ThreshStateBlue', int),
            ('ThreshStateLo', int),
            ('ThreshStateHi', int),
            ('Zoom', int),
            ('CreateTime', julian_datetime),
            ('LastSavedTime', julian_datetime),
            ('currentBuffer', int),
            ('grayFit', None),
            ('grayPointCount', None),
            ('grayX', Fraction),
            ('grayY', Fraction),
            ('grayMin', Fraction),
            ('grayMax', Fraction),
            ('grayUnitName', str),
            ('StandardLUT', int),
            ('wavelength', int),
            ('StagePosition', '(%i,2,2)u4'),  # N xy positions as fract
            ('CameraChipOffset', '(%i,2,2)u4'),  # N xy offsets as fract
            ('OverlayMask', None),
            ('OverlayCompress', None),
            ('Overlay', None),
            ('SpecialOverlayMask', None),
            ('SpecialOverlayCompress', None),
            ('SpecialOverlay', None),
            ('ImageProperty', read_uic_image_property),
            ('StageLabel', '%ip'),  # N str
            ('AutoScaleLoInfo', Fraction),
            ('AutoScaleHiInfo', Fraction),
            ('AbsoluteZ', '(%i,2)u4'),  # N fractions
            ('AbsoluteZValid', '(%i,)u4'),  # N long
            ('Gamma', 'I'),  # 'I' uses offset
            ('GammaRed', 'I'),
            ('GammaGreen', 'I'),
            ('GammaBlue', 'I'),
            ('CameraBin', '2I'),
            ('NewLUT', int),
            ('ImagePropertyEx', None),
            ('PlaneProperty', int),
            ('UserLutTable', '(256,3)u1'),
            ('RedAutoScaleInfo', int),
            ('RedAutoScaleLoInfo', Fraction),
            ('RedAutoScaleHiInfo', Fraction),
            ('RedMinScaleInfo', int),
            ('RedMaxScaleInfo', int),
            ('GreenAutoScaleInfo', int),
            ('GreenAutoScaleLoInfo', Fraction),
            ('GreenAutoScaleHiInfo', Fraction),
            ('GreenMinScaleInfo', int),
            ('GreenMaxScaleInfo', int),
            ('BlueAutoScaleInfo', int),
            ('BlueAutoScaleLoInfo', Fraction),
            ('BlueAutoScaleHiInfo', Fraction),
            ('BlueMinScaleInfo', int),
            ('BlueMaxScaleInfo', int),
            # ('OverlayPlaneColor', read_uic_overlay_plane_color),
        ]

    def PILATUS_HEADER():
        # PILATUS CBF Header Specification, Version 1.4
        # Map key to [value_indices], type
        return {
            'Detector': ([slice(1, None)], str),
            'Pixel_size': ([1, 4], float),
            'Silicon': ([3], float),
            'Exposure_time': ([1], float),
            'Exposure_period': ([1], float),
            'Tau': ([1], float),
            'Count_cutoff': ([1], int),
            'Threshold_setting': ([1], float),
            'Gain_setting': ([1, 2], str),
            'N_excluded_pixels': ([1], int),
            'Excluded_pixels': ([1], str),
            'Flat_field': ([1], str),
            'Trim_file': ([1], str),
            'Image_path': ([1], str),
            # optional
            'Wavelength': ([1], float),
            'Energy_range': ([1, 2], float),
            'Detector_distance': ([1], float),
            'Detector_Voffset': ([1], float),
            'Beam_xy': ([1, 2], float),
            'Flux': ([1], str),
            'Filter_transmission': ([1], float),
            'Start_angle': ([1], float),
            'Angle_increment': ([1], float),
            'Detector_2theta': ([1], float),
            'Polarization': ([1], float),
            'Alpha': ([1], float),
            'Kappa': ([1], float),
            'Phi': ([1], float),
            'Phi_increment': ([1], float),
            'Chi': ([1], float),
            'Chi_increment': ([1], float),
            'Oscillation_axis': ([slice(1, None)], str),
            'N_oscillations': ([1], int),
            'Start_position': ([1], float),
            'Position_increment': ([1], float),
            'Shutter_time': ([1], float),
            'Omega': ([1], float),
            'Omega_increment': ([1], float)
        }

    def REVERSE_BITORDER_BYTES():
        # Bytes with reversed bitorder
        return (
            b'\x00\x80@\xc0 \xa0`\xe0\x10\x90P\xd00\xb0p\xf0\x08\x88H\xc8('
            b'\xa8h\xe8\x18\x98X\xd88\xb8x\xf8\x04\x84D\xc4$\xa4d\xe4\x14'
            b'\x94T\xd44\xb4t\xf4\x0c\x8cL\xcc,\xacl\xec\x1c\x9c\\\xdc<\xbc|'
            b'\xfc\x02\x82B\xc2"\xa2b\xe2\x12\x92R\xd22\xb2r\xf2\n\x8aJ\xca*'
            b'\xaaj\xea\x1a\x9aZ\xda:\xbaz\xfa\x06\x86F\xc6&\xa6f\xe6\x16'
            b'\x96V\xd66\xb6v\xf6\x0e\x8eN\xce.\xaen\xee\x1e\x9e^\xde>\xbe~'
            b'\xfe\x01\x81A\xc1!\xa1a\xe1\x11\x91Q\xd11\xb1q\xf1\t\x89I\xc9)'
            b'\xa9i\xe9\x19\x99Y\xd99\xb9y\xf9\x05\x85E\xc5%\xa5e\xe5\x15'
            b'\x95U\xd55\xb5u\xf5\r\x8dM\xcd-\xadm\xed\x1d\x9d]\xdd=\xbd}'
            b'\xfd\x03\x83C\xc3#\xa3c\xe3\x13\x93S\xd33\xb3s\xf3\x0b\x8bK'
            b'\xcb+\xabk\xeb\x1b\x9b[\xdb;\xbb{\xfb\x07\x87G\xc7\'\xa7g\xe7'
            b'\x17\x97W\xd77\xb7w\xf7\x0f\x8fO\xcf/\xafo\xef\x1f\x9f_'
            b'\xdf?\xbf\x7f\xff')

    def REVERSE_BITORDER_ARRAY():
        # Numpy array of bytes with reversed bitorder
        return numpy.frombuffer(TIFF.REVERSE_BITORDER_BYTES, dtype='uint8')

    def ALLOCATIONGRANULARITY():
        # alignment for writing contiguous data to TIFF
        import mmap  # delayed import
        return mmap.ALLOCATIONGRANULARITY


def read_tags(fh, byteorder, offsetsize, tagnames,
              customtags=None, maxifds=None):
    """Read tags from chain of IFDs and return as list of dicts.

    The file handle position must be at a valid IFD header.

    Parameters: fh is an open file-like object supporting seek/tell/read;
    byteorder is '<' or '>'; offsetsize is 4 (classic TIFF) or 8 (BigTIFF);
    tagnames maps tag codes to names; customtags maps codes to
    (name, readfunc) pairs; maxifds limits the number of IFDs read.

    """
    # Struct formats depend on classic vs BigTIFF layout.
    if offsetsize == 4:
        offsetformat = byteorder+'I'
        tagnosize = 2
        tagnoformat = byteorder+'H'
        tagsize = 12
        tagformat1 = byteorder+'HH'
        tagformat2 = byteorder+'I4s'
    elif offsetsize == 8:
        offsetformat = byteorder+'Q'
        tagnosize = 8
        tagnoformat = byteorder+'Q'
        tagsize = 20
        tagformat1 = byteorder+'HH'
        tagformat2 = byteorder+'Q8s'
    else:
        raise ValueError('invalid offset size')

    if customtags is None:
        customtags = {}
    if maxifds is None:
        maxifds = 2**32

    result = []
    unpack = struct.unpack
    offset = fh.tell()
    while len(result) < maxifds:
        # loop over IFDs
        try:
            tagno = unpack(tagnoformat, fh.read(tagnosize))[0]
            if tagno > 4096:
                raise ValueError('suspicious number of tags')
        except Exception:
            warnings.warn('corrupted tag list at offset %i' % offset)
            break

        tags = {}
        data = fh.read(tagsize * tagno)
        pos = fh.tell()
        index = 0
        for _ in range(tagno):
            code, type_ = unpack(tagformat1, data[index:index+4])
            count, value = unpack(tagformat2, data[index+4:index+tagsize])
            index += tagsize
            name = tagnames.get(code, str(code))
            try:
                dtype = TIFF.DATA_FORMATS[type_]
            except KeyError:
                raise TiffTag.Error('unknown tag data type %i' % type_)

            fmt = '%s%i%s' % (byteorder, count * int(dtype[0]), dtype[1])
            size = struct.calcsize(fmt)
            if size > offsetsize or code in customtags:
                # value does not fit inline; it is an offset into the file
                offset = unpack(offsetformat, value)[0]
                if offset < 8 or offset > fh.size - size:
                    raise TiffTag.Error('invalid tag value offset %i' % offset)
                fh.seek(offset)
                if code in customtags:
                    readfunc = customtags[code][1]
                    value = readfunc(fh, byteorder, dtype, count, offsetsize)
                elif type_ == 7 or (count > 1 and dtype[-1] == 'B'):
                    value = read_bytes(fh, byteorder, dtype, count, offsetsize)
                elif code in tagnames or dtype[-1] == 's':
                    value = unpack(fmt, fh.read(size))
                else:
                    value = read_numpy(fh, byteorder, dtype, count, offsetsize)
            elif dtype[-1] == 'B' or type_ == 7:
                value = value[:size]
            else:
                value = unpack(fmt, value[:size])

            if code not in customtags and code not in TIFF.TAG_TUPLE:
                if len(value) == 1:
                    value = value[0]
                if type_ != 7 and dtype[-1] == 's' and isinstance(value, bytes):
                    # TIFF ASCII fields can contain multiple strings,
                    # each terminated with a NUL
                    try:
                        value = bytes2str(stripascii(value).strip())
                    except UnicodeDecodeError:
                        warnings.warn(
                            'tag %i: coercing invalid ASCII to bytes' % code)

            tags[name] = value

        result.append(tags)
        # read offset to next page
        fh.seek(pos)
        offset = unpack(offsetformat, fh.read(offsetsize))[0]
        if offset == 0:
            break
        if offset >= fh.size:
            warnings.warn('invalid page offset %i' % offset)
            break
        fh.seek(offset)

    if result and maxifds == 1:
        # single-IFD mode returns the dict itself, not a one-element list
        result = result[0]
    return result


def read_exif_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read EXIF tags from file and return as dict."""
    exif = read_tags(fh, byteorder, offsetsize, TIFF.EXIF_TAGS, maxifds=1)
    # Version fields are stored as raw bytes; decode them best-effort.
    for name in ('ExifVersion', 'FlashpixVersion'):
        try:
            exif[name] = bytes2str(exif[name])
        except Exception:
            pass
    if 'UserComment' in exif:
        # UserComment is prefixed with an 8-byte character-code id.
        idcode = exif['UserComment'][:8]
        try:
            if idcode == b'ASCII\x00\x00\x00':
                exif['UserComment'] = bytes2str(exif['UserComment'][8:])
            elif idcode == b'UNICODE\x00':
                exif['UserComment'] = exif['UserComment'][8:].decode('utf-16')
        except Exception:
            pass
    return exif


def read_gps_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read GPS tags from file and return as dict."""
    return read_tags(fh, byteorder, offsetsize, TIFF.GPS_TAGS, maxifds=1)


def read_interoperability_ifd(fh, byteorder, dtype, count, offsetsize):
    """Read Interoperability tags from file and return as dict."""
    tag_names = {1: 'InteroperabilityIndex'}
    return read_tags(fh, byteorder, offsetsize, tag_names, maxifds=1)


def read_bytes(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as byte string."""
    dtype = 'B' if dtype[-1] == 's' else byteorder+dtype[-1]
    count *= numpy.dtype(dtype).itemsize
    data = fh.read(count)
    if len(data) != count:
        warnings.warn('failed to read all bytes: %i, %i' % (len(data), count))
    return data


def read_utf8(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as unicode string."""
    return fh.read(count).decode('utf-8')


def read_numpy(fh, byteorder, dtype, count, offsetsize):
    """Read tag data from file and return as numpy array."""
    dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
    return fh.read_array(dtype, count)


def read_colormap(fh, byteorder, dtype, count, offsetsize):
    """Read ColorMap data from file and return as numpy array."""
    cmap = fh.read_array(byteorder+dtype[-1], count)
    # TIFF ColorMap stores the red, green and blue ramps consecutively.
    cmap.shape = (3, -1)
    return cmap


def read_json(fh, byteorder, dtype, count, offsetsize):
    """Read JSON tag data from file and return as object."""
    data = fh.read(count)
    try:
        return json.loads(unicode(stripnull(data), 'utf-8'))
    except ValueError:
        # best-effort: return None on malformed JSON, after warning
        warnings.warn("invalid JSON '%s'" % data)


def read_mm_header(fh, byteorder, dtype, count, offsetsize):
    """Read FluoView mm_header tag from file and return as dict."""
    mmh = fh.read_record(TIFF.MM_HEADER, byteorder=byteorder)
    mmh = recarray2dict(mmh)
    mmh['Dimensions'] = [
        (bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip())
        for d in
mmh['Dimensions']] + d = mmh['GrayChannel'] + mmh['GrayChannel'] = ( + bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip()) + return mmh + + +def read_mm_stamp(fh, byteorder, dtype, count, offsetsize): + """Read FluoView mm_stamp tag from file and return as numpy.ndarray.""" + return fh.read_array(byteorder+'f8', 8) + + +def read_uic1tag(fh, byteorder, dtype, count, offsetsize, planecount=None): + """Read MetaMorph STK UIC1Tag from file and return as dict. + + Return empty dictionary if planecount is unknown. + + """ + assert dtype in ('2I', '1I') and byteorder == '<' + result = {} + if dtype == '2I': + # pre MetaMorph 2.5 (not tested) + values = fh.read_array(' structure_size: + break + lsminfo.append((name, dtype)) + else: + lsminfo = TIFF.CZ_LSMINFO + + lsminfo = fh.read_record(lsminfo, byteorder=byteorder) + lsminfo = recarray2dict(lsminfo) + + # read LSM info subrecords at offsets + for name, reader in TIFF.CZ_LSMINFO_READERS.items(): + if reader is None: + continue + offset = lsminfo.get('Offset' + name, 0) + if offset < 8: + continue + fh.seek(offset) + try: + lsminfo[name] = reader(fh) + except ValueError: + pass + return lsminfo + + +def read_lsm_floatpairs(fh): + """Read LSM sequence of float pairs from file and return as list.""" + size = struct.unpack(' 0: + esize, etime, etype = struct.unpack(' 4: + size = struct.unpack(' 1 else {} + return frame_data, roi_data + + +def read_micromanager_metadata(fh): + """Read MicroManager non-TIFF settings from open file and return as dict. + + The settings can be used to read image data without parsing the TIFF file. + + Raise ValueError if the file does not contain valid MicroManager metadata. 
+ + """ + fh.seek(0) + try: + byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)] + except IndexError: + raise ValueError('not a MicroManager TIFF file') + + result = {} + fh.seek(8) + (index_header, index_offset, display_header, display_offset, + comments_header, comments_offset, summary_header, summary_length + ) = struct.unpack(byteorder + 'IIIIIIII', fh.read(32)) + + if summary_header != 2355492: + raise ValueError('invalid MicroManager summary header') + result['Summary'] = read_json(fh, byteorder, None, summary_length, None) + + if index_header != 54773648: + raise ValueError('invalid MicroManager index header') + fh.seek(index_offset) + header, count = struct.unpack(byteorder + 'II', fh.read(8)) + if header != 3453623: + raise ValueError('invalid MicroManager index header') + data = struct.unpack(byteorder + 'IIIII'*count, fh.read(20*count)) + result['IndexMap'] = {'Channel': data[::5], + 'Slice': data[1::5], + 'Frame': data[2::5], + 'Position': data[3::5], + 'Offset': data[4::5]} + + if display_header != 483765892: + raise ValueError('invalid MicroManager display header') + fh.seek(display_offset) + header, count = struct.unpack(byteorder + 'II', fh.read(8)) + if header != 347834724: + raise ValueError('invalid MicroManager display header') + result['DisplaySettings'] = read_json(fh, byteorder, None, count, None) + + if comments_header != 99384722: + raise ValueError('invalid MicroManager comments header') + fh.seek(comments_offset) + header, count = struct.unpack(byteorder + 'II', fh.read(8)) + if header != 84720485: + raise ValueError('invalid MicroManager comments header') + result['Comments'] = read_json(fh, byteorder, None, count, None) + + return result + + +def read_metaseries_catalog(fh): + """Read MetaSeries non-TIFF hint catalog from file. + + Raise ValueError if the file does not contain a valid hint catalog. 
+ + """ + # TODO: implement read_metaseries_catalog + raise NotImplementedError() + + +def imagej_metadata_tags(metadata, byteorder): + """Return IJMetadata and IJMetadataByteCounts tags from metadata dict. + + The tags can be passed to the TiffWriter.save function as extratags. + + The metadata dict may contain the following keys and values: + + Info : str + Human-readable information as string. + Labels : sequence of str + Human-readable labels for each channel. + Ranges : sequence of doubles + Lower and upper values for each channel. + LUTs : sequence of (3, 256) uint8 ndarrays + Color palettes for each channel. + Plot : bytes + Undocumented ImageJ internal format. + ROI: bytes + Undocumented ImageJ internal region of interest format. + Overlays : bytes + Undocumented ImageJ internal format. + + """ + header = [{'>': b'IJIJ', '<': b'JIJI'}[byteorder]] + bytecounts = [0] + body = [] + + def _string(data, byteorder): + return data.encode('utf-16' + {'>': 'be', '<': 'le'}[byteorder]) + + def _doubles(data, byteorder): + return struct.pack(byteorder+('d' * len(data)), *data) + + def _ndarray(data, byteorder): + return data.tobytes() + + def _bytes(data, byteorder): + return data + + metadata_types = ( + ('Info', b'info', 1, _string), + ('Labels', b'labl', None, _string), + ('Ranges', b'rang', 1, _doubles), + ('LUTs', b'luts', None, _ndarray), + ('Plot', b'plot', 1, _bytes), + ('ROI', b'roi ', 1, _bytes), + ('Overlays', b'over', None, _bytes)) + + for key, mtype, count, func in metadata_types: + if key.lower() in metadata: + key = key.lower() + elif key not in metadata: + continue + if byteorder == '<': + mtype = mtype[::-1] + values = metadata[key] + if count is None: + count = len(values) + else: + values = [values] + header.append(mtype + struct.pack(byteorder+'I', count)) + for value in values: + data = func(value, byteorder) + body.append(data) + bytecounts.append(len(data)) + + if not body: + return () + body = b''.join(body) + header = b''.join(header) + data 
= header + body + bytecounts[0] = len(header) + bytecounts = struct.pack(byteorder+('I' * len(bytecounts)), *bytecounts) + return ((50839, 'B', len(data), data, True), + (50838, 'I', len(bytecounts)//4, bytecounts, True)) + + +def imagej_metadata(data, bytecounts, byteorder): + """Return IJMetadata tag value as dict. + + The 'Info' string can have multiple formats, e.g. OIF or ScanImage, + that might be parsed into dicts using the matlabstr2py or + oiffile.SettingsFile functions. + + """ + def _string(data, byteorder): + return data.decode('utf-16' + {'>': 'be', '<': 'le'}[byteorder]) + + def _doubles(data, byteorder): + return struct.unpack(byteorder+('d' * (len(data) // 8)), data) + + def _lut(data, byteorder): + return numpy.frombuffer(data, 'uint8').reshape(-1, 256) + + def _bytes(data, byteorder): + return data + + metadata_types = { # big-endian + b'info': ('Info', _string), + b'labl': ('Labels', _string), + b'rang': ('Ranges', _doubles), + b'luts': ('LUTs', _lut), + b'plot': ('Plots', _bytes), + b'roi ': ('ROI', _bytes), + b'over': ('Overlays', _bytes)} + metadata_types.update( # little-endian + dict((k[::-1], v) for k, v in metadata_types.items())) + + if not bytecounts: + raise ValueError('no ImageJ metadata') + + if data[:4] not in (b'IJIJ', b'JIJI'): + raise ValueError('invalid ImageJ metadata') + + header_size = bytecounts[0] + if header_size < 12 or header_size > 804: + raise ValueError('invalid ImageJ metadata header size') + + ntypes = (header_size - 4) // 8 + header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8]) + pos = 4 + ntypes * 8 + counter = 0 + result = {} + for mtype, count in zip(header[::2], header[1::2]): + values = [] + name, func = metadata_types.get(mtype, (bytes2str(mtype), read_bytes)) + for _ in range(count): + counter += 1 + pos1 = pos + bytecounts[counter] + values.append(func(data[pos:pos1], byteorder)) + pos = pos1 + result[name.strip()] = values[0] if count == 1 else values + return result + + +def 
imagej_description_metadata(description): + """Return metatata from ImageJ image description as dict. + + Raise ValueError if not a valid ImageJ description. + + >>> description = 'ImageJ=1.11a\\nimages=510\\nhyperstack=true\\n' + >>> imagej_description_metadata(description) # doctest: +SKIP + {'ImageJ': '1.11a', 'images': 510, 'hyperstack': True} + + """ + def _bool(val): + return {'true': True, 'false': False}[val.lower()] + + result = {} + for line in description.splitlines(): + try: + key, val = line.split('=') + except Exception: + continue + key = key.strip() + val = val.strip() + for dtype in (int, float, _bool): + try: + val = dtype(val) + break + except Exception: + pass + result[key] = val + + if 'ImageJ' not in result: + raise ValueError('not a ImageJ image description') + return result + + +def imagej_description(shape, rgb=None, colormaped=False, version='1.11a', + hyperstack=None, mode=None, loop=None, **kwargs): + """Return ImageJ image description from data shape. + + ImageJ can handle up to 6 dimensions in order TZCYXS. 
+ + >>> imagej_description((51, 5, 2, 196, 171)) # doctest: +SKIP + ImageJ=1.11a + images=510 + channels=2 + slices=5 + frames=51 + hyperstack=true + mode=grayscale + loop=false + + """ + if colormaped: + raise NotImplementedError('ImageJ colormapping not supported') + shape = imagej_shape(shape, rgb=rgb) + rgb = shape[-1] in (3, 4) + + result = ['ImageJ=%s' % version] + append = [] + result.append('images=%i' % product(shape[:-3])) + if hyperstack is None: + hyperstack = True + append.append('hyperstack=true') + else: + append.append('hyperstack=%s' % bool(hyperstack)) + if shape[2] > 1: + result.append('channels=%i' % shape[2]) + if mode is None and not rgb: + mode = 'grayscale' + if hyperstack and mode: + append.append('mode=%s' % mode) + if shape[1] > 1: + result.append('slices=%i' % shape[1]) + if shape[0] > 1: + result.append('frames=%i' % shape[0]) + if loop is None: + append.append('loop=false') + if loop is not None: + append.append('loop=%s' % bool(loop)) + for key, value in kwargs.items(): + append.append('%s=%s' % (key.lower(), value)) + + return '\n'.join(result + append + ['']) + + +def imagej_shape(shape, rgb=None): + """Return shape normalized to 6D ImageJ hyperstack TZCYXS. + + Raise ValueError if not a valid ImageJ hyperstack shape. + + >>> imagej_shape((2, 3, 4, 5, 3), False) + (2, 3, 4, 5, 3, 1) + + """ + shape = tuple(int(i) for i in shape) + ndim = len(shape) + if 1 > ndim > 6: + raise ValueError('invalid ImageJ hyperstack: not 2 to 6 dimensional') + if rgb is None: + rgb = shape[-1] in (3, 4) and ndim > 2 + if rgb and shape[-1] not in (3, 4): + raise ValueError('invalid ImageJ hyperstack: not a RGB image') + if not rgb and ndim == 6 and shape[-1] != 1: + raise ValueError('invalid ImageJ hyperstack: not a non-RGB image') + if rgb or shape[-1] == 1: + return (1, ) * (6 - ndim) + shape + return (1, ) * (5 - ndim) + shape + (1,) + + +def json_description(shape, **metadata): + """Return JSON image description from data shape and other meta data. 
+ + Return UTF-8 encoded JSON. + + >>> json_description((256, 256, 3), axes='YXS') # doctest: +SKIP + b'{"shape": [256, 256, 3], "axes": "YXS"}' + + """ + metadata.update(shape=shape) + return json.dumps(metadata) # .encode('utf-8') + + +def json_description_metadata(description): + """Return metatata from JSON formated image description as dict. + + Raise ValuError if description is of unknown format. + + >>> description = '{"shape": [256, 256, 3], "axes": "YXS"}' + >>> json_description_metadata(description) # doctest: +SKIP + {'shape': [256, 256, 3], 'axes': 'YXS'} + >>> json_description_metadata('shape=(256, 256, 3)') + {'shape': (256, 256, 3)} + + """ + if description[:6] == 'shape=': + # old style 'shaped' description; not JSON + shape = tuple(int(i) for i in description[7:-1].split(',')) + return dict(shape=shape) + if description[:1] == '{' and description[-1:] == '}': + # JSON description + return json.loads(description) + raise ValueError('invalid JSON image description', description) + + +def fluoview_description_metadata(description, ignoresections=None): + """Return metatata from FluoView image description as dict. + + The FluoView image description format is unspecified. Expect failures. + + >>> descr = ('[Intensity Mapping]\\nMap Ch0: Range=00000 to 02047\\n' + ... 
    ...          '[Intensity Mapping End]')
    >>> fluoview_description_metadata(descr)
    {'Intensity Mapping': {'Map Ch0: Range': '00000 to 02047'}}

    """
    if not description.startswith('['):
        raise ValueError('invalid FluoView image description')
    if ignoresections is None:
        ignoresections = {'Region Info (Fields)', 'Protocol Description'}

    result = {}
    sections = [result]  # stack of currently open sections
    comment = False  # True while inside an ignored (verbatim) section
    for line in description.splitlines():
        if not comment:
            line = line.strip()
        if not line:
            continue
        if line[0] == '[':
            if line[-5:] == ' End]':
                # close section
                del sections[-1]
                section = sections[-1]
                name = line[1:-5]
                if comment:
                    # verbatim sections are joined back into one string
                    section[name] = '\n'.join(section[name])
                if name[:4] == 'LUT ':
                    # LUT sections become (n, 3) uint8 arrays
                    a = numpy.array(section[name], dtype='uint8')
                    a.shape = -1, 3
                    section[name] = a
                continue
            # new section
            comment = False
            name = line[1:-1]
            if name[:4] == 'LUT ':
                section = []
            elif name in ignoresections:
                section = []
                comment = True
            else:
                section = {}
            sections.append(section)
            result[name] = section
            continue
        # add entry
        if comment:
            section.append(line)
            continue
        line = line.split('=', 1)
        if len(line) == 1:
            section[line[0].strip()] = None
            continue
        key, value = line
        if key[:4] == 'RGB ':
            section.extend(int(rgb) for rgb in value.split())
        else:
            section[key.strip()] = astype(value.strip())
    return result


def pilatus_description_metadata(description):
    """Return metadata from Pilatus image description as dict.

    Return metadata from Pilatus pixel array detectors by Dectris, created
    by camserver or TVX software.

    >>> pilatus_description_metadata('# Pixel_size 172e-6 m x 172e-6 m')
    {'Pixel_size': (0.000172, 0.000172)}

    """
    result = {}
    if not description.startswith('# '):
        return result
    # replace header punctuation by spaces so lines split cleanly
    for c in '#:=,()':
        description = description.replace(c, ' ')
    for line in description.split('\n'):
        if line[:2] != '  ':
            continue
        line = line.split()
        name = line[0]
        if line[0] not in TIFF.PILATUS_HEADER:
            # unknown key: try to parse as timestamp, else keep raw string
            try:
                result['DateTime'] = datetime.datetime.strptime(
                    ' '.join(line), '%Y-%m-%dT%H %M %S.%f')
            except Exception:
                result[name] = ' '.join(line[1:])
            continue
        indices, dtype = TIFF.PILATUS_HEADER[line[0]]
        if isinstance(indices[0], slice):
            # assumes one slice
            values = line[indices[0]]
        else:
            values = [line[i] for i in indices]
        if dtype is float and values[0] == 'not':
            # e.g. 'not set' placeholder values
            values = ['NaN']
        values = tuple(dtype(v) for v in values)
        if dtype == str:
            values = ' '.join(values)
        elif len(values) == 1:
            values = values[0]
        result[name] = values
    return result


def svs_description_metadata(description):
    """Return metadata from Aperio image description as dict.

    The Aperio image description format is unspecified. Expect failures.

    >>> svs_description_metadata('Aperio Image Library v1.0')
    {'Aperio Image Library': 'v1.0'}

    """
    if not description.startswith('Aperio Image Library '):
        raise ValueError('invalid Aperio image description')
    result = {}
    lines = description.split('\n')
    key, value = lines[0].strip().rsplit(None, 1)  # 'Aperio Image Library'
    result[key.strip()] = value.strip()
    if len(lines) == 1:
        return result
    items = lines[1].split('|')
    result[''] = items[0].strip()  # TODO: parse this?
    for item in items[1:]:
        key, value = item.split(' = ')
        result[key.strip()] = astype(value.strip())
    return result


def stk_description_metadata(description):
    """Return metadata from MetaMorph image description as list of dict.

    The MetaMorph image description format is unspecified. Expect failures.

    """
    description = description.strip()
    if not description:
        return []
    try:
        description = bytes2str(description)
    except UnicodeDecodeError:
        warnings.warn('failed to parse MetaMorph image description')
        return []
    result = []
    # planes are NUL-separated; entries within a plane are CRLF-separated
    for plane in description.split('\x00'):
        d = {}
        for line in plane.split('\r\n'):
            line = line.split(':', 1)
            if len(line) > 1:
                name, value = line
                d[name.strip()] = astype(value.strip())
            else:
                value = line[0].strip()
                if value:
                    # keyless values are collected under the '' key
                    if '' in d:
                        d[''].append(value)
                    else:
                        d[''] = [value]
        result.append(d)
    return result


def metaseries_description_metadata(description):
    """Return metadata from MetaSeries image description as dict."""
    # NOTE(review): the '<MetaData>' literal was stripped to '' by the
    # extraction of this file (all '<...>' spans lost); restored here from
    # upstream -- startswith('') would accept any input.
    if not description.startswith('<MetaData>'):
        raise ValueError('invalid MetaSeries image description')

    from xml.etree import cElementTree as etree  # delayed import
    root = etree.fromstring(description)
    types = {'float': float, 'int': int,
             'bool': lambda x: asbool(x, 'on', 'off')}

    def parse(root, result):
        # recursive
        for child in root:
            attrib = child.attrib
            if not attrib:
                result[child.tag] = parse(child, {})
                continue
            if 'id' in attrib:
                i = attrib['id']
                t = attrib['type']
                v = attrib['value']
                if t in types:
                    result[i] = types[t](v)
                else:
                    result[i] = v
        return result

    adict = parse(root, {})
    if 'Description' in adict:
        # NOTE(review): the first replace() argument collapsed to ' ' during
        # extraction; restored as the '&#13;&#10;' CRLF entity pair from
        # upstream -- replacing plain spaces would mangle the text.
        adict['Description'] = adict['Description'].replace('&#13;&#10;', '\n')
    return adict


def scanimage_description_metadata(description):
    """Return metadata from ScanImage image description as dict."""
    return matlabstr2py(description)


def scanimage_artist_metadata(artist):
    """Return metadata from ScanImage artist tag as dict."""
    try:
        return json.loads(artist)
    except ValueError:
        # best-effort: warn and return None on malformed JSON
        warnings.warn("invalid JSON '%s'" % artist)


def _replace_by(module_function, package=__package__, warn=None, prefix='_'):
    """Try replace decorated function by module.function."""
    return lambda f: f  # imageio: just use what's in
here + def _warn(e, warn): + if warn is None: + warn = '\n Functionality might be degraded or be slow.\n' + elif warn is True: + warn = '' + elif not warn: + return + warnings.warn('%s%s' % (e, warn)) + + try: + from importlib import import_module + except ImportError as e: + _warn(e, warn) + return identityfunc + + def decorate(func, module_function=module_function, warn=warn): + module, function = module_function.split('.') + try: + if package: + module = import_module('.' + module, package=package) + else: + module = import_module(module) + except Exception as e: + _warn(e, warn) + return func + try: + func, oldfunc = getattr(module, function), func + except Exception as e: + _warn(e, warn) + return func + globals()[prefix + func.__name__] = oldfunc + return func + + return decorate + + +def decode_floats(data): + """Decode floating point horizontal differencing. + + The TIFF predictor type 3 reorders the bytes of the image values and + applies horizontal byte differencing to improve compression of floating + point images. The ordering of interleaved color channels is preserved. + + Parameters + ---------- + data : numpy.ndarray + The image to be decoded. The dtype must be a floating point. + The shape must include the number of contiguous samples per pixel + even if 1. 
+ + """ + shape = data.shape + dtype = data.dtype + if len(shape) < 3: + raise ValueError('invalid data shape') + if dtype.char not in 'dfe': + raise ValueError('not a floating point image') + littleendian = data.dtype.byteorder == '<' or ( + sys.byteorder == 'little' and data.dtype.byteorder == '=') + # undo horizontal byte differencing + data = data.view('uint8') + data.shape = shape[:-2] + (-1,) + shape[-1:] + numpy.cumsum(data, axis=-2, dtype='uint8', out=data) + # reorder bytes + if littleendian: + data.shape = shape[:-2] + (-1,) + shape[-2:] + data = numpy.swapaxes(data, -3, -2) + data = numpy.swapaxes(data, -2, -1) + data = data[..., ::-1] + # back to float + data = numpy.ascontiguousarray(data) + data = data.view(dtype) + data.shape = shape + return data + + +@_replace_by('_tifffile.decode_packbits') +def decode_packbits(encoded): + """Decompress PackBits encoded byte string. + + PackBits is a simple byte-oriented run-length compression scheme. + + """ + func = ord if sys.version[0] == '2' else identityfunc + result = [] + result_extend = result.extend + i = 0 + try: + while True: + n = func(encoded[i]) + 1 + i += 1 + if n < 129: + result_extend(encoded[i:i+n]) + i += n + elif n > 129: + result_extend(encoded[i:i+1] * (258-n)) + i += 1 + except IndexError: + pass + return b''.join(result) if sys.version[0] == '2' else bytes(result) + + +@_replace_by('_tifffile.decode_lzw') +def decode_lzw(encoded): + """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string). + + The strip must begin with a CLEAR code and end with an EOI code. + + This implementation of the LZW decoding algorithm is described in (1) and + is not compatible with old style LZW compressed files like quad-lzw.tif. 
+ + """ + len_encoded = len(encoded) + bitcount_max = len_encoded * 8 + unpack = struct.unpack + + if sys.version[0] == '2': + newtable = [chr(i) for i in range(256)] + else: + newtable = [bytes([i]) for i in range(256)] + newtable.extend((0, 0)) + + def next_code(): + """Return integer of 'bitw' bits at 'bitcount' position in encoded.""" + start = bitcount // 8 + s = encoded[start:start+4] + try: + code = unpack('>I', s)[0] + except Exception: + code = unpack('>I', s + b'\x00'*(4-len(s)))[0] + code <<= bitcount % 8 + code &= mask + return code >> shr + + switchbitch = { # code: bit-width, shr-bits, bit-mask + 255: (9, 23, int(9*'1'+'0'*23, 2)), + 511: (10, 22, int(10*'1'+'0'*22, 2)), + 1023: (11, 21, int(11*'1'+'0'*21, 2)), + 2047: (12, 20, int(12*'1'+'0'*20, 2)), } + bitw, shr, mask = switchbitch[255] + bitcount = 0 + + if len_encoded < 4: + raise ValueError('strip must be at least 4 characters long') + + if next_code() != 256: + raise ValueError('strip must begin with CLEAR code') + + code = 0 + oldcode = 0 + result = [] + result_append = result.append + while True: + code = next_code() # ~5% faster when inlining this function + bitcount += bitw + if code == 257 or bitcount >= bitcount_max: # EOI + break + if code == 256: # CLEAR + table = newtable[:] + table_append = table.append + lentable = 258 + bitw, shr, mask = switchbitch[255] + code = next_code() + bitcount += bitw + if code == 257: # EOI + break + result_append(table[code]) + else: + if code < lentable: + decoded = table[code] + newcode = table[oldcode] + decoded[:1] + else: + newcode = table[oldcode] + newcode += newcode[:1] + decoded = newcode + result_append(decoded) + table_append(newcode) + lentable += 1 + oldcode = code + if lentable in switchbitch: + bitw, shr, mask = switchbitch[lentable] + + if code != 257: + warnings.warn('unexpected end of LZW stream (code %i)' % code) + + return b''.join(result) + + +@_replace_by('_tifffile.unpack_ints') +def unpack_ints(data, dtype, itemsize, runlen=0): + 
"""Decompress byte string to array of integers of any bit size <= 32. + + This Python implementation is slow and only handles itemsizes 1, 2, 4, 8, + 16, 32, and 64. + + Parameters + ---------- + data : byte str + Data to decompress. + dtype : numpy.dtype or str + A numpy boolean or integer type. + itemsize : int + Number of bits per integer. + runlen : int + Number of consecutive integers, after which to start at next byte. + + Examples + -------- + >>> unpack_ints(b'a', 'B', 1) + array([0, 1, 1, 0, 0, 0, 0, 1], dtype=uint8) + >>> unpack_ints(b'ab', 'B', 2) + array([1, 2, 0, 1, 1, 2, 0, 2], dtype=uint8) + + """ + if itemsize == 1: # bitarray + data = numpy.frombuffer(data, '|B') + data = numpy.unpackbits(data) + if runlen % 8: + data = data.reshape(-1, runlen + (8 - runlen % 8)) + data = data[:, :runlen].reshape(-1) + return data.astype(dtype) + + dtype = numpy.dtype(dtype) + if itemsize in (8, 16, 32, 64): + return numpy.frombuffer(data, dtype) + if itemsize not in (1, 2, 4, 8, 16, 32): + raise ValueError('itemsize not supported: %i' % itemsize) + if dtype.kind not in 'biu': + raise ValueError('invalid dtype') + + itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize) + if itembytes != dtype.itemsize: + raise ValueError('dtype.itemsize too small') + if runlen == 0: + runlen = (8 * len(data)) // itemsize + skipbits = runlen * itemsize % 8 + if skipbits: + skipbits = 8 - skipbits + shrbits = itembytes*8 - itemsize + bitmask = int(itemsize*'1'+'0'*shrbits, 2) + dtypestr = '>' + dtype.char # dtype always big-endian? 
+ + unpack = struct.unpack + size = runlen * (len(data)*8 // (runlen*itemsize + skipbits)) + result = numpy.empty((size,), dtype) + bitcount = 0 + for i in range(size): + start = bitcount // 8 + s = data[start:start+itembytes] + try: + code = unpack(dtypestr, s)[0] + except Exception: + code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0] + code <<= bitcount % 8 + code &= bitmask + result[i] = code >> shrbits + bitcount += itemsize + if (i+1) % runlen == 0: + bitcount += skipbits + return result + + +def unpack_rgb(data, dtype='>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff) + >>> print(unpack_rgb(data, '>> print(unpack_rgb(data, '>> print(unpack_rgb(data, '= bits) + data = numpy.frombuffer(data, dtype.byteorder+dt) + result = numpy.empty((data.size, len(bitspersample)), dtype.char) + for i, bps in enumerate(bitspersample): + t = data >> int(numpy.sum(bitspersample[i+1:])) + t &= int('0b'+'1'*bps, 2) + if rescale: + o = ((dtype.itemsize * 8) // bps + 1) * bps + if o > data.dtype.itemsize * 8: + t = t.astype('I') + t *= (2**o - 1) // (2**bps - 1) + t //= 2**(o - (dtype.itemsize * 8)) + result[:, i] = t + return result.reshape(-1) + + +@_replace_by('_tifffile.reverse_bitorder') +def reverse_bitorder(data): + """Reverse bits in each byte of byte string or numpy array. + + Decode data where pixels with lower column values are stored in the + lower-order bits of the bytes (FillOrder is LSB2MSB). + + Parameters + ---------- + data : byte string or ndarray + The data to be bit reversed. If byte string, a new bit-reversed byte + string is returned. Numpy arrays are bit-reversed in-place. 
+ + Examples + -------- + >>> reverse_bitorder(b'\\x01\\x64') + b'\\x80&' + >>> data = numpy.array([1, 666], dtype='uint16') + >>> reverse_bitorder(data) + >>> data + array([ 128, 16473], dtype=uint16) + + """ + try: + view = data.view('uint8') + numpy.take(TIFF.REVERSE_BITORDER_ARRAY, view, out=view) + except AttributeError: + return data.translate(TIFF.REVERSE_BITORDER_BYTES) + except ValueError: + raise NotImplementedError('slices of arrays not supported') + + +def apply_colormap(image, colormap, contig=True): + """Return palette-colored image. + + The image values are used to index the colormap on axis 1. The returned + image is of shape image.shape+colormap.shape[0] and dtype colormap.dtype. + + Parameters + ---------- + image : numpy.ndarray + Indexes into the colormap. + colormap : numpy.ndarray + RGB lookup table aka palette of shape (3, 2**bits_per_sample). + contig : bool + If True, return a contiguous array. + + Examples + -------- + >>> image = numpy.arange(256, dtype='uint8') + >>> colormap = numpy.vstack([image, image, image]).astype('uint16') * 256 + >>> apply_colormap(image, colormap)[-1] + array([65280, 65280, 65280], dtype=uint16) + + """ + image = numpy.take(colormap, image, axis=1) + image = numpy.rollaxis(image, 0, image.ndim) + if contig: + image = numpy.ascontiguousarray(image) + return image + + +def reorient(image, orientation): + """Return reoriented view of image array. + + Parameters + ---------- + image : numpy.ndarray + Non-squeezed output of asarray() functions. + Axes -3 and -2 must be image length and width respectively. + orientation : int or str + One of TIFF.ORIENTATION names or values. 
+ + """ + ORIENTATION = TIFF.ORIENTATION + orientation = enumarg(ORIENTATION, orientation) + + if orientation == ORIENTATION.TOPLEFT: + return image + elif orientation == ORIENTATION.TOPRIGHT: + return image[..., ::-1, :] + elif orientation == ORIENTATION.BOTLEFT: + return image[..., ::-1, :, :] + elif orientation == ORIENTATION.BOTRIGHT: + return image[..., ::-1, ::-1, :] + elif orientation == ORIENTATION.LEFTTOP: + return numpy.swapaxes(image, -3, -2) + elif orientation == ORIENTATION.RIGHTTOP: + return numpy.swapaxes(image, -3, -2)[..., ::-1, :] + elif orientation == ORIENTATION.RIGHTBOT: + return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :] + elif orientation == ORIENTATION.LEFTBOT: + return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :] + + +def repeat_nd(a, repeats): + """Return read-only view into input array with elements repeated. + + Zoom nD image by integer factors using nearest neighbor interpolation + (box filter). + + Parameters + ---------- + a : array_like + Input array. + repeats : sequence of int + The number of repetitions to apply along each dimension of input array. + + Example + ------- + >>> repeat_nd([[1, 2], [3, 4]], (2, 2)) + array([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 3, 4, 4], + [3, 3, 4, 4]]) + + """ + a = numpy.asarray(a) + reshape = [] + shape = [] + strides = [] + for i, j, k in zip(a.strides, a.shape, repeats): + shape.extend((j, k)) + strides.extend((i, 0)) + reshape.append(j * k) + return numpy.lib.stride_tricks.as_strided( + a, shape, strides, writeable=False).reshape(reshape) + + +def reshape_nd(data_or_shape, ndim): + """Return image array or shape with at least ndim dimensions. + + Prepend 1s to image shape as necessary. 
+ + >>> reshape_nd(numpy.empty(0), 1).shape + (0,) + >>> reshape_nd(numpy.empty(1), 2).shape + (1, 1) + >>> reshape_nd(numpy.empty((2, 3)), 3).shape + (1, 2, 3) + >>> reshape_nd(numpy.empty((3, 4, 5)), 3).shape + (3, 4, 5) + >>> reshape_nd((2, 3), 3) + (1, 2, 3) + + """ + is_shape = isinstance(data_or_shape, tuple) + shape = data_or_shape if is_shape else data_or_shape.shape + if len(shape) >= ndim: + return data_or_shape + shape = (1,) * (ndim - len(shape)) + shape + return shape if is_shape else data_or_shape.reshape(shape) + + +def squeeze_axes(shape, axes, skip='XY'): + """Return shape and axes with single-dimensional entries removed. + + Remove unused dimensions unless their axes are listed in 'skip'. + + >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC') + ((5, 2, 1), 'TYX') + + """ + if len(shape) != len(axes): + raise ValueError('dimensions of axes and shape do not match') + shape, axes = zip(*(i for i in zip(shape, axes) + if i[0] > 1 or i[1] in skip)) + return tuple(shape), ''.join(axes) + + +def transpose_axes(image, axes, asaxes='CTZYX'): + """Return image with its axes permuted to match specified axes. + + A view is returned if possible. + + >>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape + (5, 2, 1, 3, 4) + + """ + for ax in axes: + if ax not in asaxes: + raise ValueError('unknown axis %s' % ax) + # add missing axes to image + shape = image.shape + for ax in reversed(asaxes): + if ax not in axes: + axes = ax + axes + shape = (1,) + shape + image = image.reshape(shape) + # transpose axes + image = image.transpose([axes.index(ax) for ax in asaxes]) + return image + + +def reshape_axes(axes, shape, newshape, unknown='Q'): + """Return axes matching new shape. + + Unknown dimensions are labelled 'Q'. 
+ + >>> reshape_axes('YXS', (219, 301, 1), (219, 301)) + 'YX' + >>> reshape_axes('IYX', (12, 219, 301), (3, 4, 219, 1, 301, 1)) + 'QQYQXQ' + + """ + shape = tuple(shape) + newshape = tuple(newshape) + if len(axes) != len(shape): + raise ValueError('axes do not match shape') + + size = product(shape) + newsize = product(newshape) + if size != newsize: + raise ValueError('cannot reshape %s to %s' % (shape, newshape)) + if not axes or not newshape: + return '' + + lendiff = max(0, len(shape) - len(newshape)) + if lendiff: + newshape = newshape + (1,) * lendiff + + i = len(shape)-1 + prodns = 1 + prods = 1 + result = [] + for ns in newshape[::-1]: + prodns *= ns + while i > 0 and shape[i] == 1 and ns != 1: + i -= 1 + if ns == shape[i] and prodns == prods*shape[i]: + prods *= shape[i] + result.append(axes[i]) + i -= 1 + else: + result.append(unknown) + + return ''.join(reversed(result[lendiff:])) + + +def stack_pages(pages, out=None, maxworkers=1, *args, **kwargs): + """Read data from sequence of TiffPage and stack them vertically. + + Additional parameters are passsed to the TiffPage.asarray function. 
+ + """ + npages = len(pages) + if npages == 0: + raise ValueError('no pages') + + if npages == 1: + return pages[0].asarray(out=out, *args, **kwargs) + + page0 = next(p for p in pages if p is not None) + page0.asarray(validate=None) # ThreadPoolExecutor swallows exceptions + shape = (npages,) + page0.keyframe.shape + dtype = page0.keyframe.dtype + out = create_output(out, shape, dtype) + + if maxworkers is None: + maxworkers = multiprocessing.cpu_count() // 2 + page0.parent.filehandle.lock = maxworkers > 1 + + filecache = OpenFileCache(size=max(4, maxworkers), + lock=page0.parent.filehandle.lock) + + def func(page, index, out=out, filecache=filecache, + args=args, kwargs=kwargs): + """Read, decode, and copy page data.""" + if page is not None: + filecache.open(page.parent.filehandle) + out[index] = page.asarray(lock=filecache.lock, reopen=False, + validate=False, *args, **kwargs) + filecache.close(page.parent.filehandle) + + if maxworkers < 2: + for i, page in enumerate(pages): + func(page, i) + else: + with concurrent.futures.ThreadPoolExecutor(maxworkers) as executor: + executor.map(func, pages, range(npages)) + + filecache.clear() + page0.parent.filehandle.lock = None + + return out + + +def clean_offsets_counts(offsets, counts): + """Return cleaned offsets and byte counts. + + Remove zero offsets and counts. Use to sanitize _offsets and _bytecounts + tag values for strips or tiles. 
+ + """ + offsets = list(offsets) + counts = list(counts) + assert len(offsets) == len(counts) + j = 0 + for i, (o, b) in enumerate(zip(offsets, counts)): + if o > 0 and b > 0: + if i > j: + offsets[j] = o + counts[j] = b + j += 1 + elif b > 0 and o <= 0: + raise ValueError('invalid offset') + else: + warnings.warn('empty byte count') + if j == 0: + j = 1 + return offsets[:j], counts[:j] + + +def buffered_read(fh, lock, offsets, bytecounts, buffersize=2**26): + """Return iterator over blocks read from file.""" + length = len(offsets) + i = 0 + while i < length: + data = [] + with lock: + size = 0 + while size < buffersize and i < length: + fh.seek(offsets[i]) + bytecount = bytecounts[i] + data.append(fh.read(bytecount)) + size += bytecount + i += 1 + for block in data: + yield block + + +def create_output(out, shape, dtype, mode='w+', suffix='.memmap'): + """Return numpy array where image data of shape and dtype can be copied. + + The 'out' parameter may have the following values or types: + + None + An empty array of shape and dtype is created and returned. + numpy.ndarray + An existing writable array of compatible dtype and shape. A view of + the same array is returned after verification. + 'memmap' or 'memmap:tempdir' + A memory-map to an array stored in a temporary binary file on disk + is created and returned. + str or open file + The file name or file object used to create a memory-map to an array + stored in a binary file on disk. The created memory-mapped array is + returned. 
+ + """ + if out is None: + return numpy.zeros(shape, dtype) + if isinstance(out, str) and out[:6] == 'memmap': + tempdir = out[7:] if len(out) > 7 else None + with tempfile.NamedTemporaryFile(dir=tempdir, suffix=suffix) as fh: + return numpy.memmap(fh, shape=shape, dtype=dtype, mode=mode) + if isinstance(out, numpy.ndarray): + if product(shape) != product(out.shape): + raise ValueError('incompatible output shape') + if not numpy.can_cast(dtype, out.dtype): + raise ValueError('incompatible output dtype') + return out.reshape(shape) + if isinstance(out, pathlib.Path): + out = str(out) + return numpy.memmap(out, shape=shape, dtype=dtype, mode=mode) + + +def matlabstr2py(string): + """Return Python object from Matlab string representation. + + Return str, bool, int, float, list (Matlab arrays or cells), or + dict (Matlab structures) types. + + Use to access ScanImage metadata. + + >>> matlabstr2py('1') + 1 + >>> matlabstr2py("['x y z' true false; 1 2.0 -3e4; NaN Inf @class]") + [['x y z', True, False], [1, 2.0, -30000.0], [nan, inf, '@class']] + >>> d = matlabstr2py("SI.hChannels.channelType = {'stripe' 'stripe'}\\n" + ... 
"SI.hChannels.channelsActive = 2") + >>> d['SI.hChannels.channelType'] + ['stripe', 'stripe'] + + """ + # TODO: handle invalid input + # TODO: review unboxing of multidimensional arrays + + def lex(s): + # return sequence of tokens from matlab string representation + tokens = ['['] + while True: + t, i = next_token(s) + if t is None: + break + if t == ';': + tokens.extend((']', '[')) + elif t == '[': + tokens.extend(('[', '[')) + elif t == ']': + tokens.extend((']', ']')) + else: + tokens.append(t) + s = s[i:] + tokens.append(']') + return tokens + + def next_token(s): + # return next token in matlab string + length = len(s) + if length == 0: + return None, 0 + i = 0 + while i < length and s[i] == ' ': + i += 1 + if i == length: + return None, i + if s[i] in '{[;]}': + return s[i], i + 1 + if s[i] == "'": + j = i + 1 + while j < length and s[j] != "'": + j += 1 + return s[i: j+1], j + 1 + if s[i] == '<': + j = i + 1 + while j < length and s[j] != '>': + j += 1 + return s[i: j+1], j + 1 + j = i + while j < length and s[j] not in ' {[;]}': + j += 1 + return s[i:j], j + + def value(s, fail=False): + # return Python value of token + s = s.strip() + if not s: + return s + if len(s) == 1: + try: + return int(s) + except Exception: + if fail: + raise ValueError() + return s + if s[0] == "'": + if fail and s[-1] != "'" or "'" in s[1:-1]: + raise ValueError() + return s[1:-1] + if s[0] == '<': + if fail and s[-1] != '>' or '<' in s[1:-1]: + raise ValueError() + return s + if fail and any(i in s for i in " ';[]{}"): + raise ValueError() + if s[0] == '@': + return s + if s in ('true', 'True'): + return True + if s in ('false', 'False'): + return False + if s[:6] == 'zeros(': + return numpy.zeros([int(i) for i in s[6:-1].split(',')]).tolist() + if s[:5] == 'ones(': + return numpy.ones([int(i) for i in s[5:-1].split(',')]).tolist() + if '.' 
in s or 'e' in s: + try: + return float(s) + except Exception: + pass + try: + return int(s) + except Exception: + pass + try: + return float(s) # nan, inf + except Exception: + if fail: + raise ValueError() + return s + + def parse(s): + # return Python value from string representation of Matlab value + s = s.strip() + try: + return value(s, fail=True) + except ValueError: + pass + result = add2 = [] + levels = [add2] + for t in lex(s): + if t in '[{': + add2 = [] + levels.append(add2) + elif t in ']}': + x = levels.pop() + if len(x) == 1 and isinstance(x[0], (list, str)): + x = x[0] + add2 = levels[-1] + add2.append(x) + else: + add2.append(value(t)) + if len(result) == 1 and isinstance(result[0], (list, str)): + result = result[0] + return result + + if '\r' in string or '\n' in string: + # structure + d = {} + for line in string.splitlines(): + line = line.strip() + if not line or line[0] == '%': + continue + k, v = line.split('=', 1) + k = k.strip() + if any(c in k for c in " ';[]{}<>"): + continue + d[k] = parse(v) + return d + return parse(string) + + +def stripnull(string, null=b'\x00'): + """Return string truncated at first null character. + + Clean NULL terminated C strings. For unicode strings use null='\\0'. + + >>> stripnull(b'string\\x00') + b'string' + >>> stripnull('string\\x00', null='\\0') + 'string' + + """ + i = string.find(null) + return string if (i < 0) else string[:i] + + +def stripascii(string): + """Return string truncated at last byte that is 7-bit ASCII. + + Clean NULL separated and terminated TIFF strings. + + >>> stripascii(b'string\\x00string\\n\\x01\\x00') + b'string\\x00string\\n' + >>> stripascii(b'\\x00') + b'' + + """ + # TODO: pythonize this + i = len(string) + while i: + i -= 1 + if 8 < byte2int(string[i]) < 127: + break + else: + i = -1 + return string[:i+1] + + +def asbool(value, true=(b'true', u'true'), false=(b'false', u'false')): + """Return string as bool if possible, else raise TypeError. 
+ + >>> asbool(b' False ') + False + + """ + value = value.strip().lower() + if value in true: # might raise UnicodeWarning/BytesWarning + return True + if value in false: + return False + raise TypeError() + + +def astype(value, types=None): + """Return argument as one of types if possible. + + >>> astype('42') + 42 + >>> astype('3.14') + 3.14 + >>> astype('True') + True + >>> astype(b'Neee-Wom') + 'Neee-Wom' + + """ + if types is None: + types = int, float, asbool, bytes2str + for typ in types: + try: + return typ(value) + except (ValueError, AttributeError, TypeError, UnicodeEncodeError): + pass + return value + + +def format_size(size, threshold=1536): + """Return file size as string from byte size. + + >>> format_size(1234) + '1234 B' + >>> format_size(12345678901) + '11.50 GiB' + + """ + if size < threshold: + return "%i B" % size + for unit in ('KiB', 'MiB', 'GiB', 'TiB', 'PiB'): + size /= 1024.0 + if size < threshold: + return "%.2f %s" % (size, unit) + + +def identityfunc(arg): + """Single argument identity function. + + >>> identityfunc('arg') + 'arg' + + """ + return arg + + +def nullfunc(*args, **kwargs): + """Null function. + + >>> nullfunc('arg', kwarg='kwarg') + + """ + return + + +def sequence(value): + """Return tuple containing value if value is not a sequence. + + >>> sequence(1) + (1,) + >>> sequence([1]) + [1] + + """ + try: + len(value) + return value + except TypeError: + return (value,) + + +def product(iterable): + """Return product of sequence of numbers. + + Equivalent of functools.reduce(operator.mul, iterable, 1). + Multiplying numpy integers might overflow. + + >>> product([2**8, 2**30]) + 274877906944 + >>> product([]) + 1 + + """ + prod = 1 + for i in iterable: + prod *= i + return prod + + +def natural_sorted(iterable): + """Return human sorted list of strings. + + E.g. for sorting file names. 
+ + >>> natural_sorted(['f1', 'f2', 'f10']) + ['f1', 'f2', 'f10'] + + """ + def sortkey(x): + return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)] + + numbers = re.compile(r'(\d+)') + return sorted(iterable, key=sortkey) + + +def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)): + """Return datetime object from timestamp in Excel serial format. + + Convert LSM time stamps. + + >>> excel_datetime(40237.029999999795) + datetime.datetime(2010, 2, 28, 0, 43, 11, 999982) + + """ + return epoch + datetime.timedelta(timestamp) + + +def julian_datetime(julianday, milisecond=0): + """Return datetime from days since 1/1/4713 BC and ms since midnight. + + Convert Julian dates according to MetaMorph. + + >>> julian_datetime(2451576, 54362783) + datetime.datetime(2000, 2, 2, 15, 6, 2, 783) + + """ + if julianday <= 1721423: + # no datetime before year 1 + return None + + a = julianday + 1 + if a > 2299160: + alpha = math.trunc((a - 1867216.25) / 36524.25) + a += 1 + alpha - alpha // 4 + b = a + (1524 if a > 1721423 else 1158) + c = math.trunc((b - 122.1) / 365.25) + d = math.trunc(365.25 * c) + e = math.trunc((b - d) / 30.6001) + + day = b - d - math.trunc(30.6001 * e) + month = e - (1 if e < 13.5 else 13) + year = c - (4716 if month > 2.5 else 4715) + + hour, milisecond = divmod(milisecond, 1000 * 60 * 60) + minute, milisecond = divmod(milisecond, 1000 * 60) + second, milisecond = divmod(milisecond, 1000) + + return datetime.datetime(year, month, day, + hour, minute, second, milisecond) + + +def byteorder_isnative(byteorder): + """Return if byteorder matches the system's byteorder. 
+ + >>> byteorder_isnative('=') + True + + """ + if byteorder == '=' or byteorder == sys.byteorder: + return True + keys = {'big': '>', 'little': '<'} + return keys.get(byteorder, byteorder) == keys[sys.byteorder] + + +def recarray2dict(recarray): + """Return numpy.recarray as dict.""" + # TODO: subarrays + result = {} + for descr, value in zip(recarray.dtype.descr, recarray): + name, dtype = descr[:2] + if dtype[1] == 'S': + value = bytes2str(stripnull(value)) + elif value.ndim < 2: + value = value.tolist() + result[name] = value + return result + + +def xml2dict(xml, sanitize=True, prefix=None): + """Return XML as dict. + + >>> xml2dict('1') + {'root': {'key': 1, 'attr': 'name'}} + + """ + from xml.etree import cElementTree as etree # delayed import + + at = tx = '' + if prefix: + at, tx = prefix + + def astype(value): + # return value as int, float, bool, or str + for t in (int, float, asbool): + try: + return t(value) + except Exception: + pass + return value + + def etree2dict(t): + # adapted from https://stackoverflow.com/a/10077069/453463 + key = t.tag + if sanitize: + key = key.rsplit('}', 1)[-1] + d = {key: {} if t.attrib else None} + children = list(t) + if children: + dd = collections.defaultdict(list) + for dc in map(etree2dict, children): + for k, v in dc.items(): + dd[k].append(astype(v)) + d = {key: {k: astype(v[0]) if len(v) == 1 else astype(v) + for k, v in dd.items()}} + if t.attrib: + d[key].update((at + k, astype(v)) for k, v in t.attrib.items()) + if t.text: + text = t.text.strip() + if children or t.attrib: + if text: + d[key][tx + 'value'] = astype(text) + else: + d[key] = astype(text) + return d + + return etree2dict(etree.fromstring(xml)) + + +def hexdump(bytestr, width=75, height=24, snipat=-2, modulo=2, ellipsis='...'): + """Return hexdump representation of byte string. + + >>> hexdump(binascii.unhexlify('49492a00080000000e00fe0004000100')) + '49 49 2a 00 08 00 00 00 0e 00 fe 00 04 00 01 00 II*.............' 
+ + """ + size = len(bytestr) + if size < 1 or width < 2 or height < 1: + return '' + if height == 1: + addr = b'' + bytesperline = min(modulo * (((width - len(addr)) // 4) // modulo), + size) + if bytesperline < 1: + return '' + nlines = 1 + else: + addr = b'%%0%ix: ' % len(b'%x' % size) + bytesperline = min(modulo * (((width - len(addr % 1)) // 4) // modulo), + size) + if bytesperline < 1: + return '' + width = 3*bytesperline + len(addr % 1) + nlines = (size - 1) // bytesperline + 1 + + if snipat is None or snipat == 1: + snipat = height + elif 0 < abs(snipat) < 1: + snipat = int(math.floor(height * snipat)) + if snipat < 0: + snipat += height + + if height == 1 or nlines == 1: + blocks = [(0, bytestr[:bytesperline])] + addr = b'' + height = 1 + width = 3 * bytesperline + elif height is None or nlines <= height: + blocks = [(0, bytestr)] + elif snipat <= 0: + start = bytesperline * (nlines - height) + blocks = [(start, bytestr[start:])] # (start, None) + elif snipat >= height or height < 3: + end = bytesperline * height + blocks = [(0, bytestr[:end])] # (end, None) + else: + end1 = bytesperline * snipat + end2 = bytesperline * (height - snipat - 1) + blocks = [(0, bytestr[:end1]), + (size-end1-end2, None), + (size-end2, bytestr[size-end2:])] + + ellipsis = str2bytes(ellipsis) + result = [] + for start, bytestr in blocks: + if bytestr is None: + result.append(ellipsis) # 'skip %i bytes' % start) + continue + hexstr = binascii.hexlify(bytestr) + strstr = re.sub(br'[^\x20-\x7f]', b'.', bytestr) + for i in range(0, len(bytestr), bytesperline): + h = hexstr[2*i:2*i+bytesperline*2] + r = (addr % (i + start)) if height > 1 else addr + r += b' '.join(h[i:i+2] for i in range(0, 2*bytesperline, 2)) + r += b' ' * (width - len(r)) + r += strstr[i:i+bytesperline] + result.append(r) + result = b'\n'.join(result) + if sys.version_info[0] == 3: + result = result.decode('ascii') + return result + + +def isprintable(string): + """Return if all characters in string are printable. 
+ + >>> isprintable('abc') + True + >>> isprintable(b'\01') + False + + """ + string = string.strip() + if len(string) < 1: + return True + if sys.version_info[0] == 3: + try: + return string.isprintable() + except Exception: + pass + try: + return string.decode('utf-8').isprintable() + except Exception: + pass + else: + if string.isalnum(): + return True + printable = ('0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST' + 'UVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ \t\n\r\x0b\x0c') + return all(c in printable for c in string) + + +def clean_whitespace(string, compact=False): + """Return string with compressed whitespace.""" + for a, b in (('\r\n', '\n'), ('\r', '\n'), ('\n\n', '\n'), + ('\t', ' '), (' ', ' ')): + string = string.replace(a, b) + if compact: + for a, b in (('\n', ' '), ('[ ', '['), + (' ', ' '), (' ', ' '), (' ', ' ')): + string = string.replace(a, b) + return string.strip() + + +def pformat_xml(xml): + """Return pretty formatted XML.""" + try: + import lxml.etree as etree # delayed import + if not isinstance(xml, bytes): + xml = xml.encode('utf-8') + xml = etree.parse(io.BytesIO(xml)) + xml = etree.tostring(xml, pretty_print=True, xml_declaration=True, + encoding=xml.docinfo.encoding) + xml = bytes2str(xml) + except Exception: + if isinstance(xml, bytes): + xml = bytes2str(xml) + xml = xml.replace('><', '>\n<') + return xml.replace(' ', ' ').replace('\t', ' ') + + +def pformat(arg, width=79, height=24, compact=True): + """Return pretty formatted representation of object as string. + + Whitespace might be altered. 
+ + """ + if height is None or height < 1: + height = 1024 + if width is None or width < 1: + width = 256 + + npopt = numpy.get_printoptions() + numpy.set_printoptions(threshold=100, linewidth=width) + + if isinstance(arg, basestring): + if arg[:5].lower() in (' height: + arg = '\n'.join(argl[:height//2] + ['...'] + argl[-height//2:]) + return arg + + +def snipstr(string, width=79, snipat=0.5, ellipsis='...'): + """Return string cut to specified length. + + >>> snipstr('abcdefghijklmnop', 8) + 'abc...op' + + """ + if ellipsis is None: + if isinstance(string, bytes): + ellipsis = b'...' + else: + ellipsis = u'\u2026' # does not print on win-py3.5 + esize = len(ellipsis) + + splitlines = string.splitlines() + # TODO: finish and test multiline snip + + result = [] + for line in splitlines: + if line is None: + result.append(ellipsis) + continue + linelen = len(line) + if linelen <= width: + result.append(string) + continue + + split = snipat + if split is None or split == 1: + split = linelen + elif 0 < abs(split) < 1: + split = int(math.floor(linelen * split)) + if split < 0: + split += linelen + if split < 0: + split = 0 + + if esize == 0 or width < esize + 1: + if split <= 0: + result.append(string[-width:]) + else: + result.append(string[:width]) + elif split <= 0: + result.append(ellipsis + string[esize-width:]) + elif split >= linelen or width < esize + 4: + result.append(string[:width-esize] + ellipsis) + else: + splitlen = linelen - width + esize + end1 = split - splitlen // 2 + end2 = end1 + splitlen + result.append(string[:end1] + ellipsis + string[end2:]) + + if isinstance(string, bytes): + return b'\n'.join(result) + else: + return '\n'.join(result) + + +def enumarg(enum, arg): + """Return enum member from its name or value. 
+ + >>> enumarg(TIFF.PHOTOMETRIC, 2) + + >>> enumarg(TIFF.PHOTOMETRIC, 'RGB') + + + """ + try: + return enum(arg) + except Exception: + try: + return enum[arg.upper()] + except Exception: + raise ValueError('invalid argument %s' % arg) + + +def parse_kwargs(kwargs, *keys, **keyvalues): + """Return dict with keys from keys|keyvals and values from kwargs|keyvals. + + Existing keys are deleted from kwargs. + + >>> kwargs = {'one': 1, 'two': 2, 'four': 4} + >>> kwargs2 = parse_kwargs(kwargs, 'two', 'three', four=None, five=5) + >>> kwargs == {'one': 1} + True + >>> kwargs2 == {'two': 2, 'four': 4, 'five': 5} + True + + """ + result = {} + for key in keys: + if key in kwargs: + result[key] = kwargs[key] + del kwargs[key] + for key, value in keyvalues.items(): + if key in kwargs: + result[key] = kwargs[key] + del kwargs[key] + else: + result[key] = value + return result + + +def update_kwargs(kwargs, **keyvalues): + """Update dict with keys and values if keys do not already exist. + + >>> kwargs = {'one': 1, } + >>> update_kwargs(kwargs, one=None, two=2) + >>> kwargs == {'one': 1, 'two': 2} + True + + """ + for key, value in keyvalues.items(): + if key not in kwargs: + kwargs[key] = value + + +def validate_jhove(filename, jhove='jhove', ignore=('More than 50 IFDs',)): + """Validate TIFF file using jhove -m TIFF-hul. + + Raise ValueError if jhove outputs an error message unless the message + contains one of the strings in 'ignore'. + + JHOVE does not support bigtiff or more than 50 IFDs. 
+ + See `JHOVE TIFF-hul Module `_ + + """ + import subprocess # noqa: delayed import + out = subprocess.check_output([jhove, filename, '-m', 'TIFF-hul']) + if b'ErrorMessage: ' in out: + for line in out.splitlines(): + line = line.strip() + if line.startswith(b'ErrorMessage: '): + error = line[14:].decode('utf8') + for i in ignore: + if i in error: + break + else: + raise ValueError(error) + break + + +def lsm2bin(lsmfile, binfile=None, tile=(256, 256), verbose=True): + """Convert [MP]TZCYX LSM file to series of BIN files. + + One BIN file containing 'ZCYX' data are created for each position, time, + and tile. The position, time, and tile indices are encoded at the end + of the filenames. + + """ + verbose = print_ if verbose else nullfunc + + if binfile is None: + binfile = lsmfile + elif binfile.lower() == 'none': + binfile = None + if binfile: + binfile += '_(z%ic%iy%ix%i)_m%%ip%%it%%03iy%%ix%%i.bin' + + verbose('\nOpening LSM file... ', end='', flush=True) + start_time = time.time() + + with TiffFile(lsmfile) as lsm: + if not lsm.is_lsm: + verbose('\n', lsm, flush=True) + raise ValueError('not a LSM file') + series = lsm.series[0] # first series contains the image data + shape = series.shape + axes = series.axes + dtype = series.dtype + size = product(shape) * dtype.itemsize + + verbose('%.3f s' % (time.time() - start_time)) + # verbose(lsm, flush=True) + verbose('Image\n axes: %s\n shape: %s\n dtype: %s\n size: %s' + % (axes, shape, dtype, format_size(size)), flush=True) + if not series.axes.endswith('TZCYX'): + raise ValueError('not a *TZCYX LSM file') + + verbose('Copying image from LSM to BIN files', end='', flush=True) + start_time = time.time() + tiles = shape[-2] // tile[-2], shape[-1] // tile[-1] + if binfile: + binfile = binfile % (shape[-4], shape[-3], tile[0], tile[1]) + shape = (1,) * (7-len(shape)) + shape + # cache for ZCYX stacks and output files + data = numpy.empty(shape[3:], dtype=dtype) + out = numpy.empty((shape[-4], shape[-3], tile[0], 
tile[1]), + dtype=dtype) + # iterate over Tiff pages containing data + pages = iter(series.pages) + for m in range(shape[0]): # mosaic axis + for p in range(shape[1]): # position axis + for t in range(shape[2]): # time axis + for z in range(shape[3]): # z slices + data[z] = next(pages).asarray() + for y in range(tiles[0]): # tile y + for x in range(tiles[1]): # tile x + out[:] = data[..., + y*tile[0]:(y+1)*tile[0], + x*tile[1]:(x+1)*tile[1]] + if binfile: + out.tofile(binfile % (m, p, t, y, x)) + verbose('.', end='', flush=True) + verbose(' %.3f s' % (time.time() - start_time)) + + +def imshow(data, title=None, vmin=0, vmax=None, cmap=None, bitspersample=None, + photometric='RGB', interpolation=None, dpi=96, figure=None, + subplot=111, maxdim=32768, **kwargs): + """Plot n-dimensional images using matplotlib.pyplot. + + Return figure, subplot and plot axis. + Requires pyplot already imported C{from matplotlib import pyplot}. + + Parameters + ---------- + bitspersample : int or None + Number of bits per channel in integer RGB images. + photometric : {'MINISWHITE', 'MINISBLACK', 'RGB', or 'PALETTE'} + The color space of the image data. + title : str + Window and subplot title. + figure : matplotlib.figure.Figure (optional). + Matplotlib to use for plotting. + subplot : int + A matplotlib.pyplot.subplot axis. + maxdim : int + maximum image width and length. + kwargs : optional + Arguments for matplotlib.pyplot.imshow. 
+ + """ + isrgb = photometric in ('RGB',) # 'PALETTE', 'YCBCR' + if data.dtype.kind == 'b': + isrgb = False + if isrgb and not (data.shape[-1] in (3, 4) or ( + data.ndim > 2 and data.shape[-3] in (3, 4))): + isrgb = False + photometric = 'MINISBLACK' + + data = data.squeeze() + if photometric in ('MINISWHITE', 'MINISBLACK', None): + data = reshape_nd(data, 2) + else: + data = reshape_nd(data, 3) + + dims = data.ndim + if dims < 2: + raise ValueError('not an image') + elif dims == 2: + dims = 0 + isrgb = False + else: + if isrgb and data.shape[-3] in (3, 4): + data = numpy.swapaxes(data, -3, -2) + data = numpy.swapaxes(data, -2, -1) + elif not isrgb and (data.shape[-1] < data.shape[-2] // 8 and + data.shape[-1] < data.shape[-3] // 8 and + data.shape[-1] < 5): + data = numpy.swapaxes(data, -3, -1) + data = numpy.swapaxes(data, -2, -1) + isrgb = isrgb and data.shape[-1] in (3, 4) + dims -= 3 if isrgb else 2 + + if isrgb: + data = data[..., :maxdim, :maxdim, :maxdim] + else: + data = data[..., :maxdim, :maxdim] + + if photometric == 'PALETTE' and isrgb: + datamax = data.max() + if datamax > 255: + data = data >> 8 # possible precision loss + data = data.astype('B') + elif data.dtype.kind in 'ui': + if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None: + try: + bitspersample = int(math.ceil(math.log(data.max(), 2))) + except Exception: + bitspersample = data.dtype.itemsize * 8 + elif not isinstance(bitspersample, inttypes): + # bitspersample can be tuple, e.g. 
(5, 6, 5) + bitspersample = data.dtype.itemsize * 8 + datamax = 2**bitspersample + if isrgb: + if bitspersample < 8: + data = data << (8 - bitspersample) + elif bitspersample > 8: + data = data >> (bitspersample - 8) # precision loss + data = data.astype('B') + elif data.dtype.kind == 'f': + datamax = data.max() + if isrgb and datamax > 1.0: + if data.dtype.char == 'd': + data = data.astype('f') + data /= datamax + else: + data = data / datamax + elif data.dtype.kind == 'b': + datamax = 1 + elif data.dtype.kind == 'c': + data = numpy.absolute(data) + datamax = data.max() + + if not isrgb: + if vmax is None: + vmax = datamax + if vmin is None: + if data.dtype.kind == 'i': + dtmin = numpy.iinfo(data.dtype).min + vmin = numpy.min(data) + if vmin == dtmin: + vmin = numpy.min(data > dtmin) + if data.dtype.kind == 'f': + dtmin = numpy.finfo(data.dtype).min + vmin = numpy.min(data) + if vmin == dtmin: + vmin = numpy.min(data > dtmin) + else: + vmin = 0 + + pyplot = sys.modules['matplotlib.pyplot'] + + if figure is None: + pyplot.rc('font', family='sans-serif', weight='normal', size=8) + figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True, + facecolor='1.0', edgecolor='w') + try: + figure.canvas.manager.window.title(title) + except Exception: + pass + size = len(title.splitlines()) if title else 1 + pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.98-size*0.03, + left=0.1, right=0.95, hspace=0.05, wspace=0.0) + subplot = pyplot.subplot(subplot) + + if title: + try: + title = unicode(title, 'Windows-1252') + except TypeError: + pass + pyplot.title(title, size=11) + + if cmap is None: + if data.dtype.char == '?': + cmap = 'gray' + elif data.dtype.kind in 'buf' or vmin == 0: + cmap = 'viridis' + else: + cmap = 'coolwarm' + if photometric == 'MINISWHITE': + cmap += '_r' + + image = pyplot.imshow(numpy.atleast_2d(data[(0,) * dims].squeeze()), + vmin=vmin, vmax=vmax, cmap=cmap, + interpolation=interpolation, **kwargs) + + if not isrgb: + pyplot.colorbar() # 
panchor=(0.55, 0.5), fraction=0.05 + + def format_coord(x, y): + # callback function to format coordinate display in toolbar + x = int(x + 0.5) + y = int(y + 0.5) + try: + if dims: + return '%s @ %s [%4i, %4i]' % ( + curaxdat[1][y, x], current, y, x) + return '%s @ [%4i, %4i]' % (data[y, x], y, x) + except IndexError: + return '' + + def none(event): + return '' + + subplot.format_coord = format_coord + image.get_cursor_data = none + image.format_cursor_data = none + + if dims: + current = list((0,) * dims) + curaxdat = [0, data[tuple(current)].squeeze()] + sliders = [pyplot.Slider( + pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]), + 'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5', + valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)] + for slider in sliders: + slider.drawon = False + + def set_image(current, sliders=sliders, data=data): + # change image and redraw canvas + curaxdat[1] = data[tuple(current)].squeeze() + image.set_data(curaxdat[1]) + for ctrl, index in zip(sliders, current): + ctrl.eventson = False + ctrl.set_val(index) + ctrl.eventson = True + figure.canvas.draw() + + def on_changed(index, axis, data=data, current=current): + # callback function for slider change event + index = int(round(index)) + curaxdat[0] = axis + if index == current[axis]: + return + if index >= data.shape[axis]: + index = 0 + elif index < 0: + index = data.shape[axis] - 1 + current[axis] = index + set_image(current) + + def on_keypressed(event, data=data, current=current): + # callback function for key press event + key = event.key + axis = curaxdat[0] + if str(key) in '0123456789': + on_changed(key, axis) + elif key == 'right': + on_changed(current[axis] + 1, axis) + elif key == 'left': + on_changed(current[axis] - 1, axis) + elif key == 'up': + curaxdat[0] = 0 if axis == len(data.shape)-1 else axis + 1 + elif key == 'down': + curaxdat[0] = len(data.shape)-1 if axis == 0 else axis - 1 + elif key == 'end': + on_changed(data.shape[axis] - 1, 
axis) + elif key == 'home': + on_changed(0, axis) + + figure.canvas.mpl_connect('key_press_event', on_keypressed) + for axis, ctrl in enumerate(sliders): + ctrl.on_changed(lambda k, a=axis: on_changed(k, a)) + + return figure, subplot, image + + +def _app_show(): + """Block the GUI. For use as skimage plugin.""" + pyplot = sys.modules['matplotlib.pyplot'] + pyplot.show() + + +def askopenfilename(**kwargs): + """Return file name(s) from Tkinter's file open dialog.""" + try: + from Tkinter import Tk + import tkFileDialog as filedialog + except ImportError: + from tkinter import Tk, filedialog + root = Tk() + root.withdraw() + root.update() + filenames = filedialog.askopenfilename(**kwargs) + root.destroy() + return filenames + + +def main(argv=None): + """Command line usage main function.""" + if float(sys.version[0:3]) < 2.7: + print('This script requires Python version 2.7 or better.') + print('This is Python version %s' % sys.version) + return 0 + if argv is None: + argv = sys.argv + + import optparse # TODO: use argparse + + parser = optparse.OptionParser( + usage='usage: %prog [options] path', + description='Display image data in TIFF files.', + version='%%prog %s' % __version__) + opt = parser.add_option + opt('-p', '--page', dest='page', type='int', default=-1, + help='display single page') + opt('-s', '--series', dest='series', type='int', default=-1, + help='display series of pages of same shape') + opt('--nomultifile', dest='nomultifile', action='store_true', + default=False, help='do not read OME series from multiple files') + opt('--noplots', dest='noplots', type='int', default=8, + help='maximum number of plots') + opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear', + help='image interpolation method') + opt('--dpi', dest='dpi', type='int', default=96, + help='plot resolution') + opt('--vmin', dest='vmin', type='int', default=None, + help='minimum value for colormapping') + opt('--vmax', dest='vmax', type='int', default=None, + 
help='maximum value for colormapping') + opt('--debug', dest='debug', action='store_true', default=False, + help='raise exception on failures') + opt('--doctest', dest='doctest', action='store_true', default=False, + help='runs the docstring examples') + opt('-v', '--detail', dest='detail', type='int', default=2) + opt('-q', '--quiet', dest='quiet', action='store_true') + + settings, path = parser.parse_args() + path = ' '.join(path) + + if settings.doctest: + import doctest + doctest.testmod(optionflags=doctest.ELLIPSIS) + return 0 + if not path: + path = askopenfilename(title='Select a TIFF file', + filetypes=TIFF.FILEOPEN_FILTER) + if not path: + parser.error('No file specified') + + if any(i in path for i in '?*'): + path = glob.glob(path) + if not path: + print('no files match the pattern') + return 0 + # TODO: handle image sequences + path = path[0] + + if not settings.quiet: + print('\nReading file structure...', end=' ') + start = time.time() + try: + tif = TiffFile(path, multifile=not settings.nomultifile) + except Exception as e: + if settings.debug: + raise + else: + print('\n', e) + sys.exit(0) + if not settings.quiet: + print('%.3f ms' % ((time.time()-start) * 1e3)) + + if tif.is_ome: + settings.norgb = True + + images = [] + if settings.noplots > 0: + if not settings.quiet: + print('Reading image data... 
', end=' ') + + def notnone(x): + return next(i for i in x if i is not None) + + start = time.time() + try: + if settings.page >= 0: + images = [(tif.asarray(key=settings.page), + tif[settings.page], None)] + elif settings.series >= 0: + images = [(tif.asarray(series=settings.series), + notnone(tif.series[settings.series]._pages), + tif.series[settings.series])] + else: + images = [] + for i, s in enumerate(tif.series[:settings.noplots]): + try: + images.append((tif.asarray(series=i), + notnone(s._pages), + tif.series[i])) + except ValueError as e: + images.append((None, notnone(s.pages), None)) + if settings.debug: + raise + else: + print('\nSeries %i failed: %s... ' % (i, e), + end='') + if not settings.quiet: + print('%.3f ms' % ((time.time()-start) * 1e3)) + except Exception as e: + if settings.debug: + raise + else: + print(e) + + if not settings.quiet: + print() + print(TiffFile.__str__(tif, detail=int(settings.detail))) + print() + tif.close() + + if images and settings.noplots > 0: + try: + import matplotlib + matplotlib.use('TkAgg') + from matplotlib import pyplot + except ImportError as e: + warnings.warn('failed to import matplotlib.\n%s' % e) + else: + for img, page, series in images: + if img is None: + continue + vmin, vmax = settings.vmin, settings.vmax + if 'GDAL_NODATA' in page.tags: + try: + vmin = numpy.min( + img[img > float(page.tags['GDAL_NODATA'].value)]) + except ValueError: + pass + if tif.is_stk: + try: + vmin = tif.stk_metadata['MinScale'] + vmax = tif.stk_metadata['MaxScale'] + except KeyError: + pass + else: + if vmax <= vmin: + vmin, vmax = settings.vmin, settings.vmax + if series: + title = '%s\n%s\n%s' % (str(tif), str(page), str(series)) + else: + title = '%s\n %s' % (str(tif), str(page)) + photometric = 'MINISBLACK' + if page.photometric not in (3,): + photometric = TIFF.PHOTOMETRIC(page.photometric).name + imshow(img, title=title, vmin=vmin, vmax=vmax, + bitspersample=page.bitspersample, + photometric=photometric, + 
interpolation=settings.interpol, + dpi=settings.dpi) + pyplot.show() + + +if sys.version_info[0] == 2: + inttypes = int, long # noqa + + def print_(*args, **kwargs): + """Print function with flush support.""" + flush = kwargs.pop('flush', False) + print(*args, **kwargs) + if flush: + sys.stdout.flush() + + def bytes2str(b, encoding=None, errors=None): + """Return string from bytes.""" + return b + + def str2bytes(s, encoding=None): + """Return bytes from string.""" + return s + + def byte2int(b): + """Return value of byte as int.""" + return ord(b) + + class FileNotFoundError(IOError): + pass + + TiffFrame = TiffPage # noqa +else: + inttypes = int + basestring = str, bytes + unicode = str + print_ = print + + def bytes2str(b, encoding=None, errors='strict'): + """Return unicode string from encoded bytes.""" + if encoding is not None: + return b.decode(encoding, errors) + try: + return b.decode('utf-8', errors) + except UnicodeDecodeError: + return b.decode('cp1252', errors) + + def str2bytes(s, encoding='cp1252'): + """Return bytes from unicode string.""" + return s.encode(encoding) + + def byte2int(b): + """Return value of byte as int.""" + return b + +if __name__ == '__main__': + sys.exit(main()) + diff --git a/venv/Lib/site-packages/imageio/plugins/bsdf.py b/venv/Lib/site-packages/imageio/plugins/bsdf.py new file mode 100644 index 000000000..a7f6a6d00 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/bsdf.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" BSDF plugin. +""" + +import numpy as np + +from .. import formats +from ..core import Format + + +def get_bsdf_serializer(options): + from . import _bsdf as bsdf + + class NDArrayExtension(bsdf.Extension): + """ Copy of BSDF's NDArrayExtension but deal with lazy blobs. 
+ """ + + name = "ndarray" + cls = np.ndarray + + def encode(self, s, v): + return dict(shape=v.shape, dtype=str(v.dtype), data=v.tobytes()) + + def decode(self, s, v): + return v # return as dict, because of lazy blobs, decode in Image + + class ImageExtension(bsdf.Extension): + """ We implement two extensions that trigger on the Image classes. + """ + + def encode(self, s, v): + return dict(array=v.array, meta=v.meta) + + def decode(self, s, v): + return Image(v["array"], v["meta"]) + + class Image2DExtension(ImageExtension): + + name = "image2d" + cls = Image2D + + class Image3DExtension(ImageExtension): + + name = "image3d" + cls = Image3D + + exts = [NDArrayExtension, Image2DExtension, Image3DExtension] + serializer = bsdf.BsdfSerializer(exts, **options) + + return bsdf, serializer + + +class Image: + """ Class in which we wrap the array and meta data. By using an extension + we can make BSDF trigger on these classes and thus encode the images. + as actual images. + """ + + def __init__(self, array, meta): + self.array = array + self.meta = meta + + def get_array(self): + if not isinstance(self.array, np.ndarray): + v = self.array + blob = v["data"] + if not isinstance(blob, bytes): # then it's a lazy bsdf.Blob + blob = blob.get_bytes() + self.array = np.frombuffer(blob, dtype=v["dtype"]) + self.array.shape = v["shape"] + return self.array + + def get_meta(self): + return self.meta + + +class Image2D(Image): + pass + + +class Image3D(Image): + pass + + +class BsdfFormat(Format): + """ The BSDF format enables reading and writing of image data in the + BSDF serialization format. This format allows storage of images, volumes, + and series thereof. Data can be of any numeric data type, and can + optionally be compressed. Each image/volume can have associated + meta data, which can consist of any data type supported by BSDF. + + By default, image data is lazily loaded; the actual image data is + not read until it is requested. 
This allows storing multiple images + in a single file and still have fast access to individual images. + Alternatively, a series of images can be read in streaming mode, reading + images as they are read (e.g. from http). + + BSDF is a simple generic binary format. It is easy to extend and there + are standard extension definitions for 2D and 3D image data. + Read more at http://bsdf.io. + + Parameters for reading + ---------------------- + random_access : bool + Whether individual images in the file can be read in random order. + Defaults to True for normal files, and to False when reading from HTTP. + If False, the file is read in "streaming mode", allowing reading + files as they are read, but without support for "rewinding". + Note that setting this to True when reading from HTTP, the whole file + is read upon opening it (since lazy loading is not possible over HTTP). + + Parameters for saving + --------------------- + compression : {0, 1, 2} + Use ``0`` or "no" for no compression, ``1`` or "zlib" for Zlib + compression (same as zip files and PNG), and ``2`` or "bz2" for Bz2 + compression (more compact but slower). Default 1 (zlib). + Note that some BSDF implementations may not support compression + (e.g. JavaScript). + + """ + + def _can_read(self, request): + if request.mode[1] in (self.modes + "?"): + # if request.extension in self.extensions: + # return True + if request.firstbytes.startswith(b"BSDF"): + return True + + def _can_write(self, request): + if request.mode[1] in (self.modes + "?"): + if request.extension in self.extensions: + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, random_access=None): + # Validate - we need a BSDF file consisting of a list of images + # The list is typically a stream, but does not have to be. 
+ assert self.request.firstbytes[:4] == b"BSDF", "Not a BSDF file" + # self.request.firstbytes[5:6] == major and minor version + if not ( + self.request.firstbytes[6:15] == b"M\x07image2D" + or self.request.firstbytes[6:15] == b"M\x07image3D" + or self.request.firstbytes[6:7] == b"l" + ): + pass # Actually, follow a more duck-type approach ... + # raise RuntimeError('BSDF file does not look like an ' + # 'image container.') + # Set options. If we think that seeking is allowed, we lazily load + # blobs, and set streaming to False (i.e. the whole file is read, + # but we skip over binary blobs), so that we subsequently allow + # random access to the images. + # If seeking is not allowed (e.g. with a http request), we cannot + # lazily load blobs, but we can still load streaming from the web. + options = {} + if self.request.filename.startswith(("http://", "https://")): + ra = False if random_access is None else bool(random_access) + options["lazy_blob"] = False # Because we cannot seek now + options["load_streaming"] = not ra # Load as a stream? + else: + ra = True if random_access is None else bool(random_access) + options["lazy_blob"] = ra # Don't read data until needed + options["load_streaming"] = not ra + + file = self.request.get_file() + bsdf, self._serializer = get_bsdf_serializer(options) + self._stream = self._serializer.load(file) + # Another validation + if ( + isinstance(self._stream, dict) + and "meta" in self._stream + and "array" in self._stream + ): + self._stream = Image(self._stream["array"], self._stream["meta"]) + if not isinstance(self._stream, (Image, list, bsdf.ListStream)): + raise RuntimeError( + "BSDF file does not look seem to have an " "image container." 
+ ) + + def _close(self): + pass + + def _get_length(self): + if isinstance(self._stream, Image): + return 1 + elif isinstance(self._stream, list): + return len(self._stream) + elif self._stream.count < 0: + return np.inf + return self._stream.count + + def _get_data(self, index): + # Validate + if index < 0 or index >= self.get_length(): + raise IndexError( + "Image index %i not in [0 %i]." % (index, self.get_length()) + ) + # Get Image object + if isinstance(self._stream, Image): + image_ob = self._stream # singleton + elif isinstance(self._stream, list): + # Easy when we have random access + image_ob = self._stream[index] + else: + # For streaming, we need to skip over frames + if index < self._stream.index: + raise IndexError( + "BSDF file is being read in streaming " + "mode, thus does not allow rewinding." + ) + while index > self._stream.index: + self._stream.next() + image_ob = self._stream.next() # Can raise StopIteration + # Is this an image? + if ( + isinstance(image_ob, dict) + and "meta" in image_ob + and "array" in image_ob + ): + image_ob = Image(image_ob["array"], image_ob["meta"]) + if isinstance(image_ob, Image): + # Return as array (if we have lazy blobs, they are read now) + return image_ob.get_array(), image_ob.get_meta() + else: + r = repr(image_ob) + r = r if len(r) < 200 else r[:197] + "..." 
+ raise RuntimeError("BSDF file contains non-image " + r) + + def _get_meta_data(self, index): # pragma: no cover + return {} # This format does not support global meta data + + # -- writer + + class Writer(Format.Writer): + def _open(self, compression=1): + options = {"compression": compression} + bsdf, self._serializer = get_bsdf_serializer(options) + if self.request.mode[1] in "iv": + self._stream = None # Singleton image + self._written = False + else: + # Series (stream) of images + file = self.request.get_file() + self._stream = bsdf.ListStream() + self._serializer.save(file, self._stream) + + def _close(self): + # We close the stream here, which will mark the number of written + # elements. If we would not close it, the file would be fine, it's + # just that upon reading it would not be known how many items are + # in there. + if self._stream is not None: + self._stream.close(False) # False says "keep this a stream" + + def _append_data(self, im, meta): + # Determine dimension + ndim = None + if self.request.mode[1] in "iI": + ndim = 2 + elif self.request.mode[1] in "vV": + ndim = 3 + else: + ndim = 3 # Make an educated guess + if im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4): + ndim = 2 + # Validate shape + assert ndim in (2, 3) + if ndim == 2: + assert im.ndim == 2 or (im.ndim == 3 and im.shape[-1] <= 4) + else: + assert im.ndim == 3 or (im.ndim == 4 and im.shape[-1] <= 4) + # Wrap data and meta data in our special class that will trigger + # the BSDF image2D or image3D extension. 
+ if ndim == 2: + ob = Image2D(im, meta) + else: + ob = Image3D(im, meta) + # Write directly or to stream + if self._stream is None: + assert not self._written, "Cannot write singleton image twice" + self._written = True + file = self.request.get_file() + self._serializer.save(file, ob) + else: + self._stream.append(ob) + + def set_meta_data(self, meta): # pragma: no cover + raise RuntimeError("The BSDF format only supports " "per-image meta data.") + + +format = BsdfFormat( + "bsdf", # short name + "Format based on the Binary Structured Data Format", + ".bsdf", + "iIvV", +) +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/plugins/dicom.py b/venv/Lib/site-packages/imageio/plugins/dicom.py new file mode 100644 index 000000000..8185c0926 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/dicom.py @@ -0,0 +1,327 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin for reading DICOM files. +""" + +# todo: Use pydicom: +# * Note: is not py3k ready yet +# * Allow reading the full meta info +# I think we can more or less replace the SimpleDicomReader with a +# pydicom.Dataset For series, only ned to read the full info from one +# file: speed still high +# * Perhaps allow writing? + +import os +import sys +import logging +import subprocess + +from .. import formats +from ..core import Format, BaseProgressIndicator, StdoutProgressIndicator +from ..core import read_n_bytes + +_dicom = None # lazily loaded in load_lib() + +logger = logging.getLogger(__name__) + + +def load_lib(): + global _dicom + from . 
import _dicom + + return _dicom + + +# Determine endianity of system +sys_is_little_endian = sys.byteorder == "little" + + +def get_dcmdjpeg_exe(): + fname = "dcmdjpeg" + ".exe" * sys.platform.startswith("win") + for dir in ( + "c:\\dcmtk", + "c:\\Program Files", + "c:\\Program Files\\dcmtk", + "c:\\Program Files (x86)\\dcmtk", + ): + filename = os.path.join(dir, fname) + if os.path.isfile(filename): + return [filename] + + try: + subprocess.check_call([fname, "--version"]) + return [fname] + except Exception: + return None + + +def get_gdcmconv_exe(): + fname = "gdcmconv" + ".exe" * sys.platform.startswith("win") + # Maybe it's on the path + try: + subprocess.check_call([fname, "--version"]) + return [fname, "--raw"] + except Exception: + pass + # Select directories where it could be + candidates = [] + base_dirs = [r"c:\Program Files"] + for base_dir in base_dirs: + if os.path.isdir(base_dir): + for dname in os.listdir(base_dir): + if dname.lower().startswith("gdcm"): + suffix = dname[4:].strip() + candidates.append((suffix, os.path.join(base_dir, dname))) + # Sort, so higher versions are tried earlier + candidates.sort(reverse=True) + # Select executable + filename = None + for _, dirname in candidates: + exe1 = os.path.join(dirname, "gdcmconv.exe") + exe2 = os.path.join(dirname, "bin", "gdcmconv.exe") + if os.path.isfile(exe1): + filename = exe1 + break + if os.path.isfile(exe2): + filename = exe2 + break + else: + return None + return [filename, "--raw"] + + +class DicomFormat(Format): + """ A format for reading DICOM images: a common format used to store + medical image data, such as X-ray, CT and MRI. + + This format borrows some code (and ideas) from the pydicom project. However, + only a predefined subset of tags are extracted from the file. This allows + for great simplifications allowing us to make a stand-alone reader, and + also results in a much faster read time. + + By default, only uncompressed and deflated transfer syntaxes are supported. 
+ If gdcm or dcmtk is installed, these will be used to automatically convert + the data. See https://github.com/malaterre/GDCM/releases for installing GDCM. + + This format provides functionality to group images of the same + series together, thus extracting volumes (and multiple volumes). + Using volread will attempt to yield a volume. If multiple volumes + are present, the first one is given. Using mimread will simply yield + all images in the given directory (not taking series into account). + + Parameters for reading + ---------------------- + progress : {True, False, BaseProgressIndicator} + Whether to show progress when reading from multiple files. + Default True. By passing an object that inherits from + BaseProgressIndicator, the way in which progress is reported + can be costumized. + + """ + + def _can_read(self, request): + # If user URI was a directory, we check whether it has a DICOM file + if os.path.isdir(request.filename): + files = os.listdir(request.filename) + for fname in sorted(files): # Sorting make it consistent + filename = os.path.join(request.filename, fname) + if os.path.isfile(filename) and "DICOMDIR" not in fname: + with open(filename, "rb") as f: + first_bytes = read_n_bytes(f, 140) + return first_bytes[128:132] == b"DICM" + else: + return False + # Check + return request.firstbytes[128:132] == b"DICM" + + def _can_write(self, request): + # We cannot save yet. May be possible if we will used pydicom as + # a backend. + return False + + # -- + + class Reader(Format.Reader): + + _compressed_warning_dirs = set() + + def _open(self, progress=True): + if not _dicom: + load_lib() + if os.path.isdir(self.request.filename): + # A dir can be given if the user used the format explicitly + self._info = {} + self._data = None + else: + # Read the given dataset now ... + try: + dcm = _dicom.SimpleDicomReader(self.request.get_file()) + except _dicom.CompressedDicom as err: + # We cannot do this on our own. Perhaps with some help ... 
+ cmd = get_gdcmconv_exe() + if not cmd and "JPEG" in str(err): + cmd = get_dcmdjpeg_exe() + if not cmd: + msg = err.args[0].replace("using", "installing") + msg = msg.replace("convert", "auto-convert") + err.args = (msg,) + raise + else: + fname1 = self.request.get_local_filename() + fname2 = fname1 + ".raw" + try: + subprocess.check_call(cmd + [fname1, fname2]) + except Exception: + raise err + d = os.path.dirname(fname1) + if d not in self._compressed_warning_dirs: + self._compressed_warning_dirs.add(d) + logger.warning( + "DICOM file contained compressed data. " + + "Autoconverting with " + + cmd[0] + + " (this warning is shown once for each directory)" + ) + dcm = _dicom.SimpleDicomReader(fname2) + + self._info = dcm._info + self._data = dcm.get_numpy_array() + + # Initialize series, list of DicomSeries objects + self._series = None # only created if needed + + # Set progress indicator + if isinstance(progress, BaseProgressIndicator): + self._progressIndicator = progress + elif progress is True: + p = StdoutProgressIndicator("Reading DICOM") + self._progressIndicator = p + elif progress in (None, False): + self._progressIndicator = BaseProgressIndicator("Dummy") + else: + raise ValueError("Invalid value for progress.") + + def _close(self): + # Clean up + self._info = None + self._data = None + self._series = None + + @property + def series(self): + if self._series is None: + pi = self._progressIndicator + self._series = _dicom.process_directory(self.request, pi) + return self._series + + def _get_length(self): + if self._data is None: + dcm = self.series[0][0] + self._info = dcm._info + self._data = dcm.get_numpy_array() + + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + if self.request.mode[1] == "i": + # User expects one, but lets be honest about this file + return nslices + elif self.request.mode[1] == "I": + # User expects multiple, if this file has multiple slices, ok. + # Otherwise we have to check the series. 
+ if nslices > 1: + return nslices + else: + return sum([len(serie) for serie in self.series]) + elif self.request.mode[1] == "v": + # User expects a volume, if this file has one, ok. + # Otherwise we have to check the series + if nslices > 1: + return 1 + else: + return len(self.series) # We assume one volume per series + elif self.request.mode[1] == "V": + # User expects multiple volumes. We have to check the series + return len(self.series) # We assume one volume per series + else: + raise RuntimeError("DICOM plugin should know what to expect.") + + def _get_data(self, index): + if self._data is None: + dcm = self.series[0][0] + self._info = dcm._info + self._data = dcm.get_numpy_array() + + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + if self.request.mode[1] == "i": + # Allow index >1 only if this file contains >1 + if nslices > 1: + return self._data[index], self._info + elif index == 0: + return self._data, self._info + else: + raise IndexError("Dicom file contains only one slice.") + elif self.request.mode[1] == "I": + # Return slice from volume, or return item from series + if index == 0 and nslices > 1: + return self._data[index], self._info + else: + L = [] + for serie in self.series: + L.extend([dcm_ for dcm_ in serie]) + return L[index].get_numpy_array(), L[index].info + elif self.request.mode[1] in "vV": + # Return volume or series + if index == 0 and nslices > 1: + return self._data, self._info + else: + return ( + self.series[index].get_numpy_array(), + self.series[index].info, + ) + else: # pragma: no cover + raise ValueError("DICOM plugin should know what to expect.") + + def _get_meta_data(self, index): + if self._data is None: + dcm = self.series[0][0] + self._info = dcm._info + self._data = dcm.get_numpy_array() + + nslices = self._data.shape[0] if (self._data.ndim == 3) else 1 + + # Default is the meta data of the given file, or the "first" file. 
+ if index is None: + return self._info + + if self.request.mode[1] == "i": + return self._info + elif self.request.mode[1] == "I": + # Return slice from volume, or return item from series + if index == 0 and nslices > 1: + return self._info + else: + L = [] + for serie in self.series: + L.extend([dcm_ for dcm_ in serie]) + return L[index].info + elif self.request.mode[1] in "vV": + # Return volume or series + if index == 0 and nslices > 1: + return self._info + else: + return self.series[index].info + else: # pragma: no cover + raise ValueError("DICOM plugin should know what to expect.") + + +# Add this format +formats.add_format( + DicomFormat( + "DICOM", + "Digital Imaging and Communications in Medicine", + ".dcm .ct .mri", + "iIvV", + ) +) # Often DICOM files have weird or no extensions diff --git a/venv/Lib/site-packages/imageio/plugins/example.py b/venv/Lib/site-packages/imageio/plugins/example.py new file mode 100644 index 000000000..d444e0ea9 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/example.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Example plugin. You can use this as a template for your own plugin. +""" + +import numpy as np + +from .. import formats +from ..core import Format + + +class DummyFormat(Format): + """ The dummy format is an example format that does nothing. + It will never indicate that it can read or write a file. When + explicitly asked to read, it will simply read the bytes. When + explicitly asked to write, it will raise an error. + + This documentation is shown when the user does ``help('thisformat')``. + + Parameters for reading + ---------------------- + Specify arguments in numpy doc style here. + + Parameters for saving + --------------------- + Specify arguments in numpy doc style here. + + """ + + def _can_read(self, request): + # This method is called when the format manager is searching + # for a format to read a certain image. 
Return True if this format + # can do it. + # + # The format manager is aware of the extensions and the modes + # that each format can handle. It will first ask all formats + # that *seem* to be able to read it whether they can. If none + # can, it will ask the remaining formats if they can: the + # extension might be missing, and this allows formats to provide + # functionality for certain extensions, while giving preference + # to other plugins. + # + # If a format says it can, it should live up to it. The format + # would ideally check the request.firstbytes and look for a + # header of some kind. + # + # The request object has: + # request.filename: a representation of the source (only for reporting) + # request.firstbytes: the first 256 bytes of the file. + # request.mode[0]: read or write mode + # request.mode[1]: what kind of data the user expects: one of 'iIvV?' + + if request.mode[1] in (self.modes + "?"): + if request.extension in self.extensions: + return True + + def _can_write(self, request): + # This method is called when the format manager is searching + # for a format to write a certain image. It will first ask all + # formats that *seem* to be able to write it whether they can. + # If none can, it will ask the remaining formats if they can. + # + # Return True if the format can do it. + + # In most cases, this code does suffice: + if request.mode[1] in (self.modes + "?"): + if request.extension in self.extensions: + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, some_option=False, length=1): + # Specify kwargs here. Optionally, the user-specified kwargs + # can also be accessed via the request.kwargs object. + # + # The request object provides two ways to get access to the + # data. 
Use just one: + # - Use request.get_file() for a file object (preferred) + # - Use request.get_local_filename() for a file on the system + self._fp = self.request.get_file() + self._length = length # passed as an arg in this case for testing + self._data = None + + def _close(self): + # Close the reader. + # Note that the request object will close self._fp + pass + + def _get_length(self): + # Return the number of images. Can be np.inf + return self._length + + def _get_data(self, index): + # Return the data and meta data for the given index + if index >= self._length: + raise IndexError("Image index %i > %i" % (index, self._length)) + # Read all bytes + if self._data is None: + self._data = self._fp.read() + # Put in a numpy array + im = np.frombuffer(self._data, "uint8") + im.shape = len(im), 1 + # Return array and dummy meta data + return im, {} + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, it + # should return the global meta data. + return {} # This format does not support meta data + + # -- writer + + class Writer(Format.Writer): + def _open(self, flags=0): + # Specify kwargs here. Optionally, the user-specified kwargs + # can also be accessed via the request.kwargs object. + # + # The request object provides two ways to write the data. + # Use just one: + # - Use request.get_file() for a file object (preferred) + # - Use request.get_local_filename() for a file on the system + self._fp = self.request.get_file() + + def _close(self): + # Close the reader. + # Note that the request object will close self._fp + pass + + def _append_data(self, im, meta): + # Process the given data and meta data. + raise RuntimeError("The dummy format cannot write image data.") + + def set_meta_data(self, meta): + # Process the given meta data (global for all images) + # It is not mandatory to support this. + raise RuntimeError("The dummy format cannot write meta data.") + + +# Register. 
You register an *instance* of a Format class. Here specify: +format = DummyFormat( + "dummy", # short name + "An example format that does nothing.", # one line descr. + ".foobar .nonexistentext", # list of extensions + "iI", # modes, characters in iIvV +) +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/plugins/feisem.py b/venv/Lib/site-packages/imageio/plugins/feisem.py new file mode 100644 index 000000000..289ce3600 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/feisem.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +from .tifffile import TiffFormat + +from .. import formats + + +class FEISEMFormat(TiffFormat): + """Provide read support for TIFFs produced by an FEI SEM microscope. + + This format is based on TIFF, and supports the same parameters. + + FEI microscopes append metadata as ASCII text at the end of the file, + which this reader correctly extracts. + + Parameters for get_data + ----------------------- + discard_watermark : bool + If True (default), discard the bottom rows of the image, which + contain no image data, only a watermark with metadata. + watermark_height : int + The height in pixels of the FEI watermark. The default is 70. + """ + + def _can_write(self, request): + return False # FEI-SEM only supports reading + + class Reader(TiffFormat.Reader): + def _get_data(self, index=0, discard_watermark=True, watermark_height=70): + """Get image and metadata from given index. + + FEI images usually (always?) contain a watermark at the + bottom of the image, 70 pixels high. We discard this by + default as it does not contain any information not present + in the metadata. + """ + im, meta = super(FEISEMFormat.Reader, self)._get_data(index) + if discard_watermark: + im = im[:-watermark_height] + return im, meta + + def _get_meta_data(self, index=None): + """Read the metadata from an FEI SEM TIFF. 
+ + This metadata is included as ASCII text at the end of the file. + + The index, if provided, is ignored. + + Returns + ------- + metadata : dict + Dictionary of metadata. + """ + md = {"root": {}} + current_tag = "root" + reading_metadata = False + filename = self.request.get_local_filename() + with open(filename, encoding="utf8", errors="ignore") as fin: + for line in fin: + if not reading_metadata: + if not line.startswith("Date="): + continue + else: + reading_metadata = True + line = line.rstrip() + if line.startswith("["): + current_tag = line.lstrip("[").rstrip("]") + md[current_tag] = {} + else: + if "=" in line: # ignore empty and irrelevant lines + key, val = line.split("=", maxsplit=1) + for tag_type in (int, float): + try: + val = tag_type(val) + except ValueError: + continue + else: + break + md[current_tag][key] = val + if not md["root"] and len(md) == 1: + raise ValueError("Input file %s contains no FEI metadata." % filename) + self._meta.update(md) + return md + + +# Register plugin +format = FEISEMFormat( + "fei", "FEI-SEM TIFF format", extensions=[".tif", ".tiff"], modes="iv" +) +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/plugins/ffmpeg.py b/venv/Lib/site-packages/imageio/plugins/ffmpeg.py new file mode 100644 index 000000000..99f8adbd3 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/ffmpeg.py @@ -0,0 +1,710 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin that uses ffmpeg to read and write series of images to +a wide range of video formats. + +Code inspired/based on code from moviepy: https://github.com/Zulko/moviepy/ +by Zulko + +""" + +import sys +import time +import logging +import threading +import subprocess as sp + +import numpy as np + +from .. 
import formats +from ..core import Format, image_as_uint + +logger = logging.getLogger(__name__) + +# Get camera format +if sys.platform.startswith("win"): + CAM_FORMAT = "dshow" # dshow or vfwcap +elif sys.platform.startswith("linux"): + CAM_FORMAT = "video4linux2" +elif sys.platform.startswith("darwin"): + CAM_FORMAT = "avfoundation" +else: # pragma: no cover + CAM_FORMAT = "unknown-cam-format" + + +def download(directory=None, force_download=False): # pragma: no cover + raise RuntimeError( + "imageio.ffmpeg.download() has been deprecated. " + "Use 'pip install imageio-ffmpeg' instead.'" + ) + + +# For backwards compatibility - we dont use this ourselves +def get_exe(): # pragma: no cover + """ Wrapper for imageio_ffmpeg.get_ffmpeg_exe() + """ + import imageio_ffmpeg + + return imageio_ffmpeg.get_ffmpeg_exe() + + +_ffmpeg_api = None + + +def _get_ffmpeg_api(): + global _ffmpeg_api + if _ffmpeg_api is None: + try: + import imageio_ffmpeg + except ImportError: + raise ImportError( + "To use the imageio ffmpeg plugin you need to " + "'pip install imageio-ffmpeg'" + ) + _ffmpeg_api = imageio_ffmpeg + return _ffmpeg_api + + +class FfmpegFormat(Format): + """ The ffmpeg format provides reading and writing for a wide range + of movie formats such as .avi, .mpeg, .mp4, etc. And also to read + streams from webcams and USB cameras. + + To read from camera streams, supply "" as the filename, + where the "0" can be replaced with any index of cameras known to + the system. + + To use this plugin, the ``imageio-ffmpeg`` library should be installed + (e.g. via pip). For most platforms this includes the ffmpeg executable. + One can use the ``IMAGEIO_FFMPEG_EXE`` environment variable to force + using a specific ffmpeg executable. + + When reading from a video, the number of available frames is hard/expensive + to calculate, which is why its set to inf by default, indicating + "stream mode". 
To get the number of frames before having read them all, + you can use the ``reader.count_frames()`` method (the reader will then use + ``imageio_ffmpeg.count_frames_and_secs()`` to get the exact number of + frames, note that this operation can take a few seconds on large files). + Alternatively, the number of frames can be estimated from the fps and + duration in the meta data (though these values themselves are not always + present/reliable). + + Parameters for reading + ---------------------- + fps : scalar + The number of frames per second to read the data at. Default None (i.e. + read at the file's own fps). One can use this for files with a + variable fps, or in cases where imageio is unable to correctly detect + the fps. + loop : bool + If True, the video will rewind as soon as a frame is requested + beyond the last frame. Otherwise, IndexError is raised. Default False. + Setting this to True will internally call ``count_frames()``, + and set the reader's length to that value instead of inf. + size : str | tuple + The frame size (i.e. resolution) to read the images, e.g. + (100, 100) or "640x480". For camera streams, this allows setting + the capture resolution. For normal video data, ffmpeg will + rescale the data. + dtype : str | type + The dtype for the output arrays. Determines the bit-depth that + is requested from ffmpeg. Supported dtypes: uint8, uint16. + Default: uint8. + pixelformat : str + The pixel format for the camera to use (e.g. "yuyv422" or + "gray"). The camera needs to support the format in order for + this to take effect. Note that the images produced by this + reader are always RGB. + input_params : list + List additional arguments to ffmpeg for input file options. + (Can also be provided as ``ffmpeg_params`` for backwards compatibility) + Example ffmpeg arguments to use aggressive error handling: + ['-err_detect', 'aggressive'] + output_params : list + List additional arguments to ffmpeg for output file options (i.e. 
the + stream being read by imageio). + print_info : bool + Print information about the video file as reported by ffmpeg. + + Parameters for saving + --------------------- + fps : scalar + The number of frames per second. Default 10. + codec : str + the video codec to use. Default 'libx264', which represents the + widely available mpeg4. Except when saving .wmv files, then the + defaults is 'msmpeg4' which is more commonly supported for windows + quality : float | None + Video output quality. Default is 5. Uses variable bit rate. Highest + quality is 10, lowest is 0. Set to None to prevent variable bitrate + flags to FFMPEG so you can manually specify them using output_params + instead. Specifying a fixed bitrate using 'bitrate' disables this + parameter. + bitrate : int | None + Set a constant bitrate for the video encoding. Default is None causing + 'quality' parameter to be used instead. Better quality videos with + smaller file sizes will result from using the 'quality' variable + bitrate parameter rather than specifiying a fixed bitrate with this + parameter. + pixelformat: str + The output video pixel format. Default is 'yuv420p' which most widely + supported by video players. + input_params : list + List additional arguments to ffmpeg for input file options (i.e. the + stream that imageio provides). + output_params : list + List additional arguments to ffmpeg for output file options. + (Can also be provided as ``ffmpeg_params`` for backwards compatibility) + Example ffmpeg arguments to use only intra frames and set aspect ratio: + ['-intra', '-aspect', '16:9'] + ffmpeg_log_level: str + Sets ffmpeg output log level. Default is "warning". + Values can be "quiet", "panic", "fatal", "error", "warning", "info" + "verbose", or "debug". Also prints the FFMPEG command being used by + imageio if "info", "verbose", or "debug". + macro_block_size: int + Size constraint for video. Width and height, must be divisible by this + number. 
If not divisible by this number imageio will tell ffmpeg to + scale the image up to the next closest size + divisible by this number. Most codecs are compatible with a macroblock + size of 16 (default), some can go smaller (4, 8). To disable this + automatic feature set it to None or 1, however be warned many players + can't decode videos that are odd in size and some codecs will produce + poor results or fail. See https://en.wikipedia.org/wiki/Macroblock. + """ + + def _can_read(self, request): + if request.mode[1] not in "I?": + return False + + # Read from video stream? + # Note that we could write the _video flag here, but a user might + # select this format explicitly (and this code is not run) + if request.filename in ["" % i for i in range(10)]: + return True + + # Read from file that we know? + if request.extension in self.extensions: + return True + + def _can_write(self, request): + if request.mode[1] in (self.modes + "?"): + if request.extension in self.extensions: + return True + + # -- + + class Reader(Format.Reader): + + _frame_catcher = None + _read_gen = None + + def _get_cam_inputname(self, index): + if sys.platform.startswith("linux"): + return "/dev/" + self.request._video[1:-1] + + elif sys.platform.startswith("win"): + # Ask ffmpeg for list of dshow device names + ffmpeg_api = _get_ffmpeg_api() + cmd = [ + ffmpeg_api.get_ffmpeg_exe(), + "-list_devices", + "true", + "-f", + CAM_FORMAT, + "-i", + "dummy", + ] + # Set `shell=True` in sp.Popen to prevent popup of a command line + # window in frozen applications. Note: this would be a security + # vulnerability if user-input goes into the cmd. + proc = sp.Popen( + cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, shell=True + ) + proc.stdout.readline() + proc.terminate() + infos = proc.stderr.read().decode("utf-8", errors="ignore") + # Return device name at index + try: + name = parse_device_names(infos)[index] + except IndexError: + raise IndexError("No ffdshow camera at index %i." 
% index) + return "video=%s" % name + + elif sys.platform.startswith("darwin"): + # Appears that newer ffmpeg builds don't support -list-devices + # on OS X. But you can directly open the camera by index. + name = str(index) + return name + + else: # pragma: no cover + return "??" + + def _open( + self, + loop=False, + size=None, + dtype=None, + pixelformat=None, + print_info=False, + ffmpeg_params=None, + input_params=None, + output_params=None, + fps=None, + ): + # Get generator functions + self._ffmpeg_api = _get_ffmpeg_api() + # Process input args + self._arg_loop = bool(loop) + if size is None: + self._arg_size = None + elif isinstance(size, tuple): + self._arg_size = "%ix%i" % size + elif isinstance(size, str) and "x" in size: + self._arg_size = size + else: + raise ValueError('FFMPEG size must be tuple of "NxM"') + if pixelformat is None: + pass + elif not isinstance(pixelformat, str): + raise ValueError("FFMPEG pixelformat must be str") + if dtype is None: + self._dtype = np.dtype("uint8") + else: + self._dtype = np.dtype(dtype) + allowed_dtypes = ["uint8", "uint16"] + if self._dtype.name not in allowed_dtypes: + raise ValueError( + "dtype must be one of: {}".format(", ".join(allowed_dtypes)) + ) + self._arg_pixelformat = pixelformat + self._arg_input_params = input_params or [] + self._arg_output_params = output_params or [] + self._arg_input_params += ffmpeg_params or [] # backward compat + # Write "_video"_arg - indicating webcam support + self.request._video = None + if self.request.filename in ["" % i for i in range(10)]: + self.request._video = self.request.filename + # Specify input framerate? 
+ if self.request._video: + if "-framerate" not in str(self._arg_input_params): + self._arg_input_params.extend(["-framerate", str(float(fps or 30))]) + # Get local filename + if self.request._video: + index = int(self.request._video[-2]) + self._filename = self._get_cam_inputname(index) + else: + self._filename = self.request.get_local_filename() + # When passed to ffmpeg on command line, carets need to be escaped. + self._filename = self._filename.replace("^", "^^") + # Determine pixel format and depth + self._depth = 3 + if self._dtype.name == "uint8": + self._pix_fmt = "rgb24" + self._bytes_per_channel = 1 + else: + self._pix_fmt = "rgb48le" + self._bytes_per_channel = 2 + # Initialize parameters + self._pos = -1 + self._meta = {"plugin": "ffmpeg"} + self._lastread = None + + # Calculating this from fps and duration is not accurate, + # and calculating it exactly with ffmpeg_api.count_frames_and_secs + # takes too long to do for each video. But we need it for looping. + self._nframes = float("inf") + if self._arg_loop and not self.request._video: + self._nframes = self.count_frames() + self._meta["nframes"] = self._nframes + + # Start ffmpeg subprocess and get meta information + self._initialize() + + # For cameras, create thread that keeps reading the images + if self.request._video: + self._frame_catcher = FrameCatcher(self._read_gen) + + # For reference - but disabled, because it is inaccurate + # if self._meta["nframes"] == float("inf"): + # if self._meta.get("fps", 0) > 0: + # if self._meta.get("duration", 0) > 0: + # n = round(self._meta["duration"] * self._meta["fps"]) + # self._meta["nframes"] = int(n) + + def _close(self): + # First close the frame catcher, because we cannot close the gen + # if the frame catcher thread is using it + if self._frame_catcher is not None: + self._frame_catcher.stop_me() + self._frame_catcher = None + if self._read_gen is not None: + self._read_gen.close() + self._read_gen = None + + def count_frames(self): + """ Count the 
number of frames. Note that this can take a few + seconds for large files. Also note that it counts the number + of frames in the original video and does not take a given fps + into account. + """ + # This would have been nice, but this does not work :( + # oargs = [] + # if self.request.kwargs.get("fps", None): + # fps = float(self.request.kwargs["fps"]) + # oargs += ["-r", "%.02f" % fps] + cf = self._ffmpeg_api.count_frames_and_secs + return cf(self._filename)[0] + + def _get_length(self): + return self._nframes # only not inf if loop is True + + def _get_data(self, index): + """ Reads a frame at index. Note for coders: getting an + arbitrary frame in the video with ffmpeg can be painfully + slow if some decoding has to be done. This function tries + to avoid fectching arbitrary frames whenever possible, by + moving between adjacent frames. """ + # Modulo index (for looping) + if self._arg_loop and self._nframes < float("inf"): + index %= self._nframes + + if index == self._pos: + return self._lastread, dict(new=False) + elif index < 0: + raise IndexError("Frame index must be >= 0") + elif index >= self._nframes: + raise IndexError("Reached end of video") + else: + if (index < self._pos) or (index > self._pos + 100): + self._initialize(index) + else: + self._skip_frames(index - self._pos - 1) + result, is_new = self._read_frame() + self._pos = index + return result, dict(new=is_new) + + def _get_meta_data(self, index): + return self._meta + + def _initialize(self, index=0): + + # Close the current generator, and thereby terminate its subprocess + if self._read_gen is not None: + self._read_gen.close() + + iargs = [] + oargs = [] + + # Create input args + iargs += self._arg_input_params + if self.request._video: + iargs += ["-f", CAM_FORMAT] + if self._arg_pixelformat: + iargs += ["-pix_fmt", self._arg_pixelformat] + if self._arg_size: + iargs += ["-s", self._arg_size] + elif index > 0: # re-initialize / seek + # Note: only works if we initialized earlier, and now 
have meta + # Some info here: https://trac.ffmpeg.org/wiki/Seeking + # There are two ways to seek, one before -i (input_params) and + # after (output_params). The former is fast, because it uses + # keyframes, the latter is slow but accurate. According to + # the article above, the fast method should also be accurate + # from ffmpeg version 2.1, however in version 4.1 our tests + # start failing again. Not sure why, but we can solve this + # by combining slow and fast. Seek the long stretch using + # the fast method, and seek the last 10s the slow way. + starttime = index / self._meta["fps"] + seek_slow = min(10, starttime) + seek_fast = starttime - seek_slow + # We used to have this epsilon earlier, when we did not use + # the slow seek. I don't think we need it anymore. + # epsilon = -1 / self._meta["fps"] * 0.1 + iargs += ["-ss", "%.06f" % (seek_fast)] + oargs += ["-ss", "%.06f" % (seek_slow)] + + # Output args, for writing to pipe + if self._arg_size: + oargs += ["-s", self._arg_size] + if self.request.kwargs.get("fps", None): + fps = float(self.request.kwargs["fps"]) + oargs += ["-r", "%.02f" % fps] + oargs += self._arg_output_params + + # Get pixelformat and bytes per pixel + pix_fmt = self._pix_fmt + bpp = self._depth * self._bytes_per_channel + + # Create generator + rf = self._ffmpeg_api.read_frames + self._read_gen = rf( + self._filename, pix_fmt, bpp, input_params=iargs, output_params=oargs + ) + + # Read meta data. This start the generator (and ffmpeg subprocess) + if self.request._video: + # With cameras, catch error and turn into IndexError + try: + meta = self._read_gen.__next__() + except IOError as err: + err_text = str(err) + if "darwin" in sys.platform: + if "Unknown input format: 'avfoundation'" in err_text: + err_text += ( + "Try installing FFMPEG using " + "home brew to get a version with " + "support for cameras." 
+ ) + raise IndexError( + "No (working) camera at {}.\n\n{}".format( + self.request._video, err_text + ) + ) + else: + self._meta.update(meta) + elif index == 0: + self._meta.update(self._read_gen.__next__()) + else: + self._read_gen.__next__() # we already have meta data + + def _skip_frames(self, n=1): + """ Reads and throws away n frames """ + for i in range(n): + self._read_gen.__next__() + self._pos += n + + def _read_frame(self): + # Read and convert to numpy array + w, h = self._meta["size"] + framesize = w * h * self._depth * self._bytes_per_channel + # t0 = time.time() + + # Read frame + if self._frame_catcher: # pragma: no cover - camera thing + s, is_new = self._frame_catcher.get_frame() + else: + s = self._read_gen.__next__() + is_new = True + + # Check + if len(s) != framesize: + raise RuntimeError( + "Frame is %i bytes, but expected %i." % (len(s), framesize) + ) + + result = np.frombuffer(s, dtype=self._dtype).copy() + result = result.reshape((h, w, self._depth)) + # t1 = time.time() + # print('etime', t1-t0) + + # Store and return + self._lastread = result + return result, is_new + + # -- + + class Writer(Format.Writer): + + _write_gen = None + + def _open( + self, + fps=10, + codec="libx264", + bitrate=None, + pixelformat="yuv420p", + ffmpeg_params=None, + input_params=None, + output_params=None, + ffmpeg_log_level="quiet", + quality=5, + macro_block_size=16, + ): + self._ffmpeg_api = _get_ffmpeg_api() + self._filename = self.request.get_local_filename() + self._pix_fmt = None + self._depth = None + self._size = None + + def _close(self): + if self._write_gen is not None: + self._write_gen.close() + self._write_gen = None + + def _append_data(self, im, meta): + + # Get props of image + h, w = im.shape[:2] + size = w, h + depth = 1 if im.ndim == 2 else im.shape[2] + + # Ensure that image is in uint8 + im = image_as_uint(im, bitdepth=8) + # To be written efficiently, ie. 
without creating an immutable + # buffer, by calling im.tostring() the array must be contiguous. + if not im.flags.c_contiguous: + # checkign the flag is a micro optimization. + # the image will be a numpy subclass. See discussion + # https://github.com/numpy/numpy/issues/11804 + im = np.ascontiguousarray(im) + + # Set size and initialize if not initialized yet + if self._size is None: + map = {1: "gray", 2: "gray8a", 3: "rgb24", 4: "rgba"} + self._pix_fmt = map.get(depth, None) + if self._pix_fmt is None: + raise ValueError("Image must have 1, 2, 3 or 4 channels") + self._size = size + self._depth = depth + self._initialize() + + # Check size of image + if size != self._size: + raise ValueError("All images in a movie should have same size") + if depth != self._depth: + raise ValueError( + "All images in a movie should have same " "number of channels" + ) + + assert self._write_gen is not None # Check status + + # Write. Yes, we can send the data in as a numpy array + self._write_gen.send(im) + + def set_meta_data(self, meta): + raise RuntimeError( + "The ffmpeg format does not support setting " "meta data." 
+ ) + + def _initialize(self): + + # Close existing generator + if self._write_gen is not None: + self._write_gen.close() + + # Get parameters + # Use None to let imageio-ffmpeg (or ffmpeg) select good results + fps = self.request.kwargs.get("fps", 10) + codec = self.request.kwargs.get("codec", None) + bitrate = self.request.kwargs.get("bitrate", None) + quality = self.request.kwargs.get("quality", None) + input_params = self.request.kwargs.get("input_params") or [] + output_params = self.request.kwargs.get("output_params") or [] + output_params += self.request.kwargs.get("ffmpeg_params") or [] + pixelformat = self.request.kwargs.get("pixelformat", None) + macro_block_size = self.request.kwargs.get("macro_block_size", 16) + ffmpeg_log_level = self.request.kwargs.get("ffmpeg_log_level", None) + + macro_block_size = macro_block_size or 1 # None -> 1 + + # Create generator + self._write_gen = self._ffmpeg_api.write_frames( + self._filename, + self._size, + pix_fmt_in=self._pix_fmt, + pix_fmt_out=pixelformat, + fps=fps, + quality=quality, + bitrate=bitrate, + codec=codec, + macro_block_size=macro_block_size, + ffmpeg_log_level=ffmpeg_log_level, + input_params=input_params, + output_params=output_params, + ) + + # Seed the generator (this is where the ffmpeg subprocess starts) + self._write_gen.send(None) + + +class FrameCatcher(threading.Thread): + """ Thread to keep reading the frame data from stdout. This is + useful when streaming from a webcam. Otherwise, if the user code + does not grab frames fast enough, the buffer will fill up, leading + to lag, and ffmpeg can also stall (experienced on Linux). The + get_frame() method always returns the last available image. 
+ """ + + def __init__(self, gen): + self._gen = gen + self._frame = None + self._frame_is_new = False + self._lock = threading.RLock() + threading.Thread.__init__(self) + self.setDaemon(True) # do not let this thread hold up Python shutdown + self._should_stop = False + self.start() + + def stop_me(self): + self._should_stop = True + while self.is_alive(): + time.sleep(0.001) + + def get_frame(self): + while self._frame is None: # pragma: no cover - an init thing + time.sleep(0.001) + with self._lock: + is_new = self._frame_is_new + self._frame_is_new = False # reset + return self._frame, is_new + + def run(self): + # This runs in the worker thread + try: + while not self._should_stop: + time.sleep(0) # give control to other threads + frame = self._gen.__next__() + with self._lock: + self._frame = frame + self._frame_is_new = True + except (StopIteration, EOFError): + pass + + +def parse_device_names(ffmpeg_output): + """ Parse the output of the ffmpeg -list-devices command""" + # Collect device names - get [friendly_name, alt_name] of each + device_names = [] + in_video_devices = False + for line in ffmpeg_output.splitlines(): + if line.startswith("[dshow"): + logger.debug(line) + line = line.split("]", 1)[1].strip() + if in_video_devices and line.startswith('"'): + friendly_name = line[1:-1] + device_names.append([friendly_name, ""]) + elif in_video_devices and line.lower().startswith("alternative name"): + alt_name = line.split(" name ", 1)[1].strip()[1:-1] + if sys.platform.startswith("win"): + alt_name = alt_name.replace("&", "^&") # Tested to work + else: + alt_name = alt_name.replace("&", "\\&") # Does this work? 
+ device_names[-1][-1] = alt_name + elif "video devices" in line: + in_video_devices = True + elif "devices" in line: + # set False for subsequent "devices" sections + in_video_devices = False + # Post-process, see #441 + # prefer friendly names, use alt name if two cams have same friendly name + device_names2 = [] + for friendly_name, alt_name in device_names: + if friendly_name not in device_names2: + device_names2.append(friendly_name) + elif alt_name: + device_names2.append(alt_name) + else: + device_names2.append(friendly_name) # duplicate, but not much we can do + return device_names2 + + +# Register. You register an *instance* of a Format class. +format = FfmpegFormat( + "ffmpeg", + "Many video formats and cameras (via ffmpeg)", + ".mov .avi .mpg .mpeg .mp4 .mkv .wmv", + "I", +) +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/plugins/fits.py b/venv/Lib/site-packages/imageio/plugins/fits.py new file mode 100644 index 000000000..991a05852 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/fits.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin for reading FITS files. +""" + +from .. import formats +from ..core import Format + +_fits = None # lazily loaded + + +def load_lib(): + global _fits + try: + from astropy.io import fits as _fits + except ImportError: + raise ImportError( + "The FITS format relies on the astropy package." + "Please refer to http://www.astropy.org/ " + "for further instructions." + ) + return _fits + + +class FitsFormat(Format): + + """ Flexible Image Transport System (FITS) is an open standard defining a + digital file format useful for storage, transmission and processing of + scientific and other images. FITS is the most commonly used digital + file format in astronomy. + + This format requires the ``astropy`` package. 
+ + Parameters for reading + ---------------------- + cache : bool + If the file name is a URL, `~astropy.utils.data.download_file` is used + to open the file. This specifies whether or not to save the file + locally in Astropy's download cache (default: `True`). + uint : bool + Interpret signed integer data where ``BZERO`` is the + central value and ``BSCALE == 1`` as unsigned integer + data. For example, ``int16`` data with ``BZERO = 32768`` + and ``BSCALE = 1`` would be treated as ``uint16`` data. + + Note, for backward compatibility, the kwarg **uint16** may + be used instead. The kwarg was renamed when support was + added for integers of any size. + ignore_missing_end : bool + Do not issue an exception when opening a file that is + missing an ``END`` card in the last header. + checksum : bool or str + If `True`, verifies that both ``DATASUM`` and + ``CHECKSUM`` card values (when present in the HDU header) + match the header and data of all HDU's in the file. Updates to a + file that already has a checksum will preserve and update the + existing checksums unless this argument is given a value of + 'remove', in which case the CHECKSUM and DATASUM values are not + checked, and are removed when saving changes to the file. + disable_image_compression : bool, optional + If `True`, treats compressed image HDU's like normal + binary table HDU's. + do_not_scale_image_data : bool + If `True`, image data is not scaled using BSCALE/BZERO values + when read. + ignore_blank : bool + If `True`, the BLANK keyword is ignored if present. + scale_back : bool + If `True`, when saving changes to a file that contained scaled + image data, restore the data to the original type and reapply the + original BSCALE/BZERO values. This could lead to loss of accuracy + if scaling back to integer values after performing floating point + operations on the data. + """ + + def _can_read(self, request): + # We return True if ext matches, because this is the only plugin + # that can. 
If astropy is not installed, a useful error follows. + return request.extension in self.extensions + + def _can_write(self, request): + # No write support + return False + + # -- reader + + class Reader(Format.Reader): + def _open(self, cache=False, **kwargs): + if not _fits: + load_lib() + hdulist = _fits.open(self.request.get_file(), cache=cache, **kwargs) + + self._index = [] + allowed_hdu_types = (_fits.ImageHDU, _fits.PrimaryHDU, _fits.CompImageHDU) + for n, hdu in zip(range(len(hdulist)), hdulist): + if isinstance(hdu, allowed_hdu_types): + # Ignore (primary) header units with no data (use '.size' + # rather than '.data' to avoid actually loading the image): + if hdu.size > 0: + self._index.append(n) + self._hdulist = hdulist + + def _close(self): + self._hdulist.close() + + def _get_length(self): + return len(self._index) + + def _get_data(self, index): + # Get data + if index < 0 or index >= len(self._index): + raise IndexError("Index out of range while reading from fits") + im = self._hdulist[self._index[index]].data + # Return array and empty meta data + return im, {} + + def _get_meta_data(self, index): + # Get the meta data for the given index + raise RuntimeError("The fits format does not support meta data.") + + +# Register +format = FitsFormat( + "fits", "Flexible Image Transport System (FITS) format", "fits fit fts fz", "iIvV" +) +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/plugins/freeimage.py b/venv/Lib/site-packages/imageio/plugins/freeimage.py new file mode 100644 index 000000000..906d35011 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/freeimage.py @@ -0,0 +1,513 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin that wraps the freeimage lib. The wrapper for Freeimage is +part of the core of imageio, but it's functionality is exposed via +the plugin system (therefore this plugin is very thin). +""" + +import numpy as np + +from .. 
import formats +from ..core import Format, image_as_uint +from ._freeimage import fi, download, IO_FLAGS, FNAME_PER_PLATFORM # noqa + + +# todo: support files with only meta data + + +class FreeimageFormat(Format): + """ This is the default format used for FreeImage. Each Freeimage + format has the 'flags' keyword argument. See the Freeimage + documentation for more information. + + The freeimage plugin requires a `freeimage` binary. If this binary + not available on the system, it can be downloaded manually from + by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + flags : int + A freeimage-specific option. In most cases we provide explicit + parameters for influencing image reading. + + Parameters for saving + ---------------------- + flags : int + A freeimage-specific option. In most cases we provide explicit + parameters for influencing image saving. 
+ """ + + _modes = "i" + + @property + def fif(self): + return self._fif # Set when format is created + + def _can_read(self, request): + # Ask freeimage if it can read it, maybe ext missing + if fi.has_lib(): + if not hasattr(request, "_fif"): + try: + request._fif = fi.getFIF(request.filename, "r", request.firstbytes) + except Exception: # pragma: no cover + request._fif = -1 + if request._fif == self.fif: + return True + + def _can_write(self, request): + # Ask freeimage, because we are not aware of all formats + if fi.has_lib(): + if not hasattr(request, "_fif"): + try: + request._fif = fi.getFIF(request.filename, "w") + except Exception: # pragma: no cover + request._fif = -1 + if request._fif is self.fif: + return True + + # -- + + class Reader(Format.Reader): + def _get_length(self): + return 1 + + def _open(self, flags=0): + self._bm = fi.create_bitmap(self.request.filename, self.format.fif, flags) + self._bm.load_from_filename(self.request.get_local_filename()) + + def _close(self): + self._bm.close() + + def _get_data(self, index): + if index != 0: + raise IndexError("This format only supports singleton images.") + return self._bm.get_image_data(), self._bm.get_meta_data() + + def _get_meta_data(self, index): + if not (index is None or index == 0): + raise IndexError() + return self._bm.get_meta_data() + + # -- + + class Writer(Format.Writer): + def _open(self, flags=0): + self._flags = flags # Store flags for later use + self._bm = None + self._is_set = False # To prevent appending more than one image + self._meta = {} + + def _close(self): + # Set global meta data + self._bm.set_meta_data(self._meta) + # Write and close + self._bm.save_to_filename(self.request.get_local_filename()) + self._bm.close() + + def _append_data(self, im, meta): + # Check if set + if not self._is_set: + self._is_set = True + else: + raise RuntimeError( + "Singleton image; " "can only append image data once." 
+ ) + # Pop unit dimension for grayscale images + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + # Lazy instantaion of the bitmap, we need image data + if self._bm is None: + self._bm = fi.create_bitmap( + self.request.filename, self.format.fif, self._flags + ) + self._bm.allocate(im) + # Set data + self._bm.set_image_data(im) + # There is no distinction between global and per-image meta data + # for singleton images + self._meta = meta + + def _set_meta_data(self, meta): + self._meta = meta + + +## Special plugins + +# todo: there is also FIF_LOAD_NOPIXELS, +# but perhaps that should be used with get_meta_data. + + +class FreeimageBmpFormat(FreeimageFormat): + """ A BMP format based on the Freeimage library. + + This format supports grayscale, RGB and RGBA images. + + The freeimage plugin requires a `freeimage` binary. If this binary + not available on the system, it can be downloaded manually from + by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for saving + --------------------- + compression : bool + Whether to compress the bitmap using RLE when saving. Default False. + It seems this does not always work, but who cares, you should use + PNG anyway. + + """ + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0, compression=False): + # Build flags from kwargs + flags = int(flags) + if compression: + flags |= IO_FLAGS.BMP_SAVE_RLE + else: + flags |= IO_FLAGS.BMP_DEFAULT + # Act as usual, but with modified flags + return FreeimageFormat.Writer._open(self, flags) + + def _append_data(self, im, meta): + im = image_as_uint(im, bitdepth=8) + return FreeimageFormat.Writer._append_data(self, im, meta) + + +class FreeimagePngFormat(FreeimageFormat): + """ A PNG format based on the Freeimage library. + + This format supports grayscale, RGB and RGBA images. + + The freeimage plugin requires a `freeimage` binary. 
If this binary + not available on the system, it can be downloaded manually from + by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + ignoregamma : bool + Avoid gamma correction. Default True. + + Parameters for saving + --------------------- + compression : {0, 1, 6, 9} + The compression factor. Higher factors result in more + compression at the cost of speed. Note that PNG compression is + always lossless. Default 9. + quantize : int + If specified, turn the given RGB or RGBA image in a paletted image + for more efficient storage. The value should be between 2 and 256. + If the value of 0 the image is not quantized. + interlaced : bool + Save using Adam7 interlacing. Default False. + """ + + class Reader(FreeimageFormat.Reader): + def _open(self, flags=0, ignoregamma=True): + # Build flags from kwargs + flags = int(flags) + if ignoregamma: + flags |= IO_FLAGS.PNG_IGNOREGAMMA + # Enter as usual, with modified flags + return FreeimageFormat.Reader._open(self, flags) + + # -- + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0, compression=9, quantize=0, interlaced=False): + compression_map = { + 0: IO_FLAGS.PNG_Z_NO_COMPRESSION, + 1: IO_FLAGS.PNG_Z_BEST_SPEED, + 6: IO_FLAGS.PNG_Z_DEFAULT_COMPRESSION, + 9: IO_FLAGS.PNG_Z_BEST_COMPRESSION, + } + # Build flags from kwargs + flags = int(flags) + if interlaced: + flags |= IO_FLAGS.PNG_INTERLACED + try: + flags |= compression_map[compression] + except KeyError: + raise ValueError("Png compression must be 0, 1, 6, or 9.") + # Act as usual, but with modified flags + return FreeimageFormat.Writer._open(self, flags) + + def _append_data(self, im, meta): + if str(im.dtype) == "uint16": + im = image_as_uint(im, bitdepth=16) + else: + im = image_as_uint(im, bitdepth=8) + FreeimageFormat.Writer._append_data(self, im, meta) + # Quantize? 
+ q = int(self.request.kwargs.get("quantize", False)) + if not q: + pass + elif not (im.ndim == 3 and im.shape[-1] == 3): + raise ValueError("Can only quantize RGB images") + elif q < 2 or q > 256: + raise ValueError("PNG quantize param must be 2..256") + else: + bm = self._bm.quantize(0, q) + self._bm.close() + self._bm = bm + + +class FreeimageJpegFormat(FreeimageFormat): + """ A JPEG format based on the Freeimage library. + + This format supports grayscale and RGB images. + + The freeimage plugin requires a `freeimage` binary. If this binary + not available on the system, it can be downloaded manually from + by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + exifrotate : bool + Automatically rotate the image according to the exif flag. + Default True. If 2 is given, do the rotation in Python instead + of freeimage. + quickread : bool + Read the image more quickly, at the expense of quality. + Default False. + + Parameters for saving + --------------------- + quality : scalar + The compression factor of the saved image (1..100), higher + numbers result in higher quality but larger file size. Default 75. + progressive : bool + Save as a progressive JPEG file (e.g. for images on the web). + Default False. + optimize : bool + On saving, compute optimal Huffman coding tables (can reduce a + few percent of file size). Default False. + baseline : bool + Save basic JPEG, without metadata or any markers. Default False. 
+ + """ + + class Reader(FreeimageFormat.Reader): + def _open(self, flags=0, exifrotate=True, quickread=False): + # Build flags from kwargs + flags = int(flags) + if exifrotate and exifrotate != 2: + flags |= IO_FLAGS.JPEG_EXIFROTATE + if not quickread: + flags |= IO_FLAGS.JPEG_ACCURATE + # Enter as usual, with modified flags + return FreeimageFormat.Reader._open(self, flags) + + def _get_data(self, index): + im, meta = FreeimageFormat.Reader._get_data(self, index) + im = self._rotate(im, meta) + return im, meta + + def _rotate(self, im, meta): + """ Use Orientation information from EXIF meta data to + orient the image correctly. Freeimage is also supposed to + support that, and I am pretty sure it once did, but now it + does not, so let's just do it in Python. + Edit: and now it works again, just leave in place as a fallback. + """ + if self.request.kwargs.get("exifrotate", None) == 2: + try: + ori = meta["EXIF_MAIN"]["Orientation"] + except KeyError: # pragma: no cover + pass # Orientation not available + else: # pragma: no cover - we cannot touch all cases + # www.impulseadventure.com/photo/exif-orientation.html + if ori in [1, 2]: + pass + if ori in [3, 4]: + im = np.rot90(im, 2) + if ori in [5, 6]: + im = np.rot90(im, 3) + if ori in [7, 8]: + im = np.rot90(im) + if ori in [2, 4, 5, 7]: # Flipped cases (rare) + im = np.fliplr(im) + return im + + # -- + + class Writer(FreeimageFormat.Writer): + def _open( + self, flags=0, quality=75, progressive=False, optimize=False, baseline=False + ): + # Test quality + quality = int(quality) + if quality < 1 or quality > 100: + raise ValueError("JPEG quality should be between 1 and 100.") + # Build flags from kwargs + flags = int(flags) + flags |= quality + if progressive: + flags |= IO_FLAGS.JPEG_PROGRESSIVE + if optimize: + flags |= IO_FLAGS.JPEG_OPTIMIZE + if baseline: + flags |= IO_FLAGS.JPEG_BASELINE + # Act as usual, but with modified flags + return FreeimageFormat.Writer._open(self, flags) + + def _append_data(self, 
im, meta): + if im.ndim == 3 and im.shape[-1] == 4: + raise IOError("JPEG does not support alpha channel.") + im = image_as_uint(im, bitdepth=8) + return FreeimageFormat.Writer._append_data(self, im, meta) + + +class FreeimagePnmFormat(FreeimageFormat): + """ A PNM format based on the Freeimage library. + + This format supports single bit (PBM), grayscale (PGM) and RGB (PPM) + images, even with ASCII or binary coding. + + The freeimage plugin requires a `freeimage` binary. If this binary + not available on the system, it can be downloaded manually from + by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for saving + --------------------- + use_ascii : bool + Save with ASCII coding. Default True. + """ + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0, use_ascii=True): + # Build flags from kwargs + flags = int(flags) + if use_ascii: + flags |= IO_FLAGS.PNM_SAVE_ASCII + # Act as usual, but with modified flags + return FreeimageFormat.Writer._open(self, flags) + + +## Create the formats + +SPECIAL_CLASSES = { + "jpeg": FreeimageJpegFormat, + "png": FreeimagePngFormat, + "bmp": FreeimageBmpFormat, + "ppm": FreeimagePnmFormat, + "ppmraw": FreeimagePnmFormat, + "gif": None, # defined in freeimagemulti + "ico": None, # defined in freeimagemulti + "mng": None, # defined in freeimagemulti +} + +# rename TIFF to make way for the tiffile plugin +NAME_MAP = {"TIFF": "FI_TIFF"} + +# This is a dump of supported FreeImage formats on Linux fi verion 3.16.0 +# > imageio.plugins.freeimage.create_freeimage_formats() +# > for i in sorted(imageio.plugins.freeimage.fiformats): print('%r,' % (i, )) +fiformats = [ + ("BMP", 0, "Windows or OS/2 Bitmap", "bmp"), + ("CUT", 21, "Dr. 
Halo", "cut"), + ("DDS", 24, "DirectX Surface", "dds"), + ("EXR", 29, "ILM OpenEXR", "exr"), + ("G3", 27, "Raw fax format CCITT G.3", "g3"), + ("GIF", 25, "Graphics Interchange Format", "gif"), + ("HDR", 26, "High Dynamic Range Image", "hdr"), + ("ICO", 1, "Windows Icon", "ico"), + ("IFF", 5, "IFF Interleaved Bitmap", "iff,lbm"), + ("J2K", 30, "JPEG-2000 codestream", "j2k,j2c"), + ("JNG", 3, "JPEG Network Graphics", "jng"), + ("JP2", 31, "JPEG-2000 File Format", "jp2"), + ("JPEG", 2, "JPEG - JFIF Compliant", "jpg,jif,jpeg,jpe"), + ("JPEG-XR", 36, "JPEG XR image format", "jxr,wdp,hdp"), + ("KOALA", 4, "C64 Koala Graphics", "koa"), + ("MNG", 6, "Multiple-image Network Graphics", "mng"), + ("PBM", 7, "Portable Bitmap (ASCII)", "pbm"), + ("PBMRAW", 8, "Portable Bitmap (RAW)", "pbm"), + ("PCD", 9, "Kodak PhotoCD", "pcd"), + ("PCX", 10, "Zsoft Paintbrush", "pcx"), + ("PFM", 32, "Portable floatmap", "pfm"), + ("PGM", 11, "Portable Greymap (ASCII)", "pgm"), + ("PGMRAW", 12, "Portable Greymap (RAW)", "pgm"), + ("PICT", 33, "Macintosh PICT", "pct,pict,pic"), + ("PNG", 13, "Portable Network Graphics", "png"), + ("PPM", 14, "Portable Pixelmap (ASCII)", "ppm"), + ("PPMRAW", 15, "Portable Pixelmap (RAW)", "ppm"), + ("PSD", 20, "Adobe Photoshop", "psd"), + ("RAS", 16, "Sun Raster Image", "ras"), + ( + "RAW", + 34, + "RAW camera image", + "3fr,arw,bay,bmq,cap,cine,cr2,crw,cs1,dc2," + "dcr,drf,dsc,dng,erf,fff,ia,iiq,k25,kc2,kdc,mdc,mef,mos,mrw,nef,nrw,orf," + "pef,ptx,pxn,qtk,raf,raw,rdc,rw2,rwl,rwz,sr2,srf,srw,sti", + ), + ("SGI", 28, "SGI Image Format", "sgi,rgb,rgba,bw"), + ("TARGA", 17, "Truevision Targa", "tga,targa"), + ("TIFF", 18, "Tagged Image File Format", "tif,tiff"), + ("WBMP", 19, "Wireless Bitmap", "wap,wbmp,wbm"), + ("WebP", 35, "Google WebP image format", "webp"), + ("XBM", 22, "X11 Bitmap Format", "xbm"), + ("XPM", 23, "X11 Pixmap Format", "xpm"), +] + + +def _create_predefined_freeimage_formats(): + + for name, i, des, ext in fiformats: + # name = 
NAME_MAP.get(name, name) + # Get class for format + FormatClass = SPECIAL_CLASSES.get(name.lower(), FreeimageFormat) + if FormatClass: + # Create Format and add + format = FormatClass(name + "-FI", des, ext, FormatClass._modes) + format._fif = i + formats.add_format(format) + + +def create_freeimage_formats(): + """ By default, imageio registers a list of predefined formats + that freeimage can handle. If your version of imageio can handle + more formats, you can call this function to register them. + """ + fiformats[:] = [] + + # Freeimage available? + if fi is None: # pragma: no cover + return + + # Init + lib = fi._lib + + # Create formats + for i in range(lib.FreeImage_GetFIFCount()): + if lib.FreeImage_IsPluginEnabled(i): + # Get info + name = lib.FreeImage_GetFormatFromFIF(i).decode("ascii") + des = lib.FreeImage_GetFIFDescription(i).decode("ascii") + ext = lib.FreeImage_GetFIFExtensionList(i).decode("ascii") + fiformats.append((name, i, des, ext)) + # name = NAME_MAP.get(name, name) + # Get class for format + FormatClass = SPECIAL_CLASSES.get(name.lower(), FreeimageFormat) + if not FormatClass: + continue + # Create Format and add + format = FormatClass(name + "-FI", des, ext, FormatClass._modes) + format._fif = i + formats.add_format(format, overwrite=True) + + +_create_predefined_freeimage_formats() diff --git a/venv/Lib/site-packages/imageio/plugins/freeimagemulti.py b/venv/Lib/site-packages/imageio/plugins/freeimagemulti.py new file mode 100644 index 000000000..1e1a8fdc8 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/freeimagemulti.py @@ -0,0 +1,330 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin for multi-image freeimafe formats, like animated GIF and ico. +""" + +import logging +import numpy as np + +from .. 
import formats +from ..core import Format, image_as_uint +from ._freeimage import fi, IO_FLAGS +from .freeimage import FreeimageFormat + +logger = logging.getLogger(__name__) + + +class FreeimageMulti(FreeimageFormat): + """ Base class for freeimage formats that support multiple images. + """ + + _modes = "iI" + _fif = -1 + + class Reader(Format.Reader): + def _open(self, flags=0): + flags = int(flags) + # Create bitmap + self._bm = fi.create_multipage_bitmap( + self.request.filename, self.format.fif, flags + ) + self._bm.load_from_filename(self.request.get_local_filename()) + + def _close(self): + self._bm.close() + + def _get_length(self): + return len(self._bm) + + def _get_data(self, index): + sub = self._bm.get_page(index) + try: + return sub.get_image_data(), sub.get_meta_data() + finally: + sub.close() + + def _get_meta_data(self, index): + index = index or 0 + if index < 0 or index >= len(self._bm): + raise IndexError() + sub = self._bm.get_page(index) + try: + return sub.get_meta_data() + finally: + sub.close() + + # -- + + class Writer(FreeimageFormat.Writer): + def _open(self, flags=0): + # Set flags + self._flags = flags = int(flags) + # Instantiate multi-page bitmap + self._bm = fi.create_multipage_bitmap( + self.request.filename, self.format.fif, flags + ) + self._bm.save_to_filename(self.request.get_local_filename()) + + def _close(self): + # Close bitmap + self._bm.close() + + def _append_data(self, im, meta): + # Prepare data + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + im = image_as_uint(im, bitdepth=8) + # Create sub bitmap + sub1 = fi.create_bitmap(self._bm._filename, self.format.fif) + # Let subclass add data to bitmap, optionally return new + sub2 = self._append_bitmap(im, meta, sub1) + # Add + self._bm.append_bitmap(sub2) + sub2.close() + if sub1 is not sub2: + sub1.close() + + def _append_bitmap(self, im, meta, bitmap): + # Set data + bitmap.allocate(im) + bitmap.set_image_data(im) + bitmap.set_meta_data(meta) + # Return 
that same bitmap + return bitmap + + def _set_meta_data(self, meta): + pass # ignore global meta data + + +class MngFormat(FreeimageMulti): + """ An Mng format based on the Freeimage library. + + Read only. Seems broken. + """ + + _fif = 6 + + def _can_write(self, request): # pragma: no cover + return False + + +class IcoFormat(FreeimageMulti): + """ An ICO format based on the Freeimage library. + + This format supports grayscale, RGB and RGBA images. + + The freeimage plugin requires a `freeimage` binary. If this binary + is not available on the system, it can be downloaded by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + makealpha : bool + Convert to 32-bit and create an alpha channel from the AND- + mask when loading. Default False. Note that this returns wrong + results if the image was already RGBA. + + """ + + _fif = 1 + + class Reader(FreeimageMulti.Reader): + def _open(self, flags=0, makealpha=False): + # Build flags from kwargs + flags = int(flags) + if makealpha: + flags |= IO_FLAGS.ICO_MAKEALPHA + return FreeimageMulti.Reader._open(self, flags) + + +class GifFormat(FreeimageMulti): + """ A format for reading and writing static and animated GIF, based + on the Freeimage library. + + Images read with this format are always RGBA. Currently, + the alpha channel is ignored when saving RGB images with this + format. + + The freeimage plugin requires a `freeimage` binary. If this binary + is not available on the system, it can be downloaded by either + + - the command line script ``imageio_download_bin freeimage`` + - the Python method ``imageio.plugins.freeimage.download()`` + + Parameters for reading + ---------------------- + playback : bool + 'Play' the GIF to generate each frame (as 32bpp) instead of + returning raw frame data when loading. Default True. 
+ + Parameters for saving + --------------------- + loop : int + The number of iterations. Default 0 (meaning loop indefinitely) + duration : {float, list} + The duration (in seconds) of each frame. Either specify one value + that is used for all frames, or one value for each frame. + Note that in the GIF format the duration/delay is expressed in + hundredths of a second, which limits the precision of the duration. + fps : float + The number of frames per second. If duration is not given, the + duration for each frame is set to 1/fps. Default 10. + palettesize : int + The number of colors to quantize the image to. Is rounded to + the nearest power of two. Default 256. + quantizer : {'wu', 'nq'} + The quantization algorithm: + * wu - Wu, Xiaolin, Efficient Statistical Computations for + Optimal Color Quantization + * nq (neuqant) - Dekker A. H., Kohonen neural networks for + optimal color quantization + subrectangles : bool + If True, will try and optimize the GIF by storing only the + rectangular parts of each frame that change with respect to the + previous. Unfortunately, this option seems currently broken + because FreeImage does not handle DisposalMethod correctly. + Default False. 
+ """ + + _fif = 25 + + class Reader(FreeimageMulti.Reader): + def _open(self, flags=0, playback=True): + # Build flags from kwargs + flags = int(flags) + if playback: + flags |= IO_FLAGS.GIF_PLAYBACK + FreeimageMulti.Reader._open(self, flags) + + def _get_data(self, index): + im, meta = FreeimageMulti.Reader._get_data(self, index) + # im = im[:, :, :3] # Drop alpha channel + return im, meta + + # -- writer + + class Writer(FreeimageMulti.Writer): + + # todo: subrectangles + # todo: global palette + + def _open( + self, + flags=0, + loop=0, + duration=None, + fps=10, + palettesize=256, + quantizer="Wu", + subrectangles=False, + ): + # Check palettesize + if palettesize < 2 or palettesize > 256: + raise ValueError("GIF quantize param must be 2..256") + if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]: + palettesize = 2 ** int(np.log2(128) + 0.999) + logger.warning( + "Warning: palettesize (%r) modified to a factor of " + "two between 2-256." % palettesize + ) + self._palettesize = palettesize + # Check quantizer + self._quantizer = {"wu": 0, "nq": 1}.get(quantizer.lower(), None) + if self._quantizer is None: + raise ValueError('Invalid quantizer, must be "wu" or "nq".') + # Check frametime + if duration is None: + self._frametime = [int(1000 / float(fps) + 0.5)] + elif isinstance(duration, list): + self._frametime = [int(1000 * d) for d in duration] + elif isinstance(duration, (float, int)): + self._frametime = [int(1000 * duration)] + else: + raise ValueError("Invalid value for duration: %r" % duration) + # Check subrectangles + self._subrectangles = bool(subrectangles) + self._prev_im = None + # Init + FreeimageMulti.Writer._open(self, flags) + # Set global meta data + self._meta = {} + self._meta["ANIMATION"] = { + # 'GlobalPalette': np.array([0]).astype(np.uint8), + "Loop": np.array([loop]).astype(np.uint32), + # 'LogicalWidth': np.array([x]).astype(np.uint16), + # 'LogicalHeight': np.array([x]).astype(np.uint16), + } + + def _append_bitmap(self, im, meta, 
bitmap): + # Prepare meta data + meta = meta.copy() + meta_a = meta["ANIMATION"] = {} + # If this is the first frame, assign it our "global" meta data + if len(self._bm) == 0: + meta.update(self._meta) + meta_a = meta["ANIMATION"] + # Set frame time + index = len(self._bm) + if index < len(self._frametime): + ft = self._frametime[index] + else: + ft = self._frametime[-1] + meta_a["FrameTime"] = np.array([ft]).astype(np.uint32) + # Check array + if im.ndim == 3 and im.shape[-1] == 4: + im = im[:, :, :3] + # Process subrectangles + im_uncropped = im + if self._subrectangles and self._prev_im is not None: + im, xy = self._get_sub_rectangles(self._prev_im, im) + meta_a["DisposalMethod"] = np.array([1]).astype(np.uint8) + meta_a["FrameLeft"] = np.array([xy[0]]).astype(np.uint16) + meta_a["FrameTop"] = np.array([xy[1]]).astype(np.uint16) + self._prev_im = im_uncropped + # Set image data + sub2 = sub1 = bitmap + sub1.allocate(im) + sub1.set_image_data(im) + # Quantize it if its RGB + if im.ndim == 3 and im.shape[-1] == 3: + sub2 = sub1.quantize(self._quantizer, self._palettesize) + # If single image, omit animation data + if self.request.mode[1] == "i": + del meta["ANIMATION"] + # Set meta data and return + sub2.set_meta_data(meta) + return sub2 + + def _get_sub_rectangles(self, prev, im): + """ + Calculate the minimal rectangles that need updating each frame. + Returns a two-element tuple containing the cropped images and a + list of x-y positions. + """ + # Get difference, sum over colors + diff = np.abs(im - prev) + if diff.ndim == 3: + diff = diff.sum(2) + # Get begin and end for both dimensions + X = np.argwhere(diff.sum(0)) + Y = np.argwhere(diff.sum(1)) + # Get rect coordinates + if X.size and Y.size: + x0, x1 = int(X[0]), int(X[-1]) + 1 + y0, y1 = int(Y[0]), int(Y[-1]) + 1 + else: # No change ... 
make it minimal + x0, x1 = 0, 2 + y0, y1 = 0, 2 + # Cut out and return + return im[y0:y1, x0:x1], (x0, y0) + + +# formats.add_format(MngFormat('MNG', 'Multiple network graphics', +# '.mng', 'iI')) +formats.add_format(IcoFormat("ICO-FI", "Windows icon", ".ico", "iI")) +formats.add_format( + GifFormat("GIF-FI", "Static and animated gif (FreeImage)", ".gif", "iI") +) diff --git a/venv/Lib/site-packages/imageio/plugins/gdal.py b/venv/Lib/site-packages/imageio/plugins/gdal.py new file mode 100644 index 000000000..aa5823819 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/gdal.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin for reading gdal files. +""" + +from .. import formats +from ..core import Format, has_module + +_gdal = None # lazily loaded in load_lib() + + +def load_lib(): + global _gdal + try: + import osgeo.gdal as _gdal + except ImportError: + raise ImportError( + "The GDAL format relies on the GDAL package." + "Please refer to http://www.gdal.org/" + "for further instructions." 
+ ) + return _gdal + + +GDAL_FORMATS = (".tiff", " .tif", ".img", ".ecw", ".jpg", ".jpeg") + + +class GdalFormat(Format): + + """ + + Parameters for reading + ---------------------- + None + + """ + + def _can_read(self, request): + if request.extension in (".ecw",): + return True + if has_module("osgeo.gdal"): + return request.extension in self.extensions + + def _can_write(self, request): + return False + + # -- + + class Reader(Format.Reader): + def _open(self): + if not _gdal: + load_lib() + self._ds = _gdal.Open(self.request.get_local_filename()) + + def _close(self): + del self._ds + + def _get_length(self): + return 1 + + def _get_data(self, index): + if index != 0: + raise IndexError("Gdal file contains only one dataset") + return self._ds.ReadAsArray(), self._get_meta_data(index) + + def _get_meta_data(self, index): + return self._ds.GetMetadata() + + +# Add this format +formats.add_format( + GdalFormat( + "gdal", "Geospatial Data Abstraction Library", " ".join(GDAL_FORMATS), "iIvV" + ) +) diff --git a/venv/Lib/site-packages/imageio/plugins/grab.py b/venv/Lib/site-packages/imageio/plugins/grab.py new file mode 100644 index 000000000..13645844b --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/grab.py @@ -0,0 +1,123 @@ +""" +PIL-based formats to take screenshots and grab from the clipboard. +""" + +import threading + +import numpy as np + +from .. import formats +from ..core import Format + + +class BaseGrabFormat(Format): + """ Base format for grab formats. 
+ """ + + _pillow_imported = False + _ImageGrab = None + + def __init__(self, *args, **kwargs): + super(BaseGrabFormat, self).__init__(*args, **kwargs) + self._lock = threading.RLock() + + def _can_write(self, request): + return False + + def _init_pillow(self): + with self._lock: + if not self._pillow_imported: + self._pillow_imported = True # more like tried to import + import PIL + + if not hasattr(PIL, "__version__"): # pragma: no cover + raise ImportError("Imageio Pillow requires " "Pillow, not PIL!") + try: + from PIL import ImageGrab + except ImportError: + return None + self._ImageGrab = ImageGrab + return self._ImageGrab + + class Reader(Format.Reader): + def _open(self): + pass + + def _close(self): + pass + + def _get_data(self, index): + return self.format._get_data(index) + + +class ScreenGrabFormat(BaseGrabFormat): + """ The ScreenGrabFormat provided a means to grab screenshots using + the uri of "". + + This functionality is provided via Pillow. Note that "" is + only supported on Windows and OS X. + + Parameters for reading + ---------------------- + No parameters. + """ + + def _can_read(self, request): + if request.mode[1] not in "i?": + return False + if request.filename != "": + return False + return bool(self._init_pillow()) + + def _get_data(self, index): + ImageGrab = self._init_pillow() + assert ImageGrab + + pil_im = ImageGrab.grab() + assert pil_im is not None + im = np.asarray(pil_im) + return im, {} + + +class ClipboardGrabFormat(BaseGrabFormat): + """ The ClipboardGrabFormat provided a means to grab image data from + the clipboard, using the uri "" + + This functionality is provided via Pillow. Note that "" is + only supported on Windows. + + Parameters for reading + ---------------------- + No parameters. 
+ """ + + def _can_read(self, request): + if request.mode[1] not in "i?": + return False + if request.filename != "": + return False + return bool(self._init_pillow()) + + def _get_data(self, index): + ImageGrab = self._init_pillow() + assert ImageGrab + + pil_im = ImageGrab.grabclipboard() + if pil_im is None: + raise RuntimeError( + "There seems to be no image data on the " "clipboard now." + ) + im = np.asarray(pil_im) + return im, {} + + +# Register. You register an *instance* of a Format class. +format = ScreenGrabFormat( + "screengrab", "Grab screenshots (Windows and OS X only)", [], "i" +) +formats.add_format(format) + +format = ClipboardGrabFormat( + "clipboardgrab", "Grab from clipboard (Windows only)", [], "i" +) +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/plugins/lytro.py b/venv/Lib/site-packages/imageio/plugins/lytro.py new file mode 100644 index 000000000..b9f88d655 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/lytro.py @@ -0,0 +1,705 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018, imageio contributors +# imageio is distributed under the terms of the (new) BSD License. +# + +""" Lytro Illum Plugin. + Plugin to read Lytro Illum .lfr and .raw files as produced + by the Lytro Illum light field camera. +""" +# +# +# This code is based on work by +# David Uhlig and his lfr_reader +# (https://www.iiit.kit.edu/uhlig.php) +# Donald Dansereau and his Matlab LF Toolbox +# (http://dgd.vision/Tools/LFToolbox/) +# and Behnam Esfahbod and his Python LFP-Reader +# (https://github.com/behnam/python-lfp-reader/) + + +import os +import json +import struct +import logging + + +import numpy as np + +from .. import formats +from ..core import Format +from .. import imread + + +logger = logging.getLogger(__name__) + + +# Sensor size of Lytro Illum resp. 
Lytro F01 light field camera sensor +LYTRO_ILLUM_IMAGE_SIZE = (5368, 7728) +LYTRO_F01_IMAGE_SIZE = (3280, 3280) + +# Parameter of lfr file format +HEADER_LENGTH = 12 +SIZE_LENGTH = 4 # = 16 - header_length +SHA1_LENGTH = 45 # = len("sha1-") + (160 / 4) +PADDING_LENGTH = 35 # = (4*16) - header_length - size_length - sha1_length +DATA_CHUNKS_ILLUM = 11 +DATA_CHUNKS_F01 = 3 + + +class LytroFormat(Format): + """ Base class for Lytro format. + The subclasses LytroLfrFormat, LytroLfpFormat, LytroIllumRawFormat and + LytroF01RawFormat implement the Lytro-LFR, Lytro-LFP and Lytro-RAW format + for the Illum and original F01 camera respectively. + Writing is not supported. + """ + + # Only single images are supported. + _modes = "i" + + def _can_write(self, request): + # Writing of Lytro files is not supported + return False + + # -- writer + + class Writer(Format.Writer): + def _open(self, flags=0): + self._fp = self.request.get_file() + + def _close(self): + # Close the reader. + # Note that the request object will close self._fp + pass + + def _append_data(self, im, meta): + # Process the given data and meta data. + raise RuntimeError("The lytro format cannot write image data.") + + def _set_meta_data(self, meta): + # Process the given meta data (global for all images) + # It is not mandatory to support this. + raise RuntimeError("The lytro format cannot write meta data.") + + +class LytroIllumRawFormat(LytroFormat): + """ This is the Lytro Illum RAW format. + The raw format is a 10bit image format as used by the Lytro Illum + light field camera. The format will read the specified raw file and will + try to load a .txt or .json file with the associated meta data. + This format does not support writing. 
+ + + Parameters for reading + ---------------------- + None + """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.mode[1] in (self.modes + "?"): + if request.extension in (".raw",): + return True + + @staticmethod + def rearrange_bits(array): + # Do bit rearrangement for the 10-bit lytro raw format + # Normalize output to 1.0 as float64 + t0 = array[0::5] + t1 = array[1::5] + t2 = array[2::5] + t3 = array[3::5] + lsb = array[4::5] + + t0 = np.left_shift(t0, 2) + np.bitwise_and(lsb, 3) + t1 = np.left_shift(t1, 2) + np.right_shift(np.bitwise_and(lsb, 12), 2) + t2 = np.left_shift(t2, 2) + np.right_shift(np.bitwise_and(lsb, 48), 4) + t3 = np.left_shift(t3, 2) + np.right_shift(np.bitwise_and(lsb, 192), 6) + + image = np.zeros(LYTRO_ILLUM_IMAGE_SIZE, dtype=np.uint16) + image[:, 0::4] = t0.reshape( + (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4) + ) + image[:, 1::4] = t1.reshape( + (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4) + ) + image[:, 2::4] = t2.reshape( + (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4) + ) + image[:, 3::4] = t3.reshape( + (LYTRO_ILLUM_IMAGE_SIZE[0], LYTRO_ILLUM_IMAGE_SIZE[1] // 4) + ) + + # Normalize data to 1.0 as 64-bit float. + # Division is by 1023 as the Lytro Illum saves 10-bit raw data. + return np.divide(image, 1023.0).astype(np.float64) + + # -- reader + + class Reader(Format.Reader): + def _open(self): + self._file = self.request.get_file() + self._data = None + + def _close(self): + # Close the reader. + # Note that the request object will close self._file + del self._data + + def _get_length(self): + # Return the number of images. 
+ return 1 + + def _get_data(self, index): + # Return the data and meta data for the given index + + if index not in [0, "None"]: + raise IndexError("Lytro file contains only one dataset") + + # Read all bytes + if self._data is None: + self._data = self._file.read() + + # Read bytes from string and convert to uint16 + raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16) + + # Rearrange bits + img = LytroIllumRawFormat.rearrange_bits(raw) + + # Return image and meta data + return img, self._get_meta_data(index=0) + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, it + # should return the global meta data. + + if index not in [0, None]: + raise IndexError("Lytro meta data file contains only one dataset") + + # Try to read meta data from meta data file corresponding + # to the raw data file, extension in [.txt, .TXT, .json, .JSON] + filename_base = os.path.splitext(self.request.get_local_filename())[0] + + meta_data = None + + for ext in [".txt", ".TXT", ".json", ".JSON"]: + if os.path.isfile(filename_base + ext): + meta_data = json.load(open(filename_base + ext)) + + if meta_data is not None: + return meta_data + + else: + logger.warning("No metadata file found for provided raw file.") + return {} + + +class LytroLfrFormat(LytroFormat): + """ This is the Lytro Illum LFR format. + The lfr is a image and meta data container format as used by the + Lytro Illum light field camera. + The format will read the specified lfr file. + This format does not support writing. 
+ + Parameters for reading + ---------------------- + None + """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.mode[1] in (self.modes + "?"): + if request.extension in (".lfr",): + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self): + self._file = self.request.get_file() + self._data = None + self._chunks = {} + self.metadata = {} + self._content = None + + self._find_header() + self._find_chunks() + self._find_meta() + + try: + # Get sha1 dict and check if it is in dictionary of data chunks + chunk_dict = self._content["frames"][0]["frame"] + if ( + chunk_dict["metadataRef"] in self._chunks + and chunk_dict["imageRef"] in self._chunks + and chunk_dict["privateMetadataRef"] in self._chunks + ): + + # Read raw image data byte buffer + data_pos, size = self._chunks[chunk_dict["imageRef"]] + self._file.seek(data_pos, 0) + self.raw_image_data = self._file.read(size) + + # Read meta data + data_pos, size = self._chunks[chunk_dict["metadataRef"]] + self._file.seek(data_pos, 0) + metadata = self._file.read(size) + # Add metadata to meta data dict + self.metadata["metadata"] = json.loads(metadata.decode("ASCII")) + + # Read private metadata + data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]] + self._file.seek(data_pos, 0) + serial_numbers = self._file.read(size) + self.serial_numbers = json.loads(serial_numbers.decode("ASCII")) + # Add private metadata to meta data dict + self.metadata["privateMetadata"] = self.serial_numbers + + # Read image preview thumbnail + chunk_dict = self._content["thumbnails"][0] + if chunk_dict["imageRef"] in self._chunks: + # Read thumbnail image from thumbnail chunk + data_pos, size = self._chunks[chunk_dict["imageRef"]] + self._file.seek(data_pos, 0) + # Read binary data, read image as jpeg + thumbnail_data = self._file.read(size) + thumbnail_img = imread(thumbnail_data, format="jpeg") + + thumbnail_height = chunk_dict["height"] + 
thumbnail_width = chunk_dict["width"] + + # Add thumbnail to metadata + self.metadata["thumbnail"] = { + "image": thumbnail_img, + "height": thumbnail_height, + "width": thumbnail_width, + } + + except KeyError: + raise RuntimeError("The specified file is not a valid LFR file.") + + def _close(self): + # Close the reader. + # Note that the request object will close self._file + del self._data + + def _get_length(self): + # Return the number of images. Can be np.inf + return 1 + + def _find_header(self): + """ + Checks if file has correct header and skip it. + """ + file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01" + # Read and check header of file + header = self._file.read(HEADER_LENGTH) + if header != file_header: + raise RuntimeError("The LFR file header is invalid.") + + # Read first bytes to skip header + self._file.read(SIZE_LENGTH) + + def _find_chunks(self): + """ + Gets start position and size of data chunks in file. + """ + chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + + for i in range(0, DATA_CHUNKS_ILLUM): + data_pos, size, sha1 = self._get_chunk(chunk_header) + self._chunks[sha1] = (data_pos, size) + + def _find_meta(self): + """ + Gets a data chunk that contains information over content + of other data chunks. + """ + meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + data_pos, size, sha1 = self._get_chunk(meta_header) + + # Get content + self._file.seek(data_pos, 0) + data = self._file.read(size) + self._content = json.loads(data.decode("ASCII")) + + def _get_chunk(self, header): + """ + Checks if chunk has correct header and skips it. + Finds start position and length of next chunk and reads + sha1-string that identifies the following data chunk. + + Parameters + ---------- + header : bytes + Byte string that identifies start of chunk. + + Returns + ------- + data_pos : int + Start position of data chunk in file. + size : int + Size of data chunk. + sha1 : str + Sha1 value of chunk. 
+ """ + # Read and check header of chunk + header_chunk = self._file.read(HEADER_LENGTH) + if header_chunk != header: + raise RuntimeError("The LFR chunk header is invalid.") + + data_pos = None + sha1 = None + + # Read size + size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0] + if size > 0: + # Read sha1 + sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII")) + # Skip fixed null chars + self._file.read(PADDING_LENGTH) + # Find start of data and skip data + data_pos = self._file.tell() + self._file.seek(size, 1) + # Skip extra null chars + ch = self._file.read(1) + while ch == b"\0": + ch = self._file.read(1) + self._file.seek(-1, 1) + + return data_pos, size, sha1 + + def _get_data(self, index): + # Return the data and meta data for the given index + if index not in [0, None]: + raise IndexError("Lytro lfr file contains only one dataset") + + # Read bytes from string and convert to uint16 + raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype(np.uint16) + im = LytroIllumRawFormat.rearrange_bits(raw) + + # Return array and dummy meta data + return im, self.metadata + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, + # it returns the global meta data. + if index not in [0, None]: + raise IndexError("Lytro meta data file contains only one dataset") + + return self.metadata + + +class LytroF01RawFormat(LytroFormat): + """ This is the Lytro RAW format for the original F01 Lytro camera. + The raw format is a 12bit image format as used by the Lytro F01 + light field camera. The format will read the specified raw file and will + try to load a .txt or .json file with the associated meta data. + This format does not support writing. 
+ + + Parameters for reading + ---------------------- + None + + """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.mode[1] in (self.modes + "?"): + if request.extension in (".raw",): + return True + + @staticmethod + def rearrange_bits(array): + # Do bit rearrangement for the 12-bit lytro raw format + # Normalize output to 1.0 as float64 + t0 = array[0::3] + t1 = array[1::3] + t2 = array[2::3] + + a0 = np.left_shift(t0, 4) + np.right_shift(np.bitwise_and(t1, 240), 4) + a1 = np.left_shift(np.bitwise_and(t1, 15), 8) + t2 + + image = np.zeros(LYTRO_F01_IMAGE_SIZE, dtype=np.uint16) + image[:, 0::2] = a0.reshape( + (LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2) + ) + image[:, 1::2] = a1.reshape( + (LYTRO_F01_IMAGE_SIZE[0], LYTRO_F01_IMAGE_SIZE[1] // 2) + ) + + # Normalize data to 1.0 as 64-bit float. + # Division is by 4095 as the Lytro F01 saves 12-bit raw data. + return np.divide(image, 4095.0).astype(np.float64) + + # -- reader + + class Reader(Format.Reader): + def _open(self): + self._file = self.request.get_file() + self._data = None + + def _close(self): + # Close the reader. + # Note that the request object will close self._file + del self._data + + def _get_length(self): + # Return the number of images. + return 1 + + def _get_data(self, index): + # Return the data and meta data for the given index + + if index not in [0, "None"]: + raise IndexError("Lytro file contains only one dataset") + + # Read all bytes + if self._data is None: + self._data = self._file.read() + + # Read bytes from string and convert to uint16 + raw = np.frombuffer(self._data, dtype=np.uint8).astype(np.uint16) + + # Rearrange bits + img = LytroF01RawFormat.rearrange_bits(raw) + + # Return image and meta data + return img, self._get_meta_data(index=0) + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, it + # should return the global meta data. 
+ + if index not in [0, None]: + raise IndexError("Lytro meta data file contains only one dataset") + + # Try to read meta data from meta data file corresponding + # to the raw data file, extension in [.txt, .TXT, .json, .JSON] + filename_base = os.path.splitext(self.request.get_local_filename())[0] + + meta_data = None + + for ext in [".txt", ".TXT", ".json", ".JSON"]: + if os.path.isfile(filename_base + ext): + meta_data = json.load(open(filename_base + ext)) + + if meta_data is not None: + return meta_data + + else: + logger.warning("No metadata file found for provided raw file.") + return {} + + +class LytroLfpFormat(LytroFormat): + """ This is the Lytro Illum LFP format. + The lfp is a image and meta data container format as used by the + Lytro F01 light field camera. + The format will read the specified lfp file. + This format does not support writing. + + Parameters for reading + ---------------------- + None + """ + + def _can_read(self, request): + # Check if mode and extensions are supported by the format + if request.mode[1] in (self.modes + "?"): + if request.extension in (".lfp",): + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self): + self._file = self.request.get_file() + self._data = None + self._chunks = {} + self.metadata = {} + self._content = None + + self._find_header() + self._find_meta() + self._find_chunks() + + try: + # Get sha1 dict and check if it is in dictionary of data chunks + chunk_dict = self._content["picture"]["frameArray"][0]["frame"] + if ( + chunk_dict["metadataRef"] in self._chunks + and chunk_dict["imageRef"] in self._chunks + and chunk_dict["privateMetadataRef"] in self._chunks + ): + + # Read raw image data byte buffer + data_pos, size = self._chunks[chunk_dict["imageRef"]] + self._file.seek(data_pos, 0) + self.raw_image_data = self._file.read(size) + + # Read meta data + data_pos, size = self._chunks[chunk_dict["metadataRef"]] + self._file.seek(data_pos, 0) + metadata = self._file.read(size) + 
# Add metadata to meta data dict + self.metadata["metadata"] = json.loads(metadata.decode("ASCII")) + + # Read private metadata + data_pos, size = self._chunks[chunk_dict["privateMetadataRef"]] + self._file.seek(data_pos, 0) + serial_numbers = self._file.read(size) + self.serial_numbers = json.loads(serial_numbers.decode("ASCII")) + # Add private metadata to meta data dict + self.metadata["privateMetadata"] = self.serial_numbers + + except KeyError: + raise RuntimeError("The specified file is not a valid LFP file.") + + def _close(self): + # Close the reader. + # Note that the request object will close self._file + del self._data + + def _get_length(self): + # Return the number of images. Can be np.inf + return 1 + + def _find_header(self): + """ + Checks if file has correct header and skip it. + """ + file_header = b"\x89LFP\x0D\x0A\x1A\x0A\x00\x00\x00\x01" + + # Read and check header of file + header = self._file.read(HEADER_LENGTH) + if header != file_header: + raise RuntimeError("The LFP file header is invalid.") + + # Read first bytes to skip header + self._file.read(SIZE_LENGTH) + + def _find_chunks(self): + """ + Gets start position and size of data chunks in file. + """ + chunk_header = b"\x89LFC\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + + for i in range(0, DATA_CHUNKS_F01): + data_pos, size, sha1 = self._get_chunk(chunk_header) + self._chunks[sha1] = (data_pos, size) + + def _find_meta(self): + """ + Gets a data chunk that contains information over content + of other data chunks. + """ + meta_header = b"\x89LFM\x0D\x0A\x1A\x0A\x00\x00\x00\x00" + + data_pos, size, sha1 = self._get_chunk(meta_header) + + # Get content + self._file.seek(data_pos, 0) + data = self._file.read(size) + self._content = json.loads(data.decode("ASCII")) + data = self._file.read(5) # Skip 5 + + def _get_chunk(self, header): + """ + Checks if chunk has correct header and skips it. 
+ Finds start position and length of next chunk and reads + sha1-string that identifies the following data chunk. + + Parameters + ---------- + header : bytes + Byte string that identifies start of chunk. + + Returns + ------- + data_pos : int + Start position of data chunk in file. + size : int + Size of data chunk. + sha1 : str + Sha1 value of chunk. + """ + # Read and check header of chunk + header_chunk = self._file.read(HEADER_LENGTH) + if header_chunk != header: + raise RuntimeError("The LFP chunk header is invalid.") + + data_pos = None + sha1 = None + + # Read size + size = struct.unpack(">i", self._file.read(SIZE_LENGTH))[0] + if size > 0: + # Read sha1 + sha1 = str(self._file.read(SHA1_LENGTH).decode("ASCII")) + # Skip fixed null chars + self._file.read(PADDING_LENGTH) + # Find start of data and skip data + data_pos = self._file.tell() + self._file.seek(size, 1) + # Skip extra null chars + ch = self._file.read(1) + while ch == b"\0": + ch = self._file.read(1) + self._file.seek(-1, 1) + + return data_pos, size, sha1 + + def _get_data(self, index): + # Return the data and meta data for the given index + if index not in [0, None]: + raise IndexError("Lytro lfp file contains only one dataset") + + # Read bytes from string and convert to uint16 + raw = np.frombuffer(self.raw_image_data, dtype=np.uint8).astype(np.uint16) + im = LytroF01RawFormat.rearrange_bits(raw) + + # Return array and dummy meta data + return im, self.metadata + + def _get_meta_data(self, index): + # Get the meta data for the given index. If index is None, + # it returns the global meta data. + if index not in [0, None]: + raise IndexError("Lytro meta data file contains only one dataset") + + return self.metadata + + +# Create the formats +SPECIAL_CLASSES = { + "lytro-lfr": LytroLfrFormat, + "lytro-illum-raw": LytroIllumRawFormat, + "lytro-lfp": LytroLfpFormat, + "lytro-f01-raw": LytroF01RawFormat, +} + +# Supported Formats. +# Only single image files supported. 
+file_formats = [ + ("LYTRO-LFR", "Lytro Illum lfr image file", "lfr", "i"), + ("LYTRO-ILLUM-RAW", "Lytro Illum raw image file", "raw", "i"), + ("LYTRO-LFP", "Lytro F01 lfp image file", "lfp", "i"), + ("LYTRO-F01-RAW", "Lytro F01 raw image file", "raw", "i"), +] + + +def _create_predefined_lytro_formats(): + for name, des, ext, i in file_formats: + # Get format class for format + format_class = SPECIAL_CLASSES.get(name.lower(), LytroFormat) + if format_class: + # Create Format and add + format = format_class(name, des, ext, i) + formats.add_format(format=format) + + +# Register all created formats. +_create_predefined_lytro_formats() diff --git a/venv/Lib/site-packages/imageio/plugins/npz.py b/venv/Lib/site-packages/imageio/plugins/npz.py new file mode 100644 index 000000000..4f9800eb8 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/npz.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Storage of image data in npz format. Not a great format, but at least +it supports volumetric data. And its less than 100 lines. +""" + +import numpy as np + +from .. import formats +from ..core import Format + + +class NpzFormat(Format): + """ NPZ is a file format by numpy that provides storage of array + data using gzip compression. This imageio plugin supports data of any + shape, and also supports multiple images per file. + + However, the npz format does not provide streaming; all data is + read/written at once. Further, there is no support for meta data. + + Beware that the numpy npz format has a bug on a certain combination + of Python 2.7 and numpy, which can cause the resulting files to + become unreadable on Python 3. Also, this format is not available + on Pypy. + + See the BSDF format for a similar (but more fully featured) format. 
+ + Parameters for reading + ---------------------- + None + + Parameters for saving + --------------------- + None + """ + + def _can_read(self, request): + # We support any kind of image data + return request.extension in self.extensions + + def _can_write(self, request): + # We support any kind of image data + return request.extension in self.extensions + + # -- reader + + class Reader(Format.Reader): + def _open(self): + # Load npz file, which provides another file like object + self._npz = np.load(self.request.get_file()) + assert isinstance(self._npz, np.lib.npyio.NpzFile) + # Get list of names, ordered by name, but smarter + sorter = lambda x: x.split("_")[-1] + self._names = sorted(self._npz.files, key=sorter) + + def _close(self): + self._npz.close() + + def _get_length(self): + return len(self._names) + + def _get_data(self, index): + # Get data + if index < 0 or index >= len(self._names): + raise IndexError("Index out of range while reading from nzp") + im = self._npz[self._names[index]] + # Return array and empty meta data + return im, {} + + def _get_meta_data(self, index): + # Get the meta data for the given index + raise RuntimeError("The npz format does not support meta data.") + + # -- writer + + class Writer(Format.Writer): + def _open(self): + # Npz is not such a great format. We cannot stream to the file. + # So we remember all images and write them to file at the end. 
+ self._images = [] + + def _close(self): + # Write everything + np.savez_compressed(self.request.get_file(), *self._images) + + def _append_data(self, im, meta): + self._images.append(im) # discart meta data + + def set_meta_data(self, meta): + raise RuntimeError("The npz format does not support meta data.") + + +# Register +format = NpzFormat("npz", "Numpy's compressed array format", "npz", "iIvV") +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/plugins/pillow.py b/venv/Lib/site-packages/imageio/plugins/pillow.py new file mode 100644 index 000000000..bd0f6456e --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/pillow.py @@ -0,0 +1,868 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Plugin that wraps the the Pillow library. +""" + +import logging +import threading + +import numpy as np + +from .. import formats +from ..core import Format, image_as_uint + +# Get info about pillow formats without having to import PIL +from .pillow_info import pillow_formats, pillow_docs + + +logger = logging.getLogger(__name__) + + +# todo: Pillow ImageGrab module supports grabbing the screen on Win and OSX. + + +GENERIC_DOCS = """ + Parameters for reading + ---------------------- + + pilmode : str + From the Pillow documentation: + + * 'L' (8-bit pixels, grayscale) + * 'P' (8-bit pixels, mapped to any other mode using a color palette) + * 'RGB' (3x8-bit pixels, true color) + * 'RGBA' (4x8-bit pixels, true color with transparency mask) + * 'CMYK' (4x8-bit pixels, color separation) + * 'YCbCr' (3x8-bit pixels, color video format) + * 'I' (32-bit signed integer pixels) + * 'F' (32-bit floating point pixels) + + PIL also provides limited support for a few special modes, including + 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' + (true color with premultiplied alpha). 
+ + When translating a color image to grayscale (mode 'L', 'I' or 'F'), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + as_gray : bool + If True, the image is converted using mode 'F'. When `mode` is + not None and `as_gray` is True, the image is first converted + according to `mode`, and the result is then "flattened" using + mode 'F'. +""" + + +class PillowFormat(Format): + """ + Base format class for Pillow formats. + """ + + _pillow_imported = False + _Image = None + _modes = "i" + _description = "" + + def __init__(self, *args, **kwargs): + super(PillowFormat, self).__init__(*args, **kwargs) + # Used to synchronize _init_pillow(), see #244 + self._lock = threading.RLock() + + @property + def plugin_id(self): + """ The PIL plugin id. + """ + return self._plugin_id # Set when format is created + + def _init_pillow(self): + with self._lock: + if not self._pillow_imported: + self._pillow_imported = True # more like tried to import + import PIL + + if not hasattr(PIL, "__version__"): # pragma: no cover + raise ImportError( + "Imageio Pillow plugin requires " "Pillow, not PIL!" 
+ ) + from PIL import Image + + self._Image = Image + elif self._Image is None: # pragma: no cover + raise RuntimeError("Imageio Pillow plugin requires " "Pillow lib.") + Image = self._Image + + if self.plugin_id in ("PNG", "JPEG", "BMP", "GIF", "PPM"): + Image.preinit() + else: + Image.init() + return Image + + def _can_read(self, request): + Image = self._init_pillow() + if request.mode[1] in (self.modes + "?"): + if self.plugin_id in Image.OPEN: + factory, accept = Image.OPEN[self.plugin_id] + if accept: + if request.firstbytes and accept(request.firstbytes): + return True + + def _can_write(self, request): + Image = self._init_pillow() + if request.mode[1] in (self.modes + "?"): + if request.extension in self.extensions: + if self.plugin_id in Image.SAVE: + return True + + class Reader(Format.Reader): + def _open(self, pilmode=None, as_gray=False): + Image = self.format._init_pillow() + try: + factory, accept = Image.OPEN[self.format.plugin_id] + except KeyError: + raise RuntimeError("Format %s cannot read images." % self.format.name) + self._fp = self._get_file() + self._im = factory(self._fp, "") + if hasattr(Image, "_decompression_bomb_check"): + Image._decompression_bomb_check(self._im.size) + # Save the raw mode used by the palette for a BMP because it may not be the number of channels + # When the data is read, imageio hands the palette to PIL to handle and clears the rawmode argument + # However, there is a bug in PIL with handling animated GIFs with a different color palette on each frame. + # This issue is resolved by using the raw palette data but the rawmode information is now lost. 
So we + # store the raw mode for later use + if self._im.palette and self._im.palette.dirty: + self._im.palette.rawmode_saved = self._im.palette.rawmode + pil_try_read(self._im) + # Store args + self._kwargs = dict( + as_gray=as_gray, is_gray=_palette_is_grayscale(self._im) + ) + # setting mode=None is not the same as just not providing it + if pilmode is not None: + self._kwargs["mode"] = pilmode + # Set length + self._length = 1 + if hasattr(self._im, "n_frames"): + self._length = self._im.n_frames + + def _get_file(self): + self._we_own_fp = False + return self.request.get_file() + + def _close(self): + save_pillow_close(self._im) + if self._we_own_fp: + self._fp.close() + # else: request object handles closing the _fp + + def _get_length(self): + return self._length + + def _seek(self, index): + try: + self._im.seek(index) + except EOFError: + raise IndexError("Could not seek to index %i" % index) + + def _get_data(self, index): + if index >= self._length: + raise IndexError("Image index %i > %i" % (index, self._length)) + i = self._im.tell() + if i > index: + self._seek(index) # just try + else: + while i < index: # some formats need to be read in sequence + i += 1 + self._seek(i) + if self._im.palette and self._im.palette.dirty: + self._im.palette.rawmode_saved = self._im.palette.rawmode + self._im.getdata()[0] + im = pil_get_frame(self._im, **self._kwargs) + return im, self._im.info + + def _get_meta_data(self, index): + if not (index is None or index == 0): + raise IndexError() + return self._im.info + + class Writer(Format.Writer): + def _open(self): + Image = self.format._init_pillow() + try: + self._save_func = Image.SAVE[self.format.plugin_id] + except KeyError: + raise RuntimeError("Format %s cannot write images." 
% self.format.name) + self._fp = self.request.get_file() + self._meta = {} + self._written = False + + def _close(self): + pass # request object handled closing _fp + + def _append_data(self, im, meta): + if self._written: + raise RuntimeError( + "Format %s only supports single images." % self.format.name + ) + # Pop unit dimension for grayscale images + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + self._written = True + self._meta.update(meta) + img = ndarray_to_pil( + im, self.format.plugin_id, self._meta.pop("prefer_uint8", True) + ) + if "bits" in self._meta: + img = img.quantize() # Make it a P image, so bits arg is used + img.save(self._fp, format=self.format.plugin_id, **self._meta) + save_pillow_close(img) + + def set_meta_data(self, meta): + self._meta.update(meta) + + +class PNGFormat(PillowFormat): + """A PNG format based on Pillow. + + This format supports grayscale, RGB and RGBA images. + + Parameters for reading + ---------------------- + ignoregamma : bool + Avoid gamma correction. Default True. + pilmode : str + From the Pillow documentation: + + * 'L' (8-bit pixels, grayscale) + * 'P' (8-bit pixels, mapped to any other mode using a color palette) + * 'RGB' (3x8-bit pixels, true color) + * 'RGBA' (4x8-bit pixels, true color with transparency mask) + * 'CMYK' (4x8-bit pixels, color separation) + * 'YCbCr' (3x8-bit pixels, color video format) + * 'I' (32-bit signed integer pixels) + * 'F' (32-bit floating point pixels) + + PIL also provides limited support for a few special modes, including + 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' + (true color with premultiplied alpha). + + When translating a color image to grayscale (mode 'L', 'I' or 'F'), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + as_gray : bool + If True, the image is converted using mode 'F'. 
When `mode` is + not None and `as_gray` is True, the image is first converted + according to `mode`, and the result is then "flattened" using + mode 'F'. + + Parameters for saving + --------------------- + optimize : bool + If present and true, instructs the PNG writer to make the output file + as small as possible. This includes extra processing in order to find + optimal encoder settings. + transparency: + This option controls what color image to mark as transparent. + dpi: tuple of two scalars + The desired dpi in each direction. + pnginfo: PIL.PngImagePlugin.PngInfo + Object containing text tags. + compress_level: int + ZLIB compression level, a number between 0 and 9: 1 gives best speed, + 9 gives best compression, 0 gives no compression at all. Default is 9. + When ``optimize`` option is True ``compress_level`` has no effect + (it is set to 9 regardless of a value passed). + compression: int + Compatibility with the freeimage PNG format. If given, it overrides + compress_level. + icc_profile: + The ICC Profile to include in the saved file. + bits (experimental): int + This option controls how many bits to store. If omitted, + the PNG writer uses 8 bits (256 colors). + quantize: + Compatibility with the freeimage PNG format. If given, it overrides + bits. In this case, given as a number between 1-256. + dictionary (experimental): dict + Set the ZLIB encoder dictionary. + prefer_uint8: bool + Let the PNG writer truncate uint16 image arrays to uint8 if their values fall + within the range [0, 255]. Defaults to true for legacy compatibility, however + it is recommended to set this to false to avoid unexpected behavior when + saving e.g. weakly saturated images. 
+ """ + + class Reader(PillowFormat.Reader): + def _open(self, pilmode=None, as_gray=False, ignoregamma=True): + return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray) + + def _get_data(self, index): + im, info = PillowFormat.Reader._get_data(self, index) + if not self.request.kwargs.get("ignoregamma", True): + # The gamma value in the file represents the gamma factor for the + # hardware on the system where the file was created, and is meant + # to be able to match the colors with the system on which the + # image is shown. See also issue #366 + try: + gamma = float(info["gamma"]) + except (KeyError, ValueError): + pass + else: + scale = float(65536 if im.dtype == np.uint16 else 255) + gain = 1.0 + im[:] = ((im / scale) ** gamma) * scale * gain + 0.4999 + return im, info + + # -- + + class Writer(PillowFormat.Writer): + def _open(self, compression=None, quantize=None, interlaced=False, **kwargs): + + # Better default for compression + kwargs["compress_level"] = kwargs.get("compress_level", 9) + + if compression is not None: + if compression < 0 or compression > 9: + raise ValueError("Invalid PNG compression level: %r" % compression) + kwargs["compress_level"] = compression + if quantize is not None: + for bits in range(1, 9): + if 2 ** bits == quantize: + break + else: + raise ValueError( + "PNG quantize must be power of two, " "not %r" % quantize + ) + kwargs["bits"] = bits + if interlaced: + logger.warning("PIL PNG writer cannot produce interlaced images.") + + ok_keys = ( + "optimize", + "transparency", + "dpi", + "pnginfo", + "bits", + "compress_level", + "icc_profile", + "dictionary", + "prefer_uint8", + ) + for key in kwargs: + if key not in ok_keys: + raise TypeError("Invalid arg for PNG writer: %r" % key) + + PillowFormat.Writer._open(self) + self._meta.update(kwargs) + + def _append_data(self, im, meta): + if str(im.dtype) == "uint16" and (im.ndim == 2 or im.shape[-1] == 1): + im = image_as_uint(im, bitdepth=16) + else: + im = 
image_as_uint(im, bitdepth=8) + PillowFormat.Writer._append_data(self, im, meta) + + +class JPEGFormat(PillowFormat): + """A JPEG format based on Pillow. + + This format supports grayscale, RGB and RGBA images. + + Parameters for reading + ---------------------- + exifrotate : bool + Automatically rotate the image according to exif flag. Default True. + pilmode : str + From the Pillow documentation: + + * 'L' (8-bit pixels, grayscale) + * 'P' (8-bit pixels, mapped to any other mode using a color palette) + * 'RGB' (3x8-bit pixels, true color) + * 'RGBA' (4x8-bit pixels, true color with transparency mask) + * 'CMYK' (4x8-bit pixels, color separation) + * 'YCbCr' (3x8-bit pixels, color video format) + * 'I' (32-bit signed integer pixels) + * 'F' (32-bit floating point pixels) + + PIL also provides limited support for a few special modes, including + 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' + (true color with premultiplied alpha). + + When translating a color image to grayscale (mode 'L', 'I' or 'F'), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + as_gray : bool + If True, the image is converted using mode 'F'. When `mode` is + not None and `as_gray` is True, the image is first converted + according to `mode`, and the result is then "flattened" using + mode 'F'. + + Parameters for saving + --------------------- + quality : scalar + The compression factor of the saved image (1..100), higher + numbers result in higher quality but larger file size. Default 75. + progressive : bool + Save as a progressive JPEG file (e.g. for images on the web). + Default False. + optimize : bool + On saving, compute optimal Huffman coding tables (can reduce a few + percent of file size). Default False. + dpi : tuple of int + The pixel density, ``(x,y)``. + icc_profile : object + If present and true, the image is stored with the provided ICC profile. 
+ If this parameter is not provided, the image will be saved with no + profile attached. + exif : dict + If present, the image will be stored with the provided raw EXIF data. + subsampling : str + Sets the subsampling for the encoder. See Pillow docs for details. + qtables : object + Set the qtables for the encoder. See Pillow docs for details. + """ + + class Reader(PillowFormat.Reader): + def _open(self, pilmode=None, as_gray=False, exifrotate=True): + return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray) + + def _get_file(self): + # Pillow uses seek for JPG, so we cannot directly stream from web + if self.request.filename.startswith( + ("http://", "https://") + ) or ".zip/" in self.request.filename.replace("\\", "/"): + self._we_own_fp = True + return open(self.request.get_local_filename(), "rb") + else: + self._we_own_fp = False + return self.request.get_file() + + def _get_data(self, index): + im, info = PillowFormat.Reader._get_data(self, index) + + # Handle exif + if "exif" in info: + from PIL.ExifTags import TAGS + + info["EXIF_MAIN"] = {} + for tag, value in self._im._getexif().items(): + decoded = TAGS.get(tag, tag) + info["EXIF_MAIN"][decoded] = value + + im = self._rotate(im, info) + return im, info + + def _rotate(self, im, meta): + """ Use Orientation information from EXIF meta data to + orient the image correctly. Similar code as in FreeImage plugin. 
+ """ + if self.request.kwargs.get("exifrotate", True): + try: + ori = meta["EXIF_MAIN"]["Orientation"] + except KeyError: # pragma: no cover + pass # Orientation not available + else: # pragma: no cover - we cannot touch all cases + # www.impulseadventure.com/photo/exif-orientation.html + if ori in [1, 2]: + pass + if ori in [3, 4]: + im = np.rot90(im, 2) + if ori in [5, 6]: + im = np.rot90(im, 3) + if ori in [7, 8]: + im = np.rot90(im) + if ori in [2, 4, 5, 7]: # Flipped cases (rare) + im = np.fliplr(im) + return im + + # -- + + class Writer(PillowFormat.Writer): + def _open(self, quality=75, progressive=False, optimize=False, **kwargs): + + # Check quality - in Pillow it should be no higher than 95 + quality = int(quality) + if quality < 1 or quality > 100: + raise ValueError("JPEG quality should be between 1 and 100.") + quality = min(95, max(1, quality)) + + kwargs["quality"] = quality + kwargs["progressive"] = bool(progressive) + kwargs["optimize"] = bool(progressive) + + PillowFormat.Writer._open(self) + self._meta.update(kwargs) + + def _append_data(self, im, meta): + if im.ndim == 3 and im.shape[-1] == 4: + raise IOError("JPEG does not support alpha channel.") + im = image_as_uint(im, bitdepth=8) + PillowFormat.Writer._append_data(self, im, meta) + return + + +class JPEG2000Format(PillowFormat): + """A JPEG 2000 format based on Pillow. + + This format supports grayscale and RGB images. 
+ + Parameters for reading + ---------------------- + pilmode : str + From the Pillow documentation: + + * 'L' (8-bit pixels, grayscale) + * 'P' (8-bit pixels, mapped to any other mode using a color palette) + * 'RGB' (3x8-bit pixels, true color) + * 'RGBA' (4x8-bit pixels, true color with transparency mask) + * 'CMYK' (4x8-bit pixels, color separation) + * 'YCbCr' (3x8-bit pixels, color video format) + * 'I' (32-bit signed integer pixels) + * 'F' (32-bit floating point pixels) + + PIL also provides limited support for a few special modes, including + 'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa' + (true color with premultiplied alpha). + + When translating a color image to grayscale (mode 'L', 'I' or 'F'), + the library uses the ITU-R 601-2 luma transform:: + + L = R * 299/1000 + G * 587/1000 + B * 114/1000 + as_gray : bool + If True, the image is converted using mode 'F'. When `mode` is + not None and `as_gray` is True, the image is first converted + according to `mode`, and the result is then "flattened" using + mode 'F'. + + Parameters for saving + --------------------- + **quality_mode** + Either `"rates"` or `"dB"` depending on the units you want to use to + specify image quality. + + **quality** + Approximate size reduction (if quality mode is `rates`) or a signal to noise ratio + in decibels (if quality mode is `dB`). + + .. note:: + + To enable JPEG 2000 support, you need to build and install the OpenJPEG + library, version 2.0.0 or higher, before building the Python Imaging + Library. + + Windows users can install the OpenJPEG binaries available on the + OpenJPEG website, but must add them to their PATH in order to use PIL (if + you fail to do this, you will get errors about not being able to load the + ``_imaging`` DLL). 
+ + """ + + class Reader(PillowFormat.Reader): + def _open(self, pilmode=None, as_gray=False): + return PillowFormat.Reader._open(self, pilmode=pilmode, as_gray=as_gray) + + def _get_file(self): + # Pillow uses seek for JPG, so we cannot directly stream from web + if self.request.filename.startswith( + ("http://", "https://") + ) or ".zip/" in self.request.filename.replace("\\", "/"): + self._we_own_fp = True + return open(self.request.get_local_filename(), "rb") + else: + self._we_own_fp = False + return self.request.get_file() + + def _get_data(self, index): + im, info = PillowFormat.Reader._get_data(self, index) + + # Handle exif + if "exif" in info: + from PIL.ExifTags import TAGS + + info["EXIF_MAIN"] = {} + for tag, value in self._im._getexif().items(): + decoded = TAGS.get(tag, tag) + info["EXIF_MAIN"][decoded] = value + + im = self._rotate(im, info) + return im, info + + def _rotate(self, im, meta): + """ Use Orientation information from EXIF meta data to + orient the image correctly. Similar code as in FreeImage plugin. 
+ """ + if self.request.kwargs.get("exifrotate", True): + try: + ori = meta["EXIF_MAIN"]["Orientation"] + except KeyError: # pragma: no cover + pass # Orientation not available + else: # pragma: no cover - we cannot touch all cases + # www.impulseadventure.com/photo/exif-orientation.html + if ori in [1, 2]: + pass + if ori in [3, 4]: + im = np.rot90(im, 2) + if ori in [5, 6]: + im = np.rot90(im, 3) + if ori in [7, 8]: + im = np.rot90(im) + if ori in [2, 4, 5, 7]: # Flipped cases (rare) + im = np.fliplr(im) + return im + + # -- + + class Writer(PillowFormat.Writer): + def _open(self, quality_mode="rates", quality=5, **kwargs): + + # Check quality - in Pillow it should be no higher than 95 + if quality_mode not in {"rates", "dB"}: + raise ValueError("Quality mode should be either 'rates' or 'dB'") + + quality = float(quality) + + if quality_mode == "rates" and (quality < 1 or quality > 1000): + raise ValueError( + "The quality value {} seems to be an invalid rate!".format(quality) + ) + elif quality_mode == "dB" and (quality < 15 or quality > 100): + raise ValueError( + "The quality value {} seems to be an invalid PSNR!".format(quality) + ) + + kwargs["quality_mode"] = quality_mode + kwargs["quality_layers"] = [quality] + + PillowFormat.Writer._open(self) + self._meta.update(kwargs) + + def _append_data(self, im, meta): + if im.ndim == 3 and im.shape[-1] == 4: + raise IOError( + "The current implementation of JPEG 2000 does not support alpha channel." + ) + im = image_as_uint(im, bitdepth=8) + PillowFormat.Writer._append_data(self, im, meta) + return + + +def save_pillow_close(im): + # see issue #216 and #300 + if hasattr(im, "close"): + if hasattr(getattr(im, "fp", None), "close"): + im.close() + + +## Func from skimage + +# This cells contains code from scikit-image, in particular from +# http://github.com/scikit-image/scikit-image/blob/master/ +# skimage/io/_plugins/pil_plugin.py +# The scikit-image license applies. 
+ + +def pil_try_read(im): + try: + # this will raise an IOError if the file is not readable + im.getdata()[0] + except IOError as e: + site = "http://pillow.readthedocs.io/en/latest/installation.html" + site += "#external-libraries" + pillow_error_message = str(e) + error_message = ( + 'Could not load "%s" \n' + 'Reason: "%s"\n' + "Please see documentation at: %s" + % (im.filename, pillow_error_message, site) + ) + raise ValueError(error_message) + + +def _palette_is_grayscale(pil_image): + if pil_image.mode != "P": + return False + elif pil_image.info.get("transparency", None): # see issue #475 + return False + # get palette as an array with R, G, B columns + palette = np.asarray(pil_image.getpalette()).reshape((256, 3)) + # Not all palette colors are used; unused colors have junk values. + start, stop = pil_image.getextrema() + valid_palette = palette[start : stop + 1] + # Image is grayscale if channel differences (R - G and G - B) + # are all zero. + return np.allclose(np.diff(valid_palette), 0) + + +def pil_get_frame(im, is_gray=None, as_gray=None, mode=None, dtype=None): + """ + is_gray: Whether the image *is* gray (by inspecting its palette). + as_gray: Whether the resulting image must be converted to gaey. + mode: The mode to convert to. + """ + + if is_gray is None: + is_gray = _palette_is_grayscale(im) + + frame = im + + # Convert ... + if mode is not None: + # Mode is explicitly given ... + if mode != im.mode: + frame = im.convert(mode) + elif as_gray: + pass # don't do any auto-conversions (but do the explit one above) + elif im.mode == "P" and is_gray: + # Paletted images that are already gray by their palette + # are converted so that the resulting numpy array is 2D. + frame = im.convert("L") + elif im.mode == "P": + # Paletted images are converted to RGB/RGBA. We jump some loops to make + # this work well. 
+ if im.info.get("transparency", None) is not None: + # Let Pillow apply the transparency, see issue #210 and #246 + frame = im.convert("RGBA") + elif im.palette.mode in ("RGB", "RGBA"): + # We can do this ourselves. Pillow seems to sometimes screw + # this up if a multi-gif has a palette for each frame ... + # Create palette array + p = np.frombuffer(im.palette.getdata()[1], np.uint8) + # Restore the raw mode that was saved to be used to parse the palette + if hasattr(im.palette, "rawmode_saved"): + im.palette.rawmode = im.palette.rawmode_saved + mode = im.palette.rawmode if im.palette.rawmode else im.palette.mode + nchannels = len(mode) + # Shape it. + p.shape = -1, nchannels + if p.shape[1] == 3 or (p.shape[1] == 4 and mode[-1] == "X"): + p = np.column_stack((p[:, :3], 255 * np.ones(p.shape[0], p.dtype))) + # Swap the axes if the mode is in BGR and not RGB + if mode.startswith("BGR"): + p = p[:, [2, 1, 0]] if p.shape[1] == 3 else p[:, [2, 1, 0, 3]] + # Apply palette + frame_paletted = np.array(im, np.uint8) + try: + frame = p[frame_paletted] + except Exception: + # Ok, let PIL do it. The introduction of the branch that + # tests `im.info['transparency']` should make this happen + # much less often, but let's keep it, to be safe. + frame = im.convert("RGBA") + else: + # Let Pillow do it. Unlinke skimage, we always convert + # to RGBA; palettes can be RGBA. + if True: # im.format == 'PNG' and 'transparency' in im.info: + frame = im.convert("RGBA") + else: + frame = im.convert("RGB") + elif "A" in im.mode: + frame = im.convert("RGBA") + elif im.mode == "CMYK": + frame = im.convert("RGB") + + # Apply a post-convert if necessary + if as_gray: + frame = frame.convert("F") # Scipy compat + elif not isinstance(frame, np.ndarray) and frame.mode == "1": + # Workaround for crash in PIL. When im is 1-bit, the call array(im) + # can cause a segfault, or generate garbage. 
See + # https://github.com/scipy/scipy/issues/2138 and + # https://github.com/python-pillow/Pillow/issues/350. + # + # This converts im from a 1-bit image to an 8-bit image. + frame = frame.convert("L") + + # Convert to numpy array + if im.mode.startswith("I;16"): + # e.g. in16 PNG's + shape = im.size + dtype = ">u2" if im.mode.endswith("B") else "= 0: + arr = arr.astype(np.uint8) + mode = mode_base = "L" + + else: + arr = image_as_uint(arr, bitdepth=16) + + else: + arr = image_as_uint(arr, bitdepth=8) + mode = "L" + mode_base = "L" + + if mode == "I;16" and int(getattr(Image, "__version__", "0").split(".")[0]) < 6: + # Pillow < v6.0.0 has limited support for the "I;16" mode, + # requiring us to fall back to this expensive workaround. + # tobytes actually creates a copy of the image, which is costly. + array_buffer = arr.tobytes() + if arr.ndim == 2: + im = Image.new(mode_base, arr.T.shape) + im.frombytes(array_buffer, "raw", mode) + else: + image_shape = (arr.shape[1], arr.shape[0]) + im = Image.frombytes(mode, image_shape, array_buffer) + return im + else: + return Image.fromarray(arr, mode) + + +## End of code from scikit-image + + +from .pillowmulti import GIFFormat, TIFFFormat + +IGNORE_FORMATS = "MPEG" + +SPECIAL_FORMATS = dict( + PNG=PNGFormat, + JPEG=JPEGFormat, + GIF=GIFFormat, + TIFF=TIFFFormat, + JPEG2000=JPEG2000Format, +) + + +def register_pillow_formats(): + + for id, summary, ext in pillow_formats: + if id in IGNORE_FORMATS: + continue + FormatCls = SPECIAL_FORMATS.get(id, PillowFormat) + summary = FormatCls._description or summary + format = FormatCls(id + "-PIL", summary, ext, FormatCls._modes) + format._plugin_id = id + if FormatCls is PillowFormat or not FormatCls.__doc__: + format.__doc__ = pillow_docs[id] + GENERIC_DOCS + formats.add_format(format) + + +register_pillow_formats() diff --git a/venv/Lib/site-packages/imageio/plugins/pillow_info.py b/venv/Lib/site-packages/imageio/plugins/pillow_info.py new file mode 100644 index 
000000000..8708d31b1 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/pillow_info.py @@ -0,0 +1,1045 @@ +# -*- coding: utf-8 -*- + +# styletest: ignore E122 E123 E501 + +""" +Module that contain info about the Pillow formats. The first part of +this module generates this info and writes it to its own bottom half +if run as a script. +""" + + +def generate_info(): # pragma: no cover + from urllib.request import urlopen + import PIL + from PIL import Image + + Image.init() + + ids = [] + formats = [] + docs = {} + + # Collect formats and their summary from plugin modules + for mod_name in dir(PIL): + if "ImagePlugin" in mod_name: + mod = getattr(PIL, mod_name) + for ob_name in dir(mod): + ob = getattr(mod, ob_name) + if isinstance(ob, type) and issubclass(ob, Image.Image): + if ob.format in ids: + print("Found duplicate for", ob.format) + else: + ids.append(ob.format) + formats.append((ob.format, ob.format_description)) + + # Add extension info + for i in range(len(formats)): + id, summary = formats[i] + ext = " ".join([e for e in Image.EXTENSION if Image.EXTENSION[e] == id]) + formats[i] = id, summary, ext + + # Get documentation of formats + url = "https://raw.githubusercontent.com/python-pillow/Pillow/master/docs/handbook/image-file-formats.rst" # noqa + lines = urlopen(url).read().decode().splitlines() + lines.append("End") + lines.append("---") # for the end + + # Parse documentation + cur_name = "" + cur_part = [] + for i in range(len(lines)): + line = lines[i] + if line.startswith(("^^^", "---", "===")): + if cur_name and cur_name in ids: + text = "\n".join(cur_part[:-1]) + text = text.replace("versionadded::", "versionadded:: Pillow ") + text = text.replace("Image.open`", "Image.write`") + docs[cur_name] = text + cur_part = [] + cur_name = lines[i - 1].strip().replace(" ", "").upper() + else: + cur_part.append(" " + line) + + # Fill in the blancs + for id in ids: + if id in docs: + docs[id] = "*From the Pillow docs:*\n\n" + docs[id] + else: + 
docs[id] = "No docs for %s." % id + print("no docs for", id) + + # Sort before writing + formats.sort(key=lambda x: x[0]) + ids.sort() + + # Read file ... + code = open(__file__, "rb").read().decode() + code, divider, _ = code.partition("## BELOW IS " + "AUTOGENERATED") + code += divider + "\n\n" + + # Write formats + code += "pillow_formats = [\n" + for i in range(len(formats)): + print(formats[i]) + code += " (%r, %r, %r),\n" % formats[i] + code += " ]\n\n\n" + + # Write docs + code += "pillow_docs = {\n" + for id in ids: + code += '%r:\nu"""%s""",\n' % (id, docs[id]) + code += "}\n" + + # Write back + with open(__file__, "wb") as f: + f.write(code.encode()) + + +if __name__ == "__main__": + generate_info() + + +## BELOW IS AUTOGENERATED + +pillow_formats = [ + ("BMP", "Windows Bitmap", ".bmp"), + ("BUFR", "BUFR", ".bufr"), + ("CUR", "Windows Cursor", ".cur"), + ("DCX", "Intel DCX", ".dcx"), + ("DDS", "DirectDraw Surface", ".dds"), + ("DIB", "Windows Bitmap", ""), + ("EPS", "Encapsulated Postscript", ".ps .eps"), + ("FITS", "FITS", ".fit .fits"), + ("FLI", "Autodesk FLI/FLC Animation", ".fli .flc"), + ("FPX", "FlashPix", ".fpx"), + ("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu"), + ("GBR", "GIMP brush file", ".gbr"), + ("GIF", "Compuserve GIF", ".gif"), + ("GRIB", "GRIB", ".grib"), + ("HDF5", "HDF5", ".h5 .hdf"), + ("ICNS", "Mac OS icns resource", ".icns"), + ("ICO", "Windows Icon", ".ico"), + ("IM", "IFUNC Image Memory", ".im"), + ("IMT", "IM Tools", ""), + ("IPTC", "IPTC/NAA", ".iim"), + ("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg"), + ("JPEG2000", "JPEG 2000 (ISO 15444)", ".jp2 .j2k .jpc .jpf .jpx .j2c"), + ("MCIDAS", "McIdas area file", ""), + ("MIC", "Microsoft Image Composer", ".mic"), + ("MPEG", "MPEG", ".mpg .mpeg"), + ("MPO", "MPO (CIPA DC-007)", ".mpo"), + ("MSP", "Windows Paint", ".msp"), + ("PCD", "Kodak PhotoCD", ".pcd"), + ("PCX", "Paintbrush", ".pcx"), + ("PIXAR", "PIXAR raster image", ".pxr"), + ("PNG", "Portable network graphics", 
".png"), + ("PPM", "Pbmplus image", ".pbm .pgm .ppm"), + ("PSD", "Adobe Photoshop", ".psd"), + ("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi"), + ("SPIDER", "Spider 2D image", ""), + ("SUN", "Sun Raster File", ".ras"), + ("TGA", "Targa", ".tga"), + ("TIFF", "Adobe TIFF", ".tif .tiff"), + ("WMF", "Windows Metafile", ".wmf .emf"), + ("XBM", "X11 Bitmap", ".xbm"), + ("XPM", "X11 Pixel Map", ".xpm"), + ("XVThumb", "XV thumbnail image", ""), +] + + +pillow_docs = { + "BMP": u"""*From the Pillow docs:* + + + PIL reads and writes Windows and OS/2 BMP files containing ``1``, ``L``, ``P``, + or ``RGB`` data. 16-colour images are read as ``P`` images. Run-length encoding + is not supported. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **compression** + Set to ``bmp_rle`` if the file is run-length encoded. + """, + "BUFR": u"""*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.3 + + PIL provides a stub driver for BUFR files. + + To add read or write support to your application, use + :py:func:`PIL.BufrStubImagePlugin.register_handler`. + """, + "CUR": u"""*From the Pillow docs:* + + + CUR is used to store cursors on Windows. The CUR decoder reads the largest + available cursor. Animated cursors are not supported. + """, + "DCX": u"""*From the Pillow docs:* + + + DCX is a container file format for PCX files, defined by Intel. The DCX format + is commonly used in fax applications. The DCX decoder can read files containing + ``1``, ``L``, ``P``, or ``RGB`` data. + + When the file is opened, only the first image is read. You can use + :py:meth:`~file.seek` or :py:mod:`~PIL.ImageSequence` to read other images. + + """, + "DDS": u"""*From the Pillow docs:* + + + DDS is a popular container texture format used in video games and natively + supported by DirectX. + Currently, DXT1, DXT3, and DXT5 pixel formats are supported and only in ``RGBA`` + mode. + + .. 
versionadded:: Pillow 3.4.0 DXT3 + """, + "DIB": u"""No docs for DIB.""", + "EPS": u"""*From the Pillow docs:* + + + PIL identifies EPS files containing image data, and can read files that contain + embedded raster images (ImageData descriptors). If Ghostscript is available, + other EPS files can be read as well. The EPS driver can also write EPS + images. The EPS driver can read EPS images in ``L``, ``LAB``, ``RGB`` and + ``CMYK`` mode, but Ghostscript may convert the images to ``RGB`` mode rather + than leaving them in the original color space. The EPS driver can write images + in ``L``, ``RGB`` and ``CMYK`` modes. + + If Ghostscript is available, you can call the :py:meth:`~PIL.Image.Image.load` + method with the following parameter to affect how Ghostscript renders the EPS + + **scale** + Affects the scale of the resultant rasterized image. If the EPS suggests + that the image be rendered at 100px x 100px, setting this parameter to + 2 will make the Ghostscript render a 200px x 200px image instead. The + relative position of the bounding box is maintained:: + + im = Image.open(...) + im.size #(100,100) + im.load(scale=2) + im.size #(200,200) + """, + "FITS": u"""*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.5 + + PIL provides a stub driver for FITS files. + + To add read or write support to your application, use + :py:func:`PIL.FitsStubImagePlugin.register_handler`. + """, + "FLI": u"""No docs for FLI.""", + "FPX": u"""*From the Pillow docs:* + + + PIL reads Kodak FlashPix files. In the current version, only the highest + resolution image is read from the file, and the viewing transform is not taken + into account. + + .. note:: + + To enable full FlashPix support, you need to build and install the IJG JPEG + library before building the Python Imaging Library. See the distribution + README for details. + """, + "FTEX": u"""*From the Pillow docs:* + + + .. 
versionadded:: Pillow 3.2.0 + + The FTEX decoder reads textures used for 3D objects in + Independence War 2: Edge Of Chaos. The plugin reads a single texture + per file, in the compressed and uncompressed formats. + """, + "GBR": u"""*From the Pillow docs:* + + + The GBR decoder reads GIMP brush files, version 1 and 2. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **comment** + The brush name. + + **spacing** + The spacing between the brushes, in pixels. Version 2 only. + + GD + ^^ + + PIL reads uncompressed GD files. Note that this file format cannot be + automatically identified, so you must use :py:func:`PIL.GdImageFile.open` to + read such a file. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **transparency** + Transparency color index. This key is omitted if the image is not + transparent. + """, + "GIF": u"""*From the Pillow docs:* + + + PIL reads GIF87a and GIF89a versions of the GIF file format. The library writes + run-length encoded files in GIF87a by default, unless GIF89a features + are used or GIF89a is already in use. + + Note that GIF files are always read as grayscale (``L``) + or palette mode (``P``) images. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **background** + Default background color (a palette color index). + + **transparency** + Transparency color index. This key is omitted if the image is not + transparent. + + **version** + Version (either ``GIF87a`` or ``GIF89a``). + + **duration** + May not be present. The time to display the current frame + of the GIF, in milliseconds. + + **loop** + May not be present. The number of times the GIF should loop. + + Reading sequences + ~~~~~~~~~~~~~~~~~ + + The GIF loader supports the :py:meth:`~file.seek` and :py:meth:`~file.tell` + methods. 
You can seek to the next frame (``im.seek(im.tell() + 1)``), or rewind + the file by seeking to the first frame. Random access is not supported. + + ``im.seek()`` raises an ``EOFError`` if you try to seek after the last frame. + + Saving + ~~~~~~ + + When calling :py:meth:`~PIL.Image.Image.save`, the following options + are available:: + + im.save(out, save_all=True, append_images=[im1, im2, ...]) + + **save_all** + If present and true, all frames of the image will be saved. If + not, then only the first frame of a multiframe image will be saved. + + **append_images** + A list of images to append as additional frames. Each of the + images in the list can be single or multiframe images. + This is currently only supported for GIF, PDF, TIFF, and WebP. + + **duration** + The display duration of each frame of the multiframe gif, in + milliseconds. Pass a single integer for a constant duration, or a + list or tuple to set the duration for each frame separately. + + **loop** + Integer number of times the GIF should loop. + + **optimize** + If present and true, attempt to compress the palette by + eliminating unused colors. This is only useful if the palette can + be compressed to the next smaller power of 2 elements. + + **palette** + Use the specified palette for the saved image. The palette should + be a bytes or bytearray object containing the palette entries in + RGBRGB... form. It should be no more than 768 bytes. Alternately, + the palette can be passed in as an + :py:class:`PIL.ImagePalette.ImagePalette` object. + + **disposal** + Indicates the way in which the graphic is to be treated after being displayed. + + * 0 - No disposal specified. + * 1 - Do not dispose. + * 2 - Restore to background color. + * 3 - Restore to previous content. + + Pass a single integer for a constant disposal, or a list or tuple + to set the disposal for each frame separately. 
+ + Reading local images + ~~~~~~~~~~~~~~~~~~~~ + + The GIF loader creates an image memory the same size as the GIF file’s *logical + screen size*, and pastes the actual pixel data (the *local image*) into this + image. If you only want the actual pixel rectangle, you can manipulate the + :py:attr:`~PIL.Image.Image.size` and :py:attr:`~PIL.Image.Image.tile` + attributes before loading the file:: + + im = Image.open(...) + + if im.tile[0][0] == "gif": + # only read the first "local image" from this GIF file + tag, (x0, y0, x1, y1), offset, extra = im.tile[0] + im.size = (x1 - x0, y1 - y0) + im.tile = [(tag, (0, 0) + im.size, offset, extra)] + """, + "GRIB": u"""*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.5 + + PIL provides a stub driver for GRIB files. + + The driver requires the file to start with a GRIB header. If you have files + with embedded GRIB data, or files with multiple GRIB fields, your application + has to seek to the header before passing the file handle to PIL. + + To add read or write support to your application, use + :py:func:`PIL.GribStubImagePlugin.register_handler`. + """, + "HDF5": u"""*From the Pillow docs:* + + + .. versionadded:: Pillow 1.1.5 + + PIL provides a stub driver for HDF5 files. + + To add read or write support to your application, use + :py:func:`PIL.Hdf5StubImagePlugin.register_handler`. + """, + "ICNS": u"""*From the Pillow docs:* + + + PIL reads and (macOS only) writes macOS ``.icns`` files. By default, the + largest available icon is read, though you can override this by setting the + :py:attr:`~PIL.Image.Image.size` property before calling + :py:meth:`~PIL.Image.Image.load`. The :py:meth:`~PIL.Image.Image.write` method + sets the following :py:attr:`~PIL.Image.Image.info` property: + + **sizes** + A list of supported sizes found in this icon file; these are a + 3-tuple, ``(width, height, scale)``, where ``scale`` is 2 for a retina + icon and 1 for a standard icon. 
You *are* permitted to use this 3-tuple + format for the :py:attr:`~PIL.Image.Image.size` property if you set it + before calling :py:meth:`~PIL.Image.Image.load`; after loading, the size + will be reset to a 2-tuple containing pixel dimensions (so, e.g. if you + ask for ``(512, 512, 2)``, the final value of + :py:attr:`~PIL.Image.Image.size` will be ``(1024, 1024)``). + """, + "ICO": u"""*From the Pillow docs:* + + + ICO is used to store icons on Windows. The largest available icon is read. + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **sizes** + A list of sizes including in this ico file; these are a 2-tuple, + ``(width, height)``; Default to ``[(16, 16), (24, 24), (32, 32), (48, 48), + (64, 64), (128, 128), (256, 256)]``. Any sizes bigger than the original + size or 256 will be ignored. + + IM + ^^ + + IM is a format used by LabEye and other applications based on the IFUNC image + processing library. The library reads and writes most uncompressed interchange + versions of this format. + + IM is the only format that can store all internal PIL formats. + """, + "IM": u"""No docs for IM.""", + "IMT": u"""*From the Pillow docs:* + + + PIL reads Image Tools images containing ``L`` data. + """, + "IPTC": u"""No docs for IPTC.""", + "JPEG": u"""*From the Pillow docs:* + + + PIL reads JPEG, JFIF, and Adobe JPEG files containing ``L``, ``RGB``, or + ``CMYK`` data. It writes standard and progressive JFIF files. + + Using the :py:meth:`~PIL.Image.Image.draft` method, you can speed things up by + converting ``RGB`` images to ``L``, and resize images to 1/2, 1/4 or 1/8 of + their original size while loading them. + + The :py:meth:`~PIL.Image.Image.write` method may set the following + :py:attr:`~PIL.Image.Image.info` properties if available: + + **jfif** + JFIF application marker found. If the file is not a JFIF file, this key is + not present. + + **jfif_version** + A tuple representing the jfif version, (major version, minor version). 
+ + **jfif_density** + A tuple representing the pixel density of the image, in units specified + by jfif_unit. + + **jfif_unit** + Units for the jfif_density: + + * 0 - No Units + * 1 - Pixels per Inch + * 2 - Pixels per Centimeter + + **dpi** + A tuple representing the reported pixel density in pixels per inch, if + the file is a jfif file and the units are in inches. + + **adobe** + Adobe application marker found. If the file is not an Adobe JPEG file, this + key is not present. + + **adobe_transform** + Vendor Specific Tag. + + **progression** + Indicates that this is a progressive JPEG file. + + **icc_profile** + The ICC color profile for the image. + + **exif** + Raw EXIF data from the image. + + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **quality** + The image quality, on a scale from 1 (worst) to 95 (best). The default is + 75. Values above 95 should be avoided; 100 disables portions of the JPEG + compression algorithm, and results in large files with hardly any gain in + image quality. + + **optimize** + If present and true, indicates that the encoder should make an extra pass + over the image in order to select optimal encoder settings. + + **progressive** + If present and true, indicates that this image should be stored as a + progressive JPEG file. + + **dpi** + A tuple of integers representing the pixel density, ``(x,y)``. + + **icc_profile** + If present and true, the image is stored with the provided ICC profile. + If this parameter is not provided, the image will be saved with no profile + attached. To preserve the existing profile:: + + im.save(filename, 'jpeg', icc_profile=im.info.get('icc_profile')) + + **exif** + If present, the image will be stored with the provided raw EXIF data. + + **subsampling** + If present, sets the subsampling for the encoder. + + * ``keep``: Only valid for JPEG files, will retain the original image setting. 
+ * ``4:4:4``, ``4:2:2``, ``4:2:0``: Specific sampling values + * ``-1``: equivalent to ``keep`` + * ``0``: equivalent to ``4:4:4`` + * ``1``: equivalent to ``4:2:2`` + * ``2``: equivalent to ``4:2:0`` + + **qtables** + If present, sets the qtables for the encoder. This is listed as an + advanced option for wizards in the JPEG documentation. Use with + caution. ``qtables`` can be one of several types of values: + + * a string, naming a preset, e.g. ``keep``, ``web_low``, or ``web_high`` + * a list, tuple, or dictionary (with integer keys = + range(len(keys))) of lists of 64 integers. There must be + between 2 and 4 tables. + + .. versionadded:: Pillow 2.5.0 + + + .. note:: + + To enable JPEG support, you need to build and install the IJG JPEG library + before building the Python Imaging Library. See the distribution README for + details. + """, + "JPEG2000": u"""*From the Pillow docs:* + + + .. versionadded:: Pillow 2.4.0 + + PIL reads and writes JPEG 2000 files containing ``L``, ``LA``, ``RGB`` or + ``RGBA`` data. It can also read files containing ``YCbCr`` data, which it + converts on read into ``RGB`` or ``RGBA`` depending on whether or not there is + an alpha channel. PIL supports JPEG 2000 raw codestreams (``.j2k`` files), as + well as boxed JPEG 2000 files (``.j2p`` or ``.jpx`` files). PIL does *not* + support files whose components have different sampling frequencies. + + When loading, if you set the ``mode`` on the image prior to the + :py:meth:`~PIL.Image.Image.load` method being invoked, you can ask PIL to + convert the image to either ``RGB`` or ``RGBA`` rather than choosing for + itself. It is also possible to set ``reduce`` to the number of resolutions to + discard (each one reduces the size of the resulting image by a factor of 2), + and ``layers`` to specify the number of quality layers to load. + + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **offset** + The image offset, as a tuple of integers, e.g. 
(16, 16) + + **tile_offset** + The tile offset, again as a 2-tuple of integers. + + **tile_size** + The tile size as a 2-tuple. If not specified, or if set to None, the + image will be saved without tiling. + + **quality_mode** + Either `"rates"` or `"dB"` depending on the units you want to use to + specify image quality. + + **quality_layers** + A sequence of numbers, each of which represents either an approximate size + reduction (if quality mode is `"rates"`) or a signal to noise ratio value + in decibels. If not specified, defaults to a single layer of full quality. + + **num_resolutions** + The number of different image resolutions to be stored (which corresponds + to the number of Discrete Wavelet Transform decompositions plus one). + + **codeblock_size** + The code-block size as a 2-tuple. Minimum size is 4 x 4, maximum is 1024 x + 1024, with the additional restriction that no code-block may have more + than 4096 coefficients (i.e. the product of the two numbers must be no + greater than 4096). + + **precinct_size** + The precinct size as a 2-tuple. Must be a power of two along both axes, + and must be greater than the code-block size. + + **irreversible** + If ``True``, use the lossy Irreversible Color Transformation + followed by DWT 9-7. Defaults to ``False``, which means to use the + Reversible Color Transformation with DWT 5-3. + + **progression** + Controls the progression order; must be one of ``"LRCP"``, ``"RLCP"``, + ``"RPCL"``, ``"PCRL"``, ``"CPRL"``. The letters stand for Component, + Position, Resolution and Layer respectively and control the order of + encoding, the idea being that e.g. an image encoded using LRCP mode can + have its quality layers decoded as they arrive at the decoder, while one + encoded using RLCP mode will have increasing resolutions decoded as they + arrive, and so on. + + **cinema_mode** + Set the encoder to produce output compliant with the digital cinema + specifications. 
The options here are ``"no"`` (the default), + ``"cinema2k-24"`` for 24fps 2K, ``"cinema2k-48"`` for 48fps 2K, and + ``"cinema4k-24"`` for 24fps 4K. Note that for compliant 2K files, + *at least one* of your image dimensions must match 2048 x 1080, while + for compliant 4K files, *at least one* of the dimensions must match + 4096 x 2160. + + .. note:: + + To enable JPEG 2000 support, you need to build and install the OpenJPEG + library, version 2.0.0 or higher, before building the Python Imaging + Library. + + Windows users can install the OpenJPEG binaries available on the + OpenJPEG website, but must add them to their PATH in order to use PIL (if + you fail to do this, you will get errors about not being able to load the + ``_imaging`` DLL). + """, + "MCIDAS": u"""*From the Pillow docs:* + + + PIL identifies and reads 8-bit McIdas area files. + """, + "MIC": u"""*From the Pillow docs:* + + + PIL identifies and reads Microsoft Image Composer (MIC) files. When opened, the + first sprite in the file is loaded. You can use :py:meth:`~file.seek` and + :py:meth:`~file.tell` to read other sprites from the file. + + Note that there may be an embedded gamma of 2.2 in MIC files. + """, + "MPEG": u"""*From the Pillow docs:* + + + PIL identifies MPEG files. + """, + "MPO": u"""*From the Pillow docs:* + + + Pillow identifies and reads Multi Picture Object (MPO) files, loading the primary + image when first opened. The :py:meth:`~file.seek` and :py:meth:`~file.tell` + methods may be used to read other pictures from the file. The pictures are + zero-indexed and random access is supported. + """, + "MSP": u"""*From the Pillow docs:* + + + PIL identifies and reads MSP files from Windows 1 and 2. The library writes + uncompressed (Windows 1) versions of this format. + """, + "PCD": u"""*From the Pillow docs:* + + + PIL reads PhotoCD files containing ``RGB`` data. This only reads the 768x512 + resolution image from the file. 
Higher resolutions are encoded in a proprietary + encoding. + """, + "PCX": u"""*From the Pillow docs:* + + + PIL reads and writes PCX files containing ``1``, ``L``, ``P``, or ``RGB`` data. + """, + "PIXAR": u"""*From the Pillow docs:* + + + PIL provides limited support for PIXAR raster files. The library can identify + and read “dumped” RGB files. + + The format code is ``PIXAR``. + """, + "PNG": u"""*From the Pillow docs:* + + + PIL identifies, reads, and writes PNG files containing ``1``, ``L``, ``P``, + ``RGB``, or ``RGBA`` data. Interlaced files are supported as of v1.1.7. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties, when appropriate: + + **chromaticity** + The chromaticity points, as an 8 tuple of floats. (``White Point + X``, ``White Point Y``, ``Red X``, ``Red Y``, ``Green X``, ``Green + Y``, ``Blue X``, ``Blue Y``) + + **gamma** + Gamma, given as a floating point number. + + **srgb** + The sRGB rendering intent as an integer. + + * 0 Perceptual + * 1 Relative Colorimetric + * 2 Saturation + * 3 Absolute Colorimetric + + **transparency** + For ``P`` images: Either the palette index for full transparent pixels, + or a byte string with alpha values for each palette entry. + + For ``L`` and ``RGB`` images, the color that represents full transparent + pixels in this image. + + This key is omitted if the image is not a transparent palette image. + + ``Open`` also sets ``Image.text`` to a list of the values of the + ``tEXt``, ``zTXt``, and ``iTXt`` chunks of the PNG image. Individual + compressed chunks are limited to a decompressed size of + ``PngImagePlugin.MAX_TEXT_CHUNK``, by default 1MB, to prevent + decompression bombs. Additionally, the total size of all of the text + chunks is limited to ``PngImagePlugin.MAX_TEXT_MEMORY``, defaulting to + 64MB. 
+ + The :py:meth:`~PIL.Image.Image.save` method supports the following options: + + **optimize** + If present and true, instructs the PNG writer to make the output file as + small as possible. This includes extra processing in order to find optimal + encoder settings. + + **transparency** + For ``P``, ``L``, and ``RGB`` images, this option controls what + color image to mark as transparent. + + For ``P`` images, this can be a either the palette index, + or a byte string with alpha values for each palette entry. + + **dpi** + A tuple of two numbers corresponding to the desired dpi in each direction. + + **pnginfo** + A :py:class:`PIL.PngImagePlugin.PngInfo` instance containing text tags. + + **compress_level** + ZLIB compression level, a number between 0 and 9: 1 gives best speed, + 9 gives best compression, 0 gives no compression at all. Default is 6. + When ``optimize`` option is True ``compress_level`` has no effect + (it is set to 9 regardless of a value passed). + + **icc_profile** + The ICC Profile to include in the saved file. + + **bits (experimental)** + For ``P`` images, this option controls how many bits to store. If omitted, + the PNG writer uses 8 bits (256 colors). + + **dictionary (experimental)** + Set the ZLIB encoder dictionary. + + .. note:: + + To enable PNG support, you need to build and install the ZLIB compression + library before building the Python Imaging Library. See the installation + documentation for details. + """, + "PPM": u"""*From the Pillow docs:* + + + PIL reads and writes PBM, PGM and PPM files containing ``1``, ``L`` or ``RGB`` + data. + """, + "PSD": u"""*From the Pillow docs:* + + + PIL identifies and reads PSD files written by Adobe Photoshop 2.5 and 3.0. + + """, + "SGI": u"""*From the Pillow docs:* + + + Pillow reads and writes uncompressed ``L``, ``RGB``, and ``RGBA`` files. + + """, + "SPIDER": u"""*From the Pillow docs:* + + + PIL reads and writes SPIDER image files of 32-bit floating point data + ("F;32F"). 
+ + PIL also reads SPIDER stack files containing sequences of SPIDER images. The + :py:meth:`~file.seek` and :py:meth:`~file.tell` methods are supported, and + random access is allowed. + + The :py:meth:`~PIL.Image.Image.write` method sets the following attributes: + + **format** + Set to ``SPIDER`` + + **istack** + Set to 1 if the file is an image stack, else 0. + + **nimages** + Set to the number of images in the stack. + + A convenience method, :py:meth:`~PIL.Image.Image.convert2byte`, is provided for + converting floating point data to byte data (mode ``L``):: + + im = Image.open('image001.spi').convert2byte() + + Writing files in SPIDER format + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The extension of SPIDER files may be any 3 alphanumeric characters. Therefore + the output format must be specified explicitly:: + + im.save('newimage.spi', format='SPIDER') + + For more information about the SPIDER image processing package, see the + `SPIDER homepage`_ at `Wadsworth Center`_. + + .. _SPIDER homepage: https://spider.wadsworth.org/spider_doc/spider/docs/spider.html + .. _Wadsworth Center: https://www.wadsworth.org/ + """, + "SUN": u"""No docs for SUN.""", + "TGA": u"""*From the Pillow docs:* + + + PIL reads 24- and 32-bit uncompressed and run-length encoded TGA files. + """, + "TIFF": u"""*From the Pillow docs:* + + + Pillow reads and writes TIFF files. It can read both striped and tiled + images, pixel and plane interleaved multi-band images. If you have + libtiff and its headers installed, PIL can read and write many kinds + of compressed TIFF files. If not, PIL will only read and write + uncompressed files. + + .. note:: + + Beginning in version 5.0.0, Pillow requires libtiff to read or + write compressed files. Prior to that release, Pillow had buggy + support for reading Packbits, LZW and JPEG compressed TIFFs + without using libtiff. 
+ + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **compression** + Compression mode. + + .. versionadded:: Pillow 2.0.0 + + **dpi** + Image resolution as an ``(xdpi, ydpi)`` tuple, where applicable. You can use + the :py:attr:`~PIL.Image.Image.tag` attribute to get more detailed + information about the image resolution. + + .. versionadded:: Pillow 1.1.5 + + **resolution** + Image resolution as an ``(xres, yres)`` tuple, where applicable. This is a + measurement in whichever unit is specified by the file. + + .. versionadded:: Pillow 1.1.5 + + + The :py:attr:`~PIL.Image.Image.tag_v2` attribute contains a dictionary + of TIFF metadata. The keys are numerical indexes from + :py:attr:`~PIL.TiffTags.TAGS_V2`. Values are strings or numbers for single + items, multiple values are returned in a tuple of values. Rational + numbers are returned as a :py:class:`~PIL.TiffImagePlugin.IFDRational` + object. + + .. versionadded:: Pillow 3.0.0 + + For compatibility with legacy code, the + :py:attr:`~PIL.Image.Image.tag` attribute contains a dictionary of + decoded TIFF fields as returned prior to version 3.0.0. Values are + returned as either strings or tuples of numeric values. Rational + numbers are returned as a tuple of ``(numerator, denominator)``. + + .. deprecated:: 3.0.0 + + + Saving Tiff Images + ~~~~~~~~~~~~~~~~~~ + + The :py:meth:`~PIL.Image.Image.save` method can take the following keyword arguments: + + **save_all** + If true, Pillow will save all frames of the image to a multiframe tiff document. + + .. versionadded:: Pillow 3.4.0 + + **tiffinfo** + A :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` object or dict + object containing tiff tags and values. 
The TIFF field type is + autodetected for Numeric and string values, any other types + require using an :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` + object and setting the type in + :py:attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype` with + the appropriate numerical value from + ``TiffTags.TYPES``. + + .. versionadded:: Pillow 2.3.0 + + Metadata values that are of the rational type should be passed in + using a :py:class:`~PIL.TiffImagePlugin.IFDRational` object. + + .. versionadded:: Pillow 3.1.0 + + For compatibility with legacy code, a + :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` object may + be passed in this field. However, this is deprecated. + + .. versionadded:: Pillow 3.0.0 + + .. note:: + + Only some tags are currently supported when writing using + libtiff. The supported list is found in + :py:attr:`~PIL:TiffTags.LIBTIFF_CORE`. + + **compression** + A string containing the desired compression method for the + file. (valid only with libtiff installed) Valid compression + methods are: ``None``, ``"tiff_ccitt"``, ``"group3"``, + ``"group4"``, ``"tiff_jpeg"``, ``"tiff_adobe_deflate"``, + ``"tiff_thunderscan"``, ``"tiff_deflate"``, ``"tiff_sgilog"``, + ``"tiff_sgilog24"``, ``"tiff_raw_16"`` + + These arguments to set the tiff header fields are an alternative to + using the general tags available through tiffinfo. + + **description** + + **software** + + **date_time** + + **artist** + + **copyright** + Strings + + **resolution_unit** + A string of "inch", "centimeter" or "cm" + + **resolution** + + **x_resolution** + + **y_resolution** + + **dpi** + Either a Float, 2 tuple of (numerator, denominator) or a + :py:class:`~PIL.TiffImagePlugin.IFDRational`. Resolution implies + an equal x and y resolution, dpi also implies a unit of inches. + + """, + "WMF": u"""*From the Pillow docs:* + + + PIL can identify playable WMF files. 
+ + In PIL 1.1.4 and earlier, the WMF driver provides some limited rendering + support, but not enough to be useful for any real application. + + In PIL 1.1.5 and later, the WMF driver is a stub driver. To add WMF read or + write support to your application, use + :py:func:`PIL.WmfImagePlugin.register_handler` to register a WMF handler. + + :: + + from PIL import Image + from PIL import WmfImagePlugin + + class WmfHandler: + def open(self, im): + ... + def load(self, im): + ... + return image + def save(self, im, fp, filename): + ... + + wmf_handler = WmfHandler() + + WmfImagePlugin.register_handler(wmf_handler) + + im = Image.open("sample.wmf")""", + "XBM": u"""*From the Pillow docs:* + + + PIL reads and writes X bitmap files (mode ``1``). + """, + "XPM": u"""*From the Pillow docs:* + + + PIL reads X pixmap files (mode ``P``) with 256 colors or less. + + The :py:meth:`~PIL.Image.Image.write` method sets the following + :py:attr:`~PIL.Image.Image.info` properties: + + **transparency** + Transparency color index. This key is omitted if the image is not + transparent. + """, + "XVThumb": u"""No docs for XVThumb.""", +} diff --git a/venv/Lib/site-packages/imageio/plugins/pillowmulti.py b/venv/Lib/site-packages/imageio/plugins/pillowmulti.py new file mode 100644 index 000000000..f52592206 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/pillowmulti.py @@ -0,0 +1,364 @@ +""" +PIL formats for multiple images. +""" + +import logging + +import numpy as np + +from .pillow import PillowFormat, ndarray_to_pil, image_as_uint + + +logger = logging.getLogger(__name__) + +NeuQuant = None # we can implement this when we need it + + +class TIFFFormat(PillowFormat): + _modes = "i" # arg, why bother; people should use the tiffile version + _description = "TIFF format (Pillow)" + + +class GIFFormat(PillowFormat): + """ A format for reading and writing static and animated GIF, based + on Pillow. + + Images read with this format are always RGBA. 
Currently, + the alpha channel is ignored when saving RGB images with this + format. + + Parameters for reading + ---------------------- + None + + Parameters for saving + --------------------- + loop : int + The number of iterations. Default 0 (meaning loop indefinitely). + duration : {float, list} + The duration (in seconds) of each frame. Either specify one value + that is used for all frames, or one value for each frame. + Note that in the GIF format the duration/delay is expressed in + hundredths of a second, which limits the precision of the duration. + fps : float + The number of frames per second. If duration is not given, the + duration for each frame is set to 1/fps. Default 10. + palettesize : int + The number of colors to quantize the image to. Is rounded to + the nearest power of two. Default 256. + subrectangles : bool + If True, will try and optimize the GIF by storing only the + rectangular parts of each frame that change with respect to the + previous. Default False. + """ + + _modes = "iI" + _description = "Static and animated gif (Pillow)" + + class Reader(PillowFormat.Reader): + def _open(self, playback=None): # compat with FI format + return PillowFormat.Reader._open(self) + + class Writer(PillowFormat.Writer): + def _open( + self, + loop=0, + duration=None, + fps=10, + palettesize=256, + quantizer=0, + subrectangles=False, + ): + + # Check palettesize + palettesize = int(palettesize) + if palettesize < 2 or palettesize > 256: + raise ValueError("GIF quantize param must be 2..256") + if palettesize not in [2, 4, 8, 16, 32, 64, 128, 256]: + palettesize = 2 ** int(np.log2(128) + 0.999) + logger.warning( + "Warning: palettesize (%r) modified to a factor of " + "two between 2-256." 
% palettesize + ) + # Duratrion / fps + if duration is None: + self._duration = 1.0 / float(fps) + elif isinstance(duration, (list, tuple)): + self._duration = [float(d) for d in duration] + else: + self._duration = float(duration) + # loop + loop = float(loop) + if loop <= 0 or loop == float("inf"): + loop = 0 + loop = int(loop) + # Subrectangles / dispose + subrectangles = bool(subrectangles) + self._dispose = 1 if subrectangles else 2 + # The "0" (median cut) quantizer is by far the best + + fp = self.request.get_file() + self._writer = GifWriter( + fp, subrectangles, loop, quantizer, int(palettesize) + ) + + def _close(self): + self._writer.close() + + def _append_data(self, im, meta): + im = image_as_uint(im, bitdepth=8) + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + duration = self._duration + if isinstance(duration, list): + duration = duration[min(len(duration) - 1, self._writer._count)] + dispose = self._dispose + self._writer.add_image(im, duration, dispose) + + return + + +intToBin = lambda i: i.to_bytes(2, byteorder="little") + + +class GifWriter: + """ Class that for helping write the animated GIF file. This is based on + code from images2gif.py (part of visvis). The version here is modified + to allow streamed writing. 
+ """ + + def __init__( + self, + file, + opt_subrectangle=True, + opt_loop=0, + opt_quantizer=0, + opt_palette_size=256, + ): + self.fp = file + + self.opt_subrectangle = opt_subrectangle + self.opt_loop = opt_loop + self.opt_quantizer = opt_quantizer + self.opt_palette_size = opt_palette_size + + self._previous_image = None # as np array + self._global_palette = None # as bytes + self._count = 0 + + from PIL.GifImagePlugin import getdata + + self.getdata = getdata + + def add_image(self, im, duration, dispose): + + # Prepare image + im_rect, rect = im, (0, 0) + if self.opt_subrectangle: + im_rect, rect = self.getSubRectangle(im) + im_pil = self.converToPIL(im_rect, self.opt_quantizer, self.opt_palette_size) + + # Get pallette - apparently, this is the 3d element of the header + # (but it has not always been). Best we've got. Its not the same + # as im_pil.palette.tobytes(). + from PIL.GifImagePlugin import getheader + + palette = getheader(im_pil)[0][3] + + # Write image + if self._count == 0: + self.write_header(im_pil, palette, self.opt_loop) + self._global_palette = palette + self.write_image(im_pil, palette, rect, duration, dispose) + # assert len(palette) == len(self._global_palette) + + # Bookkeeping + self._previous_image = im + self._count += 1 + + def write_header(self, im, globalPalette, loop): + # Gather info + header = self.getheaderAnim(im) + appext = self.getAppExt(loop) + # Write + self.fp.write(header) + self.fp.write(globalPalette) + self.fp.write(appext) + + def close(self): + self.fp.write(";".encode("utf-8")) # end gif + + def write_image(self, im, palette, rect, duration, dispose): + + fp = self.fp + + # Gather local image header and data, using PIL's getdata. That + # function returns a list of bytes objects, but which parts are + # what has changed multiple times, so we put together the first + # parts until we have enough to form the image header. 
+ data = self.getdata(im) + imdes = b"" + while data and len(imdes) < 11: + imdes += data.pop(0) + assert len(imdes) == 11 + + # Make image descriptor suitable for using 256 local color palette + lid = self.getImageDescriptor(im, rect) + graphext = self.getGraphicsControlExt(duration, dispose) + + # Write local header + if (palette != self._global_palette) or (dispose != 2): + # Use local color palette + fp.write(graphext) + fp.write(lid) # write suitable image descriptor + fp.write(palette) # write local color table + fp.write(b"\x08") # LZW minimum size code + else: + # Use global color palette + fp.write(graphext) + fp.write(imdes) # write suitable image descriptor + + # Write image data + for d in data: + fp.write(d) + + def getheaderAnim(self, im): + """ Get animation header. To replace PILs getheader()[0] + """ + bb = b"GIF89a" + bb += intToBin(im.size[0]) + bb += intToBin(im.size[1]) + bb += b"\x87\x00\x00" + return bb + + def getImageDescriptor(self, im, xy=None): + """ Used for the local color table properties per image. + Otherwise global color table applies to all frames irrespective of + whether additional colors comes in play that require a redefined + palette. Still a maximum of 256 color per frame, obviously. + + Written by Ant1 on 2010-08-22 + Modified by Alex Robinson in Janurari 2011 to implement subrectangles. + """ + + # Defaule use full image and place at upper left + if xy is None: + xy = (0, 0) + + # Image separator, + bb = b"\x2C" + + # Image position and size + bb += intToBin(xy[0]) # Left position + bb += intToBin(xy[1]) # Top position + bb += intToBin(im.size[0]) # image width + bb += intToBin(im.size[1]) # image height + + # packed field: local color table flag1, interlace0, sorted table0, + # reserved00, lct size111=7=2^(7 + 1)=256. + bb += b"\x87" + + # LZW minimum size code now comes later, begining of [imagedata] blocks + return bb + + def getAppExt(self, loop): + """ Application extension. This part specifies the amount of loops. 
+ If loop is 0 or inf, it goes on infinitely. + """ + if loop == 1: + return b"" + if loop == 0: + loop = 2 ** 16 - 1 + bb = b"" + if loop != 0: # omit the extension if we would like a nonlooping gif + bb = b"\x21\xFF\x0B" # application extension + bb += b"NETSCAPE2.0" + bb += b"\x03\x01" + bb += intToBin(loop) + bb += b"\x00" # end + return bb + + def getGraphicsControlExt(self, duration=0.1, dispose=2): + """ Graphics Control Extension. A sort of header at the start of + each image. Specifies duration and transparancy. + + Dispose + ------- + * 0 - No disposal specified. + * 1 - Do not dispose. The graphic is to be left in place. + * 2 - Restore to background color. The area used by the graphic + must be restored to the background color. + * 3 - Restore to previous. The decoder is required to restore the + area overwritten by the graphic with what was there prior to + rendering the graphic. + * 4-7 -To be defined. + """ + + bb = b"\x21\xF9\x04" + bb += chr((dispose & 3) << 2).encode("utf-8") + # low bit 1 == transparency, + # 2nd bit 1 == user input , next 3 bits, the low two of which are used, + # are dispose. + bb += intToBin(int(duration * 100 + 0.5)) # in 100th of seconds + bb += b"\x00" # no transparant color + bb += b"\x00" # end + return bb + + def getSubRectangle(self, im): + """ Calculate the minimal rectangle that need updating. Returns + a two-element tuple containing the cropped image and an x-y tuple. + + Calculating the subrectangles takes extra time, obviously. However, + if the image sizes were reduced, the actual writing of the GIF + goes faster. In some cases applying this method produces a GIF faster. 
+ """ + + # Cannot do subrectangle for first image + if self._count == 0: + return im, (0, 0) + + prev = self._previous_image + + # Get difference, sum over colors + diff = np.abs(im - prev) + if diff.ndim == 3: + diff = diff.sum(2) + # Get begin and end for both dimensions + X = np.argwhere(diff.sum(0)) + Y = np.argwhere(diff.sum(1)) + # Get rect coordinates + if X.size and Y.size: + x0, x1 = int(X[0]), int(X[-1] + 1) + y0, y1 = int(Y[0]), int(Y[-1] + 1) + else: # No change ... make it minimal + x0, x1 = 0, 2 + y0, y1 = 0, 2 + + return im[y0:y1, x0:x1], (x0, y0) + + def converToPIL(self, im, quantizer, palette_size=256): + """Convert image to Paletted PIL image. + + PIL used to not do a very good job at quantization, but I guess + this has improved a lot (at least in Pillow). I don't think we need + neuqant (and we can add it later if we really want). + """ + + im_pil = ndarray_to_pil(im, "gif") + + if quantizer in ("nq", "neuquant"): + # NeuQuant algorithm + nq_samplefac = 10 # 10 seems good in general + im_pil = im_pil.convert("RGBA") # NQ assumes RGBA + nqInstance = NeuQuant(im_pil, nq_samplefac) # Learn colors + im_pil = nqInstance.quantize(im_pil, colors=palette_size) + elif quantizer in (0, 1, 2): + # Adaptive PIL algorithm + if quantizer == 2: + im_pil = im_pil.convert("RGBA") + else: + im_pil = im_pil.convert("RGB") + im_pil = im_pil.quantize(colors=palette_size, method=quantizer) + else: + raise ValueError("Invalid value for quantizer: %r" % quantizer) + return im_pil diff --git a/venv/Lib/site-packages/imageio/plugins/simpleitk.py b/venv/Lib/site-packages/imageio/plugins/simpleitk.py new file mode 100644 index 000000000..597a71fe3 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/simpleitk.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Storage of image data in multiple formats. +""" + +from .. 
import formats +from ..core import Format, has_module + +_itk = None # Defer loading to load_lib() function. + + +def load_lib(): + global _itk, _read_function, _write_function + try: + import itk as _itk + + _read_function = _itk.imread + _write_function = _itk.imwrite + except ImportError: + try: + import SimpleITK as _itk + + _read_function = _itk.ReadImage + _write_function = _itk.WriteImage + except ImportError: + raise ImportError( + "itk could not be found. " + "Please try " + " python -m pip install itk " + "or " + " python -m pip install simpleitk " + "or refer to " + " https://itkpythonpackage.readthedocs.io/ " + "for further instructions." + ) + return _itk + + +# Split up in real ITK and all supported formats. +ITK_FORMATS = ( + ".gipl", + ".ipl", + ".mha", + ".mhd", + ".nhdr", + "nia", + "hdr", + ".nrrd", + ".nii", + ".nii.gz", + ".img", + ".img.gz", + ".vtk", + "hdf5", + "lsm", + "mnc", + "mnc2", + "mgh", + "mnc", + "pic", +) +ALL_FORMATS = ITK_FORMATS + ( + ".bmp", + ".jpeg", + ".jpg", + ".png", + ".tiff", + ".tif", + ".dicom", + ".dcm", + ".gdcm", +) + + +class ItkFormat(Format): + """ The ItkFormat uses the ITK or SimpleITK library to support a range of + ITK-related formats. It also supports a few common formats that are + also supported by the freeimage plugin (e.g. PNG and JPEG). + + This format requires the ``itk`` or ``SimpleITK`` package. + + Parameters for reading + ---------------------- + None. + + Parameters for saving + --------------------- + None. + + """ + + def _can_read(self, request): + # If the request is a format that only this plugin can handle, + # we report that we can do it; a useful error will be raised + # when simpleitk is not installed. For the more common formats + # we only report that we can read if the library is installed. 
+ if request.extension in ITK_FORMATS: + return True + if has_module("itk.ImageIOBase") or has_module("SimpleITK"): + return request.extension in ALL_FORMATS + + def _can_write(self, request): + if request.extension in ITK_FORMATS: + return True + if has_module("itk.ImageIOBase") or has_module("SimpleITK"): + return request.extension in ALL_FORMATS + + # -- reader + + class Reader(Format.Reader): + def _open(self, pixel_type=None, fallback_only=None, **kwargs): + if not _itk: + load_lib() + args = () + if pixel_type is not None: + args += (pixel_type,) + if fallback_only is not None: + args += (fallback_only,) + self._img = _read_function(self.request.get_local_filename(), *args) + + def _get_length(self): + return 1 + + def _close(self): + pass + + def _get_data(self, index): + # Get data + if index != 0: + error_msg = "Index out of range while reading from itk file" + raise IndexError(error_msg) + + # Return array and empty meta data + return _itk.GetArrayFromImage(self._img), {} + + def _get_meta_data(self, index): + error_msg = "The itk plugin does not support meta data, currently." + raise RuntimeError(error_msg) + + # -- writer + class Writer(Format.Writer): + def _open(self): + if not _itk: + load_lib() + + def _close(self): + pass + + def _append_data(self, im, meta): + _itk_img = _itk.GetImageFromArray(im) + _write_function(_itk_img, self.request.get_local_filename()) + + def set_meta_data(self, meta): + error_msg = "The itk plugin does not support meta data, currently." 
+ raise RuntimeError(error_msg) + + +# Register +title = "Insight Segmentation and Registration Toolkit (ITK) format" +format = ItkFormat("itk", title, " ".join(ALL_FORMATS), "iIvV") +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/plugins/spe.py b/venv/Lib/site-packages/imageio/plugins/spe.py new file mode 100644 index 000000000..418a7e794 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/spe.py @@ -0,0 +1,469 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" SPE file reader +""" + +import os +import logging + +import numpy as np + +from .. import formats +from ..core import Format + + +logger = logging.getLogger(__name__) + + +class Spec: + """SPE file specification data + + Tuples of (offset, datatype, count), where offset is the offset in the SPE + file and datatype is the datatype as used in `numpy.fromfile`() + + `data_start` is the offset of actual image data. + + `dtypes` translates SPE datatypes (0...4) to numpy ones, e. g. dtypes[0] + is dtype("=3. 
+ data_end = ( + info["xml_footer_offset"] + if info["file_header_ver"] >= 3 + else os.path.getsize(self.request.get_local_filename()) + ) + l = data_end - Spec.data_start + l //= self._shape[0] * self._shape[1] * self._dtype.itemsize + if l != self._len: + logger.warning( + "The file header of %s claims there are %s frames, " + "but there are actually %s frames.", + self.request.filename, + self._len, + l, + ) + self._len = min(l, self._len) + + self._meta = None + + def _get_meta_data(self, index): + if self._meta is None: + if self._file_header_ver < 3: + self._init_meta_data_pre_v3() + else: + self._init_meta_data_post_v3() + return self._meta + + def _close(self): + # The file should be closed by `self.request` + pass + + def _init_meta_data_pre_v3(self): + self._meta = self._parse_header(Spec.metadata) + + nr = self._meta.pop("NumROI", None) + nr = 1 if nr < 1 else nr + self._meta["ROIs"] = roi_array_to_dict(self._meta["ROIs"][:nr]) + + # chip sizes + self._meta["chip_size"] = [ + self._meta.pop("xDimDet", None), + self._meta.pop("yDimDet", None), + ] + self._meta["virt_chip_size"] = [ + self._meta.pop("VChipXdim", None), + self._meta.pop("VChipYdim", None), + ] + self._meta["pre_pixels"] = [ + self._meta.pop("XPrePixels", None), + self._meta.pop("YPrePixels", None), + ] + self._meta["post_pixels"] = [ + self._meta.pop("XPostPixels", None), + self._meta.pop("YPostPixels", None), + ] + + # comments + self._meta["comments"] = [str(c) for c in self._meta["comments"]] + + # geometric operations + g = [] + f = self._meta.pop("geometric", 0) + if f & 1: + g.append("rotate") + if f & 2: + g.append("reverse") + if f & 4: + g.append("flip") + self._meta["geometric"] = g + + # Make some additional information more human-readable + t = self._meta["type"] + if 1 <= t <= len(Spec.controllers): + self._meta["type"] = Spec.controllers[t - 1] + else: + self._meta["type"] = "" + m = self._meta["readout_mode"] + if 1 <= m <= len(Spec.readout_modes): + 
self._meta["readout_mode"] = Spec.readout_modes[m - 1] + else: + self._meta["readout_mode"] = "" + + # bools + for k in ( + "absorb_live", + "can_do_virtual_chip", + "threshold_min_live", + "threshold_max_live", + ): + self._meta[k] = bool(self._meta[k]) + + # frame shape + self._meta["frame_shape"] = self._shape + + def _parse_header(self, spec): + ret = {} + # Decode each string from the numpy array read by np.fromfile + decode = np.vectorize(lambda x: x.decode(self._char_encoding)) + + for name, sp in spec.items(): + self._file.seek(sp[0]) + cnt = 1 if len(sp) < 3 else sp[2] + v = np.fromfile(self._file, dtype=sp[1], count=cnt) + if v.dtype.kind == "S" and name not in Spec.no_decode: + # Silently ignore string decoding failures + try: + v = decode(v) + except Exception: + logger.warning( + 'Failed to decode "{}" metadata ' + "string. Check `char_encoding` " + "parameter.".format(name) + ) + + try: + # For convenience, if the array contains only one single + # entry, return this entry itself. 
+ v = v.item() + except ValueError: + v = np.squeeze(v) + ret[name] = v + return ret + + def _init_meta_data_post_v3(self): + info = self._parse_header(Spec.basic) + self._file.seek(info["xml_footer_offset"]) + xml = self._file.read() + self._meta = {"__xml": xml} + + def _get_length(self): + if self.request.mode[1] in "vV": + return 1 + else: + return self._len + + def _get_data(self, index): + if index < 0: + raise IndexError("Image index %i < 0" % index) + if index >= self._len: + raise IndexError("Image index %i > %i" % (index, self._len)) + + if self.request.mode[1] in "vV": + if index != 0: + raise IndexError("Index has to be 0 in v and V modes") + self._file.seek(Spec.data_start) + data = np.fromfile( + self._file, + dtype=self._dtype, + count=self._shape[0] * self._shape[1] * self._len, + ) + data = data.reshape((self._len,) + self._shape) + else: + self._file.seek( + Spec.data_start + + index * self._shape[0] * self._shape[1] * self._dtype.itemsize + ) + data = np.fromfile( + self._file, dtype=self._dtype, count=self._shape[0] * self._shape[1] + ) + data = data.reshape(self._shape) + return data, self._get_meta_data(index) + + +def roi_array_to_dict(a): + """Convert the `ROIs` structured arrays to :py:class:`dict` + + Parameters + ---------- + a : numpy.ndarray + Structured array containing ROI data + + Returns + ------- + list of dict + One dict per ROI. Keys are "top_left", "bottom_right", and "bin", + values are tuples whose first element is the x axis value and the + second element is the y axis value. 
+ """ + l = [] + a = a[["startx", "starty", "endx", "endy", "groupx", "groupy"]] + for sx, sy, ex, ey, gx, gy in a: + d = { + "top_left": [int(sx), int(sy)], + "bottom_right": [int(ex), int(ey)], + "bin": [int(gx), int(gy)], + } + l.append(d) + return l + + +fmt = SpeFormat("spe", "SPE file format", ".spe", "iIvV") +formats.add_format(fmt, overwrite=True) diff --git a/venv/Lib/site-packages/imageio/plugins/swf.py b/venv/Lib/site-packages/imageio/plugins/swf.py new file mode 100644 index 000000000..78a13bcf5 --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/swf.py @@ -0,0 +1,344 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" SWF plugin. Most of the actual work is done in _swf.py. +""" + +import os +import zlib +import logging +from io import BytesIO + +import numpy as np + +from .. import formats +from ..core import Format, read_n_bytes, image_as_uint + + +logger = logging.getLogger(__name__) + +_swf = None # lazily loaded in lib() + + +def load_lib(): + global _swf + from . import _swf + + return _swf + + +class SWFFormat(Format): + """ Shockwave flash (SWF) is a media format designed for rich and + interactive animations. This plugin makes use of this format to + store a series of images in a lossless format with good compression + (zlib). The resulting images can be shown as an animation using + a flash player (such as the browser). + + SWF stores images in RGBA format. RGB or grayscale images are + automatically converted. SWF does not support meta data. + + Parameters for reading + ---------------------- + loop : bool + If True, the video will rewind as soon as a frame is requested + beyond the last frame. Otherwise, IndexError is raised. Default False. + + Parameters for saving + --------------------- + fps : int + The speed to play the animation. Default 12. + loop : bool + If True, add a tag to the end of the file to play again from + the first frame. 
Most flash players will then play the movie + in a loop. Note that the imageio SWF Reader does not check this + tag. Default True. + html : bool + If the output is a file on the file system, write an html file + (in HTML5) that shows the animation. Default False. + compress : bool + Whether to compress the swf file. Default False. You probably don't + want to use this. This does not decrease the file size since + the images are already compressed. It will result in slower + read and write time. The only purpose of this feature is to + create compressed SWF files, so that we can test the + functionality to read them. + """ + + def _can_read(self, request): + if request.mode[1] in (self.modes + "?"): + tmp = request.firstbytes[0:3].decode("ascii", "ignore") + if tmp in ("FWS", "CWS"): + return True + + def _can_write(self, request): + if request.mode[1] in (self.modes + "?"): + if request.extension in self.extensions: + return True + + # -- reader + + class Reader(Format.Reader): + def _open(self, loop=False): + if not _swf: + load_lib() + + self._arg_loop = bool(loop) + + self._fp = self.request.get_file() + + # Check file ... + tmp = self.request.firstbytes[0:3].decode("ascii", "ignore") + if tmp == "FWS": + pass # OK + elif tmp == "CWS": + # Compressed, we need to decompress + bb = self._fp.read() + bb = bb[:8] + zlib.decompress(bb[8:]) + # Wrap up in a file object + self._fp = BytesIO(bb) + else: + raise IOError("This does not look like a valid SWF file") + + # Skip first bytes. This also tests support got seeking ... + try: + self._fp.seek(8) + self._streaming_mode = False + except Exception: + self._streaming_mode = True + self._fp_read(8) + + # Skip header + # Note that the number of frames is there, which we could + # potentially use, but the number of frames does not necessarily + # correspond to the number of images. 
+ nbits = _swf.bits2int(self._fp_read(1), 5) + nbits = 5 + nbits * 4 + Lrect = nbits / 8.0 + if Lrect % 1: + Lrect += 1 + Lrect = int(Lrect) + self._fp_read(Lrect + 3) + + # Now the rest is basically tags ... + self._imlocs = [] # tuple (loc, sze, T, L1) + if not self._streaming_mode: + # Collect locations of frame, while skipping through the data + # This does not read any of the tag *data*. + try: + while True: + isimage, sze, T, L1 = self._read_one_tag() + loc = self._fp.tell() + if isimage: + # Still need to check if the format is right + format = ord(self._fp_read(3)[2:]) + if format == 5: # RGB or RGBA lossless + self._imlocs.append((loc, sze, T, L1)) + self._fp.seek(loc + sze) # Skip over tag + except IndexError: + pass # done reading + + def _fp_read(self, n): + return read_n_bytes(self._fp, n) + + def _close(self): + pass + + def _get_length(self): + if self._streaming_mode: + return np.inf + else: + return len(self._imlocs) + + def _get_data(self, index): + # Check index + if index < 0: + raise IndexError("Index in swf file must be > 0") + if not self._streaming_mode: + if self._arg_loop and self._imlocs: + index = index % len(self._imlocs) + if index >= len(self._imlocs): + raise IndexError("Index out of bounds") + + if self._streaming_mode: + # Walk over tags until we find an image + while True: + isimage, sze, T, L1 = self._read_one_tag() + bb = self._fp_read(sze) # always read data + if isimage: + im = _swf.read_pixels(bb, 0, T, L1) # can be None + if im is not None: + return im, {} + + else: + # Go to corresponding location, read data, and convert to image + loc, sze, T, L1 = self._imlocs[index] + self._fp.seek(loc) + bb = self._fp_read(sze) + # Read_pixels should return ndarry, since we checked format + im = _swf.read_pixels(bb, 0, T, L1) + return im, {} + + def _read_one_tag(self): + """ + Return (True, loc, size, T, L1) if an image that we can read. + Return (False, loc, size, T, L1) if any other tag. 
+ """ + + # Get head + head = self._fp_read(6) + if not head: # pragma: no cover + raise IndexError("Reached end of swf movie") + + # Determine type and length + T, L1, L2 = _swf.get_type_and_len(head) + if not L2: # pragma: no cover + raise RuntimeError("Invalid tag length, could not proceed") + + # Read data + isimage = False + sze = L2 - 6 + # bb = self._fp_read(L2 - 6) + + # Parse tag + if T == 0: + raise IndexError("Reached end of swf movie") + elif T in [20, 36]: + isimage = True + # im = _swf.read_pixels(bb, 0, T, L1) # can be None + elif T in [6, 21, 35, 90]: # pragma: no cover + logger.warning("Ignoring JPEG image: cannot read JPEG.") + else: + pass # Not an image tag + + # Done. Return image. Can be None + # return im + return isimage, sze, T, L1 + + def _get_meta_data(self, index): + return {} # This format does not support meta data + + # -- writer + + class Writer(Format.Writer): + def _open(self, fps=12, loop=True, html=False, compress=False): + if not _swf: + load_lib() + + self._arg_fps = int(fps) + self._arg_loop = bool(loop) + self._arg_html = bool(html) + self._arg_compress = bool(compress) + + self._fp = self.request.get_file() + self._framecounter = 0 + self._framesize = (100, 100) + + # For compress, we use an in-memory file object + if self._arg_compress: + self._fp_real = self._fp + self._fp = BytesIO() + + def _close(self): + self._complete() + # Get size of (uncompressed) file + sze = self._fp.tell() + # set nframes, this is in the potentially compressed region + self._fp.seek(self._location_to_save_nframes) + self._fp.write(_swf.int2uint16(self._framecounter)) + # Compress body? + if self._arg_compress: + bb = self._fp.getvalue() + self._fp = self._fp_real + self._fp.write(bb[:8]) + self._fp.write(zlib.compress(bb[8:])) + sze = self._fp.tell() # renew sze value + # set size + self._fp.seek(4) + self._fp.write(_swf.int2uint32(sze)) + self._fp = None # Disable + + # Write html? 
+ if self._arg_html and os.path.isfile(self.request.filename): + dirname, fname = os.path.split(self.request.filename) + filename = os.path.join(dirname, fname[:-4] + ".html") + w, h = self._framesize + html = HTML % (fname, w, h, fname) + with open(filename, "wb") as f: + f.write(html.encode("utf-8")) + + def _write_header(self, framesize, fps): + self._framesize = framesize + # Called as soon as we know framesize; when we get first frame + bb = b"" + bb += "FC"[self._arg_compress].encode("ascii") + bb += "WS".encode("ascii") # signature bytes + bb += _swf.int2uint8(8) # version + bb += "0000".encode("ascii") # FileLength (leave open for now) + bb += ( + _swf.Tag().make_rect_record(0, framesize[0], 0, framesize[1]).tobytes() + ) + bb += _swf.int2uint8(0) + _swf.int2uint8(fps) # FrameRate + self._location_to_save_nframes = len(bb) + bb += "00".encode("ascii") # nframes (leave open for now) + self._fp.write(bb) + + # Write some initial tags + taglist = _swf.FileAttributesTag(), _swf.SetBackgroundTag(0, 0, 0) + for tag in taglist: + self._fp.write(tag.get_tag()) + + def _complete(self): + # What if no images were saved? 
+ if not self._framecounter: + self._write_header((10, 10), self._arg_fps) + # Write stop tag if we do not loop + if not self._arg_loop: + self._fp.write(_swf.DoActionTag("stop").get_tag()) + # finish with end tag + self._fp.write("\x00\x00".encode("ascii")) + + def _append_data(self, im, meta): + # Correct shape and type + if im.ndim == 3 and im.shape[-1] == 1: + im = im[:, :, 0] + im = image_as_uint(im, bitdepth=8) + # Get frame size + wh = im.shape[1], im.shape[0] + # Write header on first frame + isfirstframe = False + if self._framecounter == 0: + isfirstframe = True + self._write_header(wh, self._arg_fps) + # Create tags + bm = _swf.BitmapTag(im) + sh = _swf.ShapeTag(bm.id, (0, 0), wh) + po = _swf.PlaceObjectTag(1, sh.id, move=(not isfirstframe)) + sf = _swf.ShowFrameTag() + # Write tags + for tag in [bm, sh, po, sf]: + self._fp.write(tag.get_tag()) + self._framecounter += 1 + + def set_meta_data(self, meta): + pass + + +HTML = """ + + + + Show Flash animation %s + + + + +""" + +# Register. You register an *instance* of a Format class. Here specify: +format = SWFFormat( + "swf", # shot name + "Shockwave flash", # one line descr. + ".swf", # list of extensions as a space separated string + "I", # modes, characters in iIvV +) +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/plugins/tifffile.py b/venv/Lib/site-packages/imageio/plugins/tifffile.py new file mode 100644 index 000000000..4bc6392bf --- /dev/null +++ b/venv/Lib/site-packages/imageio/plugins/tifffile.py @@ -0,0 +1,327 @@ +# -*- coding: utf-8 -*- +# imageio is distributed under the terms of the (new) BSD License. + +""" Storage of image data in tiff format. +""" + +import datetime + +from .. import formats +from ..core import Format + +import numpy as np + +_tifffile = None # Defer loading to lib() function. + + +def load_lib(): + global _tifffile + try: + import tifffile as _tifffile + except ImportError: + from . 
import _tifffile + return _tifffile + + +TIFF_FORMATS = (".tif", ".tiff", ".stk", ".lsm") +WRITE_METADATA_KEYS = ( + "photometric", + "planarconfig", + "resolution", + "description", + "compress", + "predictor", + "volume", + "writeshape", + "extratags", + "datetime", +) +READ_METADATA_KEYS = ( + "planar_configuration", + "is_fluoview", + "is_nih", + "is_contig", + "is_micromanager", + "is_ome", + "is_lsm" "is_palette", + "is_reduced", + "is_rgb", + "is_sgi", + "is_shaped", + "is_stk", + "is_tiled", + "is_mdgel" "resolution_unit", + "compression", + "predictor", + "is_mediacy", + "orientation", + "description", + "description1", + "is_imagej", + "software", +) + + +class TiffFormat(Format): + """ Provides support for a wide range of Tiff images. + + Images that contain multiple pages can be read using ``imageio.mimread()`` + to read the individual pages, or ``imageio.volread()`` to obtain a + single (higher dimensional) array. + + Parameters for reading + ---------------------- + offset : int + Optional start position of embedded file. By default this is + the current file position. + size : int + Optional size of embedded file. By default this is the number + of bytes from the 'offset' to the end of the file. + multifile : bool + If True (default), series may include pages from multiple files. + Currently applies to OME-TIFF only. + multifile_close : bool + If True (default), keep the handles of other files in multifile + series closed. This is inefficient when few files refer to + many pages. If False, the C runtime may run out of resources. + + Parameters for saving + --------------------- + bigtiff : bool + If True, the BigTIFF format is used. + byteorder : {'<', '>'} + The endianness of the data in the file. + By default this is the system's native byte order. + software : str + Name of the software used to create the image. + Saved with the first page only. 
+ + Metadata for reading + -------------------- + planar_configuration : {'contig', 'planar'} + Specifies if samples are stored contiguous or in separate planes. + By default this setting is inferred from the data shape. + 'contig': last dimension contains samples. + 'planar': third last dimension contains samples. + resolution_unit : (float, float) or ((int, int), (int, int)) + X and Y resolution in dots per inch as float or rational numbers. + compression : int + Value indicating the compression algorithm used, e.g. 5 is LZW, + 7 is JPEG, 8 is deflate. + If 1, data are uncompressed. + predictor : int + Value 2 indicates horizontal differencing was used before compression, + while 3 indicates floating point horizontal differencing. + If 1, no prediction scheme was used before compression. + orientation : {'top_left', 'bottom_right', ...} + Oriented of image array. + is_rgb : bool + True if page contains a RGB image. + is_contig : bool + True if page contains a contiguous image. + is_tiled : bool + True if page contains tiled image. + is_palette : bool + True if page contains a palette-colored image and not OME or STK. + is_reduced : bool + True if page is a reduced image of another image. + is_shaped : bool + True if page contains shape in image_description tag. + is_fluoview : bool + True if page contains FluoView MM_STAMP tag. + is_nih : bool + True if page contains NIH image header. + is_micromanager : bool + True if page contains Micro-Manager metadata. + is_ome : bool + True if page contains OME-XML in image_description tag. + is_sgi : bool + True if page contains SGI image and tile depth tags. + is_stk : bool + True if page contains UIC2Tag tag. + is_mdgel : bool + True if page contains md_file_tag tag. + is_mediacy : bool + True if page contains Media Cybernetics Id tag. + is_stk : bool + True if page contains UIC2Tag tag. + is_lsm : bool + True if page contains LSM CZ_LSM_INFO tag. 
+ description : str + Image description + description1 : str + Additional description + is_imagej : None or str + ImageJ metadata + software : str + Software used to create the TIFF file + datetime : datetime.datetime + Creation date and time + + Metadata for writing + -------------------- + photometric : {'minisblack', 'miniswhite', 'rgb'} + The color space of the image data. + By default this setting is inferred from the data shape. + planarconfig : {'contig', 'planar'} + Specifies if samples are stored contiguous or in separate planes. + By default this setting is inferred from the data shape. + 'contig': last dimension contains samples. + 'planar': third last dimension contains samples. + resolution : (float, float) or ((int, int), (int, int)) + X and Y resolution in dots per inch as float or rational numbers. + description : str + The subject of the image. Saved with the first page only. + compress : int + Values from 0 to 9 controlling the level of zlib (deflate) compression. + If 0, data are written uncompressed (default). + predictor : bool + If True, horizontal differencing is applied before compression. + Note that using an int literal 1 actually means no prediction scheme + will be used. + volume : bool + If True, volume data are stored in one tile (if applicable) using + the SGI image_depth and tile_depth tags. + Image width and depth must be multiple of 16. + Few software can read this format, e.g. MeVisLab. + writeshape : bool + If True, write the data shape to the image_description tag + if necessary and no other description is given. + extratags: sequence of tuples + Additional tags as [(code, dtype, count, value, writeonce)]. + + code : int + The TIFF tag Id. + dtype : str + Data type of items in 'value' in Python struct format. + One of B, s, H, I, 2I, b, h, i, f, d, Q, or q. + count : int + Number of data values. Not used for string values. + value : sequence + 'Count' values compatible with 'dtype'. 
+ writeonce : bool + If True, the tag is written to the first page only. + """ + + def _can_read(self, request): + # We support any kind of image data + return request.extension in self.extensions + + def _can_write(self, request): + # We support any kind of image data + return request.extension in self.extensions + + # -- reader + + class Reader(Format.Reader): + def _open(self, **kwargs): + if not _tifffile: + load_lib() + # Allow loading from http; tifffile uses seek, so download first + if self.request.filename.startswith(("http://", "https://")): + self._f = f = open(self.request.get_local_filename(), "rb") + else: + self._f = None + f = self.request.get_file() + self._tf = _tifffile.TiffFile(f, **kwargs) + + # metadata is the same for all images + self._meta = {} + + def _close(self): + self._tf.close() + if self._f is not None: + self._f.close() + + def _get_length(self): + if self.request.mode[1] in "vV": + return 1 # or can there be pages in pages or something? + else: + return len(self._tf.pages) + + def _get_data(self, index): + if self.request.mode[1] in "vV": + # Read data as single 3D (+ color channels) array + if index != 0: + raise IndexError('Tiff support no more than 1 "volume" per file') + im = self._tf.asarray() # request as singleton image + meta = self._meta + else: + # Read as 2D image + if index < 0 or index >= self._get_length(): + raise IndexError("Index out of range while reading from tiff file") + im = self._tf.pages[index].asarray() + meta = self._meta or self._get_meta_data(index) + # Return array and empty meta data + return im, meta + + def _get_meta_data(self, index): + page = self._tf.pages[index or 0] + for key in READ_METADATA_KEYS: + try: + self._meta[key] = getattr(page, key) + except Exception: + pass + + # tifffile <= 0.12.1 use datetime, newer use DateTime + for key in ("datetime", "DateTime"): + try: + self._meta["datetime"] = datetime.datetime.strptime( + page.tags[key].value, "%Y:%m:%d %H:%M:%S" + ) + break + except 
Exception: + pass + + return self._meta + + # -- writer + class Writer(Format.Writer): + def _open(self, bigtiff=None, byteorder=None, software=None): + if not _tifffile: + load_lib() + + try: + self._tf = _tifffile.TiffWriter( + self.request.get_file(), bigtiff, byteorder, software=software + ) + self._software = None + except TypeError: + # In tifffile >= 0.15, the `software` arg is passed to + # TiffWriter.save + self._tf = _tifffile.TiffWriter( + self.request.get_file(), bigtiff, byteorder + ) + self._software = software + + self._meta = {} + + def _close(self): + self._tf.close() + + def _append_data(self, im, meta): + if meta: + self.set_meta_data(meta) + # No need to check self.request.mode; tifffile figures out whether + # this is a single page, or all page data at once. + if self._software is None: + self._tf.save(np.asanyarray(im), **self._meta) + else: + # tifffile >= 0.15 + self._tf.save(np.asanyarray(im), software=self._software, **self._meta) + + def set_meta_data(self, meta): + self._meta = {} + for (key, value) in meta.items(): + if key in WRITE_METADATA_KEYS: + # Special case of previously read `predictor` int value + # 1(=NONE) translation to False expected by TiffWriter.save + if key == "predictor" and not isinstance(value, bool): + self._meta[key] = value > 1 + else: + self._meta[key] = value + + +# Register +format = TiffFormat("tiff", "TIFF format", TIFF_FORMATS, "iIvV") +formats.add_format(format) diff --git a/venv/Lib/site-packages/imageio/resources/images/astronaut.png b/venv/Lib/site-packages/imageio/resources/images/astronaut.png new file mode 100644 index 000000000..834cda001 Binary files /dev/null and b/venv/Lib/site-packages/imageio/resources/images/astronaut.png differ diff --git a/venv/Lib/site-packages/imageio/resources/images/chelsea.png b/venv/Lib/site-packages/imageio/resources/images/chelsea.png new file mode 100644 index 000000000..d311e8179 Binary files /dev/null and 
b/venv/Lib/site-packages/imageio/resources/images/chelsea.png differ diff --git a/venv/Lib/site-packages/imageio/resources/images/chelsea.zip b/venv/Lib/site-packages/imageio/resources/images/chelsea.zip new file mode 100644 index 000000000..e9c45b81d Binary files /dev/null and b/venv/Lib/site-packages/imageio/resources/images/chelsea.zip differ diff --git a/venv/Lib/site-packages/imageio/resources/images/cockatoo.mp4 b/venv/Lib/site-packages/imageio/resources/images/cockatoo.mp4 new file mode 100644 index 000000000..1e4099516 Binary files /dev/null and b/venv/Lib/site-packages/imageio/resources/images/cockatoo.mp4 differ diff --git a/venv/Lib/site-packages/imageio/resources/images/newtonscradle.gif b/venv/Lib/site-packages/imageio/resources/images/newtonscradle.gif new file mode 100644 index 000000000..27a6d205b Binary files /dev/null and b/venv/Lib/site-packages/imageio/resources/images/newtonscradle.gif differ diff --git a/venv/Lib/site-packages/imageio/resources/images/realshort.mp4 b/venv/Lib/site-packages/imageio/resources/images/realshort.mp4 new file mode 100644 index 000000000..37c953776 Binary files /dev/null and b/venv/Lib/site-packages/imageio/resources/images/realshort.mp4 differ diff --git a/venv/Lib/site-packages/imageio/resources/images/stent.npz b/venv/Lib/site-packages/imageio/resources/images/stent.npz new file mode 100644 index 000000000..387faeea5 Binary files /dev/null and b/venv/Lib/site-packages/imageio/resources/images/stent.npz differ diff --git a/venv/Lib/site-packages/imageio/resources/shipped_resources_go_here b/venv/Lib/site-packages/imageio/resources/shipped_resources_go_here new file mode 100644 index 000000000..e69de29bb diff --git a/venv/Lib/site-packages/imageio/testing.py b/venv/Lib/site-packages/imageio/testing.py new file mode 100644 index 000000000..ed3edbd8c --- /dev/null +++ b/venv/Lib/site-packages/imageio/testing.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# Distributed under the (new) BSD License. 
See LICENSE.txt for more info. + +""" Functionality used for testing. This code itself is not covered in tests. +""" + +import os +import sys +import inspect +import shutil +import atexit + +import pytest + +# Get root dir +THIS_DIR = os.path.abspath(os.path.dirname(__file__)) +ROOT_DIR = THIS_DIR +for i in range(9): + ROOT_DIR = os.path.dirname(ROOT_DIR) + if os.path.isfile(os.path.join(ROOT_DIR, ".gitignore")): + break + + +## Functions to use in tests + + +def run_tests_if_main(show_coverage=False): + """ Run tests in a given file if it is run as a script + + Coverage is reported for running this single test. Set show_coverage to + launch the report in the web browser. + """ + local_vars = inspect.currentframe().f_back.f_locals + if not local_vars.get("__name__", "") == "__main__": + return + # we are in a "__main__" + os.chdir(ROOT_DIR) + fname = str(local_vars["__file__"]) + _clear_imageio() + _enable_faulthandler() + pytest.main( + [ + "-v", + "-x", + "--color=yes", + "--cov", + "imageio", + "--cov-config", + ".coveragerc", + "--cov-report", + "html", + fname, + ] + ) + if show_coverage: + import webbrowser + + fname = os.path.join(ROOT_DIR, "htmlcov", "index.html") + webbrowser.open_new_tab(fname) + + +_the_test_dir = None + + +def get_test_dir(): + global _the_test_dir + if _the_test_dir is None: + # Define dir + from imageio.core import appdata_dir + + _the_test_dir = os.path.join(appdata_dir("imageio"), "testdir") + # Clear and create it now + clean_test_dir(True) + os.makedirs(_the_test_dir) + os.makedirs(os.path.join(_the_test_dir, "images")) + # And later + atexit.register(clean_test_dir) + return _the_test_dir + + +def clean_test_dir(strict=False): + if os.path.isdir(_the_test_dir): + try: + shutil.rmtree(_the_test_dir) + except Exception: + if strict: + raise + + +def need_internet(): + if os.getenv("IMAGEIO_NO_INTERNET", "").lower() in ("1", "true", "yes"): + pytest.skip("No internet") + + +## Functions to use from invoke tasks + + +def 
test_unit(cov_report="term"): + """ Run all unit tests. Returns exit code. + """ + orig_dir = os.getcwd() + os.chdir(ROOT_DIR) + try: + _clear_imageio() + _enable_faulthandler() + return pytest.main( + [ + "-v", + "--cov", + "imageio", + "--cov-config", + ".coveragerc", + "--cov-report", + cov_report, + "tests", + ] + ) + finally: + os.chdir(orig_dir) + import imageio + + print("Tests were performed on", str(imageio)) + + +## Requirements + + +def _enable_faulthandler(): + """ Enable faulthandler (if we can), so that we get tracebacks + on segfaults. + """ + try: + import faulthandler + + faulthandler.enable() + print("Faulthandler enabled") + except Exception: + print("Could not enable faulthandler") + + +def _clear_imageio(): + # Remove ourselves from sys.modules to force an import + for key in list(sys.modules.keys()): + if key.startswith("imageio"): + del sys.modules[key] diff --git a/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/DESCRIPTION.rst new file mode 100644 index 000000000..5a3d17ba1 --- /dev/null +++ b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/DESCRIPTION.rst @@ -0,0 +1,24 @@ +Welcome to Kiwi +=============== + +.. image:: https://travis-ci.org/nucleic/kiwi.svg?branch=master + :target: https://travis-ci.org/nucleic/kiwi +.. image:: https://codecov.io/gh/nucleic/kiwi/branch/master/graph/badge.svg + :target: https://codecov.io/gh/nucleic/kiwi +.. image:: https://readthedocs.org/projects/kiwisolver/badge/?version=latest + :target: https://kiwisolver.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +Kiwi is an efficient C++ implementation of the Cassowary constraint solving +algorithm. Kiwi is an implementation of the algorithm based on the seminal +Cassowary paper. It is *not* a refactoring of the original C++ solver. Kiwi +has been designed from the ground up to be lightweight and fast. 
Kiwi ranges +from 10x to 500x faster than the original Cassowary solver with typical use +cases gaining a 40x improvement. Memory savings are consistently > 5x. + +In addition to the C++ solver, Kiwi ships with hand-rolled Python bindings. + +The version 1.1.0 of the Python bindings will be the last one to support +Python 2, moving forward support will be limited to Python 3.5+. + + diff --git a/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/INSTALLER b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/METADATA b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/METADATA new file mode 100644 index 000000000..c1066495b --- /dev/null +++ b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/METADATA @@ -0,0 +1,41 @@ +Metadata-Version: 2.0 +Name: kiwisolver +Version: 1.2.0 +Summary: A fast implementation of the Cassowary constraint solver +Home-page: https://github.com/nucleic/kiwi +Author: The Nucleic Development Team +Author-email: sccolbert@gmail.com +License: BSD +Platform: UNKNOWN +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Requires-Python: >=3.6 + +Welcome to Kiwi +=============== + +.. image:: https://travis-ci.org/nucleic/kiwi.svg?branch=master + :target: https://travis-ci.org/nucleic/kiwi +.. image:: https://codecov.io/gh/nucleic/kiwi/branch/master/graph/badge.svg + :target: https://codecov.io/gh/nucleic/kiwi +.. 
image:: https://readthedocs.org/projects/kiwisolver/badge/?version=latest + :target: https://kiwisolver.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +Kiwi is an efficient C++ implementation of the Cassowary constraint solving +algorithm. Kiwi is an implementation of the algorithm based on the seminal +Cassowary paper. It is *not* a refactoring of the original C++ solver. Kiwi +has been designed from the ground up to be lightweight and fast. Kiwi ranges +from 10x to 500x faster than the original Cassowary solver with typical use +cases gaining a 40x improvement. Memory savings are consistently > 5x. + +In addition to the C++ solver, Kiwi ships with hand-rolled Python bindings. + +The version 1.1.0 of the Python bindings will be the last one to support +Python 2, moving forward support will be limited to Python 3.5+. + + diff --git a/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/RECORD b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/RECORD new file mode 100644 index 000000000..ce10d8761 --- /dev/null +++ b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/RECORD @@ -0,0 +1,8 @@ +kiwisolver-1.2.0.dist-info/DESCRIPTION.rst,sha256=gUPxIysDZOUlzRRtG7Se2nKjm-4n1RYbApha6VflaqU,1116 +kiwisolver-1.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +kiwisolver-1.2.0.dist-info/METADATA,sha256=mNVM4ulU_DYS10Vk4LDBic93bklLdc_9xQuWKG6rGds,1718 +kiwisolver-1.2.0.dist-info/RECORD,, +kiwisolver-1.2.0.dist-info/WHEEL,sha256=Z25b6SdXTMgEhPZfHy0ZegEQgAZjd-e6QN_O-DvyLBE,101 +kiwisolver-1.2.0.dist-info/metadata.json,sha256=qDBQg2TjkECqKVWmuZiDUo0z5SMibpgcnNWiUzBNn_s,756 +kiwisolver-1.2.0.dist-info/top_level.txt,sha256=xqwWj7oSHlpIjcw2QMJb8puTFPdjDBO78AZp9gjTh9c,11 +kiwisolver.cp36-win32.pyd,sha256=iexymCzWvx7wxkD_GBVsYFNSYT-jWEBJWfyDw4DOD0E,107520 diff --git a/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/WHEEL b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/WHEEL new file mode 100644 index 000000000..ac69dcd53 --- 
/dev/null +++ b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.26.0) +Root-Is-Purelib: false +Tag: cp36-none-win32 + diff --git a/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/metadata.json b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/metadata.json new file mode 100644 index 000000000..f5987b1b8 --- /dev/null +++ b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/metadata.json @@ -0,0 +1 @@ +{"generator": "bdist_wheel (0.26.0)", "summary": "A fast implementation of the Cassowary constraint solver", "classifiers": ["Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: Implementation :: CPython"], "extensions": {"python.details": {"project_urls": {"Home": "https://github.com/nucleic/kiwi"}, "contacts": [{"email": "sccolbert@gmail.com", "name": "The Nucleic Development Team", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}}}, "license": "BSD", "metadata_version": "2.0", "name": "kiwisolver", "requires_python": ">=3.6", "version": "1.2.0"} \ No newline at end of file diff --git a/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/top_level.txt b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/top_level.txt new file mode 100644 index 000000000..9b85884d1 --- /dev/null +++ b/venv/Lib/site-packages/kiwisolver-1.2.0.dist-info/top_level.txt @@ -0,0 +1 @@ +kiwisolver diff --git a/venv/Lib/site-packages/kiwisolver.cp36-win32.pyd b/venv/Lib/site-packages/kiwisolver.cp36-win32.pyd new file mode 100644 index 000000000..ce91c211f Binary files /dev/null and b/venv/Lib/site-packages/kiwisolver.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/matplotlib-3.3.2-py3.6-nspkg.pth b/venv/Lib/site-packages/matplotlib-3.3.2-py3.6-nspkg.pth new file mode 100644 index 000000000..2137841f0 --- /dev/null 
+++ b/venv/Lib/site-packages/matplotlib-3.3.2-py3.6-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('mpl_toolkits',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('mpl_toolkits', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('mpl_toolkits', [os.path.dirname(p)])));m = m or sys.modules.setdefault('mpl_toolkits', types.ModuleType('mpl_toolkits'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/INSTALLER b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE new file mode 100644 index 000000000..ec51537db --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE @@ -0,0 +1,99 @@ +License agreement for matplotlib versions 1.3.0 and later +========================================================= + +1. This LICENSE AGREEMENT is between the Matplotlib Development Team +("MDT"), and the Individual or Organization ("Licensee") accessing and +otherwise using matplotlib software in source or binary form and its +associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, MDT +hereby grants Licensee a nonexclusive, royalty-free, world-wide license +to reproduce, analyze, test, perform and/or display publicly, prepare +derivative works, distribute, and otherwise use matplotlib +alone or in any derivative version, provided, however, that MDT's +License Agreement and MDT's notice of copyright, i.e., "Copyright (c) +2012- Matplotlib Development Team; All Rights Reserved" are retained in +matplotlib alone or in any derivative version prepared by +Licensee. + +3. In the event Licensee prepares a derivative work that is based on or +incorporates matplotlib or any part thereof, and wants to +make the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to matplotlib . + +4. MDT is making matplotlib available to Licensee on an "AS +IS" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB +WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB + FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR +LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING +MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF +THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between MDT and +Licensee. This License Agreement does not grant permission to use MDT +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. 
By copying, installing or otherwise using matplotlib , +Licensee agrees to be bound by the terms and conditions of this License +Agreement. + +License agreement for matplotlib versions prior to 1.3.0 +======================================================== + +1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the +Individual or Organization ("Licensee") accessing and otherwise using +matplotlib software in source or binary form and its associated +documentation. + +2. Subject to the terms and conditions of this License Agreement, JDH +hereby grants Licensee a nonexclusive, royalty-free, world-wide license +to reproduce, analyze, test, perform and/or display publicly, prepare +derivative works, distribute, and otherwise use matplotlib +alone or in any derivative version, provided, however, that JDH's +License Agreement and JDH's notice of copyright, i.e., "Copyright (c) +2002-2011 John D. Hunter; All Rights Reserved" are retained in +matplotlib alone or in any derivative version prepared by +Licensee. + +3. In the event Licensee prepares a derivative work that is based on or +incorporates matplotlib or any part thereof, and wants to +make the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to matplotlib. + +4. JDH is making matplotlib available to Licensee on an "AS +IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB +WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + +5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB + FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR +LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING +MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF +THE POSSIBILITY THEREOF. + +6. 
This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between JDH and +Licensee. This License Agreement does not grant permission to use JDH +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using matplotlib, +Licensee agrees to be bound by the terms and conditions of this License +Agreement. \ No newline at end of file diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_AMSFONTS b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_AMSFONTS new file mode 100644 index 000000000..3627bb9bb --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_AMSFONTS @@ -0,0 +1,240 @@ +The cmr10.pfb file is a Type-1 version of one of Knuth's Computer Modern fonts. +It is included here as test data only, but the following license applies. + +Copyright (c) 1997, 2009, American Mathematical Society (http://www.ams.org). +All Rights Reserved. + +"cmb10" is a Reserved Font Name for this Font Software. +"cmbsy10" is a Reserved Font Name for this Font Software. +"cmbsy5" is a Reserved Font Name for this Font Software. +"cmbsy6" is a Reserved Font Name for this Font Software. +"cmbsy7" is a Reserved Font Name for this Font Software. +"cmbsy8" is a Reserved Font Name for this Font Software. +"cmbsy9" is a Reserved Font Name for this Font Software. +"cmbx10" is a Reserved Font Name for this Font Software. +"cmbx12" is a Reserved Font Name for this Font Software. +"cmbx5" is a Reserved Font Name for this Font Software. +"cmbx6" is a Reserved Font Name for this Font Software. +"cmbx7" is a Reserved Font Name for this Font Software. +"cmbx8" is a Reserved Font Name for this Font Software. +"cmbx9" is a Reserved Font Name for this Font Software. 
+"cmbxsl10" is a Reserved Font Name for this Font Software. +"cmbxti10" is a Reserved Font Name for this Font Software. +"cmcsc10" is a Reserved Font Name for this Font Software. +"cmcsc8" is a Reserved Font Name for this Font Software. +"cmcsc9" is a Reserved Font Name for this Font Software. +"cmdunh10" is a Reserved Font Name for this Font Software. +"cmex10" is a Reserved Font Name for this Font Software. +"cmex7" is a Reserved Font Name for this Font Software. +"cmex8" is a Reserved Font Name for this Font Software. +"cmex9" is a Reserved Font Name for this Font Software. +"cmff10" is a Reserved Font Name for this Font Software. +"cmfi10" is a Reserved Font Name for this Font Software. +"cmfib8" is a Reserved Font Name for this Font Software. +"cminch" is a Reserved Font Name for this Font Software. +"cmitt10" is a Reserved Font Name for this Font Software. +"cmmi10" is a Reserved Font Name for this Font Software. +"cmmi12" is a Reserved Font Name for this Font Software. +"cmmi5" is a Reserved Font Name for this Font Software. +"cmmi6" is a Reserved Font Name for this Font Software. +"cmmi7" is a Reserved Font Name for this Font Software. +"cmmi8" is a Reserved Font Name for this Font Software. +"cmmi9" is a Reserved Font Name for this Font Software. +"cmmib10" is a Reserved Font Name for this Font Software. +"cmmib5" is a Reserved Font Name for this Font Software. +"cmmib6" is a Reserved Font Name for this Font Software. +"cmmib7" is a Reserved Font Name for this Font Software. +"cmmib8" is a Reserved Font Name for this Font Software. +"cmmib9" is a Reserved Font Name for this Font Software. +"cmr10" is a Reserved Font Name for this Font Software. +"cmr12" is a Reserved Font Name for this Font Software. +"cmr17" is a Reserved Font Name for this Font Software. +"cmr5" is a Reserved Font Name for this Font Software. +"cmr6" is a Reserved Font Name for this Font Software. +"cmr7" is a Reserved Font Name for this Font Software. 
+"cmr8" is a Reserved Font Name for this Font Software. +"cmr9" is a Reserved Font Name for this Font Software. +"cmsl10" is a Reserved Font Name for this Font Software. +"cmsl12" is a Reserved Font Name for this Font Software. +"cmsl8" is a Reserved Font Name for this Font Software. +"cmsl9" is a Reserved Font Name for this Font Software. +"cmsltt10" is a Reserved Font Name for this Font Software. +"cmss10" is a Reserved Font Name for this Font Software. +"cmss12" is a Reserved Font Name for this Font Software. +"cmss17" is a Reserved Font Name for this Font Software. +"cmss8" is a Reserved Font Name for this Font Software. +"cmss9" is a Reserved Font Name for this Font Software. +"cmssbx10" is a Reserved Font Name for this Font Software. +"cmssdc10" is a Reserved Font Name for this Font Software. +"cmssi10" is a Reserved Font Name for this Font Software. +"cmssi12" is a Reserved Font Name for this Font Software. +"cmssi17" is a Reserved Font Name for this Font Software. +"cmssi8" is a Reserved Font Name for this Font Software. +"cmssi9" is a Reserved Font Name for this Font Software. +"cmssq8" is a Reserved Font Name for this Font Software. +"cmssqi8" is a Reserved Font Name for this Font Software. +"cmsy10" is a Reserved Font Name for this Font Software. +"cmsy5" is a Reserved Font Name for this Font Software. +"cmsy6" is a Reserved Font Name for this Font Software. +"cmsy7" is a Reserved Font Name for this Font Software. +"cmsy8" is a Reserved Font Name for this Font Software. +"cmsy9" is a Reserved Font Name for this Font Software. +"cmtcsc10" is a Reserved Font Name for this Font Software. +"cmtex10" is a Reserved Font Name for this Font Software. +"cmtex8" is a Reserved Font Name for this Font Software. +"cmtex9" is a Reserved Font Name for this Font Software. +"cmti10" is a Reserved Font Name for this Font Software. +"cmti12" is a Reserved Font Name for this Font Software. +"cmti7" is a Reserved Font Name for this Font Software. 
+"cmti8" is a Reserved Font Name for this Font Software. +"cmti9" is a Reserved Font Name for this Font Software. +"cmtt10" is a Reserved Font Name for this Font Software. +"cmtt12" is a Reserved Font Name for this Font Software. +"cmtt8" is a Reserved Font Name for this Font Software. +"cmtt9" is a Reserved Font Name for this Font Software. +"cmu10" is a Reserved Font Name for this Font Software. +"cmvtt10" is a Reserved Font Name for this Font Software. +"euex10" is a Reserved Font Name for this Font Software. +"euex7" is a Reserved Font Name for this Font Software. +"euex8" is a Reserved Font Name for this Font Software. +"euex9" is a Reserved Font Name for this Font Software. +"eufb10" is a Reserved Font Name for this Font Software. +"eufb5" is a Reserved Font Name for this Font Software. +"eufb7" is a Reserved Font Name for this Font Software. +"eufm10" is a Reserved Font Name for this Font Software. +"eufm5" is a Reserved Font Name for this Font Software. +"eufm7" is a Reserved Font Name for this Font Software. +"eurb10" is a Reserved Font Name for this Font Software. +"eurb5" is a Reserved Font Name for this Font Software. +"eurb7" is a Reserved Font Name for this Font Software. +"eurm10" is a Reserved Font Name for this Font Software. +"eurm5" is a Reserved Font Name for this Font Software. +"eurm7" is a Reserved Font Name for this Font Software. +"eusb10" is a Reserved Font Name for this Font Software. +"eusb5" is a Reserved Font Name for this Font Software. +"eusb7" is a Reserved Font Name for this Font Software. +"eusm10" is a Reserved Font Name for this Font Software. +"eusm5" is a Reserved Font Name for this Font Software. +"eusm7" is a Reserved Font Name for this Font Software. +"lasy10" is a Reserved Font Name for this Font Software. +"lasy5" is a Reserved Font Name for this Font Software. +"lasy6" is a Reserved Font Name for this Font Software. +"lasy7" is a Reserved Font Name for this Font Software. 
+"lasy8" is a Reserved Font Name for this Font Software. +"lasy9" is a Reserved Font Name for this Font Software. +"lasyb10" is a Reserved Font Name for this Font Software. +"lcircle1" is a Reserved Font Name for this Font Software. +"lcirclew" is a Reserved Font Name for this Font Software. +"lcmss8" is a Reserved Font Name for this Font Software. +"lcmssb8" is a Reserved Font Name for this Font Software. +"lcmssi8" is a Reserved Font Name for this Font Software. +"line10" is a Reserved Font Name for this Font Software. +"linew10" is a Reserved Font Name for this Font Software. +"msam10" is a Reserved Font Name for this Font Software. +"msam5" is a Reserved Font Name for this Font Software. +"msam6" is a Reserved Font Name for this Font Software. +"msam7" is a Reserved Font Name for this Font Software. +"msam8" is a Reserved Font Name for this Font Software. +"msam9" is a Reserved Font Name for this Font Software. +"msbm10" is a Reserved Font Name for this Font Software. +"msbm5" is a Reserved Font Name for this Font Software. +"msbm6" is a Reserved Font Name for this Font Software. +"msbm7" is a Reserved Font Name for this Font Software. +"msbm8" is a Reserved Font Name for this Font Software. +"msbm9" is a Reserved Font Name for this Font Software. +"wncyb10" is a Reserved Font Name for this Font Software. +"wncyi10" is a Reserved Font Name for this Font Software. +"wncyr10" is a Reserved Font Name for this Font Software. +"wncysc10" is a Reserved Font Name for this Font Software. +"wncyss10" is a Reserved Font Name for this Font Software. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. 
+This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. 
+ +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. 
+ +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_BAKOMA b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_BAKOMA new file mode 100644 index 000000000..801e20cd7 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_BAKOMA @@ -0,0 +1,40 @@ + + BaKoMa Fonts Licence + -------------------- + + This licence covers two font packs (known as BaKoMa Fonts Colelction, + which is available at `CTAN:fonts/cm/ps-type1/bakoma/'): + + 1) BaKoMa-CM (1.1/12-Nov-94) + Computer Modern Fonts in PostScript Type 1 and TrueType font formats. + + 2) BaKoMa-AMS (1.2/19-Jan-95) + AMS TeX fonts in PostScript Type 1 and TrueType font formats. + + Copyright (C) 1994, 1995, Basil K. Malyshev. All Rights Reserved. + + Permission to copy and distribute these fonts for any purpose is + hereby granted without fee, provided that the above copyright notice, + author statement and this permission notice appear in all copies of + these fonts and related documentation. + + Permission to modify and distribute modified fonts for any purpose is + hereby granted without fee, provided that the copyright notice, + author statement, this permission notice and location of original + fonts (http://www.ctan.org/tex-archive/fonts/cm/ps-type1/bakoma) + appear in all copies of modified fonts and related documentation. 
+ + Permission to use these fonts (embedding into PostScript, PDF, SVG + and printing by using any software) is hereby granted without fee. + It is not required to provide any notices about using these fonts. + + Basil K. Malyshev + INSTITUTE FOR HIGH ENERGY PHYSICS + IHEP, OMVT + Moscow Region + 142281 PROTVINO + RUSSIA + + E-Mail: bakoma@mail.ru + or malyshev@mail.ihep.ru + diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_CARLOGO b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_CARLOGO new file mode 100644 index 000000000..8c99c656a --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_CARLOGO @@ -0,0 +1,45 @@ +----> we renamed carlito -> carlogo to comply with the terms <---- + +Copyright (c) 2010-2013 by tyPoland Lukasz Dziedzic with Reserved Font Name "Carlito". + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others. + +The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. 
The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the copyright statement(s). + +"Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment. + +"Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users. 
+ +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission. + +5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE. \ No newline at end of file diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_COLORBREWER b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_COLORBREWER new file mode 100644 index 000000000..568afe883 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_COLORBREWER @@ -0,0 +1,38 @@ +Apache-Style Software License for ColorBrewer Color Schemes + +Version 1.1 + +Copyright (c) 2002 Cynthia Brewer, Mark Harrower, and The Pennsylvania +State University. All rights reserved. Redistribution and use in source +and binary forms, with or without modification, are permitted provided +that the following conditions are met: + +1. 
Redistributions as source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. The end-user documentation included with the redistribution, if any, +must include the following acknowledgment: "This product includes color +specifications and designs developed by Cynthia Brewer +(http://colorbrewer.org/)." Alternately, this acknowledgment may appear in +the software itself, if and wherever such third-party acknowledgments +normally appear. + +3. The name "ColorBrewer" must not be used to endorse or promote products +derived from this software without prior written permission. For written +permission, please contact Cynthia Brewer at cbrewer@psu.edu. + +4. Products derived from this software may not be called "ColorBrewer", +nor may "ColorBrewer" appear in their name, without prior written +permission of Cynthia Brewer. + +THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +CYNTHIA BREWER, MARK HARROWER, OR THE PENNSYLVANIA STATE UNIVERSITY BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_QT4_EDITOR b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_QT4_EDITOR new file mode 100644 index 000000000..1c9d94197 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_QT4_EDITOR @@ -0,0 +1,30 @@ + +Module creating PyQt4 form dialogs/layouts to edit various type of parameters + + +formlayout License Agreement (MIT License) +------------------------------------------ + +Copyright (c) 2009 Pierre Raybaut + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+""" diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_SOLARIZED b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_SOLARIZED new file mode 100644 index 000000000..6e5a0475d --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_SOLARIZED @@ -0,0 +1,20 @@ +https://github.com/altercation/solarized/blob/master/LICENSE +Copyright (c) 2011 Ethan Schoonover + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_STIX b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_STIX new file mode 100644 index 000000000..2f7aeea33 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_STIX @@ -0,0 +1,71 @@ +TERMS AND CONDITIONS + + 1. 
Permission is hereby granted, free of charge, to any person +obtaining a copy of the STIX Fonts-TM set accompanying this license +(collectively, the "Fonts") and the associated documentation files +(collectively with the Fonts, the "Font Software"), to reproduce and +distribute the Font Software, including the rights to use, copy, merge +and publish copies of the Font Software, and to permit persons to whom +the Font Software is furnished to do so same, subject to the following +terms and conditions (the "License"). + + 2. The following copyright and trademark notice and these Terms and +Conditions shall be included in all copies of one or more of the Font +typefaces and any derivative work created as permitted under this +License: + + Copyright (c) 2001-2005 by the STI Pub Companies, consisting of +the American Institute of Physics, the American Chemical Society, the +American Mathematical Society, the American Physical Society, Elsevier, +Inc., and The Institute of Electrical and Electronic Engineers, Inc. +Portions copyright (c) 1998-2003 by MicroPress, Inc. Portions copyright +(c) 1990 by Elsevier, Inc. All rights reserved. STIX Fonts-TM is a +trademark of The Institute of Electrical and Electronics Engineers, Inc. + + 3. You may (a) convert the Fonts from one format to another (e.g., +from TrueType to PostScript), in which case the normal and reasonable +distortion that occurs during such conversion shall be permitted and (b) +embed or include a subset of the Fonts in a document for the purposes of +allowing users to read text in the document that utilizes the Fonts. In +each case, you may use the STIX Fonts-TM mark to designate the resulting +Fonts or subset of the Fonts. + + 4. 
You may also (a) add glyphs or characters to the Fonts, or modify +the shape of existing glyphs, so long as the base set of glyphs is not +removed and (b) delete glyphs or characters from the Fonts, provided +that the resulting font set is distributed with the following +disclaimer: "This [name] font does not include all the Unicode points +covered in the STIX Fonts-TM set but may include others." In each case, +the name used to denote the resulting font set shall not include the +term "STIX" or any similar term. + + 5. You may charge a fee in connection with the distribution of the +Font Software, provided that no copy of one or more of the individual +Font typefaces that form the STIX Fonts-TM set may be sold by itself. + + 6. THE FONT SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK OR OTHER RIGHT. IN NO EVENT SHALL +MICROPRESS OR ANY OF THE STI PUB COMPANIES BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, INCLUDING, BUT NOT LIMITED TO, ANY GENERAL, +SPECIAL, INDIRECT, INCIDENTAL OR CONSEQUENTIAL DAMAGES, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM OR OUT OF THE USE OR +INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT +SOFTWARE. + + 7. Except as contained in the notice set forth in Section 2, the +names MicroPress Inc. and STI Pub Companies, as well as the names of the +companies/organizations that compose the STI Pub Companies, shall not be +used in advertising or otherwise to promote the sale, use or other +dealings in the Font Software without the prior written consent of the +respective company or organization. + + 8. This License shall become null and void in the event of any +material breach of the Terms and Conditions herein by licensee. + + 9. A substantial portion of the STIX Fonts set was developed by +MicroPress Inc. 
for the STI Pub Companies. To obtain additional +mathematical fonts, please contact MicroPress, Inc., 68-30 Harrow +Street, Forest Hills, NY 11375, USA - Phone: (718) 575-1816. + diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_YORICK b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_YORICK new file mode 100644 index 000000000..8c908509a --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/LICENSE_YORICK @@ -0,0 +1,49 @@ +BSD-style license for gist/yorick colormaps. + +Copyright: + + Copyright (c) 1996. The Regents of the University of California. + All rights reserved. + +Permission to use, copy, modify, and distribute this software for any +purpose without fee is hereby granted, provided that this entire +notice is included in all copies of any software which is or includes +a copy or modification of this software and in all copies of the +supporting documentation for such software. + +This work was produced at the University of California, Lawrence +Livermore National Laboratory under contract no. W-7405-ENG-48 between +the U.S. Department of Energy and The Regents of the University of +California for the operation of UC LLNL. + + + DISCLAIMER + +This software was prepared as an account of work sponsored by an +agency of the United States Government. Neither the United States +Government nor the University of California nor any of their +employees, makes any warranty, express or implied, or assumes any +liability or responsibility for the accuracy, completeness, or +usefulness of any information, apparatus, product, or process +disclosed, or represents that its use would not infringe +privately-owned rights. Reference herein to any specific commercial +products, process, or service by trade name, trademark, manufacturer, +or otherwise, does not necessarily constitute or imply its +endorsement, recommendation, or favoring by the United States +Government or the University of California. 
The views and opinions of +authors expressed herein do not necessarily state or reflect those of +the United States Government or the University of California, and +shall not be used for advertising or product endorsement purposes. + + + AUTHOR + +David H. Munro wrote Yorick and Gist. Berkeley Yacc (byacc) generated +the Yorick parser. The routines in Math are from LAPACK and FFTPACK; +MathC contains C translations by David H. Munro. The algorithms for +Yorick's random number generator and several special functions in +Yorick/include were taken from Numerical Recipes by Press, et. al., +although the Yorick implementations are unrelated to those in +Numerical Recipes. A small amount of code in Gist was adapted from +the X11R4 release, copyright M.I.T. -- the complete copyright notice +may be found in the (unused) file Gist/host.c. diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/METADATA b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/METADATA new file mode 100644 index 000000000..a6847ae14 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/METADATA @@ -0,0 +1,140 @@ +Metadata-Version: 2.1 +Name: matplotlib +Version: 3.3.2 +Summary: Python plotting package +Home-page: https://matplotlib.org +Author: John D. 
Hunter, Michael Droettboom +Author-email: matplotlib-users@python.org +License: PSF +Download-URL: https://matplotlib.org/users/installing.html +Project-URL: Documentation, https://matplotlib.org +Project-URL: Source Code, https://github.com/matplotlib/matplotlib +Project-URL: Bug Tracker, https://github.com/matplotlib/matplotlib/issues +Project-URL: Forum, https://discourse.matplotlib.org/ +Project-URL: Donate, https://numfocus.org/donate-to-matplotlib +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Framework :: Matplotlib +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Education +Classifier: License :: OSI Approved :: Python Software Foundation License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Topic :: Scientific/Engineering :: Visualization +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +Requires-Dist: certifi (>=2020.06.20) +Requires-Dist: cycler (>=0.10) +Requires-Dist: kiwisolver (>=1.0.1) +Requires-Dist: numpy (>=1.15) +Requires-Dist: pillow (>=6.2.0) +Requires-Dist: pyparsing (!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3) +Requires-Dist: python-dateutil (>=2.1) + +|PyPi|_ |Downloads|_ |NUMFocus|_ + +|DiscourseBadge|_ |Gitter|_ |GitHubIssues|_ |GitTutorial|_ + +|Travis|_ |AzurePipelines|_ |AppVeyor|_ |Codecov|_ |LGTM|_ + +.. |Travis| image:: https://travis-ci.com/matplotlib/matplotlib.svg?branch=master +.. _Travis: https://travis-ci.com/matplotlib/matplotlib + +.. |AzurePipelines| image:: https://dev.azure.com/matplotlib/matplotlib/_apis/build/status/matplotlib.matplotlib?branchName=master +.. _AzurePipelines: https://dev.azure.com/matplotlib/matplotlib/_build/latest?definitionId=1&branchName=master + +.. 
|AppVeyor| image:: https://ci.appveyor.com/api/projects/status/github/matplotlib/matplotlib?branch=master&svg=true +.. _AppVeyor: https://ci.appveyor.com/project/matplotlib/matplotlib + +.. |Codecov| image:: https://codecov.io/github/matplotlib/matplotlib/badge.svg?branch=master&service=github +.. _Codecov: https://codecov.io/github/matplotlib/matplotlib?branch=master + +.. |LGTM| image:: https://img.shields.io/lgtm/grade/python/g/matplotlib/matplotlib.svg?logo=lgtm&logoWidth=18 +.. _LGTM: https://lgtm.com/projects/g/matplotlib/matplotlib + +.. |DiscourseBadge| image:: https://img.shields.io/badge/help_forum-discourse-blue.svg +.. _DiscourseBadge: https://discourse.matplotlib.org + +.. |Gitter| image:: https://badges.gitter.im/matplotlib/matplotlib.svg +.. _Gitter: https://gitter.im/matplotlib/matplotlib + +.. |GitHubIssues| image:: https://img.shields.io/badge/issue_tracking-github-blue.svg +.. _GitHubIssues: https://github.com/matplotlib/matplotlib/issues + +.. |GitTutorial| image:: https://img.shields.io/badge/PR-Welcome-%23FF8300.svg? +.. _GitTutorial: https://git-scm.com/book/en/v2/GitHub-Contributing-to-a-Project + +.. |PyPi| image:: https://badge.fury.io/py/matplotlib.svg +.. _PyPi: https://badge.fury.io/py/matplotlib + +.. |Downloads| image:: https://pepy.tech/badge/matplotlib/month +.. _Downloads: https://pepy.tech/project/matplotlib/month + +.. |NUMFocus| image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A +.. _NUMFocus: https://numfocus.org + +.. image:: https://matplotlib.org/_static/logo2.svg + +Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations in Python. + +Check out our `home page `_ for more information. + +.. image:: https://matplotlib.org/_static/readme_preview.png + +Matplotlib produces publication-quality figures in a variety of hardcopy formats +and interactive environments across platforms. 
Matplotlib can be used in Python scripts, +the Python and IPython shell, web application servers, and various +graphical user interface toolkits. + + +Install +======= + +For installation instructions and requirements, see `INSTALL.rst `_ or the +`install `_ documentation. + +Test +==== + +After installation, launch the test suite:: + + python -m pytest + +Read the `testing guide `_ for more information and alternatives. + +Contribute +========== +You've discovered a bug or something else you want to change - excellent! + +You've worked out a way to fix it – even better! + +You want to tell us about it – best of all! + +Start at the `contributing guide `_! + +Contact +======= + +`Discourse `_ is the discussion forum for general questions and discussions and our recommended starting point. + +Our active mailing lists (which are mirrored on Discourse) are: + +* `Users `_ mailing list: matplotlib-users@python.org +* `Announcement `_ mailing list: matplotlib-announce@python.org +* `Development `_ mailing list: matplotlib-devel@python.org + +Gitter_ is for coordinating development and asking questions directly related +to contributing to matplotlib. + + +Citing Matplotlib +================= +If Matplotlib contributes to a project that leads to publication, please +acknowledge this by citing Matplotlib. + +`A ready-made citation entry `_ is available. 
+ + diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/RECORD b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/RECORD new file mode 100644 index 000000000..3d20b01fc --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/RECORD @@ -0,0 +1,822 @@ +__pycache__/pylab.cpython-36.pyc,, +matplotlib-3.3.2-py3.6-nspkg.pth,sha256=g9pwhlfLQRispACfr-Zaah4Psceyhyx9K_qv929IpMo,570 +matplotlib-3.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +matplotlib-3.3.2.dist-info/LICENSE,sha256=ojr3trhyymyw7GeYxkhO2JYoAxUVscbgMFz9b1qrcVM,4928 +matplotlib-3.3.2.dist-info/LICENSE_AMSFONTS,sha256=1nBvhOSH8d3ceR8cyG_bknr7Wg4RSmoB5M_HE72FVyE,12915 +matplotlib-3.3.2.dist-info/LICENSE_BAKOMA,sha256=RMmLfO-TQmiuGbo4NwaPETH4OqNTWAzDwDWPTA8ikdg,1480 +matplotlib-3.3.2.dist-info/LICENSE_CARLOGO,sha256=zpO9wbKCiF7rqj4STQcHWjzLj_67kzhx1mzdwjnLdIE,4499 +matplotlib-3.3.2.dist-info/LICENSE_COLORBREWER,sha256=13Q--YD83BybM3nwuFLBxbUKygI9hALtJ8tZZiSQj5I,2006 +matplotlib-3.3.2.dist-info/LICENSE_QT4_EDITOR,sha256=AlDgmC0knGnjFWMZHYO0xVFQ4ws699gKFMSnKueffdM,1260 +matplotlib-3.3.2.dist-info/LICENSE_SOLARIZED,sha256=RrSaK9xcK12Uhuka3LOhEB_QW5ibbYX3kdwCakxULM0,1141 +matplotlib-3.3.2.dist-info/LICENSE_STIX,sha256=I3calycBxqh5ggJcyDvyYU4vu6Qf2bpleUWbTmWKDL4,3985 +matplotlib-3.3.2.dist-info/LICENSE_YORICK,sha256=iw-4fuTKjfpFYXIStZJ_pmLmIuZZWzUIpz6RwIKCSkk,2362 +matplotlib-3.3.2.dist-info/METADATA,sha256=wU6O5V7R_ajbGQFO2-LQWfU_4bb-I7I-BfNB5gDcQ_Y,5700 +matplotlib-3.3.2.dist-info/RECORD,, +matplotlib-3.3.2.dist-info/WHEEL,sha256=ZFeOeZQCWkgYx9PG5WAxk1yIHroxd2erWFNpu0USMOg,102 +matplotlib-3.3.2.dist-info/namespace_packages.txt,sha256=A2PHFg9NKYOU4pEQ1h97U0Qd-rB-65W34XqC-56ZN9g,13 +matplotlib-3.3.2.dist-info/top_level.txt,sha256=9tEw2ni8DdgX8CceoYHqSH1s50vrJ9SDfgtLIG8e3Y4,30 +matplotlib/__init__.py,sha256=nIxcNK_DSZOgT5aHHbanHrmTZ-zGeoG-tv0hTslODgU,51946 +matplotlib/__pycache__/__init__.cpython-36.pyc,, +matplotlib/__pycache__/_animation_data.cpython-36.pyc,, 
+matplotlib/__pycache__/_cm.cpython-36.pyc,, +matplotlib/__pycache__/_cm_listed.cpython-36.pyc,, +matplotlib/__pycache__/_color_data.cpython-36.pyc,, +matplotlib/__pycache__/_constrained_layout.cpython-36.pyc,, +matplotlib/__pycache__/_internal_utils.cpython-36.pyc,, +matplotlib/__pycache__/_layoutbox.cpython-36.pyc,, +matplotlib/__pycache__/_mathtext_data.cpython-36.pyc,, +matplotlib/__pycache__/_pylab_helpers.cpython-36.pyc,, +matplotlib/__pycache__/_text_layout.cpython-36.pyc,, +matplotlib/__pycache__/_version.cpython-36.pyc,, +matplotlib/__pycache__/afm.cpython-36.pyc,, +matplotlib/__pycache__/animation.cpython-36.pyc,, +matplotlib/__pycache__/artist.cpython-36.pyc,, +matplotlib/__pycache__/axis.cpython-36.pyc,, +matplotlib/__pycache__/backend_bases.cpython-36.pyc,, +matplotlib/__pycache__/backend_managers.cpython-36.pyc,, +matplotlib/__pycache__/backend_tools.cpython-36.pyc,, +matplotlib/__pycache__/bezier.cpython-36.pyc,, +matplotlib/__pycache__/blocking_input.cpython-36.pyc,, +matplotlib/__pycache__/category.cpython-36.pyc,, +matplotlib/__pycache__/cm.cpython-36.pyc,, +matplotlib/__pycache__/collections.cpython-36.pyc,, +matplotlib/__pycache__/colorbar.cpython-36.pyc,, +matplotlib/__pycache__/colors.cpython-36.pyc,, +matplotlib/__pycache__/container.cpython-36.pyc,, +matplotlib/__pycache__/contour.cpython-36.pyc,, +matplotlib/__pycache__/dates.cpython-36.pyc,, +matplotlib/__pycache__/docstring.cpython-36.pyc,, +matplotlib/__pycache__/dviread.cpython-36.pyc,, +matplotlib/__pycache__/figure.cpython-36.pyc,, +matplotlib/__pycache__/font_manager.cpython-36.pyc,, +matplotlib/__pycache__/fontconfig_pattern.cpython-36.pyc,, +matplotlib/__pycache__/gridspec.cpython-36.pyc,, +matplotlib/__pycache__/hatch.cpython-36.pyc,, +matplotlib/__pycache__/image.cpython-36.pyc,, +matplotlib/__pycache__/legend.cpython-36.pyc,, +matplotlib/__pycache__/legend_handler.cpython-36.pyc,, +matplotlib/__pycache__/lines.cpython-36.pyc,, +matplotlib/__pycache__/markers.cpython-36.pyc,, 
+matplotlib/__pycache__/mathtext.cpython-36.pyc,, +matplotlib/__pycache__/mlab.cpython-36.pyc,, +matplotlib/__pycache__/offsetbox.cpython-36.pyc,, +matplotlib/__pycache__/patches.cpython-36.pyc,, +matplotlib/__pycache__/path.cpython-36.pyc,, +matplotlib/__pycache__/patheffects.cpython-36.pyc,, +matplotlib/__pycache__/pylab.cpython-36.pyc,, +matplotlib/__pycache__/pyplot.cpython-36.pyc,, +matplotlib/__pycache__/quiver.cpython-36.pyc,, +matplotlib/__pycache__/rcsetup.cpython-36.pyc,, +matplotlib/__pycache__/sankey.cpython-36.pyc,, +matplotlib/__pycache__/scale.cpython-36.pyc,, +matplotlib/__pycache__/spines.cpython-36.pyc,, +matplotlib/__pycache__/stackplot.cpython-36.pyc,, +matplotlib/__pycache__/streamplot.cpython-36.pyc,, +matplotlib/__pycache__/table.cpython-36.pyc,, +matplotlib/__pycache__/texmanager.cpython-36.pyc,, +matplotlib/__pycache__/text.cpython-36.pyc,, +matplotlib/__pycache__/textpath.cpython-36.pyc,, +matplotlib/__pycache__/ticker.cpython-36.pyc,, +matplotlib/__pycache__/tight_bbox.cpython-36.pyc,, +matplotlib/__pycache__/tight_layout.cpython-36.pyc,, +matplotlib/__pycache__/transforms.cpython-36.pyc,, +matplotlib/__pycache__/ttconv.cpython-36.pyc,, +matplotlib/__pycache__/type1font.cpython-36.pyc,, +matplotlib/__pycache__/units.cpython-36.pyc,, +matplotlib/__pycache__/widgets.cpython-36.pyc,, +matplotlib/_animation_data.py,sha256=VBdNJx3GOBmEPjZohLW5uQlNKl90R0j1ojXO7EI0Gvc,7991 +matplotlib/_cm.py,sha256=UhH08HifUQWul3EhJfiuqv2lkWtLSLDDWLU28sC77SI,68000 +matplotlib/_cm_listed.py,sha256=hA_9d8M187heFixIHIY9ywETb7eIOr9Ei4cDOx1y_pc,111533 +matplotlib/_color_data.py,sha256=ZnJq9AKcEl_cviGVCk5f4K6fT0nYB-iv-1k8xiNCOj4,36094 +matplotlib/_constrained_layout.py,sha256=WLLx5fVfh7WPGkji0XpgdEjWwCKbatjrsO7m2rFimAk,28080 +matplotlib/_contour.cp36-win32.pyd,sha256=4y-EuRAdzQ4W4uJMFzrBzw-1aaA2SGyzMDpYYOa84Cw,52224 +matplotlib/_image.cp36-win32.pyd,sha256=Vldi0gw2tqIgyncKde5SZ8Rx2u4EexNia8Ir89zaWGs,148992 
+matplotlib/_internal_utils.py,sha256=MI2ymzqrQ1IH2yy6-n9mtm765vzgGSdEx7myejgbzt4,2204 +matplotlib/_layoutbox.py,sha256=5fdldA1l9qkkvHDbOA0-8hOGDLfokfEm6YCG-R8_N1w,24329 +matplotlib/_mathtext_data.py,sha256=HPbLPHO8Fr2ALu_7bx-ZdpVtV5QS4Lqhhwad27LCJm4,57820 +matplotlib/_path.cp36-win32.pyd,sha256=Ucd9kfkr6SGocfPxXRfpft8TKxyZpL3kOU0507EOgcY,130560 +matplotlib/_pylab_helpers.py,sha256=u_A2kpsSrzzHQPZJfrLy_OnoT8ZnERR2PNP5vR-tFHc,4641 +matplotlib/_qhull.cp36-win32.pyd,sha256=pYhhw2h-CsFFronZMBD3II3XUjoOj8Zo8XR3kTMqUWc,332288 +matplotlib/_text_layout.py,sha256=VWUKACJHQDffzTPDZDbOuSbrc05tVWxo1FgyzBKh-WI,1074 +matplotlib/_tri.cp36-win32.pyd,sha256=RU3vff2PDrfc9L6-bf2DJ4QYMnAkhlRo6oIvdwGsGdA,78848 +matplotlib/_ttconv.cp36-win32.pyd,sha256=EefLNliYDD3wvroO7JJX1DB-5ITYIN43Pn5za3l0JxA,53760 +matplotlib/_version.py,sha256=WJ_oZxE8b3IVAW0mlwjXTUofXtcKFkt-zBpxF-eEwPI,492 +matplotlib/afm.py,sha256=XdHzjPPuOS0nyqggqn34gMtVuRRAG7QKx6bFaMt7x9s,17105 +matplotlib/animation.py,sha256=nuxILluQSmMikEb17q00B_YPeT7TX8TTzB7--mHWUyU,68982 +matplotlib/artist.py,sha256=sD_FjY3598zk2IRVtYnLoUaCRi4FOQE9HtCqzSK2oww,55635 +matplotlib/axes/__init__.py,sha256=5LED7GJ0OxIdJfsHtnC1IYjxF9SNkyQg7UpmCkBT76k,48 +matplotlib/axes/__pycache__/__init__.cpython-36.pyc,, +matplotlib/axes/__pycache__/_axes.cpython-36.pyc,, +matplotlib/axes/__pycache__/_base.cpython-36.pyc,, +matplotlib/axes/__pycache__/_secondary_axes.cpython-36.pyc,, +matplotlib/axes/__pycache__/_subplots.cpython-36.pyc,, +matplotlib/axes/_axes.py,sha256=0zBbRvKDo-yRMtKm4CvRpTS2xQ7W1YHSE1AijJeLHAM,321258 +matplotlib/axes/_base.py,sha256=ZJKTPFPvMmuNWNwOT2wv3AgXu605ZufZNw4cCCFKtqM,163369 +matplotlib/axes/_secondary_axes.py,sha256=NySpuooPhdX0QfBL698jg0jEkToTBW21YXP5DDbut0Q,13746 +matplotlib/axes/_subplots.py,sha256=JSpGTJYOy_XQygVwdBNlm_1ZvXhlSmsF42nUHUn9mV4,9900 +matplotlib/axis.py,sha256=s6O_vHWKGVIIDfNTxgz6UoGGvOjJHMLweyir699ZpII,94935 +matplotlib/backend_bases.py,sha256=loykn9lQUPhO8QqxhoD5WYvHgWBzzRlpt2bjGJWHqEc,130875 
+matplotlib/backend_managers.py,sha256=jeG3MrkovNCz7GZshQWKmHJx0B6cWlx_nX9Giq8VPBU,14303 +matplotlib/backend_tools.py,sha256=F83TpsNO3IHHQyzkgrG2D0nDsSCb5CPM8hZdsLYT32c,36037 +matplotlib/backends/__init__.py,sha256=ASrypuHdJgwLQwfr7X9ou0hlJohw_7V4t8CmpazOY7I,109 +matplotlib/backends/__pycache__/__init__.cpython-36.pyc,, +matplotlib/backends/__pycache__/_backend_pdf_ps.cpython-36.pyc,, +matplotlib/backends/__pycache__/_backend_tk.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_agg.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_cairo.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_gtk3.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_gtk3agg.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_gtk3cairo.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_macosx.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_mixed.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_nbagg.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_pdf.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_pgf.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_ps.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_qt4.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_qt4agg.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_qt4cairo.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_qt5.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_qt5agg.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_qt5cairo.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_svg.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_template.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_tkagg.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_tkcairo.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_webagg.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_webagg_core.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_wx.cpython-36.pyc,, 
+matplotlib/backends/__pycache__/backend_wxagg.cpython-36.pyc,, +matplotlib/backends/__pycache__/backend_wxcairo.cpython-36.pyc,, +matplotlib/backends/__pycache__/qt_compat.cpython-36.pyc,, +matplotlib/backends/_backend_agg.cp36-win32.pyd,sha256=F8MjE11SPSBius1sxWbFc184UoQUhrKp7uiJyfw3tfQ,166912 +matplotlib/backends/_backend_pdf_ps.py,sha256=4VlQejw7QYo8p0TGvemnZ-YrN_NUj0goRGkzYJRvooM,3895 +matplotlib/backends/_backend_tk.py,sha256=K0RrP8cPd8lE4auSpxIr1ty8xfPeZWw4t1y6hLhFJv0,33775 +matplotlib/backends/_tkagg.cp36-win32.pyd,sha256=6-4HlFU9csX5sUu31GkRYWlMGtvN0oSOZ6_z2wDOM98,21504 +matplotlib/backends/backend_agg.py,sha256=JkmOGb7mEGju_m08VSygQv_J-fl8okg5sbkUDlbRrkg,23621 +matplotlib/backends/backend_cairo.py,sha256=r03YcTMYdwJV1UbtHcd4xcgVhdHi3x_exaZc3pJFzPc,19219 +matplotlib/backends/backend_gtk3.py,sha256=y5sIGimTq7fKI2LYaxY0OETsAn8NXADye9yuNM5UT0U,34074 +matplotlib/backends/backend_gtk3agg.py,sha256=CZynWriGhHAZfcqDQcwJ3b0ZMgn-Lj6ZVHBDgs0OcUo,2890 +matplotlib/backends/backend_gtk3cairo.py,sha256=Jv4_AoYuQxm-E_ycTw_WZY24Sl40dXGqgM_AbcvfLt0,1391 +matplotlib/backends/backend_macosx.py,sha256=rgqh_ULdJrbqzYyAkKDvxZ5iMnvHhuKHA0SxZO63QoI,5849 +matplotlib/backends/backend_mixed.py,sha256=l27mVBO_YjJi8x1Qzy4qrGy68lSdOv1U74peIymhjv0,5323 +matplotlib/backends/backend_nbagg.py,sha256=2n9tvHQMlumnq6RhrJpY8pPxMDQ3W6I186d5D4G5pFE,8814 +matplotlib/backends/backend_pdf.py,sha256=vpvhoxBZ19g9FWf--XL3j4WsfqqmTe39EZGuG4DMieA,99623 +matplotlib/backends/backend_pgf.py,sha256=3vP15NPoJS8H8TMsj5xM-YJ8j9PCdLGRvLsCk0J5UYs,44633 +matplotlib/backends/backend_ps.py,sha256=Dya3uEPWB23cXJo4zw48Wzto7XzghkfO_cXD3sBBRmI,48079 +matplotlib/backends/backend_qt4.py,sha256=jfPKB0_c44yxOEtFVyCQF278b0FcP3mzVwFqx9hjvwI,528 +matplotlib/backends/backend_qt4agg.py,sha256=i6tgNDEB2Wn8-8VAzoMOIwnprk8tWtw7hF6aJgijFqs,395 +matplotlib/backends/backend_qt4cairo.py,sha256=GI-7aWPfK3m-g8SboesEnX9xbJKjGf235kG-ZjEjojE,327 +matplotlib/backends/backend_qt5.py,sha256=qUPBeWmTV_-NRsTYyswY7k4TQrZ4ibAcc2kw_88eNHE,40269 
+matplotlib/backends/backend_qt5agg.py,sha256=csXMx4VBEEhoaSHlTyyZs4haGJK1a4NnTx9ROtwzAro,3247 +matplotlib/backends/backend_qt5cairo.py,sha256=3Nw5QyHNOVF4HZ5L7VRodePqjVjPeOSIm9xjQJSbP_U,1865 +matplotlib/backends/backend_svg.py,sha256=9Rc3YiBycRIqU9wOGPDQoZ4L1-WtjTs4_T1jhru47gg,50534 +matplotlib/backends/backend_template.py,sha256=pmoNUPWnZ239xqdD3513o1aFT6mX2S-6azoqiqTIaYI,8674 +matplotlib/backends/backend_tkagg.py,sha256=D_GuTCki9bCjRvsTIrwH5KBqR0t3YS9dHDUVWpZvPf8,697 +matplotlib/backends/backend_tkcairo.py,sha256=9Qw6AlLvt92blm9-J9fIp5-Rsh0NmvHZtZNIiT6lxzI,1100 +matplotlib/backends/backend_webagg.py,sha256=DQgTUyabZ295QJixlbamxa2Vfa9feKUW782d_DDD0cc,11384 +matplotlib/backends/backend_webagg_core.py,sha256=X25yA6B1M3umY0HTos8Q4_HWgGgeumQaJh9FY_lU7Wo,18565 +matplotlib/backends/backend_wx.py,sha256=QSvZ4BXvgyGPyS387WyxlGkiDT2ieR6qOh740_C9Dr4,62340 +matplotlib/backends/backend_wxagg.py,sha256=je8nTv--IruEOs0KgiJOfl1sEo7wBbkkquqiPFaUKOo,3024 +matplotlib/backends/backend_wxcairo.py,sha256=fsDGrdOKng2qGlN1gOHa6_6UgfNiXWUpu_CO_A67fqc,1880 +matplotlib/backends/qt_compat.py,sha256=xt1i9q0WGqd3vIiSPFLn0f7VBHkX6WmjhyG3qg_bVc8,8289 +matplotlib/backends/qt_editor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +matplotlib/backends/qt_editor/__pycache__/__init__.cpython-36.pyc,, +matplotlib/backends/qt_editor/__pycache__/_formlayout.cpython-36.pyc,, +matplotlib/backends/qt_editor/__pycache__/_formsubplottool.cpython-36.pyc,, +matplotlib/backends/qt_editor/__pycache__/figureoptions.cpython-36.pyc,, +matplotlib/backends/qt_editor/__pycache__/formsubplottool.cpython-36.pyc,, +matplotlib/backends/qt_editor/_formlayout.py,sha256=3YMhRLpmiPygAp69FDzNNNE7tMft5Idq6uBdWj4PDUE,21150 +matplotlib/backends/qt_editor/_formsubplottool.py,sha256=IJRLiHJYNay9dq8soSR30RpzsDy2QGK_VM-de-skcMM,1547 +matplotlib/backends/qt_editor/figureoptions.py,sha256=Bv1fpNcBEGJZNaqaJ8fEfZThl4cxxlnlMAPl5HBpE6g,9731 
+matplotlib/backends/qt_editor/formsubplottool.py,sha256=duxTyAhD8XFzgZ9Fnw6a4ZM34QDDEZweabXE4up2jPg,244 +matplotlib/backends/web_backend/.eslintrc.js,sha256=Dv3YGyMCOxbDobwrxr332zNYMCxb6s_o07kQeizIko8,698 +matplotlib/backends/web_backend/.prettierignore,sha256=fhFE5YEVNHXvenOGu5fVvhzhGEMjutAocXz36mDB0iw,104 +matplotlib/backends/web_backend/.prettierrc,sha256=Yz-e2yrtBxjx8MeDh7Z55idCjKgOxGZwSe6PQJo-4z0,156 +matplotlib/backends/web_backend/all_figures.html,sha256=4iWdKDVq2wj-ox_wGB6jT_5a1XIZfx7cCSxptQr11_U,1669 +matplotlib/backends/web_backend/css/boilerplate.css,sha256=y2DbHYWFOmDcKhmUwACIwgZdL8mRqldKiQfABqlrCtA,2387 +matplotlib/backends/web_backend/css/fbm.css,sha256=-5wOcfCz-3RLDtEhOPo855AyDpxEnhN6hjeKuQi7ALE,1570 +matplotlib/backends/web_backend/css/mpl.css,sha256=VfGbqCCnb-3ZCSgUyv1PcmfEPvank9340v-F2oyhapw,1695 +matplotlib/backends/web_backend/css/page.css,sha256=qCCXiXJvwyM3zKpOlrhndn2kZl0CpOcd2ZDXA4JlLwo,1707 +matplotlib/backends/web_backend/ipython_inline_figure.html,sha256=C4mEsVfrNuy5K92pzLtrgw0wP_XG_-2h74CTngsPvCQ,1345 +matplotlib/backends/web_backend/js/mpl.js,sha256=NNW31FIjoKLrtvucppMZ5qON3n8oowAN91Q1yW28Tx0,21583 +matplotlib/backends/web_backend/js/mpl_tornado.js,sha256=k3JjkEWP-C2DCk8qrA3JPXFRl8Xu6gCf7zMtBZO4s5c,310 +matplotlib/backends/web_backend/js/nbagg_mpl.js,sha256=UJ_93YaMoP1wv1cUJOEOkJX9ybl55MMYsA0UazFDMfU,9057 +matplotlib/backends/web_backend/nbagg_uat.ipynb,sha256=m_Bmbn9FfF1jUCvk_7C_x_Ho6uzvj7KRayRhNsehIgQ,16572 +matplotlib/backends/web_backend/package.json,sha256=01wvCHHnw0mTVKiKLo3WHOtd5DxM6dBuHfO-xXpc7yw,493 +matplotlib/backends/web_backend/single_figure.html,sha256=Tv4FxHdVi872xGsC_WHeOsR9zCUmWNR4KoqEur9nTJ4,1276 +matplotlib/bezier.py,sha256=lY6ZggspGISUWtHcgQ0FViih5NicvCeij6Uai0rSN8c,20094 +matplotlib/blocking_input.py,sha256=94vk5DbqlDiXbMRRvN8XwdsCPwv26h_Hd_2YEAO6mdQ,11657 +matplotlib/category.py,sha256=xm5YGFw4Md1HldRFmBv39XmS4UTVX3iIhVrF2DOMkD4,7392 
+matplotlib/cbook/__init__.py,sha256=x57PztycDvnYq5w0IYu4hBWSbkduJxGfJY-38I1ukN4,79579 +matplotlib/cbook/__pycache__/__init__.cpython-36.pyc,, +matplotlib/cbook/__pycache__/deprecation.cpython-36.pyc,, +matplotlib/cbook/deprecation.py,sha256=5KlE9p5XReOGJQUk7-3J79za0exp1KxkZSr-On5AIYI,19409 +matplotlib/cm.py,sha256=HIcWq9v_Szs5HpocwBN6-UD5fweBxxHYN0xZPE6XhG8,17227 +matplotlib/collections.py,sha256=bc5s7JGcmjtZfjF-_Cr4llq2We5IaTgcL50TWn2SJDs,79077 +matplotlib/colorbar.py,sha256=373Lgskpxq1aWbNNM3wtUAk0-OL4MuSZ_AeCwiDt4ng,67375 +matplotlib/colors.py,sha256=TnS5YJn_Q6prkRQ24Kltxa-6J3WLwNiokWwxpNmKunc,80447 +matplotlib/compat/__init__.py,sha256=jbHKhpHmaPogoOMoo6GENDl6oHffkq6GMgR9Nwpo0HY,98 +matplotlib/compat/__pycache__/__init__.cpython-36.pyc,, +matplotlib/container.py,sha256=QJuTObESj229OcUGUYeD0DLxG_ssk6cAdfVymk_crwk,4440 +matplotlib/contour.py,sha256=Nmx3BG7pF6-u-lmNZdh13BdOlmpJWbohnNRvvczpDRo,70901 +matplotlib/dates.py,sha256=Qyv_FbS6VX5lRmeQ_dKzdJch8b0q5b2ptI4qVzaHeRA,69193 +matplotlib/docstring.py,sha256=ya7wa0PWtxKSw4KDSkF7dgI8dQq3QLGx7_Exkg4HR_4,2516 +matplotlib/dviread.py,sha256=UJ2T2_lzNG7b4TcnD1Aro841NmLTDQt1H32Tuzv_1qQ,41351 +matplotlib/figure.py,sha256=V37mwC3NOoi_JNEGnNDkQ-YV6hxOrxbNAWrhlC9Vs38,108822 +matplotlib/font_manager.py,sha256=5ut9rsHGXJKj6QATPrAA3obiFVywZoEm3RSMRfDcBBE,50225 +matplotlib/fontconfig_pattern.py,sha256=J5-yG_qyi4ACRcDVw7aopFv3raii8QC4DhRPnhLkb5Q,6860 +matplotlib/ft2font.cp36-win32.pyd,sha256=JWaKXGkwMLwGugb74n0AfAtYgfnMu0gQ1sKtOm43qEc,498688 +matplotlib/gridspec.py,sha256=2ikhEOv4bo_l7sGHgw946qaRr1VsRZiaXoRHIIxbUns,33890 +matplotlib/hatch.py,sha256=6CvCCNXRK5rOfm5QXOZPOw3sqy2exoMJcgF9CrFBmu4,7098 +matplotlib/image.py,sha256=kat4dB1TGQGs9J5D11vozK40lMp754HdVxm1hG6JEx8,68443 +matplotlib/legend.py,sha256=5nVA1rQ0O1ocZcdwHab-SDZSkdQpfOQVj7-Owzws1-w,48832 +matplotlib/legend_handler.py,sha256=LkImW6-iq3Mmdg8HhO332bPRG52CyF0PzJFJC4GURM0,26940 +matplotlib/lines.py,sha256=fe1BMCt7eIjzxhFJMKfa1cTeUOjgHKXIcp4vtZLBlGU,53073 
+matplotlib/markers.py,sha256=2uZMUiLqcllco5wDagOR6KTrcC1xFktXvKAHPA3IY1E,32895 +matplotlib/mathtext.py,sha256=kpI5WS3lOv87sxezsfq1_oU-IyxMMzgC2M-HsijHLu8,123994 +matplotlib/mlab.py,sha256=c-Z_VxzBCq70gpS_CeAK41krVKWsAbogdD_WDwKNAdc,36780 +matplotlib/mpl-data/fonts/afm/cmex10.afm,sha256=zdDttyyqQ6Aa5AMVWowpNWgEksTX5KUAFqUjFMTBiUc,12290 +matplotlib/mpl-data/fonts/afm/cmmi10.afm,sha256=dCq-QWC9Vl4WmcD9IOi-CMMB-vmVj8VBTOJS20ODMC0,10742 +matplotlib/mpl-data/fonts/afm/cmr10.afm,sha256=bGb6XAS-H48vh-vmvrF0lMunP1c-RGNB3Uzm1Am9vew,10444 +matplotlib/mpl-data/fonts/afm/cmsy10.afm,sha256=lxhR0CjcTVxKsQ4GPe8ypC4DdYCbn5j4IXlq2QTAcDM,8490 +matplotlib/mpl-data/fonts/afm/cmtt10.afm,sha256=kFzBQ0WX0GZBss0jl_MogJ7ZvECCE8MnLpX58IFRUFU,6657 +matplotlib/mpl-data/fonts/afm/pagd8a.afm,sha256=_-81-K4IGcnEXZmOqkIMyL42gBRcxMEuk8N8onDtLIM,17759 +matplotlib/mpl-data/fonts/afm/pagdo8a.afm,sha256=GrsbRiN4fuoPK-SiaMngnmi5KyZC_nOFf_LYFk_Luxg,17831 +matplotlib/mpl-data/fonts/afm/pagk8a.afm,sha256=Fjz-OUzE9qB-MCosDuUrBnMq8BXmldx6j_zgj2mKc1k,17814 +matplotlib/mpl-data/fonts/afm/pagko8a.afm,sha256=pS716alw6ytmYYSRnz6hPvb1BZPlq3aUiViJFYagsHk,17919 +matplotlib/mpl-data/fonts/afm/pbkd8a.afm,sha256=WOJW5hnEnwBQGQsVxtdXI2PJ1m8qwF-xC4n6aD3-hRI,15572 +matplotlib/mpl-data/fonts/afm/pbkdi8a.afm,sha256=6D2SRhcYc-apq_Qq62bj_FwU_uHxIK09Tt_wjSSJS7c,15695 +matplotlib/mpl-data/fonts/afm/pbkl8a.afm,sha256=_ZJuFBKoz70E-SBlhG4Tb9Yqxm3I6D-jALpy-R-vcew,15407 +matplotlib/mpl-data/fonts/afm/pbkli8a.afm,sha256=1_6b55YPDXk9a3RUnNvja7y2iR3P6BKnh6DvkPnZ5pA,15591 +matplotlib/mpl-data/fonts/afm/pcrb8a.afm,sha256=5CDJe3t71aM5qinDYSE8svWhc6RjteZPNslOvNVp8NA,15696 +matplotlib/mpl-data/fonts/afm/pcrbo8a.afm,sha256=jNfbbBHfwvu0RglxuLeuh1K10KWEkj1lEVJR_hXQSWE,15766 +matplotlib/mpl-data/fonts/afm/pcrr8a.afm,sha256=Hx4X9kbsRm_S3eo9WLjuOQzuOmeZHO33b6uhDTsH1NQ,15683 +matplotlib/mpl-data/fonts/afm/pcrro8a.afm,sha256=DLrBm4iOvjCpKBnyC7yGAO4fdIvRUCO0uV_kQiy9VfQ,15787 
+matplotlib/mpl-data/fonts/afm/phvb8a.afm,sha256=PR_ybr2HVx6aOXVRza7a-255AtGEyDwj_pC_xK1VH1o,17725 +matplotlib/mpl-data/fonts/afm/phvb8an.afm,sha256=pFHdjRgEoKxxmlf1PcTc-3Hyzh1Fzz2Xe2A8KzT3JuA,17656 +matplotlib/mpl-data/fonts/afm/phvbo8a.afm,sha256=4ocMbnfWYxd-YhpzbWPDRzDdckBRIlEHPZfAORFiaZQ,17800 +matplotlib/mpl-data/fonts/afm/phvbo8an.afm,sha256=cOAehWQUbLAtfhcWjTgZc8aLvKo_cWom0JdqmKDPsTo,17765 +matplotlib/mpl-data/fonts/afm/phvl8a.afm,sha256=QTqJU4cVVtbvZhqGXFAMRNyZxlJtmq6HE6UIbh6vLYE,16072 +matplotlib/mpl-data/fonts/afm/phvlo8a.afm,sha256=fAEd2GRQzamnndBw4ARWkJNKIBgmW24Jvo4wZDUVPRg,16174 +matplotlib/mpl-data/fonts/afm/phvr8a.afm,sha256=7G6gNk10zsb_wkQ2qov_SuIMtspNafAGppFQ9V-7Fmo,18451 +matplotlib/mpl-data/fonts/afm/phvr8an.afm,sha256=9TCWRgRyCgpwpKiF98j10hq9mHjdGv09aU96mcgfx2k,18393 +matplotlib/mpl-data/fonts/afm/phvro8a.afm,sha256=9eXW8tsJO-8iw98HCd9H7sIbG5d3fQ-ik5-XXXMkm-8,18531 +matplotlib/mpl-data/fonts/afm/phvro8an.afm,sha256=Rgr4U-gChgMcWU5VyFMwPg2gGXB5D9DhrbtYtWb7jSE,18489 +matplotlib/mpl-data/fonts/afm/pncb8a.afm,sha256=ZAfYR6gDZoTjPw1X9CKXAKhdGZzgSlDfdmxFIAaNMP0,16500 +matplotlib/mpl-data/fonts/afm/pncbi8a.afm,sha256=h6gIWhFKh3aiUMuA4QPCKN39ja1CJWDnagz-rLJWeJA,18098 +matplotlib/mpl-data/fonts/afm/pncr8a.afm,sha256=wqphv_7-oIEDrGhzQspyDeFD07jzAs5uaSbLnrZ53q0,17189 +matplotlib/mpl-data/fonts/afm/pncri8a.afm,sha256=mXZEWq-pTgsOG8_Nx5F52DMF8RB63RzAVOnH3QcRWPo,17456 +matplotlib/mpl-data/fonts/afm/pplb8a.afm,sha256=Yd8M-qXEemyVsBt4OY7vKYqr7Yc_KfRnb2E505ij3Ts,16096 +matplotlib/mpl-data/fonts/afm/pplbi8a.afm,sha256=BLReUiSSOvoiaTymK8hwqWCR2ndXJR2U2FAU2YKVhgM,16251 +matplotlib/mpl-data/fonts/afm/pplr8a.afm,sha256=OdM7mp--HfFWfp9IwolGoviuHphoATNFl88OG3h3Uw8,16197 +matplotlib/mpl-data/fonts/afm/pplri8a.afm,sha256=n0_vo-JC8voK6FKHvnZCygzsvTfNllQRya3L0dtTRZY,16172 +matplotlib/mpl-data/fonts/afm/psyr.afm,sha256=HItpBqCppGKaLaLUdijTZ31jzUV13UVEohYVUPSk1Kc,9853 +matplotlib/mpl-data/fonts/afm/ptmb8a.afm,sha256=td8VINDw_7_X3U6dHLcNxT-_2wU_CqaTSntSW696-M8,18631 
+matplotlib/mpl-data/fonts/afm/ptmbi8a.afm,sha256=ZbK1H28xxIwcZGSqGR6iVtpMrZ15LRN1OfVLpt1yiZ8,18718 +matplotlib/mpl-data/fonts/afm/ptmr8a.afm,sha256=lwDiLF8RkJ46RoVFY0qEQ9J_h54cQHF6MCRx3ehLG_Y,18590 +matplotlib/mpl-data/fonts/afm/ptmri8a.afm,sha256=rYNz084EPuCgdbZcIvuaXA77ozg5sOgmGZjwpFzDGKQ,18716 +matplotlib/mpl-data/fonts/afm/putb8a.afm,sha256=RJuuhzr-dyocKMX4pgC9UoK3Ive2bnWtk74Oyx7ZBPk,22537 +matplotlib/mpl-data/fonts/afm/putbi8a.afm,sha256=1gyQknXqpiVmyKrcRhmenSJrP0kot5h84HmWtDQ3Leg,22948 +matplotlib/mpl-data/fonts/afm/putr8a.afm,sha256=Y_v97ZJKRPfPI2cFuFstBtBxRbAf3AK-hHYDVswMtdA,23177 +matplotlib/mpl-data/fonts/afm/putri8a.afm,sha256=Hxu2gSVpV93zKO6ecqtZSJZHQyE1xhwcUnh-RJ8TYPo,22899 +matplotlib/mpl-data/fonts/afm/pzcmi8a.afm,sha256=BhzLcod8-nVd2MWeZsO_GoZUXso4OhQktIZ2e7txJY8,16730 +matplotlib/mpl-data/fonts/afm/pzdr.afm,sha256=a7-NgSTSEyakts84h-hXLrbRrxmFhxr_NR51bhOLZYQ,9689 +matplotlib/mpl-data/fonts/pdfcorefonts/Courier-Bold.afm,sha256=rQFQ1L7cyId3Qr-UJR_OwT40jdWZ1GA_Z52SAn0ebpk,15675 +matplotlib/mpl-data/fonts/pdfcorefonts/Courier-BoldOblique.afm,sha256=y4LmnvX21CHo9AT-ALsNmTQlqueXJTMdDr-EfpTpfpI,15741 +matplotlib/mpl-data/fonts/pdfcorefonts/Courier-Oblique.afm,sha256=snEDsqLvYDDBEGJll-KrR7uCeQdaA5q5Fw-ssKofcOE,15783 +matplotlib/mpl-data/fonts/pdfcorefonts/Courier.afm,sha256=Uh4NfHUh79S-eKWpxTmOTGfQdx45YRWwNGvE73StpT0,15677 +matplotlib/mpl-data/fonts/pdfcorefonts/Helvetica-Bold.afm,sha256=uIDZa69W0MwFnyWPYLTXZO9JtVWrnbKUuVnAAW3uQfI,72096 +matplotlib/mpl-data/fonts/pdfcorefonts/Helvetica-BoldOblique.afm,sha256=aZhKNcomlzo58mHPg-DTZ-ouZRfFkLCwMNTUohnZwmk,72192 +matplotlib/mpl-data/fonts/pdfcorefonts/Helvetica-Oblique.afm,sha256=tGCbcbZgo5KsCd81BgJxqHbCzmZhfdg7-Cb3QrudlyE,77443 +matplotlib/mpl-data/fonts/pdfcorefonts/Helvetica.afm,sha256=2jPxhwR0yOaL_j4jU_8QerbG7qH5g2ziqvHhoHsXmC8,77343 +matplotlib/mpl-data/fonts/pdfcorefonts/Symbol.afm,sha256=PSEoqCA3WhDem8i_bPsV3tSCwByg-VzAsyd_N-yL3mY,9953 
+matplotlib/mpl-data/fonts/pdfcorefonts/Times-Bold.afm,sha256=tKAA7YXLIsbN2YWqD9P2947VB5t8aGDcTwI04NDjxSI,66839 +matplotlib/mpl-data/fonts/pdfcorefonts/Times-BoldItalic.afm,sha256=k8R0S6lVIV3gLEquC3dxM0Qq2i96taKvMKBAt5KzxV0,62026 +matplotlib/mpl-data/fonts/pdfcorefonts/Times-Italic.afm,sha256=7Tf6LmpntbF9_Ufzb8fpDfMokaRAiGDbyNTLvplZ4kI,68995 +matplotlib/mpl-data/fonts/pdfcorefonts/Times-Roman.afm,sha256=do4cq-oIXUiaY9o-gLlrxavw7JjTBzybTWunbnvMumQ,62879 +matplotlib/mpl-data/fonts/pdfcorefonts/ZapfDingbats.afm,sha256=oyVlyQr9G1enAI_FZ7eNlc8cIq3_XghglNZm2IsDmFk,9752 +matplotlib/mpl-data/fonts/pdfcorefonts/readme.txt,sha256=yQ1iD9441TPPu5-v-4nng62AUWpOPwW1M_NoeYTwGYQ,843 +matplotlib/mpl-data/fonts/ttf/DejaVuSans-Bold.ttf,sha256=sYS4njwQdfIva3FXW2_CDUlys8_TsjMiym_Vltyu8Wc,704128 +matplotlib/mpl-data/fonts/ttf/DejaVuSans-BoldOblique.ttf,sha256=bt8CgxYBhq9FHL7nHnuEXy5Mq_Jku5ks5mjIPCVGXm8,641720 +matplotlib/mpl-data/fonts/ttf/DejaVuSans-Oblique.ttf,sha256=zN90s1DxH9PdV3TeUOXmNGoaXaH1t9X7g1kGZel6UhM,633840 +matplotlib/mpl-data/fonts/ttf/DejaVuSans.ttf,sha256=P99pyr8GBJ6nCgC1kZNA4s4ebQKwzDxLRPtoAb0eDSI,756072 +matplotlib/mpl-data/fonts/ttf/DejaVuSansDisplay.ttf,sha256=ggmdz7paqGjN_CdFGYlSX-MpL3N_s8ngMozpzvWWUvY,25712 +matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-Bold.ttf,sha256=uq2ppRcv4giGJRr_BDP8OEYZEtXa8HKH577lZiCo2pY,331536 +matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-BoldOblique.ttf,sha256=ppCBwVx2yCfgonpaf1x0thNchDSZlVSV_6jCDTqYKIs,253116 +matplotlib/mpl-data/fonts/ttf/DejaVuSansMono-Oblique.ttf,sha256=KAUoE_enCfyJ9S0ZLcmV708P3Fw9e3OknWhJsZFtDNA,251472 +matplotlib/mpl-data/fonts/ttf/DejaVuSansMono.ttf,sha256=YC7Ia4lIz82VZIL-ZPlMNshndwFJ7y95HUYT9EO87LM,340240 +matplotlib/mpl-data/fonts/ttf/DejaVuSerif-Bold.ttf,sha256=w3U_Lta8Zz8VhG3EWt2-s7nIcvMvsY_VOiHxvvHtdnY,355692 +matplotlib/mpl-data/fonts/ttf/DejaVuSerif-BoldItalic.ttf,sha256=2T7-x6nS6CZ2jRou6VuVhw4V4pWZqE80hK8d4c7C4YE,347064 
+matplotlib/mpl-data/fonts/ttf/DejaVuSerif-Italic.ttf,sha256=PnmU-8VPoQzjNSpC1Uj63X2crbacsRCbydlg9trFfwQ,345612 +matplotlib/mpl-data/fonts/ttf/DejaVuSerif.ttf,sha256=EHJElW6ZYrnpb6zNxVGCXgrgiYrhNzcTPhuSGi_TX_o,379740 +matplotlib/mpl-data/fonts/ttf/DejaVuSerifDisplay.ttf,sha256=KRTzLkfHd8J75Wd6-ufbTeefnkXeb8kJfZlJwjwU99U,14300 +matplotlib/mpl-data/fonts/ttf/LICENSE_DEJAVU,sha256=xhup6GaKURy9C8_e6DKeAspspASKvabKfuisaKBZd2o,4915 +matplotlib/mpl-data/fonts/ttf/LICENSE_STIX,sha256=LVswXqq9O9oRWEuSeKQwviEY8mU7yjTzd4SS_6gFyhY,5599 +matplotlib/mpl-data/fonts/ttf/STIXGeneral.ttf,sha256=FnN4Ax4t3cYhbWeBnJJg6aBv_ExHjk4jy5im_USxg8I,448228 +matplotlib/mpl-data/fonts/ttf/STIXGeneralBol.ttf,sha256=6FM9xwg_o0a9oZM9YOpKg7Z9CUW86vGzVB-CtKDixqA,237360 +matplotlib/mpl-data/fonts/ttf/STIXGeneralBolIta.ttf,sha256=mHiP1LpI37sr0CbA4gokeosGxzcoeWKLemuw1bsJc2w,181152 +matplotlib/mpl-data/fonts/ttf/STIXGeneralItalic.ttf,sha256=bPyzM9IrfDxiO9_UAXTxTIXD1nMcphZsHtyAFA6uhSc,175040 +matplotlib/mpl-data/fonts/ttf/STIXNonUni.ttf,sha256=Ulb34CEzWsSFTRgPDovxmJZOwvyCAXYnbhaqvGU3u1c,59108 +matplotlib/mpl-data/fonts/ttf/STIXNonUniBol.ttf,sha256=XRBqW3jR_8MBdFU0ObhiV7-kXwiBIMs7QVClHcT5tgs,30512 +matplotlib/mpl-data/fonts/ttf/STIXNonUniBolIta.ttf,sha256=pb22DnbDf2yQqizotc3wBDqFGC_g27YcCGJivH9-Le8,41272 +matplotlib/mpl-data/fonts/ttf/STIXNonUniIta.ttf,sha256=BMr9pWiBv2YIZdq04X4c3CgL6NPLUPrl64aV1N4w9Ug,46752 +matplotlib/mpl-data/fonts/ttf/STIXSizFiveSymReg.ttf,sha256=wYuH1gYUpCuusqItRH5kf9p_s6mUD-9X3L5RvRtKSxs,13656 +matplotlib/mpl-data/fonts/ttf/STIXSizFourSymBol.ttf,sha256=yNdvjUoSmsZCULmD7SVq9HabndG9P4dPhboL1JpAf0s,12228 +matplotlib/mpl-data/fonts/ttf/STIXSizFourSymReg.ttf,sha256=-9xVMYL4_1rcO8FiCKrCfR4PaSmKtA42ddLGqwtei1w,15972 +matplotlib/mpl-data/fonts/ttf/STIXSizOneSymBol.ttf,sha256=cYexyo8rZcdqMlpa9fNF5a2IoXLUTZuIvh0JD1Qp0i4,12556 +matplotlib/mpl-data/fonts/ttf/STIXSizOneSymReg.ttf,sha256=0lbHzpndzJmO8S42mlkhsz5NbvJLQCaH5Mcc7QZRDzc,19760 
+matplotlib/mpl-data/fonts/ttf/STIXSizThreeSymBol.ttf,sha256=3eBc-VtYbhQU3BnxiypfO6eAzEu8BdDvtIJSFbkS2oY,12192 +matplotlib/mpl-data/fonts/ttf/STIXSizThreeSymReg.ttf,sha256=XFSKCptbESM8uxHtUFSAV2cybwxhSjd8dWVByq6f3w0,15836 +matplotlib/mpl-data/fonts/ttf/STIXSizTwoSymBol.ttf,sha256=MUCYHrA0ZqFiSE_PjIGlJZgMuv79aUgQqE7Dtu3kuo0,12116 +matplotlib/mpl-data/fonts/ttf/STIXSizTwoSymReg.ttf,sha256=_sdxDuEwBDtADpu9CyIXQxV7sIqA2TZVBCUiUjq5UCk,15704 +matplotlib/mpl-data/fonts/ttf/cmb10.ttf,sha256=B0SXtQxD6ldZcYFZH5iT04_BKofpUQT1ZX_CSB9hojo,25680 +matplotlib/mpl-data/fonts/ttf/cmex10.ttf,sha256=ryjwwXByOsd2pxv6WVrKCemNFa5cPVTOGa_VYZyWqQU,21092 +matplotlib/mpl-data/fonts/ttf/cmmi10.ttf,sha256=MJKWW4gR_WpnZXmWZIRRgfwd0TMLk3-RWAjEhdMWI00,32560 +matplotlib/mpl-data/fonts/ttf/cmr10.ttf,sha256=Tdl2GwWMAJ25shRfVe5mF9CTwnPdPWxbPkP_YRD6m_Y,26348 +matplotlib/mpl-data/fonts/ttf/cmss10.ttf,sha256=ffkag9BbLkcexjjLC0NaNgo8eSsJ_EKn2mfpHy55EVo,20376 +matplotlib/mpl-data/fonts/ttf/cmsy10.ttf,sha256=uyJu2TLz8QDNDlL15JEu5VO0G2nnv9uNOFTbDrZgUjI,29396 +matplotlib/mpl-data/fonts/ttf/cmtt10.ttf,sha256=YhHwmuk1mZka_alwwkZp2tGnfiU9kVYk-_IS9wLwcdc,28136 +matplotlib/mpl-data/images/back-symbolic.svg,sha256=yRdMiKsa-awUm2x_JE_rEV20rNTa7FInbFBEoMo-6ik,1512 +matplotlib/mpl-data/images/back.gif,sha256=sdkxFRAh-Mgs44DTvruO5OxcI3Av9CS1g5MqMA_DDkQ,608 +matplotlib/mpl-data/images/back.pdf,sha256=ZR7CJo_dAeCM-KlaGvskgtHQyRtrPIolc8REOmcoqJk,1623 +matplotlib/mpl-data/images/back.png,sha256=E4dGf4Gnz1xJ1v2tMygHV0YNQgShreDeVApaMb-74mU,380 +matplotlib/mpl-data/images/back.svg,sha256=yRdMiKsa-awUm2x_JE_rEV20rNTa7FInbFBEoMo-6ik,1512 +matplotlib/mpl-data/images/back_large.gif,sha256=tqCtecrxNrPuDCUj7FGs8UXWftljKcwgp5cSBBhXwiQ,799 +matplotlib/mpl-data/images/back_large.png,sha256=9A6hUSQeszhYONE4ZuH3kvOItM0JfDVu6tkfromCbsQ,620 +matplotlib/mpl-data/images/filesave-symbolic.svg,sha256=oxPVbLS9Pzelz71C1GCJWB34DZ0sx_pUVPRHBrCZrGs,2029 
+matplotlib/mpl-data/images/filesave.gif,sha256=wAyNwOPd9c-EIPwcUAlqHSfLmxq167nhDVppOWPy9UA,723 +matplotlib/mpl-data/images/filesave.pdf,sha256=P1EPPV2g50WTt8UaX-6kFoTZM1xVqo6S2H6FJ6Zd1ec,1734 +matplotlib/mpl-data/images/filesave.png,sha256=b7ctucrM_F2mG-DycTedG_a_y4pHkx3F-zM7l18GLhk,458 +matplotlib/mpl-data/images/filesave.svg,sha256=oxPVbLS9Pzelz71C1GCJWB34DZ0sx_pUVPRHBrCZrGs,2029 +matplotlib/mpl-data/images/filesave_large.gif,sha256=IXrenlwu3wwO8WTRvxHt_q62NF6ZWyqk3jZhm6GE-G8,1498 +matplotlib/mpl-data/images/filesave_large.png,sha256=LNbRD5KZ3Kf7nbp-stx_a1_6XfGBSWUfDdpgmnzoRvk,720 +matplotlib/mpl-data/images/forward-symbolic.svg,sha256=NnQDOenfjsn-o0aJMUfErrP320Zcx9XHZkLh0cjMHsk,1531 +matplotlib/mpl-data/images/forward.gif,sha256=VNL9R-dECOX7wUAYPtU_DWn5hwi3SwLR17DhmBvUIxE,590 +matplotlib/mpl-data/images/forward.pdf,sha256=KIqIL4YId43LkcOxV_TT5uvz1SP8k5iUNUeJmAElMV8,1630 +matplotlib/mpl-data/images/forward.png,sha256=pKbLepgGiGeyY2TCBl8svjvm7Z4CS3iysFxcq4GR-wk,357 +matplotlib/mpl-data/images/forward.svg,sha256=NnQDOenfjsn-o0aJMUfErrP320Zcx9XHZkLh0cjMHsk,1531 +matplotlib/mpl-data/images/forward_large.gif,sha256=H6Jbcc7qJwHJAE294YqI5Bm-5irofX40cKRvYdrG_Ig,786 +matplotlib/mpl-data/images/forward_large.png,sha256=36h7m7DZDHql6kkdpNPckyi2LKCe_xhhyavWARz_2kQ,593 +matplotlib/mpl-data/images/hand.gif,sha256=3lRfmAqQU7A2t1YXXsB9IbwzK7FaRh-IZO84D5-xCrw,1267 +matplotlib/mpl-data/images/hand.pdf,sha256=hspwkNY915KPD7AMWnVQs7LFPOtlcj0VUiLu76dMabQ,4172 +matplotlib/mpl-data/images/hand.png,sha256=2cchRETGKa0hYNKUxnJABwkyYXEBPqJy_VqSPlT0W2Q,979 +matplotlib/mpl-data/images/hand.svg,sha256=tsVIES_nINrAbH4FqdsCGOx0SVE37vcofSYBhnnaOP0,4888 +matplotlib/mpl-data/images/hand_large.gif,sha256=H5IHmVTvOqHQb9FZ_7g7AlPt9gv-zRq0L5_Q9B7OuvU,973 +matplotlib/mpl-data/images/help-symbolic.svg,sha256=KXabvQhqIWen_t2SvZuddFYa3S0iI3W8cAKm3s1fI8Q,1870 +matplotlib/mpl-data/images/help.gif,sha256=3Cjr7YqfH7HFmYCmrJKxnoLPkbUfUcxQOW7RI2-4Cpo,564 
+matplotlib/mpl-data/images/help.pdf,sha256=CeE978IMi0YWznWKjIT1R8IrP4KhZ0S7usPUvreSgcA,1813 +matplotlib/mpl-data/images/help.png,sha256=s4pQrqaQ0py8I7vc9hv3BI3DO_tky-7YBMpaHuBDCBY,472 +matplotlib/mpl-data/images/help.ppm,sha256=mVPvgwcddzCM-nGZd8Lnl_CorzDkRIXQE17b7qo8vlU,1741 +matplotlib/mpl-data/images/help.svg,sha256=KXabvQhqIWen_t2SvZuddFYa3S0iI3W8cAKm3s1fI8Q,1870 +matplotlib/mpl-data/images/help_large.png,sha256=1IwEyWfGRgnoCWM-r9CJHEogTJVD5n1c8LXTK4AJ4RE,747 +matplotlib/mpl-data/images/help_large.ppm,sha256=MiCSKp1Su88FXOi9MTtkQDA2srwbX3w5navi6cneAi4,6925 +matplotlib/mpl-data/images/home-symbolic.svg,sha256=n_AosjJVXET3McymFuHgXbUr5vMLdXK2PDgghX8Cch4,1891 +matplotlib/mpl-data/images/home.gif,sha256=NKuFM7tTtFngdfsOpJ4AxYTL8PYS5GWKAoiJjBMwLlU,666 +matplotlib/mpl-data/images/home.pdf,sha256=e0e0pI-XRtPmvUCW2VTKL1DeYu1pvPmUUeRSgEbWmik,1737 +matplotlib/mpl-data/images/home.png,sha256=IcFdAAUa6_A0qt8IO3I8p4rpXpQgAlJ8ndBECCh7C1w,468 +matplotlib/mpl-data/images/home.svg,sha256=n_AosjJVXET3McymFuHgXbUr5vMLdXK2PDgghX8Cch4,1891 +matplotlib/mpl-data/images/home_large.gif,sha256=k86PJCgED46sCFkOlUYHA0s5U7OjRsc517bpAtU2JSw,1422 +matplotlib/mpl-data/images/home_large.png,sha256=uxS2O3tWOHh1iau7CaVV4ermIJaZ007ibm5Z3i8kXYg,790 +matplotlib/mpl-data/images/matplotlib.pdf,sha256=BkSUf-2xoij-eXfpV2t7y1JFKG1zD1gtV6aAg3Xi_wE,22852 +matplotlib/mpl-data/images/matplotlib.png,sha256=w8KLRYVa-voUZXa41hgJauQuoois23f3NFfdc72pUYY,1283 +matplotlib/mpl-data/images/matplotlib.svg,sha256=QiTIcqlQwGaVPtHsEk-vtmJk1wxwZSvijhqBe_b9VCI,62087 +matplotlib/mpl-data/images/matplotlib_128.ppm,sha256=IHPRWXpLFRq3Vb7UjiCkFrN_N86lSPcfrEGunST08d8,49167 +matplotlib/mpl-data/images/matplotlib_large.png,sha256=ElRoue9grUqkZXJngk-nvh4GKfpvJ4gE69WryjCbX5U,3088 +matplotlib/mpl-data/images/move-symbolic.svg,sha256=_ZKpcwGD6DMTkZlbyj0nQbT8Ygt5vslEZ0OqXaXGd4E,2509 +matplotlib/mpl-data/images/move.gif,sha256=FN52MptH4FZiwmV2rQgYCO2FvO3m5LtqYv8jk6Xbeyk,679 
+matplotlib/mpl-data/images/move.pdf,sha256=CXk3PGK9WL5t-5J-G2X5Tl-nb6lcErTBS5oUj2St6aU,1867 +matplotlib/mpl-data/images/move.png,sha256=TmjR41IzSzxGbhiUcV64X0zx2BjrxbWH3cSKvnG2vzc,481 +matplotlib/mpl-data/images/move.svg,sha256=_ZKpcwGD6DMTkZlbyj0nQbT8Ygt5vslEZ0OqXaXGd4E,2509 +matplotlib/mpl-data/images/move_large.gif,sha256=RMIAr-G9OOY7vWC04oN6qv5TAHJxhQGhLsw_bNsvWbg,951 +matplotlib/mpl-data/images/move_large.png,sha256=Skjz2nW_RTA5s_0g88gdq2hrVbm6DOcfYW4Fu42Fn9U,767 +matplotlib/mpl-data/images/qt4_editor_options.pdf,sha256=2qu6GVyBrJvVHxychQoJUiXPYxBylbH2j90QnytXs_w,1568 +matplotlib/mpl-data/images/qt4_editor_options.png,sha256=EryQjQ5hh2dwmIxtzCFiMN1U6Tnd11p1CDfgH5ZHjNM,380 +matplotlib/mpl-data/images/qt4_editor_options.svg,sha256=E00YoX7u4NrxMHm_L1TM8PDJ88bX5qRdCrO-Uj59CEA,1244 +matplotlib/mpl-data/images/qt4_editor_options_large.png,sha256=-Pd-9Vh5aIr3PZa8O6Ge_BLo41kiEnpmkdDj8a11JkY,619 +matplotlib/mpl-data/images/subplots-symbolic.svg,sha256=8acBogXIr9OWGn1iD6mUkgahdFZgDybww385zLCLoIs,2130 +matplotlib/mpl-data/images/subplots.gif,sha256=QfhmUdcrko08-WtrzCJUjrVFDTvUZCJEXpARNtzEwkg,691 +matplotlib/mpl-data/images/subplots.pdf,sha256=Q0syPMI5EvtgM-CE-YXKOkL9eFUAZnj_X2Ihoj6R4p4,1714 +matplotlib/mpl-data/images/subplots.png,sha256=MUfCItq3_yzb9yRieGOglpn0Y74h8IA7m5i70B63iRc,445 +matplotlib/mpl-data/images/subplots.svg,sha256=8acBogXIr9OWGn1iD6mUkgahdFZgDybww385zLCLoIs,2130 +matplotlib/mpl-data/images/subplots_large.gif,sha256=Ff3ERmtVAaGP9i1QGUNnIIKac6LGuSW2Qf4DrockZSI,1350 +matplotlib/mpl-data/images/subplots_large.png,sha256=Edu9SwVMQEXJZ5ogU5cyW7VLcwXJdhdf-EtxxmxdkIs,662 +matplotlib/mpl-data/images/zoom_to_rect-symbolic.svg,sha256=1vRxr3cl8QTwTuRlQzD1jxu0fXZofTJ2PMgG97E7Bco,1479 +matplotlib/mpl-data/images/zoom_to_rect.gif,sha256=mTX6h9fh2W9zmvUYqeibK0TZ7qIMKOB1nAXMpD_jDys,696 +matplotlib/mpl-data/images/zoom_to_rect.pdf,sha256=SEvPc24gfZRpl-dHv7nx8KkxPyU66Kq4zgQTvGFm9KA,1609 
+matplotlib/mpl-data/images/zoom_to_rect.png,sha256=aNz3QZBrIgxu9E-fFfaQweCVNitGuDUFoC27e5NU2L4,530 +matplotlib/mpl-data/images/zoom_to_rect.svg,sha256=1vRxr3cl8QTwTuRlQzD1jxu0fXZofTJ2PMgG97E7Bco,1479 +matplotlib/mpl-data/images/zoom_to_rect_large.gif,sha256=nx5LUpTAH6ZynM3ZfZDS-wR87jbMUsUnyQ27NGkV0_c,1456 +matplotlib/mpl-data/images/zoom_to_rect_large.png,sha256=V6pkxmm6VwFExdg_PEJWdK37HB7k3cE_corLa7RbUMk,1016 +matplotlib/mpl-data/matplotlibrc,sha256=Qo18I-IbXQawZIgtVzbeQ0nx_yalJjnyjrCr0Q8yp1Q,40658 +matplotlib/mpl-data/sample_data/Minduka_Present_Blue_Pack.png,sha256=XnKGiCanpDKalQ5anvo5NZSAeDP7fyflzQAaivuc0IE,13634 +matplotlib/mpl-data/sample_data/None_vs_nearest-pdf.png,sha256=5CPvcG3SDNfOXx39CMKHCNS9JKZ-fmOUwIfpppNXsQ0,106228 +matplotlib/mpl-data/sample_data/README.txt,sha256=c8JfhUG72jHZj6SyS0hWvlXEtWUJbjRNfMZlA85SWIo,130 +matplotlib/mpl-data/sample_data/aapl.npz,sha256=GssVYka_EccteiXbNRJJ5GsuqU7G8F597qX7srYXZsw,107503 +matplotlib/mpl-data/sample_data/ada.png,sha256=X1hjJK1_1Nc8DN-EEhey3G7Sq8jBwQDKNSl4cCAE0uY,308313 +matplotlib/mpl-data/sample_data/axes_grid/bivariate_normal.npy,sha256=DpWZ9udAh6ospYqneEa27D6EkRgORFwHosacZXVu98U,1880 +matplotlib/mpl-data/sample_data/ct.raw.gz,sha256=LDvvgH-mycRQF2D29-w5MW94ZI0opvwKUoFI8euNpMk,256159 +matplotlib/mpl-data/sample_data/data_x_x2_x3.csv,sha256=IG7mazfIlEyJnqIcZrKBEhjitrI3Wv35uVFVV6hBgMo,143 +matplotlib/mpl-data/sample_data/demodata.csv,sha256=jswIPVUGO-gMkpzyA73BzR3rpwUabVl30SXVrMzZvx0,670 +matplotlib/mpl-data/sample_data/eeg.dat,sha256=KGVjFt8ABKz7p6XZirNfcxSTOpGGNuyA8JYErRKLRBc,25600 +matplotlib/mpl-data/sample_data/embedding_in_wx3.xrc,sha256=IcJ5PddMI2wSxlUGUUv3He3bsmGaRfBp9ZwEQz5QTdo,2250 +matplotlib/mpl-data/sample_data/goog.npz,sha256=QAkXzzDmtmT3sNqT18dFhg06qQCNqLfxYNLdEuajGLE,22845 +matplotlib/mpl-data/sample_data/grace_hopper.jpg,sha256=qMptc0dlcDsJcoq0f-WfRz2Trjln_CTHwCiMPHrbcTA,61306 +matplotlib/mpl-data/sample_data/grace_hopper.png,sha256=MCf0ju2kpC40srQ0xw4HEyOoKhLL4khP3jHfU9_dR7s,628280 
+matplotlib/mpl-data/sample_data/jacksboro_fault_dem.npz,sha256=1JP1CjPoKkQgSUxU0fyhU50Xe9wnqxkLxf5ukvYvtjc,174061 +matplotlib/mpl-data/sample_data/logo2.png,sha256=ITxkJUsan2oqXgJDy6DJvwJ4aHviKeWGnxPkTjXUt7A,33541 +matplotlib/mpl-data/sample_data/membrane.dat,sha256=q3lbQpIBpbtXXGNw1eFwkN_PwxdDGqk4L46IE2b0M1c,48000 +matplotlib/mpl-data/sample_data/msft.csv,sha256=4JtKT5me60-GNMUoCMuIDAYAIpylT_EroyBbGh0yi_U,3276 +matplotlib/mpl-data/sample_data/percent_bachelors_degrees_women_usa.csv,sha256=Abap-NFjqwp1ELGNYCoTL4S5vRniAzM5R3ixgEFFpTU,5723 +matplotlib/mpl-data/sample_data/s1045.ima.gz,sha256=MrQk1k9it-ccsk0p_VOTitVmTWCAVaZ6srKvQ2n4uJ4,33229 +matplotlib/mpl-data/sample_data/topobathy.npz,sha256=AkTgMpFwLfRQJNy1ysvE89TLMNct-n_TccSsYcQrT78,45224 +matplotlib/mpl-data/stylelib/Solarize_Light2.mplstyle,sha256=uU84qox3o_tHASXoKLR6nBJmJ9AS0u7TWXxTFZx9tjA,1308 +matplotlib/mpl-data/stylelib/_classic_test_patch.mplstyle,sha256=9XRyb2XzCtS6piLIYFbNHpU-bF4f7YliWLdbLXvBojI,173 +matplotlib/mpl-data/stylelib/bmh.mplstyle,sha256=UTO__T6YaaUY6u5NjAsBGBsv_AOK45nKi1scf-ORxzU,741 +matplotlib/mpl-data/stylelib/classic.mplstyle,sha256=HZfPeokxDqwLhhck_tGh8ugTbjMpxqJsW1r_lo0TZis,24713 +matplotlib/mpl-data/stylelib/dark_background.mplstyle,sha256=Vei27QYOP3dNTaHzmRYneNLTCw30nE75JOUDYuOjnXc,687 +matplotlib/mpl-data/stylelib/fast.mplstyle,sha256=HDqa0GATC9GjNeRA8rYiZM-qh7hTxsraeyYziGlbgzg,299 +matplotlib/mpl-data/stylelib/fivethirtyeight.mplstyle,sha256=IfXwiatqkv6rkauNnjcfDDS6pU-UabtEhbokK5-qAes,872 +matplotlib/mpl-data/stylelib/ggplot.mplstyle,sha256=pWh3RqvTy3fyP_aGOa1TR7NMAi5huuWDJRPeZM5kR3o,996 +matplotlib/mpl-data/stylelib/grayscale.mplstyle,sha256=MnigXJy2ckyQZuiwb-nCXQ0-0cJBz1WPu-CEJXEHWpA,555 +matplotlib/mpl-data/stylelib/seaborn-bright.mplstyle,sha256=DIo92H5LVQVPMeJOcVaOPOovchqMeDvkKoEQ0BX--wA,147 +matplotlib/mpl-data/stylelib/seaborn-colorblind.mplstyle,sha256=M7OYVR1choIo_jlDfMsGSADJahLDauZEOUJJpuDK8Hs,151 
+matplotlib/mpl-data/stylelib/seaborn-dark-palette.mplstyle,sha256=HLb5n5XgW-IQ8b5YcTeIlA1QyHjP7wiNPAHD2syptW4,145 +matplotlib/mpl-data/stylelib/seaborn-dark.mplstyle,sha256=IZMc2QEnkTmbOfAr5HIiu6SymcdRbKWSIYGOtprNlDw,697 +matplotlib/mpl-data/stylelib/seaborn-darkgrid.mplstyle,sha256=lY9aae1ZeSJ1WyT42fi0lfuQi2t0vwhic8TBEphKA5c,700 +matplotlib/mpl-data/stylelib/seaborn-deep.mplstyle,sha256=djxxvf898QicTlmeDHJW5HVjrvHGZEOSIPWgFK0wqpw,145 +matplotlib/mpl-data/stylelib/seaborn-muted.mplstyle,sha256=5t2wew5ydrrJraEuuxH918TuAboCzuCVVj4kYq78_LU,146 +matplotlib/mpl-data/stylelib/seaborn-notebook.mplstyle,sha256=g0nB6xP2N5VfW31pBa4mRHZU5kLqZLQncj9ExpTuTi8,403 +matplotlib/mpl-data/stylelib/seaborn-paper.mplstyle,sha256=StESYj-S2Zv9Cngd5bpFqJVw4oBddpqB3C5qHESmzi8,414 +matplotlib/mpl-data/stylelib/seaborn-pastel.mplstyle,sha256=8KO6r5H2jWIophEf7XJVYKyrXSrYGEn2f1F_KXoEEIc,147 +matplotlib/mpl-data/stylelib/seaborn-poster.mplstyle,sha256=8xZxeZiSX2npJ-vCqsSsDcc4GeFrXwfrSNu0xXfA2Uk,424 +matplotlib/mpl-data/stylelib/seaborn-talk.mplstyle,sha256=_c29c8iDdsCMNVERcHHwD8khIcUVxeuoHI2o1eE0Phg,424 +matplotlib/mpl-data/stylelib/seaborn-ticks.mplstyle,sha256=Annui6BdMJqYZsIGCkdmk88r4m_H4esa7bSszkBpm-A,695 +matplotlib/mpl-data/stylelib/seaborn-white.mplstyle,sha256=VY6sw8wkqbl0leWtWa5gz8xfDMfqt5yEhITIjP4FsOI,695 +matplotlib/mpl-data/stylelib/seaborn-whitegrid.mplstyle,sha256=IOm2H1utXO_zR7FWqMLBiRxHyxABL3kq1fh0-6BDJ0E,694 +matplotlib/mpl-data/stylelib/seaborn.mplstyle,sha256=N9lUFHvOn06wT4MODXpVVGQMSueONejeAfCX5UfWrIM,1187 +matplotlib/mpl-data/stylelib/tableau-colorblind10.mplstyle,sha256=PzUMoOtw0V6l0bPk8ApRAKvcxdJmzRU2bVOkNqz8DnU,192 +matplotlib/offsetbox.py,sha256=zdr7NNI2l9Er6G1i7OgH2ZhrwbTpQO10yI-ZqpmbIsk,61753 +matplotlib/patches.py,sha256=-sEU_HwmsPle-Vo_OlfQrYEoswovpqpDi1AigE5z2Q4,153320 +matplotlib/path.py,sha256=SdnGJGVPVIiBJALUR7UpNRNO0qNFjT05dKp2w2PiPWE,40487 +matplotlib/patheffects.py,sha256=sFJ-GfsPwdF-eS5273bgseShb01-hFkTEhdbtIHkGig,13975 
+matplotlib/projections/__init__.py,sha256=yaWM60AYTHz3geKwIwnp-dDaAhvaKSlj65uZFDJNe70,1728 +matplotlib/projections/__pycache__/__init__.cpython-36.pyc,, +matplotlib/projections/__pycache__/geo.cpython-36.pyc,, +matplotlib/projections/__pycache__/polar.cpython-36.pyc,, +matplotlib/projections/geo.py,sha256=_njFHfRBw8RNrej5WcOg3qcstFVaDak6f1MjSl62kXA,17967 +matplotlib/projections/polar.py,sha256=iqynlkWG_heAjb3eoLxj-f13y3SjxipzbbwlulsseXo,55707 +matplotlib/pylab.py,sha256=NUr7XkQQSQwKS-l1z1ur54YcZJLN_mgbtsagAuPKz68,1742 +matplotlib/pyplot.py,sha256=1hcTG7XEIjzfDQRkSd3KnDZVItIVj6wAT9TNIP_DIW0,119057 +matplotlib/quiver.py,sha256=cprVfYoP0UEglSroDqfqukjj4lwYtdDavyZfyF46S_s,48503 +matplotlib/rcsetup.py,sha256=0Aj8RaT2e_TOCxRu5A9Th7VuNTXh7iEXwuTtS1TZBfU,58761 +matplotlib/sankey.py,sha256=U5F9LUBI9GoC4PyHJg11Ow7pmZIbwqiX9-CPANH-V5g,37159 +matplotlib/scale.py,sha256=Aiy1JJpllbTg5mlFWmut7Q5GWRLrg-8B7OIMukrWlWU,24311 +matplotlib/sphinxext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +matplotlib/sphinxext/__pycache__/__init__.cpython-36.pyc,, +matplotlib/sphinxext/__pycache__/mathmpl.cpython-36.pyc,, +matplotlib/sphinxext/__pycache__/plot_directive.cpython-36.pyc,, +matplotlib/sphinxext/mathmpl.py,sha256=yY0d7NsVcFiujAadP44DMesGp2Zyhz3USYjsznq4WDU,3885 +matplotlib/sphinxext/plot_directive.py,sha256=8XX4AJ1Na44_O3btZpOIG9oE9zN0z8q3TOwDhZvwnj8,27024 +matplotlib/spines.py,sha256=gyEYiuxVnXx6ewwqiB4j-L4hkcoQqifL5R_-5okWowM,20759 +matplotlib/stackplot.py,sha256=KaB8W5_SVSQeN4bVXhh5wnyu61ZEKJPV5ISOpWIccao,4025 +matplotlib/streamplot.py,sha256=qEVUKdQqgEIO8PGeEEjA_NVLISA09P0G3YPNlPhmOtA,23619 +matplotlib/style/__init__.py,sha256=Lee0QNI5VNg45wfj5h5w5_PhaBOq8zDPrDtqnpJfFWI,68 +matplotlib/style/__pycache__/__init__.cpython-36.pyc,, +matplotlib/style/__pycache__/core.cpython-36.pyc,, +matplotlib/style/core.py,sha256=PkT0m0NfFi8UjYcL1PLTE7tbYe9pJkuwpDMqBiXdnng,8724 +matplotlib/table.py,sha256=9GTl9lNFQX7RF0SrC5gaO0CBHFnVFzLhoVCWNPQ2EwY,27382 
+matplotlib/testing/__init__.py,sha256=5A2r174QGv1RzXQovaJzdx4ZuLTtb9lGfEWTRuzncc8,1454 +matplotlib/testing/__pycache__/__init__.cpython-36.pyc,, +matplotlib/testing/__pycache__/compare.cpython-36.pyc,, +matplotlib/testing/__pycache__/conftest.cpython-36.pyc,, +matplotlib/testing/__pycache__/decorators.cpython-36.pyc,, +matplotlib/testing/__pycache__/disable_internet.cpython-36.pyc,, +matplotlib/testing/__pycache__/exceptions.cpython-36.pyc,, +matplotlib/testing/__pycache__/widgets.cpython-36.pyc,, +matplotlib/testing/compare.py,sha256=16h0JgcJJTXJ4M5Y3au-Qv8b30Amlmnene61XPdu7ag,17453 +matplotlib/testing/conftest.py,sha256=4SsbLZYORUGz9GBDDqVVt-JTVXUe_RO2zfYeZ0f3VQY,5704 +matplotlib/testing/decorators.py,sha256=fCXaMvJJ6up0WMpbmpdzXEhW6y72TVEBRnnrLQPwKtU,19219 +matplotlib/testing/disable_internet.py,sha256=iJ6yi2K3Irql3VRpB37B6dV8jF8ppR-9bIJ7CSLbiSY,5064 +matplotlib/testing/exceptions.py,sha256=rTxMs5B6lKjXH6c53eVH7iVPrG5Ty7wInRSgzNiMKK4,142 +matplotlib/testing/jpl_units/Duration.py,sha256=6hE7fLS4BUeCidjlXrNpk9LJ1Y37KaW-Bgg-uYUZeYo,4623 +matplotlib/testing/jpl_units/Epoch.py,sha256=dJL3tvga_kHuW7YYy10PYw5Bw9_XditGf1AQiAw2m6s,6582 +matplotlib/testing/jpl_units/EpochConverter.py,sha256=-mQ_6zbsgGvoka6JAnzoFk8V6ggWmlizJl4kgR6fhXI,3264 +matplotlib/testing/jpl_units/StrConverter.py,sha256=wewzDhcl1A2kG0MHtwrCGqWqhsUz-TfIxzMSpXbqjro,3053 +matplotlib/testing/jpl_units/UnitDbl.py,sha256=CNUbYh_us3Fbf7Eg5O4mWcBXT-zsjfsJ3dGJaemWp98,7873 +matplotlib/testing/jpl_units/UnitDblConverter.py,sha256=dkLzijhvoXNR-pOwYHGN_TSkpw1yV-88B31Xitzo1ho,3190 +matplotlib/testing/jpl_units/UnitDblFormatter.py,sha256=3ZrRTulxYh-fTRtSpN9gCFqmyMA_ZR2_znkQjy3UCJc,709 +matplotlib/testing/jpl_units/__init__.py,sha256=gdnES2cnASttTSfKQn-g400gQ0dDZAQ_Kn09J_HyFJY,2760 +matplotlib/testing/jpl_units/__pycache__/Duration.cpython-36.pyc,, +matplotlib/testing/jpl_units/__pycache__/Epoch.cpython-36.pyc,, +matplotlib/testing/jpl_units/__pycache__/EpochConverter.cpython-36.pyc,, 
+matplotlib/testing/jpl_units/__pycache__/StrConverter.cpython-36.pyc,, +matplotlib/testing/jpl_units/__pycache__/UnitDbl.cpython-36.pyc,, +matplotlib/testing/jpl_units/__pycache__/UnitDblConverter.cpython-36.pyc,, +matplotlib/testing/jpl_units/__pycache__/UnitDblFormatter.cpython-36.pyc,, +matplotlib/testing/jpl_units/__pycache__/__init__.cpython-36.pyc,, +matplotlib/testing/widgets.py,sha256=JN-ZY_GWrBYM7-WJl_EnJVB7EwwNUPoueZg2tLveg80,1566 +matplotlib/tests/__init__.py,sha256=y2ftcuJhePrKnF_GHdqlGPT_SY-rhoASd2m4iyHqpfE,376 +matplotlib/tests/__pycache__/__init__.cpython-36.pyc,, +matplotlib/tests/__pycache__/conftest.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_afm.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_agg.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_agg_filter.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_animation.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_arrow_patches.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_artist.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_axes.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_bases.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_cairo.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_nbagg.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_pdf.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_pgf.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_ps.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_qt.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_svg.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_tk.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_tools.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backend_webagg.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_backends_interactive.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_basic.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_bbox_tight.cpython-36.pyc,, 
+matplotlib/tests/__pycache__/test_category.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_cbook.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_collections.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_colorbar.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_colors.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_compare_images.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_constrainedlayout.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_container.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_contour.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_cycles.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_dates.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_determinism.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_dviread.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_figure.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_font_manager.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_fontconfig_pattern.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_gridspec.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_image.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_legend.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_lines.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_marker.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_mathtext.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_matplotlib.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_mlab.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_offsetbox.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_patches.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_path.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_patheffects.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_pickle.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_png.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_polar.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_preprocess_data.cpython-36.pyc,, 
+matplotlib/tests/__pycache__/test_pyplot.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_quiver.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_rcparams.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_sankey.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_scale.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_simplification.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_skew.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_sphinxext.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_spines.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_streamplot.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_style.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_subplots.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_table.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_testing.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_texmanager.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_text.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_ticker.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_tightlayout.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_transforms.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_triangulation.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_ttconv.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_type1font.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_units.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_usetex.cpython-36.pyc,, +matplotlib/tests/__pycache__/test_widgets.cpython-36.pyc,, +matplotlib/tests/conftest.py,sha256=tjbU0uzdD8q4j30uWm-lzYfZCjqFYRAF_-WMhA3O0qY,262 +matplotlib/tests/test_afm.py,sha256=afU9PfGYjm_WdfwGvoTfjTLgMxx_Vpn26wCDaWuFrg0,3847 +matplotlib/tests/test_agg.py,sha256=WA7gzi_zRxh_jPcmkf8ZO1p_NQry5__qF9GLzK8-obE,7675 +matplotlib/tests/test_agg_filter.py,sha256=i8Wv2oXmvi-m6020p_k8C3BCJ9UB6tBVSVNsul5NEaQ,999 +matplotlib/tests/test_animation.py,sha256=BS59_1os6n3Dm18njppZrO70Siqufwg2EMn5WUO0M_k,8628 
+matplotlib/tests/test_arrow_patches.py,sha256=0w0X1vPHRd__DOy2oVrYupG9Rk-EuWzyiet4piB4bF8,5820 +matplotlib/tests/test_artist.py,sha256=2wZZaSk04a1BWHcycnd1SSDy_QxKEUCeKS8yOCv-ijQ,9340 +matplotlib/tests/test_axes.py,sha256=Wsd__JvtGmvymBNHszuvg3-ZbJo9y2ZXz1AsR-qB9jc,216653 +matplotlib/tests/test_backend_bases.py,sha256=wWnNPhja06EFHkK6G7BP6LxsdXV5mvyEnqcWGdXcAQk,6094 +matplotlib/tests/test_backend_cairo.py,sha256=2jejOSazlpDheGuSBe2C6R2D37bMjmMy3Ds2NRCzFnA,1869 +matplotlib/tests/test_backend_nbagg.py,sha256=rfFHvT0YhzBMdm-t3P-GBRKi-ZWgjTXie_Zuo6pngt0,935 +matplotlib/tests/test_backend_pdf.py,sha256=smPXlrcaA1jwSrSbvk7OU5Swx95KpVTZJPiy4jWrAUs,8941 +matplotlib/tests/test_backend_pgf.py,sha256=mmZwFn0289xZER-QSspcpP2uloXtb__YJsf4IpRSsrw,10646 +matplotlib/tests/test_backend_ps.py,sha256=zTd9xtJ74PN8ayuTCFSHKuzPGtO2WHkNbGkeW3VQzbc,4723 +matplotlib/tests/test_backend_qt.py,sha256=Wptn99GISjKld3R_14o3EIWtfUBCWbd1vEjbrGDW_10,9508 +matplotlib/tests/test_backend_svg.py,sha256=R8AnTHOc8Rf9AOoyK4pTcBTDwM_CaK3Q_NTPzpVVCJc,12841 +matplotlib/tests/test_backend_tk.py,sha256=roCO6jRKhyXYLg_-pA1LFGS77S_f0ZZ9ZAs7GqVsjXA,1443 +matplotlib/tests/test_backend_tools.py,sha256=6StNyy5SGVdfh1r-UwXyQnMg_XJCiXL_hC40HehT-zA,521 +matplotlib/tests/test_backend_webagg.py,sha256=lQP6u1Vq8eV_9a7AnUzv58ORFdwXC9ggVZq2FfS6nto,729 +matplotlib/tests/test_backends_interactive.py,sha256=icuX1A-AKY6AK-JrYQUJX888asmgzPEfe-vBsBOJpz4,8102 +matplotlib/tests/test_basic.py,sha256=MEGWm3w6inz0KHFArFPWTJXneX7_-o53Wf38GdcWXwg,1254 +matplotlib/tests/test_bbox_tight.py,sha256=4Hie2haP7to5h0h4OKe6TgCTcBXqXU5GRiHHgdn7S68,5090 +matplotlib/tests/test_category.py,sha256=vrQUEqupQziRmUyuT0vyuZUM9HVxRk9Gg2A2Ka1uujM,10501 +matplotlib/tests/test_cbook.py,sha256=PCHmdqO2JAdzIeSr7X5zY9OsfMIFp7WzeKd6L_QpPIA,25394 +matplotlib/tests/test_collections.py,sha256=PE-L1D_z-XaGwFgnu2DOkXClJrw5pxx1HY1lHgPfEMU,23670 +matplotlib/tests/test_colorbar.py,sha256=IAbC_fC_wr3gthL_ezyKIN7BwnMCQTwB7zjFeV96aRQ,23263 
+matplotlib/tests/test_colors.py,sha256=YxpvtFxL2fVnAkQVYcI1b9VhenWF65vXwl1uJkAc6so,41833 +matplotlib/tests/test_compare_images.py,sha256=twmG-C7CB9EZpA9ogy6YrCtgH1TJZMj2sBjFaxeZx7M,3366 +matplotlib/tests/test_constrainedlayout.py,sha256=bwvGUepfjWEFfQDPgz5sE6QHn7OTJRGtXtfnkYgLYW4,13453 +matplotlib/tests/test_container.py,sha256=QgNodtC50-2SyP_8coGyp1nQsmLKhfivlJrfX4cujds,580 +matplotlib/tests/test_contour.py,sha256=v7nnoXzpkS1sAZZ4WtQ_otYkNJz0EGAHdCF5W1EUpXE,14229 +matplotlib/tests/test_cycles.py,sha256=7SnFRFaoAB9B77dtNkiDCBwr6QsQA4sSOQZxkzYZmYs,5820 +matplotlib/tests/test_dates.py,sha256=0d14ZtzF6oYQUXWCQ_kg0A1qeaM_p3vP9qogUhg9d-k,39907 +matplotlib/tests/test_determinism.py,sha256=vIsFXA2_4Wa3s2CxptfzjbvqYdtJASN0GjWHTEgqZr8,4729 +matplotlib/tests/test_dviread.py,sha256=USbWVyR1pY5HMuoHEHWgfBaCojUQkuxLt-J4gSkwBcw,2378 +matplotlib/tests/test_figure.py,sha256=zBewJVqO9xe5_XF6dadZ_2cwkMBp45l0FvqZHiz-Ocs,26386 +matplotlib/tests/test_font_manager.py,sha256=ZBCXRukt1fVQVpzrExKWWAZqk4rX6XGvU_uah63uGOk,7831 +matplotlib/tests/test_fontconfig_pattern.py,sha256=NeM0UxB4m4fGY2Zq84QoDGyKnEOAcgmi0Cs-VjyFY0I,2091 +matplotlib/tests/test_gridspec.py,sha256=JiyMWPmsW_wJc7m4f8b2KRqCKlBSSlUt3LUyhak0heM,997 +matplotlib/tests/test_image.py,sha256=Vtsqq5Y4unI29BK3OpZZsGeji--pHQJfjc8ztsFs_CM,36845 +matplotlib/tests/test_legend.py,sha256=jzxsQCCdjtb77NrW63ue7EdfANyFCZv7T8fjxERrRfQ,23535 +matplotlib/tests/test_lines.py,sha256=qXMwXdKo708S17A31-C8U-dCcjZfsyg3uuMdZXN-PLM,8602 +matplotlib/tests/test_marker.py,sha256=ixsrer_pKNc6vU7Idu_QrlUPSgHlZgYFGut2f3w9pP4,6612 +matplotlib/tests/test_mathtext.py,sha256=E0ZXbgGTazCLUXxSgifv6YmRwTPft7reW1nnUemlC14,15173 +matplotlib/tests/test_matplotlib.py,sha256=FgFg2Yym_bjlb0gtG1pcs14DPyUIMB7tRVtbiMieEQo,1499 +matplotlib/tests/test_mlab.py,sha256=-nRS28Dc9_XyVK3mmFRInJUc3gU9hBS6BDRaIW2-tmg,67393 +matplotlib/tests/test_offsetbox.py,sha256=Kx9DM2-C6REmqFN5GTO9xDL5vYYsISsO6e3pjzz9xpY,11136 
+matplotlib/tests/test_patches.py,sha256=09GJ6ElUqYnSE_Areb_hZwwFfe-kLOF4DZLtPJQFMGY,19838 +matplotlib/tests/test_path.py,sha256=-85BmXlJRb9GBUK3mijnbhBjzDxkKcy8E1zBdc7hGRo,16733 +matplotlib/tests/test_patheffects.py,sha256=gZEisNhkiVXG20buNgTV1Z4x0sfTP-C4_wK6ylAIiBs,5336 +matplotlib/tests/test_pickle.py,sha256=f_d79RREQTbn1FVFPbTqypIsnW4vxPcanwsfd-jreDo,5831 +matplotlib/tests/test_png.py,sha256=W_Otlwm8zTB17LRYObVh9CdNe3hut6XV7LGbWypPJeg,1352 +matplotlib/tests/test_polar.py,sha256=8l4RGgsUgKIeYmPtlu6nWzEJjjRB38xUaii3vkLKpW0,12082 +matplotlib/tests/test_preprocess_data.py,sha256=D82rQiZBljpK3ggYJgcNTkszgiXGJLv11HsOm8WdHP4,10573 +matplotlib/tests/test_pyplot.py,sha256=YQXJd8U460zL3wSIRhuS4WfvdeHTDOHcUd7rhVrGoPs,2564 +matplotlib/tests/test_quiver.py,sha256=gyqDugeUL8gOnMXTvtd-GHI99usI7L5FDvfvLDuJCLU,8321 +matplotlib/tests/test_rcparams.py,sha256=0qWFMCXfetscfFzEicsVrl9jgNZAPeUQjytuYseO6DY,19765 +matplotlib/tests/test_sankey.py,sha256=Eo3IScGRi0pvO4zcFj3STd3UWSzmfXKiHkk_GZsJ2TQ,322 +matplotlib/tests/test_scale.py,sha256=S7lWUqoySkltqDuon7PzPtG-CBVqBcDIWdQ8cuS6B5U,5865 +matplotlib/tests/test_simplification.py,sha256=dt0mnYH7Iy059fIpG1WMd4W-mEBGrk74RtCxzgr1vkI,11393 +matplotlib/tests/test_skew.py,sha256=1fVVQkXDPq9KwzpWY7RA1inpNHNmTMUNQxxvm_eKB3A,6475 +matplotlib/tests/test_sphinxext.py,sha256=PcrAosOag3sZrW00QoJobUHaV28Vn4GFaSY0ljD19Y4,2023 +matplotlib/tests/test_spines.py,sha256=qYJoX052KdKhtAXTIPNWEi_BqfHhVEQXhA92UcOObBg,3226 +matplotlib/tests/test_streamplot.py,sha256=c9Tf85Pc2pJlWRA5AgJ64XHHbi3fsySUoDCIjUrwKnM,3936 +matplotlib/tests/test_style.py,sha256=Xm9v8NSieC69Rnd_rGXUG_6SOdNPE7Gr--nMKI5nBbc,5905 +matplotlib/tests/test_subplots.py,sha256=zDMNiClqpFPUsThIgfyrksP3gls_ylygz6JSvMNy8pE,6155 +matplotlib/tests/test_table.py,sha256=Qs0eUsDGfNIqMMnLPDj412deLm7UxQb0wXml5fZoV-0,5925 +matplotlib/tests/test_testing.py,sha256=s8XUis7Li46dxTs0ErnLdiYHmudmllzsw2B0Bcu1SCU,653 +matplotlib/tests/test_texmanager.py,sha256=IoCMn9UcYSngl3m_nlWrDV9BZrNrtOiM2P5s5ULSb4w,475 
+matplotlib/tests/test_text.py,sha256=6hqNw62WHVi21Bm2oD-xdCRj5eyhIpWgjr-kHcaMfiA,22739 +matplotlib/tests/test_ticker.py,sha256=C0pc1h7FnMG1tf9B8W4g0n9B410OacvfZGPqmubCXmQ,52438 +matplotlib/tests/test_tightlayout.py,sha256=uS2TuGzXl0RyDVbxiJk68IPG6Lvdc768uEVW8bjCaZw,10634 +matplotlib/tests/test_transforms.py,sha256=DjlW9PJStojWJDFQBWnFRXNvQ4HPL5SUMQUoxpIZiUk,27982 +matplotlib/tests/test_triangulation.py,sha256=UwE8TMEKR_Q3tPi_3CVfN5PVGjb7hB641TiL-Drkf-g,47111 +matplotlib/tests/test_ttconv.py,sha256=MwDVqn8I2a09l7sN9vfluhABCpzoPasZOzrvoadYn8o,557 +matplotlib/tests/test_type1font.py,sha256=WHGvm7UADbY4Bislu7aTs-qonmYU_TyP4mCBWG6RR1Y,2139 +matplotlib/tests/test_units.py,sha256=BnJsIzwucyJb7WO7cXeXs9cvow48wmXl2q0Xz6igLJs,5889 +matplotlib/tests/test_usetex.py,sha256=4XmyxMcUWniqTGx0KGAGEo7E_6Qz18_qZEzWg74gOoM,2985 +matplotlib/tests/test_widgets.py,sha256=W7-t8XqjYvUkif10wVsoDteQQgiu32k55tz9RDuSCTk,16623 +matplotlib/texmanager.py,sha256=yDycQwJ014_j6SaZiHWe0tDEhDHhrY7QA-07xiSsn-c,16295 +matplotlib/text.py,sha256=YiUOb5wY1tf1gh8r4w2Y2cE3MIj6M24Sa2drtyQAGOE,67943 +matplotlib/textpath.py,sha256=U0SSfiAb6CFPAp0eLWj4jR5-lCEpLRdtyWSZzrFlkB0,15421 +matplotlib/ticker.py,sha256=XL31WXTR5NiIoW_o9YPMkhB_sN80BVMKmoLQfKzGk64,107299 +matplotlib/tight_bbox.py,sha256=3nPRkvIogLL6n-ZJPPRsOLCC_cZBEmgKAsJfvhC9xQk,3023 +matplotlib/tight_layout.py,sha256=wv_8EjQTMhXDCSTjFZTBOmzvUKAVuYt1q1LDqiyk72s,13494 +matplotlib/transforms.py,sha256=guwKIhVDmgbxtf86msod0KZhMvWoSbcAnQ663a9_Wqw,99721 +matplotlib/tri/__init__.py,sha256=mXgRgH1EgncFGFKziNNW2C6zp0VEHtuhWZAQ6MKctJg,268 +matplotlib/tri/__pycache__/__init__.cpython-36.pyc,, +matplotlib/tri/__pycache__/triangulation.cpython-36.pyc,, +matplotlib/tri/__pycache__/tricontour.cpython-36.pyc,, +matplotlib/tri/__pycache__/trifinder.cpython-36.pyc,, +matplotlib/tri/__pycache__/triinterpolate.cpython-36.pyc,, +matplotlib/tri/__pycache__/tripcolor.cpython-36.pyc,, +matplotlib/tri/__pycache__/triplot.cpython-36.pyc,, 
+matplotlib/tri/__pycache__/trirefine.cpython-36.pyc,, +matplotlib/tri/__pycache__/tritools.cpython-36.pyc,, +matplotlib/tri/triangulation.py,sha256=1Lw9tarP_KKZ8YQ9q_IgfPVbJrz3P86QYAK24dhOtzI,8546 +matplotlib/tri/tricontour.py,sha256=J5dPX6U5uqSuVChF2k8IznFVubzicoD3abcY5wRHzmE,11635 +matplotlib/tri/trifinder.py,sha256=1js01EAwh4I6uxFkQrrjozLxCJynlazZ0POamc-Ujfc,3561 +matplotlib/tri/triinterpolate.py,sha256=ODGAV7QauNe5-jKvzet9wiVp3kPYyHNlmOM42zI5wkg,65986 +matplotlib/tri/tripcolor.py,sha256=ySM-FzC0KRaz2XpAOh1B8x78qLRNzoa3gKSV4gC0_Rc,5138 +matplotlib/tri/triplot.py,sha256=iiS5jMj9P1LCuzQm_T5PLh8UXhQqTn7NSlHquiQK1dg,2845 +matplotlib/tri/trirefine.py,sha256=4mY037ghrhtOLE0_MW-f_KEbsgYXQ_buwxIGhgnYMiQ,13529 +matplotlib/tri/tritools.py,sha256=3MZ4E4xgXHuOUQmY_o0Py4hJ54Dm9Q5tRwwYpBeFmAs,10842 +matplotlib/ttconv.py,sha256=qZFAggBaXK9jMpA5YkRb2G7AyK9avcuxA6o4VbQLUKM,248 +matplotlib/type1font.py,sha256=bbwW3TLCfXUTilJmWXFE_KtnoJNrtw-M7e9LHUiW5Cw,12663 +matplotlib/units.py,sha256=HpMVm8uMp4JiL3wJDMDOH0MiqxVLRUy6WD2Iizs8BLU,7497 +matplotlib/widgets.py,sha256=fj5fyhbA4E0OojTRvBubazBoNkgy31PDbyumBxOtmBI,95922 +mpl_toolkits/axes_grid/__init__.py,sha256=3oKEqnq1Mee3hQEmxRcIxO3iS1WeYE_cJ5gZoIXFqUY,548 +mpl_toolkits/axes_grid/__pycache__/__init__.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/anchored_artists.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/angle_helper.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/axes_divider.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/axes_grid.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/axes_rgb.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/axes_size.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/axis_artist.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/axisline_style.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/axislines.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/clip_path.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/colorbar.cpython-36.pyc,, 
+mpl_toolkits/axes_grid/__pycache__/floating_axes.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/grid_finder.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/grid_helper_curvelinear.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/inset_locator.cpython-36.pyc,, +mpl_toolkits/axes_grid/__pycache__/parasite_axes.cpython-36.pyc,, +mpl_toolkits/axes_grid/anchored_artists.py,sha256=TDvJzLlt85cfXoiT1Yk4j0DEw_6HXeV6zb-tAdPP3zs,297 +mpl_toolkits/axes_grid/angle_helper.py,sha256=f77E-aQao6GkJfpEGmLAvpWgFkktkIV9d8YXVsDfsBQ,52 +mpl_toolkits/axes_grid/axes_divider.py,sha256=Sa_hLFBUH6F6P4apbj_9RQQJS-LfK8kMKe1U5AvHYqE,181 +mpl_toolkits/axes_grid/axes_grid.py,sha256=k7q2Tuf5yr29lDqK9DhixFgDi9R0G2ZQT__paXthg34,91 +mpl_toolkits/axes_grid/axes_rgb.py,sha256=V691yLhii-qIdxPFDoRF-28h-IINq1CDHUWa_fPmqDY,48 +mpl_toolkits/axes_grid/axes_size.py,sha256=SV0uHhIRHVYpGIZdw3gb8T4jvh0G24KPUPr11x9TGbY,49 +mpl_toolkits/axes_grid/axis_artist.py,sha256=VuHYa0LaHdU1YKDIir03ykI64Wd1zSYhnXuUEiIatAk,51 +mpl_toolkits/axes_grid/axisline_style.py,sha256=o2aVaavBc62VLXVYZCStRjGktDD8rfkwxwXTHJuKb-U,54 +mpl_toolkits/axes_grid/axislines.py,sha256=JFEkMHiAfYPEK1M3atZwmnMAn6KcgoUQALk0aApbvZw,49 +mpl_toolkits/axes_grid/clip_path.py,sha256=uSPvk9ovfA9UkX2fPAoLJPA4nBOEB27HaEKb4M-tpdI,49 +mpl_toolkits/axes_grid/colorbar.py,sha256=o2y9Q9Vk4xziksEVaCbmlA41JleaqXITkargdew4u-U,176 +mpl_toolkits/axes_grid/floating_axes.py,sha256=tQxJJwFSBxNDcZCWjLDKIQ3Ck81TkJuErE_wbbwIbN0,53 +mpl_toolkits/axes_grid/grid_finder.py,sha256=YrtbbCgHY71Cowpc0TJOxTTyg-vwJSsuL0iRXogOllI,51 +mpl_toolkits/axes_grid/grid_helper_curvelinear.py,sha256=yh_X2vXTRUuGbrQxynWOO400T3Sifsxl5oFmNdrinrU,63 +mpl_toolkits/axes_grid/inset_locator.py,sha256=lvj8PMLvtLz0YA2KIUG6edwwI3zC0qbHo0V5ZPG-eKc,220 +mpl_toolkits/axes_grid/parasite_axes.py,sha256=cH5AdjQhngqXD_9-FALVW48GwLw9LvtBMkjXY9vvoK0,448 +mpl_toolkits/axes_grid1/__init__.py,sha256=Dj6jFICuj-u5Om3DuZvW_9BQC2-dXXz06xhFNZQCmlo,209 
+mpl_toolkits/axes_grid1/__pycache__/__init__.cpython-36.pyc,, +mpl_toolkits/axes_grid1/__pycache__/anchored_artists.cpython-36.pyc,, +mpl_toolkits/axes_grid1/__pycache__/axes_divider.cpython-36.pyc,, +mpl_toolkits/axes_grid1/__pycache__/axes_grid.cpython-36.pyc,, +mpl_toolkits/axes_grid1/__pycache__/axes_rgb.cpython-36.pyc,, +mpl_toolkits/axes_grid1/__pycache__/axes_size.cpython-36.pyc,, +mpl_toolkits/axes_grid1/__pycache__/colorbar.cpython-36.pyc,, +mpl_toolkits/axes_grid1/__pycache__/inset_locator.cpython-36.pyc,, +mpl_toolkits/axes_grid1/__pycache__/mpl_axes.cpython-36.pyc,, +mpl_toolkits/axes_grid1/__pycache__/parasite_axes.cpython-36.pyc,, +mpl_toolkits/axes_grid1/anchored_artists.py,sha256=K36y4695wWBdr-UUJ2xWBm3StMWZH23c_SdzLHH0BMQ,20730 +mpl_toolkits/axes_grid1/axes_divider.py,sha256=otSCDY3RHMqL-JH5Wvwe_ZA5vNuhMvY33qGW-ZXEep4,26561 +mpl_toolkits/axes_grid1/axes_grid.py,sha256=RaoJpq3lXVAeKCaH7s2cmGh8-Yw4cjLtK2qxkDVX8d4,24226 +mpl_toolkits/axes_grid1/axes_rgb.py,sha256=78d239ySNQX3Dl03-3nwxeqjCgTYJs3ZQ9bzSU4HM4k,5339 +mpl_toolkits/axes_grid1/axes_size.py,sha256=RaAcOUa4o3uGQGueeLhKDdlMn3E9hUjvXCQzmTtQBYg,7822 +mpl_toolkits/axes_grid1/colorbar.py,sha256=Oi4TvfoOmI7n11HSWcwXZ9a53g7IbBZPrW-l9UHN8yM,28718 +mpl_toolkits/axes_grid1/inset_locator.py,sha256=vkPbxOMbq9hzvFn1VrweexDtG_lm4xSl8HoG36NjI7Y,23765 +mpl_toolkits/axes_grid1/mpl_axes.py,sha256=zIyP1XxXI1WqIbtT2v-9BbwBdx89hdZ2Tyg-X6MaDLk,4514 +mpl_toolkits/axes_grid1/parasite_axes.py,sha256=J-j_RvJd7js57ybEslbKIEvciaVcIKb04e6tbBYqpXQ,14832 +mpl_toolkits/axisartist/__init__.py,sha256=Aa_vsf6h8XO-YR-IDQckhtAN6HBFA8-3nnGEGKhV6hU,730 +mpl_toolkits/axisartist/__pycache__/__init__.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/angle_helper.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/axes_divider.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/axes_grid.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/axes_rgb.cpython-36.pyc,, 
+mpl_toolkits/axisartist/__pycache__/axis_artist.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/axisline_style.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/axislines.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/clip_path.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/floating_axes.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/grid_finder.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/grid_helper_curvelinear.cpython-36.pyc,, +mpl_toolkits/axisartist/__pycache__/parasite_axes.cpython-36.pyc,, +mpl_toolkits/axisartist/angle_helper.py,sha256=p2P2NldZ-n9EM0PAi80PU2Sl3xbPpE5UdzRRr0MgwYQ,13618 +mpl_toolkits/axisartist/axes_divider.py,sha256=2ReTIrF3nwwXtxiIchWRjImPLnxeoce8J17Gd8UsbY4,129 +mpl_toolkits/axisartist/axes_grid.py,sha256=-BiKRKmUY9SN2YkPwFkbsnrtDCux3HBQV2XbzOrKrrA,365 +mpl_toolkits/axisartist/axes_rgb.py,sha256=a2lvQ9MxvXjh9ZXDvzM6VNBA8AGg5xngu_pKAx8PWOc,190 +mpl_toolkits/axisartist/axis_artist.py,sha256=dB-QohGsgBE-ifygSGdAB1tZOeBAmiltny0WqomG8qU,43295 +mpl_toolkits/axisartist/axisline_style.py,sha256=oCZdSjWsCgs3HwnF9fAWjUyN49prLDxMDXDrwzA2EoY,5192 +mpl_toolkits/axisartist/axislines.py,sha256=0HXaSscHdQ_dgME8OKZGwX6XGCfScsYPLXuUAEVeZbE,20322 +mpl_toolkits/axisartist/clip_path.py,sha256=fQxXkgaFroThzHHeWDFfYLoAXbYoOsZt1awRFkSPZeY,3895 +mpl_toolkits/axisartist/floating_axes.py,sha256=GOhgBLbJg9kHCPwEsxIaNnGkY-XCD6DaZ16ePpesaKg,13233 +mpl_toolkits/axisartist/grid_finder.py,sha256=CvALRLYWhBp3bwcQc5heThDZTiEGVHmEq7E5N9ssQS0,11271 +mpl_toolkits/axisartist/grid_helper_curvelinear.py,sha256=CEqPWCyRbHrCSjTQhCLlEIJX4DxQXmVtkpNy5ay-stM,14656 +mpl_toolkits/axisartist/parasite_axes.py,sha256=Kj-_p0dpdSgnGzElDHtmBW7u8wdka5wfn150rTD4Uy4,425 +mpl_toolkits/mplot3d/__init__.py,sha256=-7jYs7BlOeVjnGxLWEfMb93i-vzMi6Hdi9CLsAWOD4k,28 +mpl_toolkits/mplot3d/__pycache__/__init__.cpython-36.pyc,, +mpl_toolkits/mplot3d/__pycache__/art3d.cpython-36.pyc,, +mpl_toolkits/mplot3d/__pycache__/axes3d.cpython-36.pyc,, 
+mpl_toolkits/mplot3d/__pycache__/axis3d.cpython-36.pyc,, +mpl_toolkits/mplot3d/__pycache__/proj3d.cpython-36.pyc,, +mpl_toolkits/mplot3d/art3d.py,sha256=MgozhPS_h7hSM4VwBdEPGiieAN51MojaJYdRDhdSvQU,27006 +mpl_toolkits/mplot3d/axes3d.py,sha256=J_r64AVS77Qex9jXyLOYpTYDjFFObyAlohggFF4wY4g,106493 +mpl_toolkits/mplot3d/axis3d.py,sha256=14a9N47mqzSNHfnqoPzG8CxBTNsqqkCxBhh2AAP9fOc,19299 +mpl_toolkits/mplot3d/proj3d.py,sha256=Lbc0nw5w6Cvune2_kxCwIJ9Gg5VryOD0Ilue_3lU9dY,4441 +mpl_toolkits/tests/__init__.py,sha256=jY2lF4letZKOagkrt6B_HnnKouuCgo8hG3saDzq8eGI,375 +mpl_toolkits/tests/__pycache__/__init__.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/conftest.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_axes_grid.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_axes_grid1.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_axisartist_angle_helper.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_axisartist_axis_artist.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_axisartist_axislines.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_axisartist_clip_path.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_axisartist_floating_axes.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_axisartist_grid_finder.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_axisartist_grid_helper_curvelinear.cpython-36.pyc,, +mpl_toolkits/tests/__pycache__/test_mplot3d.cpython-36.pyc,, +mpl_toolkits/tests/conftest.py,sha256=C8DbesGlumOhSREkWrdBGaWu0vchqik17dTokP8aDAA,216 +mpl_toolkits/tests/test_axes_grid.py,sha256=0mL0Wgf9QrjrhFOwDMIIKxEzK3FnbX0mS8sU4d6rp1E,2514 +mpl_toolkits/tests/test_axes_grid1.py,sha256=suLYN5t9RoT6y0tkSHYh_W8AmXsRzUQUrsmd3tUiils,18571 +mpl_toolkits/tests/test_axisartist_angle_helper.py,sha256=SI_lyCLbVKikZp9DRBpq--MbfT10vSQsNx1Z5HMeqMw,5811 +mpl_toolkits/tests/test_axisartist_axis_artist.py,sha256=yzGoWLFPlnkCYZKMO5M6KEipnZD0PsWK5UNToX3LaVU,3107 
+mpl_toolkits/tests/test_axisartist_axislines.py,sha256=KUI4stZL2DRJyKdnv27czKk2qt4wuBQCOV_w59dfR-Q,2535 +mpl_toolkits/tests/test_axisartist_clip_path.py,sha256=Hj622Au6sUcrIN6ryWnm9UUxXVd2isWxFZCUo1YicY0,1036 +mpl_toolkits/tests/test_axisartist_floating_axes.py,sha256=91QavzOE2xtqTH8VcU68R2Uqg3IALIUI-qwZcCeaG5E,4247 +mpl_toolkits/tests/test_axisartist_grid_finder.py,sha256=oNZQ1PoRgpUoWqg8qdvgplZW3iFK6FhXcVS9LxuVqZE,338 +mpl_toolkits/tests/test_axisartist_grid_helper_curvelinear.py,sha256=pa5vb6wbkiv404-5b1O83IatUOObww4wbxDh9lSlZes,7729 +mpl_toolkits/tests/test_mplot3d.py,sha256=AZ5JHgIb1tJEDyrXC02vi9TteF3JCn9PQ-zsuBuIqrc,36118 +pylab.py,sha256=Ni2YJ31pBmyfkWr5WyTFmS1qM40JuEeKrJhYKWbd6KY,93 diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/WHEEL b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/WHEEL new file mode 100644 index 000000000..f2456e30b --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: false +Tag: cp36-cp36m-win32 + diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/namespace_packages.txt b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/namespace_packages.txt new file mode 100644 index 000000000..ba2e3ed90 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/namespace_packages.txt @@ -0,0 +1 @@ +mpl_toolkits diff --git a/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/top_level.txt b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/top_level.txt new file mode 100644 index 000000000..0eb77e4d9 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib-3.3.2.dist-info/top_level.txt @@ -0,0 +1,3 @@ +matplotlib +mpl_toolkits +pylab diff --git a/venv/Lib/site-packages/matplotlib/__init__.py b/venv/Lib/site-packages/matplotlib/__init__.py new file mode 100644 index 000000000..340eaeb83 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/__init__.py @@ -0,0 +1,1474 @@ +""" +An object-oriented plotting 
library. + +A procedural interface is provided by the companion pyplot module, +which may be imported directly, e.g.:: + + import matplotlib.pyplot as plt + +or using ipython:: + + ipython + +at your terminal, followed by:: + + In [1]: %matplotlib + In [2]: import matplotlib.pyplot as plt + +at the ipython shell prompt. + +For the most part, direct use of the object-oriented library is encouraged when +programming; pyplot is primarily for working interactively. The exceptions are +the pyplot functions `.pyplot.figure`, `.pyplot.subplot`, `.pyplot.subplots`, +and `.pyplot.savefig`, which can greatly simplify scripting. + +Modules include: + + :mod:`matplotlib.axes` + The `~.axes.Axes` class. Most pyplot functions are wrappers for + `~.axes.Axes` methods. The axes module is the highest level of OO + access to the library. + + :mod:`matplotlib.figure` + The `.Figure` class. + + :mod:`matplotlib.artist` + The `.Artist` base class for all classes that draw things. + + :mod:`matplotlib.lines` + The `.Line2D` class for drawing lines and markers. + + :mod:`matplotlib.patches` + Classes for drawing polygons. + + :mod:`matplotlib.text` + The `.Text` and `.Annotation` classes. + + :mod:`matplotlib.image` + The `.AxesImage` and `.FigureImage` classes. + + :mod:`matplotlib.collections` + Classes for efficient drawing of groups of lines or polygons. + + :mod:`matplotlib.colors` + Color specifications and making colormaps. + + :mod:`matplotlib.cm` + Colormaps, and the `.ScalarMappable` mixin class for providing color + mapping functionality to other classes. + + :mod:`matplotlib.ticker` + Calculation of tick mark locations and formatting of tick labels. + + :mod:`matplotlib.backends` + A subpackage with modules for various GUI libraries and output formats. + +The base matplotlib namespace includes: + + `~matplotlib.rcParams` + Default configuration settings; their defaults may be overridden using + a :file:`matplotlibrc` file. 
+ + `~matplotlib.use` + Setting the Matplotlib backend. This should be called before any + figure is created, because it is not possible to switch between + different GUI backends after that. + +Matplotlib was initially written by John D. Hunter (1968-2012) and is now +developed and maintained by a host of others. + +Occasionally the internal documentation (python docstrings) will refer +to MATLAB®, a registered trademark of The MathWorks, Inc. +""" + +import atexit +from collections import namedtuple +from collections.abc import MutableMapping +import contextlib +from distutils.version import LooseVersion +import functools +import importlib +import inspect +from inspect import Parameter +import locale +import logging +import os +from pathlib import Path +import pprint +import re +import shutil +import subprocess +import sys +import tempfile +import warnings + +# cbook must import matplotlib only within function +# definitions, so it is safe to import from it here. +from . import cbook, rcsetup +from matplotlib.cbook import MatplotlibDeprecationWarning, sanitize_sequence +from matplotlib.cbook import mplDeprecation # deprecated +from matplotlib.rcsetup import validate_backend, cycler + +import numpy + +# Get the version from the _version.py versioneer file. For a git checkout, +# this is computed based on the number of commits since the last tag. +from ._version import get_versions +__version__ = str(get_versions()['version']) +del get_versions + +_log = logging.getLogger(__name__) + +__bibtex__ = r"""@Article{Hunter:2007, + Author = {Hunter, J. 
D.}, + Title = {Matplotlib: A 2D graphics environment}, + Journal = {Computing in Science \& Engineering}, + Volume = {9}, + Number = {3}, + Pages = {90--95}, + abstract = {Matplotlib is a 2D graphics package used for Python + for application development, interactive scripting, and + publication-quality image generation across user + interfaces and operating systems.}, + publisher = {IEEE COMPUTER SOC}, + year = 2007 +}""" + + +@cbook.deprecated("3.2") +def compare_versions(a, b): + """Return whether version *a* is greater than or equal to version *b*.""" + if isinstance(a, bytes): + cbook.warn_deprecated( + "3.0", message="compare_versions arguments should be strs.") + a = a.decode('ascii') + if isinstance(b, bytes): + cbook.warn_deprecated( + "3.0", message="compare_versions arguments should be strs.") + b = b.decode('ascii') + if a: + return LooseVersion(a) >= LooseVersion(b) + else: + return False + + +def _check_versions(): + + # Quickfix to ensure Microsoft Visual C++ redistributable + # DLLs are loaded before importing kiwisolver + from . import ft2font + + for modname, minver in [ + ("cycler", "0.10"), + ("dateutil", "2.1"), + ("kiwisolver", "1.0.1"), + ("numpy", "1.15"), + ("pyparsing", "2.0.1"), + ]: + module = importlib.import_module(modname) + if LooseVersion(module.__version__) < minver: + raise ImportError("Matplotlib requires {}>={}; you have {}" + .format(modname, minver, module.__version__)) + + +_check_versions() + + +# The decorator ensures this always returns the same handler (and it is only +# attached once). +@functools.lru_cache() +def _ensure_handler(): + """ + The first time this function is called, attach a `StreamHandler` using the + same format as `logging.basicConfig` to the Matplotlib root logger. + + Return this handler every time this function is called. 
+ """ + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) + _log.addHandler(handler) + return handler + + +def set_loglevel(level): + """ + Set Matplotlib's root logger and root logger handler level, creating + the handler if it does not exist yet. + + Typically, one should call ``set_loglevel("info")`` or + ``set_loglevel("debug")`` to get additional debugging information. + + Parameters + ---------- + level : {"notset", "debug", "info", "warning", "error", "critical"} + The log level of the handler. + + Notes + ----- + The first time this function is called, an additional handler is attached + to Matplotlib's root handler; this handler is reused every time and this + function simply manipulates the logger and handler's level. + """ + _log.setLevel(level.upper()) + _ensure_handler().setLevel(level.upper()) + + +def _logged_cached(fmt, func=None): + """ + Decorator that logs a function's return value, and memoizes that value. + + After :: + + @_logged_cached(fmt) + def func(): ... + + the first call to *func* will log its return value at the DEBUG level using + %-format string *fmt*, and memoize it; later calls to *func* will directly + return that value. + """ + if func is None: # Return the actual decorator. + return functools.partial(_logged_cached, fmt) + + called = False + ret = None + + @functools.wraps(func) + def wrapper(**kwargs): + nonlocal called, ret + if not called: + ret = func(**kwargs) + called = True + _log.debug(fmt, ret) + return ret + + return wrapper + + +_ExecInfo = namedtuple("_ExecInfo", "executable version") + + +class ExecutableNotFoundError(FileNotFoundError): + """ + Error raised when an executable that Matplotlib optionally + depends on can't be found. + """ + pass + + +@functools.lru_cache() +def _get_executable_info(name): + """ + Get the version of some executable that Matplotlib optionally depends on. + + .. 
warning: + The list of executables that this function supports is set according to + Matplotlib's internal needs, and may change without notice. + + Parameters + ---------- + name : str + The executable to query. The following values are currently supported: + "dvipng", "gs", "inkscape", "magick", "pdftops". This list is subject + to change without notice. + + Returns + ------- + If the executable is found, a namedtuple with fields ``executable`` (`str`) + and ``version`` (`distutils.version.LooseVersion`, or ``None`` if the + version cannot be determined). + + Raises + ------ + ExecutableNotFoundError + If the executable is not found or older than the oldest version + supported by Matplotlib. + ValueError + If the executable is not one that we know how to query. + """ + + def impl(args, regex, min_ver=None, ignore_exit_code=False): + # Execute the subprocess specified by args; capture stdout and stderr. + # Search for a regex match in the output; if the match succeeds, the + # first group of the match is the version. + # Return an _ExecInfo if the executable exists, and has a version of + # at least min_ver (if set); else, raise ExecutableNotFoundError. 
+ try: + output = subprocess.check_output( + args, stderr=subprocess.STDOUT, + universal_newlines=True, errors="replace") + except subprocess.CalledProcessError as _cpe: + if ignore_exit_code: + output = _cpe.output + else: + raise ExecutableNotFoundError(str(_cpe)) from _cpe + except OSError as _ose: + raise ExecutableNotFoundError(str(_ose)) from _ose + match = re.search(regex, output) + if match: + version = LooseVersion(match.group(1)) + if min_ver is not None and version < min_ver: + raise ExecutableNotFoundError( + f"You have {args[0]} version {version} but the minimum " + f"version supported by Matplotlib is {min_ver}") + return _ExecInfo(args[0], version) + else: + raise ExecutableNotFoundError( + f"Failed to determine the version of {args[0]} from " + f"{' '.join(args)}, which output {output}") + + if name == "dvipng": + return impl(["dvipng", "-version"], "(?m)^dvipng(?: .*)? (.+)", "1.6") + elif name == "gs": + execs = (["gswin32c", "gswin64c", "mgs", "gs"] # "mgs" for miktex. + if sys.platform == "win32" else + ["gs"]) + for e in execs: + try: + return impl([e, "--version"], "(.*)", "9") + except ExecutableNotFoundError: + pass + message = "Failed to find a Ghostscript installation" + raise ExecutableNotFoundError(message) + elif name == "inkscape": + try: + # Try headless option first (needed for Inkscape version < 1.0): + return impl(["inkscape", "--without-gui", "-V"], + "Inkscape ([^ ]*)") + except ExecutableNotFoundError: + pass # Suppress exception chaining. + # If --without-gui is not accepted, we may be using Inkscape >= 1.0 so + # try without it: + return impl(["inkscape", "-V"], "Inkscape ([^ ]*)") + elif name == "magick": + path = None + if sys.platform == "win32": + # Check the registry to avoid confusing ImageMagick's convert with + # Windows's builtin convert.exe. 
+ import winreg + binpath = "" + for flag in [0, winreg.KEY_WOW64_32KEY, winreg.KEY_WOW64_64KEY]: + try: + with winreg.OpenKeyEx( + winreg.HKEY_LOCAL_MACHINE, + r"Software\Imagemagick\Current", + 0, winreg.KEY_QUERY_VALUE | flag) as hkey: + binpath = winreg.QueryValueEx(hkey, "BinPath")[0] + except OSError: + pass + if binpath: + for name in ["convert.exe", "magick.exe"]: + candidate = Path(binpath, name) + if candidate.exists(): + path = str(candidate) + break + else: + path = "convert" + if path is None: + raise ExecutableNotFoundError( + "Failed to find an ImageMagick installation") + return impl([path, "--version"], r"^Version: ImageMagick (\S*)") + elif name == "pdftops": + info = impl(["pdftops", "-v"], "^pdftops version (.*)", + ignore_exit_code=True) + if info and not ("3.0" <= info.version + # poppler version numbers. + or "0.9" <= info.version <= "1.0"): + raise ExecutableNotFoundError( + f"You have pdftops version {info.version} but the minimum " + f"version supported by Matplotlib is 3.0") + return info + else: + raise ValueError("Unknown executable: {!r}".format(name)) + + +@cbook.deprecated("3.2") +def checkdep_ps_distiller(s): + if not s: + return False + try: + _get_executable_info("gs") + except ExecutableNotFoundError: + _log.warning( + "Setting rcParams['ps.usedistiller'] requires ghostscript.") + return False + if s == "xpdf": + try: + _get_executable_info("pdftops") + except ExecutableNotFoundError: + _log.warning( + "Setting rcParams['ps.usedistiller'] to 'xpdf' requires xpdf.") + return False + return s + + +def checkdep_usetex(s): + if not s: + return False + if not shutil.which("tex"): + _log.warning("usetex mode requires TeX.") + return False + try: + _get_executable_info("dvipng") + except ExecutableNotFoundError: + _log.warning("usetex mode requires dvipng.") + return False + try: + _get_executable_info("gs") + except ExecutableNotFoundError: + _log.warning("usetex mode requires ghostscript.") + return False + return True + + 
+@cbook.deprecated("3.2", alternative="os.path.expanduser('~')") +@_logged_cached('$HOME=%s') +def get_home(): + """ + Return the user's home directory. + + If the user's home directory cannot be found, return None. + """ + try: + return str(Path.home()) + except Exception: + return None + + +def _get_xdg_config_dir(): + """ + Return the XDG configuration directory, according to the XDG base + directory spec: + + https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html + """ + return os.environ.get('XDG_CONFIG_HOME') or str(Path.home() / ".config") + + +def _get_xdg_cache_dir(): + """ + Return the XDG cache directory, according to the XDG base directory spec: + + https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html + """ + return os.environ.get('XDG_CACHE_HOME') or str(Path.home() / ".cache") + + +def _get_config_or_cache_dir(xdg_base): + configdir = os.environ.get('MPLCONFIGDIR') + if configdir: + configdir = Path(configdir).resolve() + elif sys.platform.startswith(('linux', 'freebsd')) and xdg_base: + configdir = Path(xdg_base, "matplotlib") + else: + configdir = Path.home() / ".matplotlib" + try: + configdir.mkdir(parents=True, exist_ok=True) + except OSError: + pass + else: + if os.access(str(configdir), os.W_OK) and configdir.is_dir(): + return str(configdir) + # If the config or cache directory cannot be created or is not a writable + # directory, create a temporary one. 
+ tmpdir = os.environ["MPLCONFIGDIR"] = \ + tempfile.mkdtemp(prefix="matplotlib-") + atexit.register(shutil.rmtree, tmpdir) + _log.warning( + "Matplotlib created a temporary config/cache directory at %s because " + "the default path (%s) is not a writable directory; it is highly " + "recommended to set the MPLCONFIGDIR environment variable to a " + "writable directory, in particular to speed up the import of " + "Matplotlib and to better support multiprocessing.", + tmpdir, configdir) + return tmpdir + + +@_logged_cached('CONFIGDIR=%s') +def get_configdir(): + """ + Return the string path of the the configuration directory. + + The directory is chosen as follows: + + 1. If the MPLCONFIGDIR environment variable is supplied, choose that. + 2. On Linux, follow the XDG specification and look first in + ``$XDG_CONFIG_HOME``, if defined, or ``$HOME/.config``. On other + platforms, choose ``$HOME/.matplotlib``. + 3. If the chosen directory exists and is writable, use that as the + configuration directory. + 4. Else, create a temporary directory, and use it as the configuration + directory. + """ + return _get_config_or_cache_dir(_get_xdg_config_dir()) + + +@_logged_cached('CACHEDIR=%s') +def get_cachedir(): + """ + Return the string path of the cache directory. + + The procedure used to find the directory is the same as for + _get_config_dir, except using ``$XDG_CACHE_HOME``/``$HOME/.cache`` instead. + """ + return _get_config_or_cache_dir(_get_xdg_cache_dir()) + + +@_logged_cached('matplotlib data path: %s') +def get_data_path(*, _from_rc=None): + """Return the path to Matplotlib data.""" + if _from_rc is not None: + cbook.warn_deprecated( + "3.2", + message=("Setting the datapath via matplotlibrc is deprecated " + "%(since)s and will be removed %(removal)s."), + removal='3.4') + path = Path(_from_rc) + if path.is_dir(): + return str(path) + else: + warnings.warn(f"You passed datapath: {_from_rc!r} in your " + f"matplotribrc file ({matplotlib_fname()}). 
" + "However this path does not exist, falling back " + "to standard paths.") + + return _get_data_path() + + +@_logged_cached('(private) matplotlib data path: %s') +def _get_data_path(): + path = Path(__file__).with_name("mpl-data") + if path.is_dir(): + return str(path) + + cbook.warn_deprecated( + "3.2", message="Matplotlib installs where the data is not in the " + "mpl-data subdirectory of the package are deprecated since %(since)s " + "and support for them will be removed %(removal)s.") + + def get_candidate_paths(): + # setuptools' namespace_packages may hijack this init file + # so need to try something known to be in Matplotlib, not basemap. + import matplotlib.afm + yield Path(matplotlib.afm.__file__).with_name('mpl-data') + # py2exe zips pure python, so still need special check. + if getattr(sys, 'frozen', None): + yield Path(sys.executable).with_name('mpl-data') + # Try again assuming we need to step up one more directory. + yield Path(sys.executable).parent.with_name('mpl-data') + # Try again assuming sys.path[0] is a dir not a exe. + yield Path(sys.path[0]) / 'mpl-data' + + for path in get_candidate_paths(): + if path.is_dir(): + defaultParams['datapath'][0] = str(path) + return str(path) + + raise RuntimeError('Could not find the matplotlib data files') + + +def matplotlib_fname(): + """ + Get the location of the config file. + + The file location is determined in the following order + + - ``$PWD/matplotlibrc`` + - ``$MATPLOTLIBRC`` if it is not a directory + - ``$MATPLOTLIBRC/matplotlibrc`` + - ``$MPLCONFIGDIR/matplotlibrc`` + - On Linux, + - ``$XDG_CONFIG_HOME/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME`` + is defined) + - or ``$HOME/.config/matplotlib/matplotlibrc`` (if ``$XDG_CONFIG_HOME`` + is not defined) + - On other platforms, + - ``$HOME/.matplotlib/matplotlibrc`` if ``$HOME`` is defined + - Lastly, it looks in ``$MATPLOTLIBDATA/matplotlibrc``, which should always + exist. 
+ """ + + def gen_candidates(): + yield os.path.join(os.getcwd(), 'matplotlibrc') + try: + matplotlibrc = os.environ['MATPLOTLIBRC'] + except KeyError: + pass + else: + yield matplotlibrc + yield os.path.join(matplotlibrc, 'matplotlibrc') + yield os.path.join(get_configdir(), 'matplotlibrc') + yield os.path.join(_get_data_path(), 'matplotlibrc') + + for fname in gen_candidates(): + if os.path.exists(fname) and not os.path.isdir(fname): + return fname + + raise RuntimeError("Could not find matplotlibrc file; your Matplotlib " + "install is broken") + + +# rcParams deprecated and automatically mapped to another key. +# Values are tuples of (version, new_name, f_old2new, f_new2old). +_deprecated_map = {} + +# rcParams deprecated; some can manually be mapped to another key. +# Values are tuples of (version, new_name_or_None). +_deprecated_ignore_map = { +} + +# rcParams deprecated; can use None to suppress warnings; remain actually +# listed in the rcParams (not included in _all_deprecated). +# Values are tuples of (version,) +_deprecated_remain_as_none = { + 'datapath': ('3.2.1',), + 'animation.avconv_path': ('3.3',), + 'animation.avconv_args': ('3.3',), + 'animation.html_args': ('3.3',), + 'mathtext.fallback_to_cm': ('3.3',), + 'keymap.all_axes': ('3.3',), + 'savefig.jpeg_quality': ('3.3',), + 'text.latex.preview': ('3.3',), +} + + +_all_deprecated = {*_deprecated_map, *_deprecated_ignore_map} + + +class RcParams(MutableMapping, dict): + """ + A dictionary object including validation. + + Validating functions are defined and associated with rc parameters in + :mod:`matplotlib.rcsetup`. 
+ + See Also + -------- + :ref:`customizing-with-matplotlibrc-files` + """ + + validate = rcsetup._validators + + # validate values on the way in + def __init__(self, *args, **kwargs): + self.update(*args, **kwargs) + + def __setitem__(self, key, val): + try: + if key in _deprecated_map: + version, alt_key, alt_val, inverse_alt = _deprecated_map[key] + cbook.warn_deprecated( + version, name=key, obj_type="rcparam", alternative=alt_key) + key = alt_key + val = alt_val(val) + elif key in _deprecated_remain_as_none and val is not None: + version, = _deprecated_remain_as_none[key] + cbook.warn_deprecated( + version, name=key, obj_type="rcparam") + elif key in _deprecated_ignore_map: + version, alt_key = _deprecated_ignore_map[key] + cbook.warn_deprecated( + version, name=key, obj_type="rcparam", alternative=alt_key) + return + elif key == 'backend': + if val is rcsetup._auto_backend_sentinel: + if 'backend' in self: + return + try: + cval = self.validate[key](val) + except ValueError as ve: + raise ValueError(f"Key {key}: {ve}") from None + dict.__setitem__(self, key, cval) + except KeyError as err: + raise KeyError( + f"{key} is not a valid rc parameter (see rcParams.keys() for " + f"a list of valid parameters)") from err + + def __getitem__(self, key): + if key in _deprecated_map: + version, alt_key, alt_val, inverse_alt = _deprecated_map[key] + cbook.warn_deprecated( + version, name=key, obj_type="rcparam", alternative=alt_key) + return inverse_alt(dict.__getitem__(self, alt_key)) + + elif key in _deprecated_ignore_map: + version, alt_key = _deprecated_ignore_map[key] + cbook.warn_deprecated( + version, name=key, obj_type="rcparam", alternative=alt_key) + return dict.__getitem__(self, alt_key) if alt_key else None + + elif key == "backend": + val = dict.__getitem__(self, key) + if val is rcsetup._auto_backend_sentinel: + from matplotlib import pyplot as plt + plt.switch_backend(rcsetup._auto_backend_sentinel) + + elif key == "datapath": + return get_data_path() + + 
return dict.__getitem__(self, key) + + def __repr__(self): + class_name = self.__class__.__name__ + indent = len(class_name) + 1 + with cbook._suppress_matplotlib_deprecation_warning(): + repr_split = pprint.pformat(dict(self), indent=1, + width=80 - indent).split('\n') + repr_indented = ('\n' + ' ' * indent).join(repr_split) + return '{}({})'.format(class_name, repr_indented) + + def __str__(self): + return '\n'.join(map('{0[0]}: {0[1]}'.format, sorted(self.items()))) + + def __iter__(self): + """Yield sorted list of keys.""" + with cbook._suppress_matplotlib_deprecation_warning(): + yield from sorted(dict.__iter__(self)) + + def __len__(self): + return dict.__len__(self) + + def find_all(self, pattern): + """ + Return the subset of this RcParams dictionary whose keys match, + using :func:`re.search`, the given ``pattern``. + + .. note:: + + Changes to the returned dictionary are *not* propagated to + the parent RcParams dictionary. + + """ + pattern_re = re.compile(pattern) + return RcParams((key, value) + for key, value in self.items() + if pattern_re.search(key)) + + def copy(self): + return {k: dict.__getitem__(self, k) for k in self} + + +def rc_params(fail_on_error=False): + """Construct a `RcParams` instance from the default Matplotlib rc file.""" + return rc_params_from_file(matplotlib_fname(), fail_on_error) + + +URL_REGEX = re.compile(r'^http://|^https://|^ftp://|^file:') + + +def is_url(filename): + """Return True if string is an http, ftp, or file URL path.""" + return URL_REGEX.match(filename) is not None + + +@functools.lru_cache() +def _get_ssl_context(): + import certifi + import ssl + return ssl.create_default_context(cafile=certifi.where()) + + +@contextlib.contextmanager +def _open_file_or_url(fname): + if not isinstance(fname, Path) and is_url(fname): + import urllib.request + with urllib.request.urlopen(fname, context=_get_ssl_context()) as f: + yield (line.decode('utf-8') for line in f) + else: + fname = os.path.expanduser(fname) + encoding = 
locale.getpreferredencoding(do_setlocale=False) + if encoding is None: + encoding = "utf-8" + with open(fname, encoding=encoding) as f: + yield f + + +def _rc_params_in_file(fname, transform=lambda x: x, fail_on_error=False): + """ + Construct a `RcParams` instance from file *fname*. + + Unlike `rc_params_from_file`, the configuration class only contains the + parameters specified in the file (i.e. default values are not filled in). + + Parameters + ---------- + fname : path-like + The loaded file. + transform : callable, default: the identity function + A function called on each individual line of the file to transform it, + before further parsing. + fail_on_error : bool, default: False + Whether invalid entries should result in an exception or a warning. + """ + rc_temp = {} + with _open_file_or_url(fname) as fd: + try: + for line_no, line in enumerate(fd, 1): + line = transform(line) + strippedline = line.split('#', 1)[0].strip() + if not strippedline: + continue + tup = strippedline.split(':', 1) + if len(tup) != 2: + _log.warning('Missing colon in file %r, line %d (%r)', + fname, line_no, line.rstrip('\n')) + continue + key, val = tup + key = key.strip() + val = val.strip() + if key in rc_temp: + _log.warning('Duplicate key in file %r, line %d (%r)', + fname, line_no, line.rstrip('\n')) + rc_temp[key] = (val, line, line_no) + except UnicodeDecodeError: + _log.warning('Cannot decode configuration file %s with encoding ' + '%s, check LANG and LC_* variables.', + fname, + locale.getpreferredencoding(do_setlocale=False) + or 'utf-8 (default)') + raise + + config = RcParams() + + for key, (val, line, line_no) in rc_temp.items(): + if key in rcsetup._validators: + if fail_on_error: + config[key] = val # try to convert to proper type or raise + else: + try: + config[key] = val # try to convert to proper type or skip + except Exception as msg: + _log.warning('Bad value in file %r, line %d (%r): %s', + fname, line_no, line.rstrip('\n'), msg) + elif key in 
_deprecated_ignore_map: + version, alt_key = _deprecated_ignore_map[key] + cbook.warn_deprecated( + version, name=key, alternative=alt_key, + addendum="Please update your matplotlibrc.") + else: + version = 'master' if '.post' in __version__ else f'v{__version__}' + _log.warning(""" +Bad key %(key)s in file %(fname)s, line %(line_no)s (%(line)r) +You probably need to get an updated matplotlibrc file from +https://github.com/matplotlib/matplotlib/blob/%(version)s/matplotlibrc.template +or from the matplotlib source distribution""", + dict(key=key, fname=fname, line_no=line_no, + line=line.rstrip('\n'), version=version)) + return config + + +def rc_params_from_file(fname, fail_on_error=False, use_default_template=True): + """ + Construct a `RcParams` from file *fname*. + + Parameters + ---------- + fname : str or path-like + A file with Matplotlib rc settings. + fail_on_error : bool + If True, raise an error when the parser fails to convert a parameter. + use_default_template : bool + If True, initialize with default parameters before updating with those + in the given file. If False, the configuration class only contains the + parameters specified in the file. (Useful for updating dicts.) + """ + config_from_file = _rc_params_in_file(fname, fail_on_error=fail_on_error) + + if not use_default_template: + return config_from_file + + with cbook._suppress_matplotlib_deprecation_warning(): + config = RcParams({**rcParamsDefault, **config_from_file}) + + with cbook._suppress_matplotlib_deprecation_warning(): + if config['datapath'] is None: + config['datapath'] = _get_data_path() + else: + config['datapath'] = get_data_path(_from_rc=config['datapath']) + + if "".join(config['text.latex.preamble']): + _log.info(""" +***************************************************************** +You have the following UNSUPPORTED LaTeX preamble customizations: +%s +Please do not ask for support with these customizations active. 
+***************************************************************** +""", '\n'.join(config['text.latex.preamble'])) + _log.debug('loaded rc file %s', fname) + + return config + + +# When constructing the global instances, we need to perform certain updates +# by explicitly calling the superclass (dict.update, dict.items) to avoid +# triggering resolution of _auto_backend_sentinel. +rcParamsDefault = _rc_params_in_file( + cbook._get_data_path("matplotlibrc"), + # Strip leading comment. + transform=lambda line: line[1:] if line.startswith("#") else line, + fail_on_error=True) +dict.update(rcParamsDefault, rcsetup._hardcoded_defaults) +rcParams = RcParams() # The global instance. +dict.update(rcParams, dict.items(rcParamsDefault)) +dict.update(rcParams, _rc_params_in_file(matplotlib_fname())) +with cbook._suppress_matplotlib_deprecation_warning(): + rcParamsOrig = RcParams(rcParams.copy()) + # This also checks that all rcParams are indeed listed in the template. + # Assiging to rcsetup.defaultParams is left only for backcompat. + defaultParams = rcsetup.defaultParams = { + # We want to resolve deprecated rcParams, but not backend... + key: [(rcsetup._auto_backend_sentinel if key == "backend" else + rcParamsDefault[key]), + validator] + for key, validator in rcsetup._validators.items()} +if rcParams['axes.formatter.use_locale']: + locale.setlocale(locale.LC_ALL, '') + + +def rc(group, **kwargs): + """ + Set the current `.rcParams`. *group* is the grouping for the rc, e.g., + for ``lines.linewidth`` the group is ``lines``, for + ``axes.facecolor``, the group is ``axes``, and so on. Group may + also be a list or tuple of group names, e.g., (*xtick*, *ytick*). 
+ *kwargs* is a dictionary attribute name/value pairs, e.g.,:: + + rc('lines', linewidth=2, color='r') + + sets the current `.rcParams` and is equivalent to:: + + rcParams['lines.linewidth'] = 2 + rcParams['lines.color'] = 'r' + + The following aliases are available to save typing for interactive users: + + ===== ================= + Alias Property + ===== ================= + 'lw' 'linewidth' + 'ls' 'linestyle' + 'c' 'color' + 'fc' 'facecolor' + 'ec' 'edgecolor' + 'mew' 'markeredgewidth' + 'aa' 'antialiased' + ===== ================= + + Thus you could abbreviate the above call as:: + + rc('lines', lw=2, c='r') + + Note you can use python's kwargs dictionary facility to store + dictionaries of default parameters. e.g., you can customize the + font rc as follows:: + + font = {'family' : 'monospace', + 'weight' : 'bold', + 'size' : 'larger'} + rc('font', **font) # pass in the font dict as kwargs + + This enables you to easily switch between several configurations. Use + ``matplotlib.style.use('default')`` or :func:`~matplotlib.rcdefaults` to + restore the default `.rcParams` after changes. + + Notes + ----- + Similar functionality is available by using the normal dict interface, i.e. + ``rcParams.update({"lines.linewidth": 2, ...})`` (but ``rcParams.update`` + does not support abbreviations or grouping). + """ + + aliases = { + 'lw': 'linewidth', + 'ls': 'linestyle', + 'c': 'color', + 'fc': 'facecolor', + 'ec': 'edgecolor', + 'mew': 'markeredgewidth', + 'aa': 'antialiased', + } + + if isinstance(group, str): + group = (group,) + for g in group: + for k, v in kwargs.items(): + name = aliases.get(k) or k + key = '%s.%s' % (g, name) + try: + rcParams[key] = v + except KeyError as err: + raise KeyError(('Unrecognized key "%s" for group "%s" and ' + 'name "%s"') % (key, g, name)) from err + + +def rcdefaults(): + """ + Restore the `.rcParams` from Matplotlib's internal default style. 
+ + Style-blacklisted `.rcParams` (defined in + `matplotlib.style.core.STYLE_BLACKLIST`) are not updated. + + See Also + -------- + matplotlib.rc_file_defaults + Restore the `.rcParams` from the rc file originally loaded by + Matplotlib. + matplotlib.style.use + Use a specific style file. Call ``style.use('default')`` to restore + the default style. + """ + # Deprecation warnings were already handled when creating rcParamsDefault, + # no need to reemit them here. + with cbook._suppress_matplotlib_deprecation_warning(): + from .style.core import STYLE_BLACKLIST + rcParams.clear() + rcParams.update({k: v for k, v in rcParamsDefault.items() + if k not in STYLE_BLACKLIST}) + + +def rc_file_defaults(): + """ + Restore the `.rcParams` from the original rc file loaded by Matplotlib. + + Style-blacklisted `.rcParams` (defined in + `matplotlib.style.core.STYLE_BLACKLIST`) are not updated. + """ + # Deprecation warnings were already handled when creating rcParamsOrig, no + # need to reemit them here. + with cbook._suppress_matplotlib_deprecation_warning(): + from .style.core import STYLE_BLACKLIST + rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig + if k not in STYLE_BLACKLIST}) + + +def rc_file(fname, *, use_default_template=True): + """ + Update `.rcParams` from file. + + Style-blacklisted `.rcParams` (defined in + `matplotlib.style.core.STYLE_BLACKLIST`) are not updated. + + Parameters + ---------- + fname : str or path-like + A file with Matplotlib rc settings. + + use_default_template : bool + If True, initialize with default parameters before updating with those + in the given file. If False, the current configuration persists + and only the parameters specified in the file are updated. + """ + # Deprecation warnings were already handled in rc_params_from_file, no need + # to reemit them here. 
+ with cbook._suppress_matplotlib_deprecation_warning(): + from .style.core import STYLE_BLACKLIST + rc_from_file = rc_params_from_file( + fname, use_default_template=use_default_template) + rcParams.update({k: rc_from_file[k] for k in rc_from_file + if k not in STYLE_BLACKLIST}) + + +@contextlib.contextmanager +def rc_context(rc=None, fname=None): + """ + Return a context manager for temporarily changing rcParams. + + Parameters + ---------- + rc : dict + The rcParams to temporarily set. + fname : str or path-like + A file with Matplotlib rc settings. If both *fname* and *rc* are given, + settings from *rc* take precedence. + + See Also + -------- + :ref:`customizing-with-matplotlibrc-files` + + Examples + -------- + Passing explicit values via a dict:: + + with mpl.rc_context({'interactive': False}): + fig, ax = plt.subplots() + ax.plot(range(3), range(3)) + fig.savefig('example.png') + plt.close(fig) + + Loading settings from a file:: + + with mpl.rc_context(fname='print.rc'): + plt.plot(x, y) # uses 'print.rc' + + """ + orig = rcParams.copy() + try: + if fname: + rc_file(fname) + if rc: + rcParams.update(rc) + yield + finally: + dict.update(rcParams, orig) # Revert to the original rcs. + + +def use(backend, *, force=True): + """ + Select the backend used for rendering and GUI integration. + + Parameters + ---------- + backend : str + The backend to switch to. This can either be one of the standard + backend names, which are case-insensitive: + + - interactive backends: + GTK3Agg, GTK3Cairo, MacOSX, nbAgg, + Qt4Agg, Qt4Cairo, Qt5Agg, Qt5Cairo, + TkAgg, TkCairo, WebAgg, WX, WXAgg, WXCairo + + - non-interactive backends: + agg, cairo, pdf, pgf, ps, svg, template + + or a string of the form: ``module://my.module.name``. 
+ + force : bool, default: True + If True (the default), raise an `ImportError` if the backend cannot be + set up (either because it fails to import, or because an incompatible + GUI interactive framework is already running); if False, ignore the + failure. + + See Also + -------- + :ref:`backends` + matplotlib.get_backend + """ + name = validate_backend(backend) + # we need to use the base-class method here to avoid (prematurely) + # resolving the "auto" backend setting + if dict.__getitem__(rcParams, 'backend') == name: + # Nothing to do if the requested backend is already set + pass + else: + # if pyplot is not already imported, do not import it. Doing + # so may trigger a `plt.switch_backend` to the _default_ backend + # before we get a chance to change to the one the user just requested + plt = sys.modules.get('matplotlib.pyplot') + # if pyplot is imported, then try to change backends + if plt is not None: + try: + # we need this import check here to re-raise if the + # user does not have the libraries to support their + # chosen backend installed. + plt.switch_backend(name) + except ImportError: + if force: + raise + # if we have not imported pyplot, then we can set the rcParam + # value which will be respected when the user finally imports + # pyplot + else: + rcParams['backend'] = backend + # if the user has asked for a given backend, do not helpfully + # fallback + rcParams['backend_fallback'] = False + + +if os.environ.get('MPLBACKEND'): + rcParams['backend'] = os.environ.get('MPLBACKEND') + + +def get_backend(): + """ + Return the name of the current backend. + + See Also + -------- + matplotlib.use + """ + return rcParams['backend'] + + +def interactive(b): + """ + Set whether to redraw after every plotting command (e.g. `.pyplot.xlabel`). 
+ """ + rcParams['interactive'] = b + + +def is_interactive(): + """Return whether to redraw after every plotting command.""" + return rcParams['interactive'] + + +default_test_modules = [ + 'matplotlib.tests', + 'mpl_toolkits.tests', +] + + +def _init_tests(): + # The version of FreeType to install locally for running the + # tests. This must match the value in `setupext.py` + LOCAL_FREETYPE_VERSION = '2.6.1' + + from matplotlib import ft2font + if (ft2font.__freetype_version__ != LOCAL_FREETYPE_VERSION or + ft2font.__freetype_build_type__ != 'local'): + _log.warning( + f"Matplotlib is not built with the correct FreeType version to " + f"run tests. Rebuild without setting system_freetype=1 in " + f"setup.cfg. Expect many image comparison failures below. " + f"Expected freetype version {LOCAL_FREETYPE_VERSION}. " + f"Found freetype version {ft2font.__freetype_version__}. " + "Freetype build type is {}local".format( + "" if ft2font.__freetype_build_type__ == 'local' else "not ")) + + +@cbook._delete_parameter("3.2", "switch_backend_warn") +@cbook._delete_parameter("3.3", "recursionlimit") +def test(verbosity=None, coverage=False, switch_backend_warn=True, + recursionlimit=0, **kwargs): + """Run the matplotlib test suite.""" + + try: + import pytest + except ImportError: + print("matplotlib.test requires pytest to run.") + return -1 + + if not os.path.isdir(os.path.join(os.path.dirname(__file__), 'tests')): + print("Matplotlib test data is not installed") + return -1 + + old_backend = get_backend() + old_recursionlimit = sys.getrecursionlimit() + try: + use('agg') + if recursionlimit: + sys.setrecursionlimit(recursionlimit) + + args = kwargs.pop('argv', []) + provide_default_modules = True + use_pyargs = True + for arg in args: + if any(arg.startswith(module_path) + for module_path in default_test_modules): + provide_default_modules = False + break + if os.path.exists(arg): + provide_default_modules = False + use_pyargs = False + break + if use_pyargs: + args += 
['--pyargs'] + if provide_default_modules: + args += default_test_modules + + if coverage: + args += ['--cov'] + + if verbosity: + args += ['-' + 'v' * verbosity] + + retcode = pytest.main(args, **kwargs) + finally: + if old_backend.lower() != 'agg': + use(old_backend) + if recursionlimit: + sys.setrecursionlimit(old_recursionlimit) + + return retcode + + +test.__test__ = False # pytest: this function is not a test + + +def _replacer(data, value): + """ + Either returns ``data[value]`` or passes ``data`` back, converts either to + a sequence. + """ + try: + # if key isn't a string don't bother + if isinstance(value, str): + # try to use __getitem__ + value = data[value] + except Exception: + # key does not exist, silently fall back to key + pass + return sanitize_sequence(value) + + +def _label_from_arg(y, default_name): + try: + return y.name + except AttributeError: + if isinstance(default_name, str): + return default_name + return None + + +_DATA_DOC_TITLE = """ + +Notes +----- +""" + +_DATA_DOC_APPENDIX = """ + +.. note:: + In addition to the above described arguments, this function can take + a *data* keyword argument. If such a *data* argument is given, +{replaced} + + Objects passed as **data** must support item access (``data[s]``) and + membership test (``s in data``). +""" + + +def _add_data_doc(docstring, replace_names): + """ + Add documentation for a *data* field to the given docstring. + + Parameters + ---------- + docstring : str + The input docstring. + replace_names : list of str or None + The list of parameter names which arguments should be replaced by + ``data[name]`` (if ``data[name]`` does not throw an exception). If + None, replacement is attempted for all arguments. + + Returns + ------- + str + The augmented docstring. 
+ """ + if (docstring is None + or replace_names is not None and len(replace_names) == 0): + return docstring + docstring = inspect.cleandoc(docstring) + repl = ( + (" every other argument can also be string ``s``, which is\n" + " interpreted as ``data[s]`` (unless this raises an exception).") + if replace_names is None else + (" the following arguments can also be string ``s``, which is\n" + " interpreted as ``data[s]`` (unless this raises an exception):\n" + " " + ", ".join(map("*{}*".format, replace_names))) + ".") + addendum = _DATA_DOC_APPENDIX.format(replaced=repl) + if _DATA_DOC_TITLE not in docstring: + addendum = _DATA_DOC_TITLE + addendum + return docstring + addendum + + +def _preprocess_data(func=None, *, replace_names=None, label_namer=None): + """ + A decorator to add a 'data' kwarg to a function. + + When applied:: + + @_preprocess_data() + def func(ax, *args, **kwargs): ... + + the signature is modified to ``decorated(ax, *args, data=None, **kwargs)`` + with the following behavior: + + - if called with ``data=None``, forward the other arguments to ``func``; + - otherwise, *data* must be a mapping; for any argument passed in as a + string ``name``, replace the argument by ``data[name]`` (if this does not + throw an exception), then forward the arguments to ``func``. + + In either case, any argument that is a `MappingView` is also converted to a + list. + + Parameters + ---------- + replace_names : list of str or None, default: None + The list of parameter names for which lookup into *data* should be + attempted. If None, replacement is attempted for all arguments. + label_namer : str, default: None + If set e.g. to "namer" (which must be a kwarg in the function's + signature -- not as ``**kwargs``), if the *namer* argument passed in is + a (string) key of *data* and no *label* kwarg is passed, then use the + (string) value of the *namer* as *label*. :: + + @_preprocess_data(label_namer="foo") + def func(foo, label=None): ... 
+ + func("key", data={"key": value}) + # is equivalent to + func.__wrapped__(value, label="key") + """ + + if func is None: # Return the actual decorator. + return functools.partial( + _preprocess_data, + replace_names=replace_names, label_namer=label_namer) + + sig = inspect.signature(func) + varargs_name = None + varkwargs_name = None + arg_names = [] + params = list(sig.parameters.values()) + for p in params: + if p.kind is Parameter.VAR_POSITIONAL: + varargs_name = p.name + elif p.kind is Parameter.VAR_KEYWORD: + varkwargs_name = p.name + else: + arg_names.append(p.name) + data_param = Parameter("data", Parameter.KEYWORD_ONLY, default=None) + if varkwargs_name: + params.insert(-1, data_param) + else: + params.append(data_param) + new_sig = sig.replace(parameters=params) + arg_names = arg_names[1:] # remove the first "ax" / self arg + + assert {*arg_names}.issuperset(replace_names or []) or varkwargs_name, ( + "Matplotlib internal error: invalid replace_names ({!r}) for {!r}" + .format(replace_names, func.__name__)) + assert label_namer is None or label_namer in arg_names, ( + "Matplotlib internal error: invalid label_namer ({!r}) for {!r}" + .format(label_namer, func.__name__)) + + @functools.wraps(func) + def inner(ax, *args, data=None, **kwargs): + if data is None: + return func(ax, *map(sanitize_sequence, args), **kwargs) + + bound = new_sig.bind(ax, *args, **kwargs) + auto_label = (bound.arguments.get(label_namer) + or bound.kwargs.get(label_namer)) + + for k, v in bound.arguments.items(): + if k == varkwargs_name: + for k1, v1 in v.items(): + if replace_names is None or k1 in replace_names: + v[k1] = _replacer(data, v1) + elif k == varargs_name: + if replace_names is None: + bound.arguments[k] = tuple(_replacer(data, v1) for v1 in v) + else: + if replace_names is None or k in replace_names: + bound.arguments[k] = _replacer(data, v) + + new_args = bound.args + new_kwargs = bound.kwargs + + args_and_kwargs = {**bound.arguments, **bound.kwargs} + if 
label_namer and "label" not in args_and_kwargs: + new_kwargs["label"] = _label_from_arg( + args_and_kwargs.get(label_namer), auto_label) + + return func(*new_args, **new_kwargs) + + inner.__doc__ = _add_data_doc(inner.__doc__, replace_names) + inner.__signature__ = new_sig + return inner + + +_log.debug('matplotlib version %s', __version__) +_log.debug('interactive is %s', is_interactive()) +_log.debug('platform is %s', sys.platform) +_log.debug('loaded modules: %s', list(sys.modules)) diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/__init__.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 000000000..12fd836c0 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/__init__.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_animation_data.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_animation_data.cpython-36.pyc new file mode 100644 index 000000000..1abe13f20 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/_animation_data.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_cm.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_cm.cpython-36.pyc new file mode 100644 index 000000000..e620d4164 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/_cm.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_cm_listed.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_cm_listed.cpython-36.pyc new file mode 100644 index 000000000..3f04767dc Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/_cm_listed.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_color_data.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_color_data.cpython-36.pyc new file mode 100644 index 000000000..169ceecb2 Binary files /dev/null and 
b/venv/Lib/site-packages/matplotlib/__pycache__/_color_data.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_constrained_layout.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_constrained_layout.cpython-36.pyc new file mode 100644 index 000000000..78c2c83b8 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/_constrained_layout.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_internal_utils.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_internal_utils.cpython-36.pyc new file mode 100644 index 000000000..4de14d09a Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/_internal_utils.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_layoutbox.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_layoutbox.cpython-36.pyc new file mode 100644 index 000000000..9a82dcbf6 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/_layoutbox.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_mathtext_data.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_mathtext_data.cpython-36.pyc new file mode 100644 index 000000000..1e60c8092 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/_mathtext_data.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_pylab_helpers.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_pylab_helpers.cpython-36.pyc new file mode 100644 index 000000000..49a4dde17 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/_pylab_helpers.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_text_layout.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_text_layout.cpython-36.pyc new file mode 100644 index 000000000..31fb75c39 Binary files /dev/null and 
b/venv/Lib/site-packages/matplotlib/__pycache__/_text_layout.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/_version.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/_version.cpython-36.pyc new file mode 100644 index 000000000..4b9e43e02 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/_version.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/afm.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/afm.cpython-36.pyc new file mode 100644 index 000000000..ac2d61638 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/afm.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/animation.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/animation.cpython-36.pyc new file mode 100644 index 000000000..5c8e753ab Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/animation.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/artist.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/artist.cpython-36.pyc new file mode 100644 index 000000000..93b1aacea Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/artist.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/axis.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/axis.cpython-36.pyc new file mode 100644 index 000000000..b58405d7e Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/axis.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/backend_bases.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/backend_bases.cpython-36.pyc new file mode 100644 index 000000000..aa48fafee Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/backend_bases.cpython-36.pyc differ diff --git 
a/venv/Lib/site-packages/matplotlib/__pycache__/backend_managers.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/backend_managers.cpython-36.pyc new file mode 100644 index 000000000..d5fc22ff7 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/backend_managers.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/backend_tools.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/backend_tools.cpython-36.pyc new file mode 100644 index 000000000..2d1012645 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/backend_tools.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/bezier.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/bezier.cpython-36.pyc new file mode 100644 index 000000000..43dd9e164 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/bezier.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/blocking_input.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/blocking_input.cpython-36.pyc new file mode 100644 index 000000000..0e5a50648 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/blocking_input.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/category.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/category.cpython-36.pyc new file mode 100644 index 000000000..331b26e57 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/category.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/cm.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/cm.cpython-36.pyc new file mode 100644 index 000000000..19f2f9416 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/cm.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/collections.cpython-36.pyc 
b/venv/Lib/site-packages/matplotlib/__pycache__/collections.cpython-36.pyc new file mode 100644 index 000000000..5e2b42a3d Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/collections.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/colorbar.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/colorbar.cpython-36.pyc new file mode 100644 index 000000000..db5541dbe Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/colorbar.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/colors.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/colors.cpython-36.pyc new file mode 100644 index 000000000..4e606edfb Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/colors.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/container.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/container.cpython-36.pyc new file mode 100644 index 000000000..ba7106bd0 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/container.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/contour.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/contour.cpython-36.pyc new file mode 100644 index 000000000..32428c707 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/contour.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/dates.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/dates.cpython-36.pyc new file mode 100644 index 000000000..68726965e Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/dates.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/docstring.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/docstring.cpython-36.pyc new file mode 100644 index 000000000..c7528a891 Binary files /dev/null and 
b/venv/Lib/site-packages/matplotlib/__pycache__/docstring.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/dviread.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/dviread.cpython-36.pyc new file mode 100644 index 000000000..f4102b4c3 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/dviread.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/figure.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/figure.cpython-36.pyc new file mode 100644 index 000000000..d81a5ea4a Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/figure.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/font_manager.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/font_manager.cpython-36.pyc new file mode 100644 index 000000000..181ddf7ef Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/font_manager.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/fontconfig_pattern.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/fontconfig_pattern.cpython-36.pyc new file mode 100644 index 000000000..7ce04d5a3 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/fontconfig_pattern.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/gridspec.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/gridspec.cpython-36.pyc new file mode 100644 index 000000000..118eab727 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/gridspec.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/hatch.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/hatch.cpython-36.pyc new file mode 100644 index 000000000..525d5ff37 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/hatch.cpython-36.pyc differ diff --git 
a/venv/Lib/site-packages/matplotlib/__pycache__/image.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/image.cpython-36.pyc new file mode 100644 index 000000000..8d479243d Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/image.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/legend.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/legend.cpython-36.pyc new file mode 100644 index 000000000..f1648f97b Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/legend.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/legend_handler.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/legend_handler.cpython-36.pyc new file mode 100644 index 000000000..3e11a926e Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/legend_handler.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/lines.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/lines.cpython-36.pyc new file mode 100644 index 000000000..5fcdc48c4 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/lines.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/markers.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/markers.cpython-36.pyc new file mode 100644 index 000000000..0e4032aea Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/markers.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/mathtext.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/mathtext.cpython-36.pyc new file mode 100644 index 000000000..6e31a7f02 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/mathtext.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/mlab.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/mlab.cpython-36.pyc new file mode 
100644 index 000000000..db90631c9 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/mlab.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/offsetbox.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/offsetbox.cpython-36.pyc new file mode 100644 index 000000000..d3b76f8d9 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/offsetbox.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/patches.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/patches.cpython-36.pyc new file mode 100644 index 000000000..3e036a317 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/patches.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/path.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/path.cpython-36.pyc new file mode 100644 index 000000000..b89bdd524 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/path.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/patheffects.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/patheffects.cpython-36.pyc new file mode 100644 index 000000000..91bf704ce Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/patheffects.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/pylab.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/pylab.cpython-36.pyc new file mode 100644 index 000000000..2582591da Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/pylab.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/pyplot.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/pyplot.cpython-36.pyc new file mode 100644 index 000000000..02a3c4a14 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/pyplot.cpython-36.pyc differ diff --git 
a/venv/Lib/site-packages/matplotlib/__pycache__/quiver.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/quiver.cpython-36.pyc new file mode 100644 index 000000000..071ffbf01 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/quiver.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/rcsetup.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/rcsetup.cpython-36.pyc new file mode 100644 index 000000000..d226995fb Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/rcsetup.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/sankey.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/sankey.cpython-36.pyc new file mode 100644 index 000000000..656e28338 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/sankey.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/scale.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/scale.cpython-36.pyc new file mode 100644 index 000000000..8de36d5bc Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/scale.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/spines.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/spines.cpython-36.pyc new file mode 100644 index 000000000..bec019b87 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/spines.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/stackplot.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/stackplot.cpython-36.pyc new file mode 100644 index 000000000..d50d1907d Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/stackplot.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/streamplot.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/streamplot.cpython-36.pyc new file mode 100644 
index 000000000..c9bd67983 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/streamplot.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/table.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/table.cpython-36.pyc new file mode 100644 index 000000000..b91197690 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/table.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/texmanager.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/texmanager.cpython-36.pyc new file mode 100644 index 000000000..a5c159b77 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/texmanager.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/text.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/text.cpython-36.pyc new file mode 100644 index 000000000..6cd00235f Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/text.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/textpath.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/textpath.cpython-36.pyc new file mode 100644 index 000000000..8f3800dcf Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/textpath.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/ticker.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/ticker.cpython-36.pyc new file mode 100644 index 000000000..bf037fee8 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/ticker.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/tight_bbox.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/tight_bbox.cpython-36.pyc new file mode 100644 index 000000000..3fdffcf4d Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/tight_bbox.cpython-36.pyc differ diff --git 
a/venv/Lib/site-packages/matplotlib/__pycache__/tight_layout.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/tight_layout.cpython-36.pyc new file mode 100644 index 000000000..08afe4c11 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/tight_layout.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/transforms.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/transforms.cpython-36.pyc new file mode 100644 index 000000000..366a604fd Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/transforms.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/ttconv.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/ttconv.cpython-36.pyc new file mode 100644 index 000000000..fa1947596 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/ttconv.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/type1font.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/type1font.cpython-36.pyc new file mode 100644 index 000000000..8e0998e9c Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/type1font.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/units.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/units.cpython-36.pyc new file mode 100644 index 000000000..4ac576c15 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/units.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/__pycache__/widgets.cpython-36.pyc b/venv/Lib/site-packages/matplotlib/__pycache__/widgets.cpython-36.pyc new file mode 100644 index 000000000..769878513 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/__pycache__/widgets.cpython-36.pyc differ diff --git a/venv/Lib/site-packages/matplotlib/_animation_data.py b/venv/Lib/site-packages/matplotlib/_animation_data.py new file mode 100644 index 
000000000..b7d3acc7e --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_animation_data.py @@ -0,0 +1,262 @@ +# Javascript template for HTMLWriter +JS_INCLUDE = """ + + +""" + + +# Style definitions for the HTML template +STYLE_INCLUDE = """ + +""" + + +# HTML template for HTMLWriter +DISPLAY_TEMPLATE = """ +

+ +
+ +
+ + + + + + + + + +
+
+ + + + + + +
+
+
+ + + +""" + + +INCLUDED_FRAMES = """ + for (var i=0; i<{Nframes}; i++){{ + frames[i] = "{frame_dir}/frame" + ("0000000" + i).slice(-7) + + ".{frame_format}"; + }} +""" diff --git a/venv/Lib/site-packages/matplotlib/_cm.py b/venv/Lib/site-packages/matplotlib/_cm.py new file mode 100644 index 000000000..f51b7591d --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_cm.py @@ -0,0 +1,1434 @@ +""" +Nothing here but dictionaries for generating LinearSegmentedColormaps, +and a dictionary of these dictionaries. + +Documentation for each is in pyplot.colormaps(). Please update this +with the purpose and type of your colormap if you add data for one here. +""" + +from functools import partial + +import numpy as np + +_binary_data = { + 'red': ((0., 1., 1.), (1., 0., 0.)), + 'green': ((0., 1., 1.), (1., 0., 0.)), + 'blue': ((0., 1., 1.), (1., 0., 0.)) + } + +_autumn_data = {'red': ((0., 1.0, 1.0), (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'blue': ((0., 0., 0.), (1.0, 0., 0.))} + +_bone_data = {'red': ((0., 0., 0.), + (0.746032, 0.652778, 0.652778), + (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), + (0.365079, 0.319444, 0.319444), + (0.746032, 0.777778, 0.777778), + (1.0, 1.0, 1.0)), + 'blue': ((0., 0., 0.), + (0.365079, 0.444444, 0.444444), + (1.0, 1.0, 1.0))} + +_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'green': ((0., 1., 1.), (1.0, 0., 0.)), + 'blue': ((0., 1., 1.), (1.0, 1., 1.))} + +_copper_data = {'red': ((0., 0., 0.), + (0.809524, 1.000000, 1.000000), + (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), + (1.0, 0.7812, 0.7812)), + 'blue': ((0., 0., 0.), + (1.0, 0.4975, 0.4975))} + +def _flag_red(x): return 0.75 * np.sin((x * 31.5 + 0.25) * np.pi) + 0.5 +def _flag_green(x): return np.sin(x * 31.5 * np.pi) +def _flag_blue(x): return 0.75 * np.sin((x * 31.5 - 0.25) * np.pi) + 0.5 +_flag_data = {'red': _flag_red, 'green': _flag_green, 'blue': _flag_blue} + +def _prism_red(x): return 0.75 * np.sin((x * 20.9 + 0.25) * np.pi) + 0.67 +def 
_prism_green(x): return 0.75 * np.sin((x * 20.9 - 0.25) * np.pi) + 0.33 +def _prism_blue(x): return -1.1 * np.sin((x * 20.9) * np.pi) +_prism_data = {'red': _prism_red, 'green': _prism_green, 'blue': _prism_blue} + +def _ch_helper(gamma, s, r, h, p0, p1, x): + """Helper function for generating picklable cubehelix color maps.""" + # Apply gamma factor to emphasise low or high intensity values + xg = x ** gamma + # Calculate amplitude and angle of deviation from the black to white + # diagonal in the plane of constant perceived intensity. + a = h * xg * (1 - xg) / 2 + phi = 2 * np.pi * (s / 3 + r * x) + return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi)) + +def cubehelix(gamma=1.0, s=0.5, r=-1.5, h=1.0): + """ + Return custom data dictionary of (r, g, b) conversion functions, which can + be used with :func:`register_cmap`, for the cubehelix color scheme. + + Unlike most other color schemes cubehelix was designed by D.A. Green to + be monotonically increasing in terms of perceived brightness. + Also, when printed on a black and white postscript printer, the scheme + results in a greyscale with monotonically increasing brightness. + This color scheme is named cubehelix because the (r, g, b) values produced + can be visualised as a squashed helix around the diagonal in the + (r, g, b) color cube. + + For a unit color cube (i.e. 3-D coordinates for (r, g, b) each in the + range 0 to 1) the color scheme starts at (r, g, b) = (0, 0, 0), i.e. black, + and finishes at (r, g, b) = (1, 1, 1), i.e. white. For some fraction *x*, + between 0 and 1, the color is the corresponding grey value at that + fraction along the black to white diagonal (x, x, x) plus a color + element. This color element is calculated in a plane of constant + perceived intensity and controlled by the following parameters. + + Parameters + ---------- + gamma : float, default: 1 + Gamma factor emphasizing either low intensity values (gamma < 1), or + high intensity values (gamma > 1). 
+ s : float, default: 0.5 (purple) + The starting color. + r : float, default: -1.5 + The number of r, g, b rotations in color that are made from the start + to the end of the color scheme. The default of -1.5 corresponds to -> + B -> G -> R -> B. + h : float, default: 1 + The hue, i.e. how saturated the colors are. If this parameter is zero + then the color scheme is purely a greyscale. + """ + return {'red': partial(_ch_helper, gamma, s, r, h, -0.14861, 1.78277), + 'green': partial(_ch_helper, gamma, s, r, h, -0.29227, -0.90649), + 'blue': partial(_ch_helper, gamma, s, r, h, 1.97294, 0.0)} + +_cubehelix_data = cubehelix() + +_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0)) +_brg_data = ((0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)) + +# Gnuplot palette functions +def _g0(x): return 0 +def _g1(x): return 0.5 +def _g2(x): return 1 +def _g3(x): return x +def _g4(x): return x ** 2 +def _g5(x): return x ** 3 +def _g6(x): return x ** 4 +def _g7(x): return np.sqrt(x) +def _g8(x): return np.sqrt(np.sqrt(x)) +def _g9(x): return np.sin(x * np.pi / 2) +def _g10(x): return np.cos(x * np.pi / 2) +def _g11(x): return np.abs(x - 0.5) +def _g12(x): return (2 * x - 1) ** 2 +def _g13(x): return np.sin(x * np.pi) +def _g14(x): return np.abs(np.cos(x * np.pi)) +def _g15(x): return np.sin(x * 2 * np.pi) +def _g16(x): return np.cos(x * 2 * np.pi) +def _g17(x): return np.abs(np.sin(x * 2 * np.pi)) +def _g18(x): return np.abs(np.cos(x * 2 * np.pi)) +def _g19(x): return np.abs(np.sin(x * 4 * np.pi)) +def _g20(x): return np.abs(np.cos(x * 4 * np.pi)) +def _g21(x): return 3 * x +def _g22(x): return 3 * x - 1 +def _g23(x): return 3 * x - 2 +def _g24(x): return np.abs(3 * x - 1) +def _g25(x): return np.abs(3 * x - 2) +def _g26(x): return (3 * x - 1) / 2 +def _g27(x): return (3 * x - 2) / 2 +def _g28(x): return np.abs((3 * x - 1) / 2) +def _g29(x): return np.abs((3 * x - 2) / 2) +def _g30(x): return x / 0.32 - 0.78125 +def _g31(x): return 2 * x - 0.84 +def _g32(x): + ret = 
np.zeros(len(x)) + m = (x < 0.25) + ret[m] = 4 * x[m] + m = (x >= 0.25) & (x < 0.92) + ret[m] = -2 * x[m] + 1.84 + m = (x >= 0.92) + ret[m] = x[m] / 0.08 - 11.5 + return ret +def _g33(x): return np.abs(2 * x - 0.5) +def _g34(x): return 2 * x +def _g35(x): return 2 * x - 0.5 +def _g36(x): return 2 * x - 1 + +gfunc = {i: globals()["_g{}".format(i)] for i in range(37)} + +_gnuplot_data = { + 'red': gfunc[7], + 'green': gfunc[5], + 'blue': gfunc[15], +} + +_gnuplot2_data = { + 'red': gfunc[30], + 'green': gfunc[31], + 'blue': gfunc[32], +} + +_ocean_data = { + 'red': gfunc[23], + 'green': gfunc[28], + 'blue': gfunc[3], +} + +_afmhot_data = { + 'red': gfunc[34], + 'green': gfunc[35], + 'blue': gfunc[36], +} + +_rainbow_data = { + 'red': gfunc[33], + 'green': gfunc[13], + 'blue': gfunc[10], +} + +_seismic_data = ( + (0.0, 0.0, 0.3), (0.0, 0.0, 1.0), + (1.0, 1.0, 1.0), (1.0, 0.0, 0.0), + (0.5, 0.0, 0.0)) + +_terrain_data = ( + (0.00, (0.2, 0.2, 0.6)), + (0.15, (0.0, 0.6, 1.0)), + (0.25, (0.0, 0.8, 0.4)), + (0.50, (1.0, 1.0, 0.6)), + (0.75, (0.5, 0.36, 0.33)), + (1.00, (1.0, 1.0, 1.0))) + +_gray_data = {'red': ((0., 0, 0), (1., 1, 1)), + 'green': ((0., 0, 0), (1., 1, 1)), + 'blue': ((0., 0, 0), (1., 1, 1))} + +_hot_data = {'red': ((0., 0.0416, 0.0416), + (0.365079, 1.000000, 1.000000), + (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), + (0.365079, 0.000000, 0.000000), + (0.746032, 1.000000, 1.000000), + (1.0, 1.0, 1.0)), + 'blue': ((0., 0., 0.), + (0.746032, 0.000000, 0.000000), + (1.0, 1.0, 1.0))} + +_hsv_data = {'red': ((0., 1., 1.), + (0.158730, 1.000000, 1.000000), + (0.174603, 0.968750, 0.968750), + (0.333333, 0.031250, 0.031250), + (0.349206, 0.000000, 0.000000), + (0.666667, 0.000000, 0.000000), + (0.682540, 0.031250, 0.031250), + (0.841270, 0.968750, 0.968750), + (0.857143, 1.000000, 1.000000), + (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), + (0.158730, 0.937500, 0.937500), + (0.174603, 1.000000, 1.000000), + (0.507937, 1.000000, 1.000000), + (0.666667, 0.062500, 
0.062500), + (0.682540, 0.000000, 0.000000), + (1.0, 0., 0.)), + 'blue': ((0., 0., 0.), + (0.333333, 0.000000, 0.000000), + (0.349206, 0.062500, 0.062500), + (0.507937, 1.000000, 1.000000), + (0.841270, 1.000000, 1.000000), + (0.857143, 0.937500, 0.937500), + (1.0, 0.09375, 0.09375))} + +_jet_data = {'red': ((0.00, 0, 0), + (0.35, 0, 0), + (0.66, 1, 1), + (0.89, 1, 1), + (1.00, 0.5, 0.5)), + 'green': ((0.000, 0, 0), + (0.125, 0, 0), + (0.375, 1, 1), + (0.640, 1, 1), + (0.910, 0, 0), + (1.000, 0, 0)), + 'blue': ((0.00, 0.5, 0.5), + (0.11, 1, 1), + (0.34, 1, 1), + (0.65, 0, 0), + (1.00, 0, 0))} + +_pink_data = {'red': ((0., 0.1178, 0.1178), (0.015873, 0.195857, 0.195857), + (0.031746, 0.250661, 0.250661), + (0.047619, 0.295468, 0.295468), + (0.063492, 0.334324, 0.334324), + (0.079365, 0.369112, 0.369112), + (0.095238, 0.400892, 0.400892), + (0.111111, 0.430331, 0.430331), + (0.126984, 0.457882, 0.457882), + (0.142857, 0.483867, 0.483867), + (0.158730, 0.508525, 0.508525), + (0.174603, 0.532042, 0.532042), + (0.190476, 0.554563, 0.554563), + (0.206349, 0.576204, 0.576204), + (0.222222, 0.597061, 0.597061), + (0.238095, 0.617213, 0.617213), + (0.253968, 0.636729, 0.636729), + (0.269841, 0.655663, 0.655663), + (0.285714, 0.674066, 0.674066), + (0.301587, 0.691980, 0.691980), + (0.317460, 0.709441, 0.709441), + (0.333333, 0.726483, 0.726483), + (0.349206, 0.743134, 0.743134), + (0.365079, 0.759421, 0.759421), + (0.380952, 0.766356, 0.766356), + (0.396825, 0.773229, 0.773229), + (0.412698, 0.780042, 0.780042), + (0.428571, 0.786796, 0.786796), + (0.444444, 0.793492, 0.793492), + (0.460317, 0.800132, 0.800132), + (0.476190, 0.806718, 0.806718), + (0.492063, 0.813250, 0.813250), + (0.507937, 0.819730, 0.819730), + (0.523810, 0.826160, 0.826160), + (0.539683, 0.832539, 0.832539), + (0.555556, 0.838870, 0.838870), + (0.571429, 0.845154, 0.845154), + (0.587302, 0.851392, 0.851392), + (0.603175, 0.857584, 0.857584), + (0.619048, 0.863731, 0.863731), + (0.634921, 0.869835, 
0.869835), + (0.650794, 0.875897, 0.875897), + (0.666667, 0.881917, 0.881917), + (0.682540, 0.887896, 0.887896), + (0.698413, 0.893835, 0.893835), + (0.714286, 0.899735, 0.899735), + (0.730159, 0.905597, 0.905597), + (0.746032, 0.911421, 0.911421), + (0.761905, 0.917208, 0.917208), + (0.777778, 0.922958, 0.922958), + (0.793651, 0.928673, 0.928673), + (0.809524, 0.934353, 0.934353), + (0.825397, 0.939999, 0.939999), + (0.841270, 0.945611, 0.945611), + (0.857143, 0.951190, 0.951190), + (0.873016, 0.956736, 0.956736), + (0.888889, 0.962250, 0.962250), + (0.904762, 0.967733, 0.967733), + (0.920635, 0.973185, 0.973185), + (0.936508, 0.978607, 0.978607), + (0.952381, 0.983999, 0.983999), + (0.968254, 0.989361, 0.989361), + (0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), (0.015873, 0.102869, 0.102869), + (0.031746, 0.145479, 0.145479), + (0.047619, 0.178174, 0.178174), + (0.063492, 0.205738, 0.205738), + (0.079365, 0.230022, 0.230022), + (0.095238, 0.251976, 0.251976), + (0.111111, 0.272166, 0.272166), + (0.126984, 0.290957, 0.290957), + (0.142857, 0.308607, 0.308607), + (0.158730, 0.325300, 0.325300), + (0.174603, 0.341178, 0.341178), + (0.190476, 0.356348, 0.356348), + (0.206349, 0.370899, 0.370899), + (0.222222, 0.384900, 0.384900), + (0.238095, 0.398410, 0.398410), + (0.253968, 0.411476, 0.411476), + (0.269841, 0.424139, 0.424139), + (0.285714, 0.436436, 0.436436), + (0.301587, 0.448395, 0.448395), + (0.317460, 0.460044, 0.460044), + (0.333333, 0.471405, 0.471405), + (0.349206, 0.482498, 0.482498), + (0.365079, 0.493342, 0.493342), + (0.380952, 0.517549, 0.517549), + (0.396825, 0.540674, 0.540674), + (0.412698, 0.562849, 0.562849), + (0.428571, 0.584183, 0.584183), + (0.444444, 0.604765, 0.604765), + (0.460317, 0.624669, 0.624669), + (0.476190, 0.643958, 0.643958), + (0.492063, 0.662687, 0.662687), + (0.507937, 0.680900, 0.680900), + (0.523810, 0.698638, 0.698638), + (0.539683, 0.715937, 0.715937), + (0.555556, 0.732828, 0.732828), + 
(0.571429, 0.749338, 0.749338), + (0.587302, 0.765493, 0.765493), + (0.603175, 0.781313, 0.781313), + (0.619048, 0.796819, 0.796819), + (0.634921, 0.812029, 0.812029), + (0.650794, 0.826960, 0.826960), + (0.666667, 0.841625, 0.841625), + (0.682540, 0.856040, 0.856040), + (0.698413, 0.870216, 0.870216), + (0.714286, 0.884164, 0.884164), + (0.730159, 0.897896, 0.897896), + (0.746032, 0.911421, 0.911421), + (0.761905, 0.917208, 0.917208), + (0.777778, 0.922958, 0.922958), + (0.793651, 0.928673, 0.928673), + (0.809524, 0.934353, 0.934353), + (0.825397, 0.939999, 0.939999), + (0.841270, 0.945611, 0.945611), + (0.857143, 0.951190, 0.951190), + (0.873016, 0.956736, 0.956736), + (0.888889, 0.962250, 0.962250), + (0.904762, 0.967733, 0.967733), + (0.920635, 0.973185, 0.973185), + (0.936508, 0.978607, 0.978607), + (0.952381, 0.983999, 0.983999), + (0.968254, 0.989361, 0.989361), + (0.984127, 0.994695, 0.994695), (1.0, 1.0, 1.0)), + 'blue': ((0., 0., 0.), (0.015873, 0.102869, 0.102869), + (0.031746, 0.145479, 0.145479), + (0.047619, 0.178174, 0.178174), + (0.063492, 0.205738, 0.205738), + (0.079365, 0.230022, 0.230022), + (0.095238, 0.251976, 0.251976), + (0.111111, 0.272166, 0.272166), + (0.126984, 0.290957, 0.290957), + (0.142857, 0.308607, 0.308607), + (0.158730, 0.325300, 0.325300), + (0.174603, 0.341178, 0.341178), + (0.190476, 0.356348, 0.356348), + (0.206349, 0.370899, 0.370899), + (0.222222, 0.384900, 0.384900), + (0.238095, 0.398410, 0.398410), + (0.253968, 0.411476, 0.411476), + (0.269841, 0.424139, 0.424139), + (0.285714, 0.436436, 0.436436), + (0.301587, 0.448395, 0.448395), + (0.317460, 0.460044, 0.460044), + (0.333333, 0.471405, 0.471405), + (0.349206, 0.482498, 0.482498), + (0.365079, 0.493342, 0.493342), + (0.380952, 0.503953, 0.503953), + (0.396825, 0.514344, 0.514344), + (0.412698, 0.524531, 0.524531), + (0.428571, 0.534522, 0.534522), + (0.444444, 0.544331, 0.544331), + (0.460317, 0.553966, 0.553966), + (0.476190, 0.563436, 0.563436), + (0.492063, 0.572750, 
0.572750), + (0.507937, 0.581914, 0.581914), + (0.523810, 0.590937, 0.590937), + (0.539683, 0.599824, 0.599824), + (0.555556, 0.608581, 0.608581), + (0.571429, 0.617213, 0.617213), + (0.587302, 0.625727, 0.625727), + (0.603175, 0.634126, 0.634126), + (0.619048, 0.642416, 0.642416), + (0.634921, 0.650600, 0.650600), + (0.650794, 0.658682, 0.658682), + (0.666667, 0.666667, 0.666667), + (0.682540, 0.674556, 0.674556), + (0.698413, 0.682355, 0.682355), + (0.714286, 0.690066, 0.690066), + (0.730159, 0.697691, 0.697691), + (0.746032, 0.705234, 0.705234), + (0.761905, 0.727166, 0.727166), + (0.777778, 0.748455, 0.748455), + (0.793651, 0.769156, 0.769156), + (0.809524, 0.789314, 0.789314), + (0.825397, 0.808969, 0.808969), + (0.841270, 0.828159, 0.828159), + (0.857143, 0.846913, 0.846913), + (0.873016, 0.865261, 0.865261), + (0.888889, 0.883229, 0.883229), + (0.904762, 0.900837, 0.900837), + (0.920635, 0.918109, 0.918109), + (0.936508, 0.935061, 0.935061), + (0.952381, 0.951711, 0.951711), + (0.968254, 0.968075, 0.968075), + (0.984127, 0.984167, 0.984167), (1.0, 1.0, 1.0))} + +_spring_data = {'red': ((0., 1., 1.), (1.0, 1.0, 1.0)), + 'green': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'blue': ((0., 1., 1.), (1.0, 0.0, 0.0))} + + +_summer_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'green': ((0., 0.5, 0.5), (1.0, 1.0, 1.0)), + 'blue': ((0., 0.4, 0.4), (1.0, 0.4, 0.4))} + + +_winter_data = {'red': ((0., 0., 0.), (1.0, 0.0, 0.0)), + 'green': ((0., 0., 0.), (1.0, 1.0, 1.0)), + 'blue': ((0., 1., 1.), (1.0, 0.5, 0.5))} + +_nipy_spectral_data = { + 'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667), + (0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0), + (0.20, 0.0, 0.0), (0.25, 0.0, 0.0), + (0.30, 0.0, 0.0), (0.35, 0.0, 0.0), + (0.40, 0.0, 0.0), (0.45, 0.0, 0.0), + (0.50, 0.0, 0.0), (0.55, 0.0, 0.0), + (0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333), + (0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0), + (0.80, 1.0, 1.0), (0.85, 1.0, 1.0), + (0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80), + (1.0, 0.80, 0.80)], + 
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0), + (0.10, 0.0, 0.0), (0.15, 0.0, 0.0), + (0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667), + (0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667), + (0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000), + (0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667), + (0.60, 1.0, 1.0), (0.65, 1.0, 1.0), + (0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000), + (0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0), + (0.90, 0.0, 0.0), (0.95, 0.0, 0.0), + (1.0, 0.80, 0.80)], + 'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333), + (0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667), + (0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667), + (0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667), + (0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0), + (0.5, 0.0, 0.0), (0.55, 0.0, 0.0), + (0.60, 0.0, 0.0), (0.65, 0.0, 0.0), + (0.70, 0.0, 0.0), (0.75, 0.0, 0.0), + (0.80, 0.0, 0.0), (0.85, 0.0, 0.0), + (0.90, 0.0, 0.0), (0.95, 0.0, 0.0), + (1.0, 0.80, 0.80)], +} + + +# 34 colormaps based on color specifications and designs +# developed by Cynthia Brewer (http://colorbrewer.org). +# The ColorBrewer palettes have been included under the terms +# of an Apache-stype license (for details, see the file +# LICENSE_COLORBREWER in the license directory of the matplotlib +# source distribution). 
+ +# RGB values taken from Brewer's Excel sheet, divided by 255 + +_Blues_data = ( + (0.96862745098039216, 0.98431372549019602, 1.0 ), + (0.87058823529411766, 0.92156862745098034, 0.96862745098039216), + (0.77647058823529413, 0.85882352941176465, 0.93725490196078431), + (0.61960784313725492, 0.792156862745098 , 0.88235294117647056), + (0.41960784313725491, 0.68235294117647061, 0.83921568627450982), + (0.25882352941176473, 0.5725490196078431 , 0.77647058823529413), + (0.12941176470588237, 0.44313725490196076, 0.70980392156862748), + (0.03137254901960784, 0.31764705882352939, 0.61176470588235299), + (0.03137254901960784, 0.18823529411764706, 0.41960784313725491) + ) + +_BrBG_data = ( + (0.32941176470588235, 0.18823529411764706, 0.0196078431372549 ), + (0.5490196078431373 , 0.31764705882352939, 0.0392156862745098 ), + (0.74901960784313726, 0.50588235294117645, 0.17647058823529413), + (0.87450980392156863, 0.76078431372549016, 0.49019607843137253), + (0.96470588235294119, 0.90980392156862744, 0.76470588235294112), + (0.96078431372549022, 0.96078431372549022, 0.96078431372549022), + (0.7803921568627451 , 0.91764705882352937, 0.89803921568627454), + (0.50196078431372548, 0.80392156862745101, 0.75686274509803919), + (0.20784313725490197, 0.59215686274509804, 0.5607843137254902 ), + (0.00392156862745098, 0.4 , 0.36862745098039218), + (0.0 , 0.23529411764705882, 0.18823529411764706) + ) + +_BuGn_data = ( + (0.96862745098039216, 0.9882352941176471 , 0.99215686274509807), + (0.89803921568627454, 0.96078431372549022, 0.97647058823529409), + (0.8 , 0.92549019607843142, 0.90196078431372551), + (0.6 , 0.84705882352941175, 0.78823529411764703), + (0.4 , 0.76078431372549016, 0.64313725490196083), + (0.25490196078431371, 0.68235294117647061, 0.46274509803921571), + (0.13725490196078433, 0.54509803921568623, 0.27058823529411763), + (0.0 , 0.42745098039215684, 0.17254901960784313), + (0.0 , 0.26666666666666666, 0.10588235294117647) + ) + +_BuPu_data = ( + (0.96862745098039216, 
0.9882352941176471 , 0.99215686274509807), + (0.8784313725490196 , 0.92549019607843142, 0.95686274509803926), + (0.74901960784313726, 0.82745098039215681, 0.90196078431372551), + (0.61960784313725492, 0.73725490196078436, 0.85490196078431369), + (0.5490196078431373 , 0.58823529411764708, 0.77647058823529413), + (0.5490196078431373 , 0.41960784313725491, 0.69411764705882351), + (0.53333333333333333, 0.25490196078431371, 0.61568627450980395), + (0.50588235294117645, 0.05882352941176471, 0.48627450980392156), + (0.30196078431372547, 0.0 , 0.29411764705882354) + ) + +_GnBu_data = ( + (0.96862745098039216, 0.9882352941176471 , 0.94117647058823528), + (0.8784313725490196 , 0.95294117647058818, 0.85882352941176465), + (0.8 , 0.92156862745098034, 0.77254901960784317), + (0.6588235294117647 , 0.8666666666666667 , 0.70980392156862748), + (0.4823529411764706 , 0.8 , 0.7686274509803922 ), + (0.30588235294117649, 0.70196078431372544, 0.82745098039215681), + (0.16862745098039217, 0.5490196078431373 , 0.74509803921568629), + (0.03137254901960784, 0.40784313725490196, 0.67450980392156867), + (0.03137254901960784, 0.25098039215686274, 0.50588235294117645) + ) + +_Greens_data = ( + (0.96862745098039216, 0.9882352941176471 , 0.96078431372549022), + (0.89803921568627454, 0.96078431372549022, 0.8784313725490196 ), + (0.7803921568627451 , 0.9137254901960784 , 0.75294117647058822), + (0.63137254901960782, 0.85098039215686272, 0.60784313725490191), + (0.45490196078431372, 0.7686274509803922 , 0.46274509803921571), + (0.25490196078431371, 0.6705882352941176 , 0.36470588235294116), + (0.13725490196078433, 0.54509803921568623, 0.27058823529411763), + (0.0 , 0.42745098039215684, 0.17254901960784313), + (0.0 , 0.26666666666666666, 0.10588235294117647) + ) + +_Greys_data = ( + (1.0 , 1.0 , 1.0 ), + (0.94117647058823528, 0.94117647058823528, 0.94117647058823528), + (0.85098039215686272, 0.85098039215686272, 0.85098039215686272), + (0.74117647058823533, 0.74117647058823533, 0.74117647058823533), 
+ (0.58823529411764708, 0.58823529411764708, 0.58823529411764708), + (0.45098039215686275, 0.45098039215686275, 0.45098039215686275), + (0.32156862745098042, 0.32156862745098042, 0.32156862745098042), + (0.14509803921568629, 0.14509803921568629, 0.14509803921568629), + (0.0 , 0.0 , 0.0 ) + ) + +_Oranges_data = ( + (1.0 , 0.96078431372549022, 0.92156862745098034), + (0.99607843137254903, 0.90196078431372551, 0.80784313725490198), + (0.99215686274509807, 0.81568627450980391, 0.63529411764705879), + (0.99215686274509807, 0.68235294117647061, 0.41960784313725491), + (0.99215686274509807, 0.55294117647058827, 0.23529411764705882), + (0.94509803921568625, 0.41176470588235292, 0.07450980392156863), + (0.85098039215686272, 0.28235294117647058, 0.00392156862745098), + (0.65098039215686276, 0.21176470588235294, 0.01176470588235294), + (0.49803921568627452, 0.15294117647058825, 0.01568627450980392) + ) + +_OrRd_data = ( + (1.0 , 0.96862745098039216, 0.92549019607843142), + (0.99607843137254903, 0.90980392156862744, 0.78431372549019607), + (0.99215686274509807, 0.83137254901960789, 0.61960784313725492), + (0.99215686274509807, 0.73333333333333328, 0.51764705882352946), + (0.9882352941176471 , 0.55294117647058827, 0.34901960784313724), + (0.93725490196078431, 0.396078431372549 , 0.28235294117647058), + (0.84313725490196079, 0.18823529411764706, 0.12156862745098039), + (0.70196078431372544, 0.0 , 0.0 ), + (0.49803921568627452, 0.0 , 0.0 ) + ) + +_PiYG_data = ( + (0.55686274509803924, 0.00392156862745098, 0.32156862745098042), + (0.77254901960784317, 0.10588235294117647, 0.49019607843137253), + (0.87058823529411766, 0.46666666666666667, 0.68235294117647061), + (0.94509803921568625, 0.71372549019607845, 0.85490196078431369), + (0.99215686274509807, 0.8784313725490196 , 0.93725490196078431), + (0.96862745098039216, 0.96862745098039216, 0.96862745098039216), + (0.90196078431372551, 0.96078431372549022, 0.81568627450980391), + (0.72156862745098038, 0.88235294117647056, 
0.52549019607843139), + (0.49803921568627452, 0.73725490196078436, 0.25490196078431371), + (0.30196078431372547, 0.5725490196078431 , 0.12941176470588237), + (0.15294117647058825, 0.39215686274509803, 0.09803921568627451) + ) + +_PRGn_data = ( + (0.25098039215686274, 0.0 , 0.29411764705882354), + (0.46274509803921571, 0.16470588235294117, 0.51372549019607838), + (0.6 , 0.4392156862745098 , 0.6705882352941176 ), + (0.76078431372549016, 0.6470588235294118 , 0.81176470588235294), + (0.90588235294117647, 0.83137254901960789, 0.90980392156862744), + (0.96862745098039216, 0.96862745098039216, 0.96862745098039216), + (0.85098039215686272, 0.94117647058823528, 0.82745098039215681), + (0.65098039215686276, 0.85882352941176465, 0.62745098039215685), + (0.35294117647058826, 0.68235294117647061, 0.38039215686274508), + (0.10588235294117647, 0.47058823529411764, 0.21568627450980393), + (0.0 , 0.26666666666666666, 0.10588235294117647) + ) + +_PuBu_data = ( + (1.0 , 0.96862745098039216, 0.98431372549019602), + (0.92549019607843142, 0.90588235294117647, 0.94901960784313721), + (0.81568627450980391, 0.81960784313725488, 0.90196078431372551), + (0.65098039215686276, 0.74117647058823533, 0.85882352941176465), + (0.45490196078431372, 0.66274509803921566, 0.81176470588235294), + (0.21176470588235294, 0.56470588235294117, 0.75294117647058822), + (0.0196078431372549 , 0.4392156862745098 , 0.69019607843137254), + (0.01568627450980392, 0.35294117647058826, 0.55294117647058827), + (0.00784313725490196, 0.2196078431372549 , 0.34509803921568627) + ) + +_PuBuGn_data = ( + (1.0 , 0.96862745098039216, 0.98431372549019602), + (0.92549019607843142, 0.88627450980392153, 0.94117647058823528), + (0.81568627450980391, 0.81960784313725488, 0.90196078431372551), + (0.65098039215686276, 0.74117647058823533, 0.85882352941176465), + (0.40392156862745099, 0.66274509803921566, 0.81176470588235294), + (0.21176470588235294, 0.56470588235294117, 0.75294117647058822), + (0.00784313725490196, 0.50588235294117645, 
0.54117647058823526), + (0.00392156862745098, 0.42352941176470588, 0.34901960784313724), + (0.00392156862745098, 0.27450980392156865, 0.21176470588235294) + ) + +_PuOr_data = ( + (0.49803921568627452, 0.23137254901960785, 0.03137254901960784), + (0.70196078431372544, 0.34509803921568627, 0.02352941176470588), + (0.8784313725490196 , 0.50980392156862742, 0.07843137254901961), + (0.99215686274509807, 0.72156862745098038, 0.38823529411764707), + (0.99607843137254903, 0.8784313725490196 , 0.71372549019607845), + (0.96862745098039216, 0.96862745098039216, 0.96862745098039216), + (0.84705882352941175, 0.85490196078431369, 0.92156862745098034), + (0.69803921568627447, 0.6705882352941176 , 0.82352941176470584), + (0.50196078431372548, 0.45098039215686275, 0.67450980392156867), + (0.32941176470588235, 0.15294117647058825, 0.53333333333333333), + (0.17647058823529413, 0.0 , 0.29411764705882354) + ) + +_PuRd_data = ( + (0.96862745098039216, 0.95686274509803926, 0.97647058823529409), + (0.90588235294117647, 0.88235294117647056, 0.93725490196078431), + (0.83137254901960789, 0.72549019607843135, 0.85490196078431369), + (0.78823529411764703, 0.58039215686274515, 0.7803921568627451 ), + (0.87450980392156863, 0.396078431372549 , 0.69019607843137254), + (0.90588235294117647, 0.16078431372549021, 0.54117647058823526), + (0.80784313725490198, 0.07058823529411765, 0.33725490196078434), + (0.59607843137254901, 0.0 , 0.2627450980392157 ), + (0.40392156862745099, 0.0 , 0.12156862745098039) + ) + +_Purples_data = ( + (0.9882352941176471 , 0.98431372549019602, 0.99215686274509807), + (0.93725490196078431, 0.92941176470588238, 0.96078431372549022), + (0.85490196078431369, 0.85490196078431369, 0.92156862745098034), + (0.73725490196078436, 0.74117647058823533, 0.86274509803921573), + (0.61960784313725492, 0.60392156862745094, 0.78431372549019607), + (0.50196078431372548, 0.49019607843137253, 0.72941176470588232), + (0.41568627450980394, 0.31764705882352939, 0.63921568627450975), + 
(0.32941176470588235, 0.15294117647058825, 0.5607843137254902 ), + (0.24705882352941178, 0.0 , 0.49019607843137253) + ) + +_RdBu_data = ( + (0.40392156862745099, 0.0 , 0.12156862745098039), + (0.69803921568627447, 0.09411764705882353, 0.16862745098039217), + (0.83921568627450982, 0.37647058823529411, 0.30196078431372547), + (0.95686274509803926, 0.6470588235294118 , 0.50980392156862742), + (0.99215686274509807, 0.85882352941176465, 0.7803921568627451 ), + (0.96862745098039216, 0.96862745098039216, 0.96862745098039216), + (0.81960784313725488, 0.89803921568627454, 0.94117647058823528), + (0.5725490196078431 , 0.77254901960784317, 0.87058823529411766), + (0.2627450980392157 , 0.57647058823529407, 0.76470588235294112), + (0.12941176470588237, 0.4 , 0.67450980392156867), + (0.0196078431372549 , 0.18823529411764706, 0.38039215686274508) + ) + +_RdGy_data = ( + (0.40392156862745099, 0.0 , 0.12156862745098039), + (0.69803921568627447, 0.09411764705882353, 0.16862745098039217), + (0.83921568627450982, 0.37647058823529411, 0.30196078431372547), + (0.95686274509803926, 0.6470588235294118 , 0.50980392156862742), + (0.99215686274509807, 0.85882352941176465, 0.7803921568627451 ), + (1.0 , 1.0 , 1.0 ), + (0.8784313725490196 , 0.8784313725490196 , 0.8784313725490196 ), + (0.72941176470588232, 0.72941176470588232, 0.72941176470588232), + (0.52941176470588236, 0.52941176470588236, 0.52941176470588236), + (0.30196078431372547, 0.30196078431372547, 0.30196078431372547), + (0.10196078431372549, 0.10196078431372549, 0.10196078431372549) + ) + +_RdPu_data = ( + (1.0 , 0.96862745098039216, 0.95294117647058818), + (0.99215686274509807, 0.8784313725490196 , 0.86666666666666667), + (0.9882352941176471 , 0.77254901960784317, 0.75294117647058822), + (0.98039215686274506, 0.62352941176470589, 0.70980392156862748), + (0.96862745098039216, 0.40784313725490196, 0.63137254901960782), + (0.86666666666666667, 0.20392156862745098, 0.59215686274509804), + (0.68235294117647061, 0.00392156862745098, 
0.49411764705882355), + (0.47843137254901963, 0.00392156862745098, 0.46666666666666667), + (0.28627450980392155, 0.0 , 0.41568627450980394) + ) + +_RdYlBu_data = ( + (0.6470588235294118 , 0.0 , 0.14901960784313725), + (0.84313725490196079, 0.18823529411764706 , 0.15294117647058825), + (0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ), + (0.99215686274509807, 0.68235294117647061 , 0.38039215686274508), + (0.99607843137254903, 0.8784313725490196 , 0.56470588235294117), + (1.0 , 1.0 , 0.74901960784313726), + (0.8784313725490196 , 0.95294117647058818 , 0.97254901960784312), + (0.6705882352941176 , 0.85098039215686272 , 0.9137254901960784 ), + (0.45490196078431372, 0.67843137254901964 , 0.81960784313725488), + (0.27058823529411763, 0.45882352941176469 , 0.70588235294117652), + (0.19215686274509805, 0.21176470588235294 , 0.58431372549019611) + ) + +_RdYlGn_data = ( + (0.6470588235294118 , 0.0 , 0.14901960784313725), + (0.84313725490196079, 0.18823529411764706 , 0.15294117647058825), + (0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ), + (0.99215686274509807, 0.68235294117647061 , 0.38039215686274508), + (0.99607843137254903, 0.8784313725490196 , 0.54509803921568623), + (1.0 , 1.0 , 0.74901960784313726), + (0.85098039215686272, 0.93725490196078431 , 0.54509803921568623), + (0.65098039215686276, 0.85098039215686272 , 0.41568627450980394), + (0.4 , 0.74117647058823533 , 0.38823529411764707), + (0.10196078431372549, 0.59607843137254901 , 0.31372549019607843), + (0.0 , 0.40784313725490196 , 0.21568627450980393) + ) + +_Reds_data = ( + (1.0 , 0.96078431372549022 , 0.94117647058823528), + (0.99607843137254903, 0.8784313725490196 , 0.82352941176470584), + (0.9882352941176471 , 0.73333333333333328 , 0.63137254901960782), + (0.9882352941176471 , 0.5725490196078431 , 0.44705882352941179), + (0.98431372549019602, 0.41568627450980394 , 0.29019607843137257), + (0.93725490196078431, 0.23137254901960785 , 0.17254901960784313), + (0.79607843137254897, 
0.094117647058823528, 0.11372549019607843), + (0.6470588235294118 , 0.058823529411764705, 0.08235294117647058), + (0.40392156862745099, 0.0 , 0.05098039215686274) + ) + +_Spectral_data = ( + (0.61960784313725492, 0.003921568627450980, 0.25882352941176473), + (0.83529411764705885, 0.24313725490196078 , 0.30980392156862746), + (0.95686274509803926, 0.42745098039215684 , 0.2627450980392157 ), + (0.99215686274509807, 0.68235294117647061 , 0.38039215686274508), + (0.99607843137254903, 0.8784313725490196 , 0.54509803921568623), + (1.0 , 1.0 , 0.74901960784313726), + (0.90196078431372551, 0.96078431372549022 , 0.59607843137254901), + (0.6705882352941176 , 0.8666666666666667 , 0.64313725490196083), + (0.4 , 0.76078431372549016 , 0.6470588235294118 ), + (0.19607843137254902, 0.53333333333333333 , 0.74117647058823533), + (0.36862745098039218, 0.30980392156862746 , 0.63529411764705879) + ) + +_YlGn_data = ( + (1.0 , 1.0 , 0.89803921568627454), + (0.96862745098039216, 0.9882352941176471 , 0.72549019607843135), + (0.85098039215686272, 0.94117647058823528 , 0.63921568627450975), + (0.67843137254901964, 0.8666666666666667 , 0.55686274509803924), + (0.47058823529411764, 0.77647058823529413 , 0.47450980392156861), + (0.25490196078431371, 0.6705882352941176 , 0.36470588235294116), + (0.13725490196078433, 0.51764705882352946 , 0.2627450980392157 ), + (0.0 , 0.40784313725490196 , 0.21568627450980393), + (0.0 , 0.27058823529411763 , 0.16078431372549021) + ) + +_YlGnBu_data = ( + (1.0 , 1.0 , 0.85098039215686272), + (0.92941176470588238, 0.97254901960784312 , 0.69411764705882351), + (0.7803921568627451 , 0.9137254901960784 , 0.70588235294117652), + (0.49803921568627452, 0.80392156862745101 , 0.73333333333333328), + (0.25490196078431371, 0.71372549019607845 , 0.7686274509803922 ), + (0.11372549019607843, 0.56862745098039214 , 0.75294117647058822), + (0.13333333333333333, 0.36862745098039218 , 0.6588235294117647 ), + (0.14509803921568629, 0.20392156862745098 , 0.58039215686274515), + 
(0.03137254901960784, 0.11372549019607843 , 0.34509803921568627) + ) + +_YlOrBr_data = ( + (1.0 , 1.0 , 0.89803921568627454), + (1.0 , 0.96862745098039216 , 0.73725490196078436), + (0.99607843137254903, 0.8901960784313725 , 0.56862745098039214), + (0.99607843137254903, 0.7686274509803922 , 0.30980392156862746), + (0.99607843137254903, 0.6 , 0.16078431372549021), + (0.92549019607843142, 0.4392156862745098 , 0.07843137254901961), + (0.8 , 0.29803921568627451 , 0.00784313725490196), + (0.6 , 0.20392156862745098 , 0.01568627450980392), + (0.4 , 0.14509803921568629 , 0.02352941176470588) + ) + +_YlOrRd_data = ( + (1.0 , 1.0 , 0.8 ), + (1.0 , 0.92941176470588238 , 0.62745098039215685), + (0.99607843137254903, 0.85098039215686272 , 0.46274509803921571), + (0.99607843137254903, 0.69803921568627447 , 0.29803921568627451), + (0.99215686274509807, 0.55294117647058827 , 0.23529411764705882), + (0.9882352941176471 , 0.30588235294117649 , 0.16470588235294117), + (0.8901960784313725 , 0.10196078431372549 , 0.10980392156862745), + (0.74117647058823533, 0.0 , 0.14901960784313725), + (0.50196078431372548, 0.0 , 0.14901960784313725) + ) + + +# ColorBrewer's qualitative maps, implemented using ListedColormap +# for use with mpl.colors.NoNorm + +_Accent_data = ( + (0.49803921568627452, 0.78823529411764703, 0.49803921568627452), + (0.74509803921568629, 0.68235294117647061, 0.83137254901960789), + (0.99215686274509807, 0.75294117647058822, 0.52549019607843139), + (1.0, 1.0, 0.6 ), + (0.2196078431372549, 0.42352941176470588, 0.69019607843137254), + (0.94117647058823528, 0.00784313725490196, 0.49803921568627452), + (0.74901960784313726, 0.35686274509803922, 0.09019607843137254), + (0.4, 0.4, 0.4 ), + ) + +_Dark2_data = ( + (0.10588235294117647, 0.61960784313725492, 0.46666666666666667), + (0.85098039215686272, 0.37254901960784315, 0.00784313725490196), + (0.45882352941176469, 0.4392156862745098, 0.70196078431372544), + (0.90588235294117647, 0.16078431372549021, 0.54117647058823526), + 
(0.4, 0.65098039215686276, 0.11764705882352941), + (0.90196078431372551, 0.6705882352941176, 0.00784313725490196), + (0.65098039215686276, 0.46274509803921571, 0.11372549019607843), + (0.4, 0.4, 0.4 ), + ) + +_Paired_data = ( + (0.65098039215686276, 0.80784313725490198, 0.8901960784313725 ), + (0.12156862745098039, 0.47058823529411764, 0.70588235294117652), + (0.69803921568627447, 0.87450980392156863, 0.54117647058823526), + (0.2, 0.62745098039215685, 0.17254901960784313), + (0.98431372549019602, 0.60392156862745094, 0.6 ), + (0.8901960784313725, 0.10196078431372549, 0.10980392156862745), + (0.99215686274509807, 0.74901960784313726, 0.43529411764705883), + (1.0, 0.49803921568627452, 0.0 ), + (0.792156862745098, 0.69803921568627447, 0.83921568627450982), + (0.41568627450980394, 0.23921568627450981, 0.60392156862745094), + (1.0, 1.0, 0.6 ), + (0.69411764705882351, 0.34901960784313724, 0.15686274509803921), + ) + +_Pastel1_data = ( + (0.98431372549019602, 0.70588235294117652, 0.68235294117647061), + (0.70196078431372544, 0.80392156862745101, 0.8901960784313725 ), + (0.8, 0.92156862745098034, 0.77254901960784317), + (0.87058823529411766, 0.79607843137254897, 0.89411764705882357), + (0.99607843137254903, 0.85098039215686272, 0.65098039215686276), + (1.0, 1.0, 0.8 ), + (0.89803921568627454, 0.84705882352941175, 0.74117647058823533), + (0.99215686274509807, 0.85490196078431369, 0.92549019607843142), + (0.94901960784313721, 0.94901960784313721, 0.94901960784313721), + ) + +_Pastel2_data = ( + (0.70196078431372544, 0.88627450980392153, 0.80392156862745101), + (0.99215686274509807, 0.80392156862745101, 0.67450980392156867), + (0.79607843137254897, 0.83529411764705885, 0.90980392156862744), + (0.95686274509803926, 0.792156862745098, 0.89411764705882357), + (0.90196078431372551, 0.96078431372549022, 0.78823529411764703), + (1.0, 0.94901960784313721, 0.68235294117647061), + (0.94509803921568625, 0.88627450980392153, 0.8 ), + (0.8, 0.8, 0.8 ), + ) + +_Set1_data = ( + 
(0.89411764705882357, 0.10196078431372549, 0.10980392156862745), + (0.21568627450980393, 0.49411764705882355, 0.72156862745098038), + (0.30196078431372547, 0.68627450980392157, 0.29019607843137257), + (0.59607843137254901, 0.30588235294117649, 0.63921568627450975), + (1.0, 0.49803921568627452, 0.0 ), + (1.0, 1.0, 0.2 ), + (0.65098039215686276, 0.33725490196078434, 0.15686274509803921), + (0.96862745098039216, 0.50588235294117645, 0.74901960784313726), + (0.6, 0.6, 0.6), + ) + +_Set2_data = ( + (0.4, 0.76078431372549016, 0.6470588235294118 ), + (0.9882352941176471, 0.55294117647058827, 0.3843137254901961 ), + (0.55294117647058827, 0.62745098039215685, 0.79607843137254897), + (0.90588235294117647, 0.54117647058823526, 0.76470588235294112), + (0.65098039215686276, 0.84705882352941175, 0.32941176470588235), + (1.0, 0.85098039215686272, 0.18431372549019609), + (0.89803921568627454, 0.7686274509803922, 0.58039215686274515), + (0.70196078431372544, 0.70196078431372544, 0.70196078431372544), + ) + +_Set3_data = ( + (0.55294117647058827, 0.82745098039215681, 0.7803921568627451 ), + (1.0, 1.0, 0.70196078431372544), + (0.74509803921568629, 0.72941176470588232, 0.85490196078431369), + (0.98431372549019602, 0.50196078431372548, 0.44705882352941179), + (0.50196078431372548, 0.69411764705882351, 0.82745098039215681), + (0.99215686274509807, 0.70588235294117652, 0.3843137254901961 ), + (0.70196078431372544, 0.87058823529411766, 0.41176470588235292), + (0.9882352941176471, 0.80392156862745101, 0.89803921568627454), + (0.85098039215686272, 0.85098039215686272, 0.85098039215686272), + (0.73725490196078436, 0.50196078431372548, 0.74117647058823533), + (0.8, 0.92156862745098034, 0.77254901960784317), + (1.0, 0.92941176470588238, 0.43529411764705883), + ) + + +# The next 7 palettes are from the Yorick scientific visualization package, +# an evolution of the GIST package, both by David H. Munro. 
+# They are released under a BSD-like license (see LICENSE_YORICK in +# the license directory of the matplotlib source distribution). +# +# Most palette functions have been reduced to simple function descriptions +# by Reinier Heeres, since the rgb components were mostly straight lines. +# gist_earth_data and gist_ncar_data were simplified by a script and some +# manual effort. + +_gist_earth_data = \ +{'red': ( +(0.0, 0.0, 0.0000), +(0.2824, 0.1882, 0.1882), +(0.4588, 0.2714, 0.2714), +(0.5490, 0.4719, 0.4719), +(0.6980, 0.7176, 0.7176), +(0.7882, 0.7553, 0.7553), +(1.0000, 0.9922, 0.9922), +), 'green': ( +(0.0, 0.0, 0.0000), +(0.0275, 0.0000, 0.0000), +(0.1098, 0.1893, 0.1893), +(0.1647, 0.3035, 0.3035), +(0.2078, 0.3841, 0.3841), +(0.2824, 0.5020, 0.5020), +(0.5216, 0.6397, 0.6397), +(0.6980, 0.7171, 0.7171), +(0.7882, 0.6392, 0.6392), +(0.7922, 0.6413, 0.6413), +(0.8000, 0.6447, 0.6447), +(0.8078, 0.6481, 0.6481), +(0.8157, 0.6549, 0.6549), +(0.8667, 0.6991, 0.6991), +(0.8745, 0.7103, 0.7103), +(0.8824, 0.7216, 0.7216), +(0.8902, 0.7323, 0.7323), +(0.8980, 0.7430, 0.7430), +(0.9412, 0.8275, 0.8275), +(0.9569, 0.8635, 0.8635), +(0.9647, 0.8816, 0.8816), +(0.9961, 0.9733, 0.9733), +(1.0000, 0.9843, 0.9843), +), 'blue': ( +(0.0, 0.0, 0.0000), +(0.0039, 0.1684, 0.1684), +(0.0078, 0.2212, 0.2212), +(0.0275, 0.4329, 0.4329), +(0.0314, 0.4549, 0.4549), +(0.2824, 0.5004, 0.5004), +(0.4667, 0.2748, 0.2748), +(0.5451, 0.3205, 0.3205), +(0.7843, 0.3961, 0.3961), +(0.8941, 0.6651, 0.6651), +(1.0000, 0.9843, 0.9843), +)} + +_gist_gray_data = { + 'red': gfunc[3], + 'green': gfunc[3], + 'blue': gfunc[3], +} + +def _gist_heat_red(x): return 1.5 * x +def _gist_heat_green(x): return 2 * x - 1 +def _gist_heat_blue(x): return 4 * x - 3 +_gist_heat_data = { + 'red': _gist_heat_red, 'green': _gist_heat_green, 'blue': _gist_heat_blue} + +_gist_ncar_data = \ +{'red': ( +(0.0, 0.0, 0.0000), +(0.3098, 0.0000, 0.0000), +(0.3725, 0.3993, 0.3993), +(0.4235, 0.5003, 0.5003), +(0.5333, 
1.0000, 1.0000), +(0.7922, 1.0000, 1.0000), +(0.8471, 0.6218, 0.6218), +(0.8980, 0.9235, 0.9235), +(1.0000, 0.9961, 0.9961), +), 'green': ( +(0.0, 0.0, 0.0000), +(0.0510, 0.3722, 0.3722), +(0.1059, 0.0000, 0.0000), +(0.1569, 0.7202, 0.7202), +(0.1608, 0.7537, 0.7537), +(0.1647, 0.7752, 0.7752), +(0.2157, 1.0000, 1.0000), +(0.2588, 0.9804, 0.9804), +(0.2706, 0.9804, 0.9804), +(0.3176, 1.0000, 1.0000), +(0.3686, 0.8081, 0.8081), +(0.4275, 1.0000, 1.0000), +(0.5216, 1.0000, 1.0000), +(0.6314, 0.7292, 0.7292), +(0.6863, 0.2796, 0.2796), +(0.7451, 0.0000, 0.0000), +(0.7922, 0.0000, 0.0000), +(0.8431, 0.1753, 0.1753), +(0.8980, 0.5000, 0.5000), +(1.0000, 0.9725, 0.9725), +), 'blue': ( +(0.0, 0.5020, 0.5020), +(0.0510, 0.0222, 0.0222), +(0.1098, 1.0000, 1.0000), +(0.2039, 1.0000, 1.0000), +(0.2627, 0.6145, 0.6145), +(0.3216, 0.0000, 0.0000), +(0.4157, 0.0000, 0.0000), +(0.4745, 0.2342, 0.2342), +(0.5333, 0.0000, 0.0000), +(0.5804, 0.0000, 0.0000), +(0.6314, 0.0549, 0.0549), +(0.6902, 0.0000, 0.0000), +(0.7373, 0.0000, 0.0000), +(0.7922, 0.9738, 0.9738), +(0.8000, 1.0000, 1.0000), +(0.8431, 1.0000, 1.0000), +(0.8980, 0.9341, 0.9341), +(1.0000, 0.9961, 0.9961), +)} + +_gist_rainbow_data = ( + (0.000, (1.00, 0.00, 0.16)), + (0.030, (1.00, 0.00, 0.00)), + (0.215, (1.00, 1.00, 0.00)), + (0.400, (0.00, 1.00, 0.00)), + (0.586, (0.00, 1.00, 1.00)), + (0.770, (0.00, 0.00, 1.00)), + (0.954, (1.00, 0.00, 1.00)), + (1.000, (1.00, 0.00, 0.75)) +) + +_gist_stern_data = { + 'red': ( + (0.000, 0.000, 0.000), (0.0547, 1.000, 1.000), + (0.250, 0.027, 0.250), # (0.2500, 0.250, 0.250), + (1.000, 1.000, 1.000)), + 'green': ((0, 0, 0), (1, 1, 1)), + 'blue': ( + (0.000, 0.000, 0.000), (0.500, 1.000, 1.000), + (0.735, 0.000, 0.000), (1.000, 1.000, 1.000)) +} + +def _gist_yarg(x): return 1 - x +_gist_yarg_data = {'red': _gist_yarg, 'green': _gist_yarg, 'blue': _gist_yarg} + +# This bipolar color map was generated from CoolWarmFloat33.csv of +# "Diverging Color Maps for Scientific Visualization" 
by Kenneth Moreland. +# +_coolwarm_data = { + 'red': [ + (0.0, 0.2298057, 0.2298057), + (0.03125, 0.26623388, 0.26623388), + (0.0625, 0.30386891, 0.30386891), + (0.09375, 0.342804478, 0.342804478), + (0.125, 0.38301334, 0.38301334), + (0.15625, 0.424369608, 0.424369608), + (0.1875, 0.46666708, 0.46666708), + (0.21875, 0.509635204, 0.509635204), + (0.25, 0.552953156, 0.552953156), + (0.28125, 0.596262162, 0.596262162), + (0.3125, 0.639176211, 0.639176211), + (0.34375, 0.681291281, 0.681291281), + (0.375, 0.722193294, 0.722193294), + (0.40625, 0.761464949, 0.761464949), + (0.4375, 0.798691636, 0.798691636), + (0.46875, 0.833466556, 0.833466556), + (0.5, 0.865395197, 0.865395197), + (0.53125, 0.897787179, 0.897787179), + (0.5625, 0.924127593, 0.924127593), + (0.59375, 0.944468518, 0.944468518), + (0.625, 0.958852946, 0.958852946), + (0.65625, 0.96732803, 0.96732803), + (0.6875, 0.969954137, 0.969954137), + (0.71875, 0.966811177, 0.966811177), + (0.75, 0.958003065, 0.958003065), + (0.78125, 0.943660866, 0.943660866), + (0.8125, 0.923944917, 0.923944917), + (0.84375, 0.89904617, 0.89904617), + (0.875, 0.869186849, 0.869186849), + (0.90625, 0.834620542, 0.834620542), + (0.9375, 0.795631745, 0.795631745), + (0.96875, 0.752534934, 0.752534934), + (1.0, 0.705673158, 0.705673158)], + 'green': [ + (0.0, 0.298717966, 0.298717966), + (0.03125, 0.353094838, 0.353094838), + (0.0625, 0.406535296, 0.406535296), + (0.09375, 0.458757618, 0.458757618), + (0.125, 0.50941904, 0.50941904), + (0.15625, 0.558148092, 0.558148092), + (0.1875, 0.604562568, 0.604562568), + (0.21875, 0.648280772, 0.648280772), + (0.25, 0.688929332, 0.688929332), + (0.28125, 0.726149107, 0.726149107), + (0.3125, 0.759599947, 0.759599947), + (0.34375, 0.788964712, 0.788964712), + (0.375, 0.813952739, 0.813952739), + (0.40625, 0.834302879, 0.834302879), + (0.4375, 0.849786142, 0.849786142), + (0.46875, 0.860207984, 0.860207984), + (0.5, 0.86541021, 0.86541021), + (0.53125, 0.848937047, 0.848937047), + (0.5625, 
0.827384882, 0.827384882), + (0.59375, 0.800927443, 0.800927443), + (0.625, 0.769767752, 0.769767752), + (0.65625, 0.734132809, 0.734132809), + (0.6875, 0.694266682, 0.694266682), + (0.71875, 0.650421156, 0.650421156), + (0.75, 0.602842431, 0.602842431), + (0.78125, 0.551750968, 0.551750968), + (0.8125, 0.49730856, 0.49730856), + (0.84375, 0.439559467, 0.439559467), + (0.875, 0.378313092, 0.378313092), + (0.90625, 0.312874446, 0.312874446), + (0.9375, 0.24128379, 0.24128379), + (0.96875, 0.157246067, 0.157246067), + (1.0, 0.01555616, 0.01555616)], + 'blue': [ + (0.0, 0.753683153, 0.753683153), + (0.03125, 0.801466763, 0.801466763), + (0.0625, 0.84495867, 0.84495867), + (0.09375, 0.883725899, 0.883725899), + (0.125, 0.917387822, 0.917387822), + (0.15625, 0.945619588, 0.945619588), + (0.1875, 0.968154911, 0.968154911), + (0.21875, 0.98478814, 0.98478814), + (0.25, 0.995375608, 0.995375608), + (0.28125, 0.999836203, 0.999836203), + (0.3125, 0.998151185, 0.998151185), + (0.34375, 0.990363227, 0.990363227), + (0.375, 0.976574709, 0.976574709), + (0.40625, 0.956945269, 0.956945269), + (0.4375, 0.931688648, 0.931688648), + (0.46875, 0.901068838, 0.901068838), + (0.5, 0.865395561, 0.865395561), + (0.53125, 0.820880546, 0.820880546), + (0.5625, 0.774508472, 0.774508472), + (0.59375, 0.726736146, 0.726736146), + (0.625, 0.678007945, 0.678007945), + (0.65625, 0.628751763, 0.628751763), + (0.6875, 0.579375448, 0.579375448), + (0.71875, 0.530263762, 0.530263762), + (0.75, 0.481775914, 0.481775914), + (0.78125, 0.434243684, 0.434243684), + (0.8125, 0.387970225, 0.387970225), + (0.84375, 0.343229596, 0.343229596), + (0.875, 0.300267182, 0.300267182), + (0.90625, 0.259301199, 0.259301199), + (0.9375, 0.220525627, 0.220525627), + (0.96875, 0.184115123, 0.184115123), + (1.0, 0.150232812, 0.150232812)] + } + +# Implementation of Carey Rappaport's CMRmap. 
+# See `A Color Map for Effective Black-and-White Rendering of Color-Scale +# Images' by Carey Rappaport +# http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m +_CMRmap_data = {'red': ((0.000, 0.00, 0.00), + (0.125, 0.15, 0.15), + (0.250, 0.30, 0.30), + (0.375, 0.60, 0.60), + (0.500, 1.00, 1.00), + (0.625, 0.90, 0.90), + (0.750, 0.90, 0.90), + (0.875, 0.90, 0.90), + (1.000, 1.00, 1.00)), + 'green': ((0.000, 0.00, 0.00), + (0.125, 0.15, 0.15), + (0.250, 0.15, 0.15), + (0.375, 0.20, 0.20), + (0.500, 0.25, 0.25), + (0.625, 0.50, 0.50), + (0.750, 0.75, 0.75), + (0.875, 0.90, 0.90), + (1.000, 1.00, 1.00)), + 'blue': ((0.000, 0.00, 0.00), + (0.125, 0.50, 0.50), + (0.250, 0.75, 0.75), + (0.375, 0.50, 0.50), + (0.500, 0.15, 0.15), + (0.625, 0.00, 0.00), + (0.750, 0.10, 0.10), + (0.875, 0.50, 0.50), + (1.000, 1.00, 1.00))} + + +# An MIT licensed, colorblind-friendly heatmap from Wistia: +# https://github.com/wistia/heatmap-palette +# http://wistia.com/blog/heatmaps-for-colorblindness +# +# >>> import matplotlib.colors as c +# >>> colors = ["#e4ff7a", "#ffe81a", "#ffbd00", "#ffa000", "#fc7f00"] +# >>> cm = c.LinearSegmentedColormap.from_list('wistia', colors) +# >>> _wistia_data = cm._segmentdata +# >>> del _wistia_data['alpha'] +# +_wistia_data = { + 'red': [(0.0, 0.8941176470588236, 0.8941176470588236), + (0.25, 1.0, 1.0), + (0.5, 1.0, 1.0), + (0.75, 1.0, 1.0), + (1.0, 0.9882352941176471, 0.9882352941176471)], + 'green': [(0.0, 1.0, 1.0), + (0.25, 0.9098039215686274, 0.9098039215686274), + (0.5, 0.7411764705882353, 0.7411764705882353), + (0.75, 0.6274509803921569, 0.6274509803921569), + (1.0, 0.4980392156862745, 0.4980392156862745)], + 'blue': [(0.0, 0.47843137254901963, 0.47843137254901963), + (0.25, 0.10196078431372549, 0.10196078431372549), + (0.5, 0.0, 0.0), + (0.75, 0.0, 0.0), + (1.0, 0.0, 0.0)], +} + + +# Categorical palettes from Vega: +# https://github.com/vega/vega/wiki/Scales +# (divided by 255) +# + +_tab10_data = ( + (0.12156862745098039, 
0.4666666666666667, 0.7058823529411765 ), # 1f77b4 + (1.0, 0.4980392156862745, 0.054901960784313725), # ff7f0e + (0.17254901960784313, 0.6274509803921569, 0.17254901960784313 ), # 2ca02c + (0.8392156862745098, 0.15294117647058825, 0.1568627450980392 ), # d62728 + (0.5803921568627451, 0.403921568627451, 0.7411764705882353 ), # 9467bd + (0.5490196078431373, 0.33725490196078434, 0.29411764705882354 ), # 8c564b + (0.8901960784313725, 0.4666666666666667, 0.7607843137254902 ), # e377c2 + (0.4980392156862745, 0.4980392156862745, 0.4980392156862745 ), # 7f7f7f + (0.7372549019607844, 0.7411764705882353, 0.13333333333333333 ), # bcbd22 + (0.09019607843137255, 0.7450980392156863, 0.8117647058823529), # 17becf +) + +_tab20_data = ( + (0.12156862745098039, 0.4666666666666667, 0.7058823529411765 ), # 1f77b4 + (0.6823529411764706, 0.7803921568627451, 0.9098039215686274 ), # aec7e8 + (1.0, 0.4980392156862745, 0.054901960784313725), # ff7f0e + (1.0, 0.7333333333333333, 0.47058823529411764 ), # ffbb78 + (0.17254901960784313, 0.6274509803921569, 0.17254901960784313 ), # 2ca02c + (0.596078431372549, 0.8745098039215686, 0.5411764705882353 ), # 98df8a + (0.8392156862745098, 0.15294117647058825, 0.1568627450980392 ), # d62728 + (1.0, 0.596078431372549, 0.5882352941176471 ), # ff9896 + (0.5803921568627451, 0.403921568627451, 0.7411764705882353 ), # 9467bd + (0.7725490196078432, 0.6901960784313725, 0.8352941176470589 ), # c5b0d5 + (0.5490196078431373, 0.33725490196078434, 0.29411764705882354 ), # 8c564b + (0.7686274509803922, 0.611764705882353, 0.5803921568627451 ), # c49c94 + (0.8901960784313725, 0.4666666666666667, 0.7607843137254902 ), # e377c2 + (0.9686274509803922, 0.7137254901960784, 0.8235294117647058 ), # f7b6d2 + (0.4980392156862745, 0.4980392156862745, 0.4980392156862745 ), # 7f7f7f + (0.7803921568627451, 0.7803921568627451, 0.7803921568627451 ), # c7c7c7 + (0.7372549019607844, 0.7411764705882353, 0.13333333333333333 ), # bcbd22 + (0.8588235294117647, 0.8588235294117647, 
0.5529411764705883 ), # dbdb8d + (0.09019607843137255, 0.7450980392156863, 0.8117647058823529 ), # 17becf + (0.6196078431372549, 0.8549019607843137, 0.8980392156862745), # 9edae5 +) + +_tab20b_data = ( + (0.2235294117647059, 0.23137254901960785, 0.4745098039215686 ), # 393b79 + (0.3215686274509804, 0.32941176470588235, 0.6392156862745098 ), # 5254a3 + (0.4196078431372549, 0.43137254901960786, 0.8117647058823529 ), # 6b6ecf + (0.611764705882353, 0.6196078431372549, 0.8705882352941177 ), # 9c9ede + (0.38823529411764707, 0.4745098039215686, 0.2235294117647059 ), # 637939 + (0.5490196078431373, 0.6352941176470588, 0.3215686274509804 ), # 8ca252 + (0.7098039215686275, 0.8117647058823529, 0.4196078431372549 ), # b5cf6b + (0.807843137254902, 0.8588235294117647, 0.611764705882353 ), # cedb9c + (0.5490196078431373, 0.42745098039215684, 0.19215686274509805), # 8c6d31 + (0.7411764705882353, 0.6196078431372549, 0.2235294117647059 ), # bd9e39 + (0.9058823529411765, 0.7294117647058823, 0.3215686274509804 ), # e7ba52 + (0.9058823529411765, 0.796078431372549, 0.5803921568627451 ), # e7cb94 + (0.5176470588235295, 0.23529411764705882, 0.2235294117647059 ), # 843c39 + (0.6784313725490196, 0.28627450980392155, 0.2901960784313726 ), # ad494a + (0.8392156862745098, 0.3803921568627451, 0.4196078431372549 ), # d6616b + (0.9058823529411765, 0.5882352941176471, 0.611764705882353 ), # e7969c + (0.4823529411764706, 0.2549019607843137, 0.45098039215686275), # 7b4173 + (0.6470588235294118, 0.3176470588235294, 0.5803921568627451 ), # a55194 + (0.807843137254902, 0.42745098039215684, 0.7411764705882353 ), # ce6dbd + (0.8705882352941177, 0.6196078431372549, 0.8392156862745098 ), # de9ed6 +) + +_tab20c_data = ( + (0.19215686274509805, 0.5098039215686274, 0.7411764705882353 ), # 3182bd + (0.4196078431372549, 0.6823529411764706, 0.8392156862745098 ), # 6baed6 + (0.6196078431372549, 0.792156862745098, 0.8823529411764706 ), # 9ecae1 + (0.7764705882352941, 0.8588235294117647, 0.9372549019607843 ), # 
c6dbef + (0.9019607843137255, 0.3333333333333333, 0.050980392156862744), # e6550d + (0.9921568627450981, 0.5529411764705883, 0.23529411764705882 ), # fd8d3c + (0.9921568627450981, 0.6823529411764706, 0.4196078431372549 ), # fdae6b + (0.9921568627450981, 0.8156862745098039, 0.6352941176470588 ), # fdd0a2 + (0.19215686274509805, 0.6392156862745098, 0.32941176470588235 ), # 31a354 + (0.4549019607843137, 0.7686274509803922, 0.4627450980392157 ), # 74c476 + (0.6313725490196078, 0.8509803921568627, 0.6078431372549019 ), # a1d99b + (0.7803921568627451, 0.9137254901960784, 0.7529411764705882 ), # c7e9c0 + (0.4588235294117647, 0.4196078431372549, 0.6941176470588235 ), # 756bb1 + (0.6196078431372549, 0.6039215686274509, 0.7843137254901961 ), # 9e9ac8 + (0.7372549019607844, 0.7411764705882353, 0.8627450980392157 ), # bcbddc + (0.8549019607843137, 0.8549019607843137, 0.9215686274509803 ), # dadaeb + (0.38823529411764707, 0.38823529411764707, 0.38823529411764707 ), # 636363 + (0.5882352941176471, 0.5882352941176471, 0.5882352941176471 ), # 969696 + (0.7411764705882353, 0.7411764705882353, 0.7411764705882353 ), # bdbdbd + (0.8509803921568627, 0.8509803921568627, 0.8509803921568627 ), # d9d9d9 +) + + +datad = { + 'Blues': _Blues_data, + 'BrBG': _BrBG_data, + 'BuGn': _BuGn_data, + 'BuPu': _BuPu_data, + 'CMRmap': _CMRmap_data, + 'GnBu': _GnBu_data, + 'Greens': _Greens_data, + 'Greys': _Greys_data, + 'OrRd': _OrRd_data, + 'Oranges': _Oranges_data, + 'PRGn': _PRGn_data, + 'PiYG': _PiYG_data, + 'PuBu': _PuBu_data, + 'PuBuGn': _PuBuGn_data, + 'PuOr': _PuOr_data, + 'PuRd': _PuRd_data, + 'Purples': _Purples_data, + 'RdBu': _RdBu_data, + 'RdGy': _RdGy_data, + 'RdPu': _RdPu_data, + 'RdYlBu': _RdYlBu_data, + 'RdYlGn': _RdYlGn_data, + 'Reds': _Reds_data, + 'Spectral': _Spectral_data, + 'Wistia': _wistia_data, + 'YlGn': _YlGn_data, + 'YlGnBu': _YlGnBu_data, + 'YlOrBr': _YlOrBr_data, + 'YlOrRd': _YlOrRd_data, + 'afmhot': _afmhot_data, + 'autumn': _autumn_data, + 'binary': _binary_data, + 
'bone': _bone_data, + 'brg': _brg_data, + 'bwr': _bwr_data, + 'cool': _cool_data, + 'coolwarm': _coolwarm_data, + 'copper': _copper_data, + 'cubehelix': _cubehelix_data, + 'flag': _flag_data, + 'gist_earth': _gist_earth_data, + 'gist_gray': _gist_gray_data, + 'gist_heat': _gist_heat_data, + 'gist_ncar': _gist_ncar_data, + 'gist_rainbow': _gist_rainbow_data, + 'gist_stern': _gist_stern_data, + 'gist_yarg': _gist_yarg_data, + 'gnuplot': _gnuplot_data, + 'gnuplot2': _gnuplot2_data, + 'gray': _gray_data, + 'hot': _hot_data, + 'hsv': _hsv_data, + 'jet': _jet_data, + 'nipy_spectral': _nipy_spectral_data, + 'ocean': _ocean_data, + 'pink': _pink_data, + 'prism': _prism_data, + 'rainbow': _rainbow_data, + 'seismic': _seismic_data, + 'spring': _spring_data, + 'summer': _summer_data, + 'terrain': _terrain_data, + 'winter': _winter_data, + # Qualitative + 'Accent': {'listed': _Accent_data}, + 'Dark2': {'listed': _Dark2_data}, + 'Paired': {'listed': _Paired_data}, + 'Pastel1': {'listed': _Pastel1_data}, + 'Pastel2': {'listed': _Pastel2_data}, + 'Set1': {'listed': _Set1_data}, + 'Set2': {'listed': _Set2_data}, + 'Set3': {'listed': _Set3_data}, + 'tab10': {'listed': _tab10_data}, + 'tab20': {'listed': _tab20_data}, + 'tab20b': {'listed': _tab20b_data}, + 'tab20c': {'listed': _tab20c_data}, +} diff --git a/venv/Lib/site-packages/matplotlib/_cm_listed.py b/venv/Lib/site-packages/matplotlib/_cm_listed.py new file mode 100644 index 000000000..a331ad74a --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_cm_listed.py @@ -0,0 +1,2071 @@ +from .colors import ListedColormap + +_magma_data = [[0.001462, 0.000466, 0.013866], + [0.002258, 0.001295, 0.018331], + [0.003279, 0.002305, 0.023708], + [0.004512, 0.003490, 0.029965], + [0.005950, 0.004843, 0.037130], + [0.007588, 0.006356, 0.044973], + [0.009426, 0.008022, 0.052844], + [0.011465, 0.009828, 0.060750], + [0.013708, 0.011771, 0.068667], + [0.016156, 0.013840, 0.076603], + [0.018815, 0.016026, 0.084584], + [0.021692, 0.018320, 
0.092610], + [0.024792, 0.020715, 0.100676], + [0.028123, 0.023201, 0.108787], + [0.031696, 0.025765, 0.116965], + [0.035520, 0.028397, 0.125209], + [0.039608, 0.031090, 0.133515], + [0.043830, 0.033830, 0.141886], + [0.048062, 0.036607, 0.150327], + [0.052320, 0.039407, 0.158841], + [0.056615, 0.042160, 0.167446], + [0.060949, 0.044794, 0.176129], + [0.065330, 0.047318, 0.184892], + [0.069764, 0.049726, 0.193735], + [0.074257, 0.052017, 0.202660], + [0.078815, 0.054184, 0.211667], + [0.083446, 0.056225, 0.220755], + [0.088155, 0.058133, 0.229922], + [0.092949, 0.059904, 0.239164], + [0.097833, 0.061531, 0.248477], + [0.102815, 0.063010, 0.257854], + [0.107899, 0.064335, 0.267289], + [0.113094, 0.065492, 0.276784], + [0.118405, 0.066479, 0.286321], + [0.123833, 0.067295, 0.295879], + [0.129380, 0.067935, 0.305443], + [0.135053, 0.068391, 0.315000], + [0.140858, 0.068654, 0.324538], + [0.146785, 0.068738, 0.334011], + [0.152839, 0.068637, 0.343404], + [0.159018, 0.068354, 0.352688], + [0.165308, 0.067911, 0.361816], + [0.171713, 0.067305, 0.370771], + [0.178212, 0.066576, 0.379497], + [0.184801, 0.065732, 0.387973], + [0.191460, 0.064818, 0.396152], + [0.198177, 0.063862, 0.404009], + [0.204935, 0.062907, 0.411514], + [0.211718, 0.061992, 0.418647], + [0.218512, 0.061158, 0.425392], + [0.225302, 0.060445, 0.431742], + [0.232077, 0.059889, 0.437695], + [0.238826, 0.059517, 0.443256], + [0.245543, 0.059352, 0.448436], + [0.252220, 0.059415, 0.453248], + [0.258857, 0.059706, 0.457710], + [0.265447, 0.060237, 0.461840], + [0.271994, 0.060994, 0.465660], + [0.278493, 0.061978, 0.469190], + [0.284951, 0.063168, 0.472451], + [0.291366, 0.064553, 0.475462], + [0.297740, 0.066117, 0.478243], + [0.304081, 0.067835, 0.480812], + [0.310382, 0.069702, 0.483186], + [0.316654, 0.071690, 0.485380], + [0.322899, 0.073782, 0.487408], + [0.329114, 0.075972, 0.489287], + [0.335308, 0.078236, 0.491024], + [0.341482, 0.080564, 0.492631], + [0.347636, 0.082946, 0.494121], + [0.353773, 
0.085373, 0.495501], + [0.359898, 0.087831, 0.496778], + [0.366012, 0.090314, 0.497960], + [0.372116, 0.092816, 0.499053], + [0.378211, 0.095332, 0.500067], + [0.384299, 0.097855, 0.501002], + [0.390384, 0.100379, 0.501864], + [0.396467, 0.102902, 0.502658], + [0.402548, 0.105420, 0.503386], + [0.408629, 0.107930, 0.504052], + [0.414709, 0.110431, 0.504662], + [0.420791, 0.112920, 0.505215], + [0.426877, 0.115395, 0.505714], + [0.432967, 0.117855, 0.506160], + [0.439062, 0.120298, 0.506555], + [0.445163, 0.122724, 0.506901], + [0.451271, 0.125132, 0.507198], + [0.457386, 0.127522, 0.507448], + [0.463508, 0.129893, 0.507652], + [0.469640, 0.132245, 0.507809], + [0.475780, 0.134577, 0.507921], + [0.481929, 0.136891, 0.507989], + [0.488088, 0.139186, 0.508011], + [0.494258, 0.141462, 0.507988], + [0.500438, 0.143719, 0.507920], + [0.506629, 0.145958, 0.507806], + [0.512831, 0.148179, 0.507648], + [0.519045, 0.150383, 0.507443], + [0.525270, 0.152569, 0.507192], + [0.531507, 0.154739, 0.506895], + [0.537755, 0.156894, 0.506551], + [0.544015, 0.159033, 0.506159], + [0.550287, 0.161158, 0.505719], + [0.556571, 0.163269, 0.505230], + [0.562866, 0.165368, 0.504692], + [0.569172, 0.167454, 0.504105], + [0.575490, 0.169530, 0.503466], + [0.581819, 0.171596, 0.502777], + [0.588158, 0.173652, 0.502035], + [0.594508, 0.175701, 0.501241], + [0.600868, 0.177743, 0.500394], + [0.607238, 0.179779, 0.499492], + [0.613617, 0.181811, 0.498536], + [0.620005, 0.183840, 0.497524], + [0.626401, 0.185867, 0.496456], + [0.632805, 0.187893, 0.495332], + [0.639216, 0.189921, 0.494150], + [0.645633, 0.191952, 0.492910], + [0.652056, 0.193986, 0.491611], + [0.658483, 0.196027, 0.490253], + [0.664915, 0.198075, 0.488836], + [0.671349, 0.200133, 0.487358], + [0.677786, 0.202203, 0.485819], + [0.684224, 0.204286, 0.484219], + [0.690661, 0.206384, 0.482558], + [0.697098, 0.208501, 0.480835], + [0.703532, 0.210638, 0.479049], + [0.709962, 0.212797, 0.477201], + [0.716387, 0.214982, 0.475290], + 
[0.722805, 0.217194, 0.473316], + [0.729216, 0.219437, 0.471279], + [0.735616, 0.221713, 0.469180], + [0.742004, 0.224025, 0.467018], + [0.748378, 0.226377, 0.464794], + [0.754737, 0.228772, 0.462509], + [0.761077, 0.231214, 0.460162], + [0.767398, 0.233705, 0.457755], + [0.773695, 0.236249, 0.455289], + [0.779968, 0.238851, 0.452765], + [0.786212, 0.241514, 0.450184], + [0.792427, 0.244242, 0.447543], + [0.798608, 0.247040, 0.444848], + [0.804752, 0.249911, 0.442102], + [0.810855, 0.252861, 0.439305], + [0.816914, 0.255895, 0.436461], + [0.822926, 0.259016, 0.433573], + [0.828886, 0.262229, 0.430644], + [0.834791, 0.265540, 0.427671], + [0.840636, 0.268953, 0.424666], + [0.846416, 0.272473, 0.421631], + [0.852126, 0.276106, 0.418573], + [0.857763, 0.279857, 0.415496], + [0.863320, 0.283729, 0.412403], + [0.868793, 0.287728, 0.409303], + [0.874176, 0.291859, 0.406205], + [0.879464, 0.296125, 0.403118], + [0.884651, 0.300530, 0.400047], + [0.889731, 0.305079, 0.397002], + [0.894700, 0.309773, 0.393995], + [0.899552, 0.314616, 0.391037], + [0.904281, 0.319610, 0.388137], + [0.908884, 0.324755, 0.385308], + [0.913354, 0.330052, 0.382563], + [0.917689, 0.335500, 0.379915], + [0.921884, 0.341098, 0.377376], + [0.925937, 0.346844, 0.374959], + [0.929845, 0.352734, 0.372677], + [0.933606, 0.358764, 0.370541], + [0.937221, 0.364929, 0.368567], + [0.940687, 0.371224, 0.366762], + [0.944006, 0.377643, 0.365136], + [0.947180, 0.384178, 0.363701], + [0.950210, 0.390820, 0.362468], + [0.953099, 0.397563, 0.361438], + [0.955849, 0.404400, 0.360619], + [0.958464, 0.411324, 0.360014], + [0.960949, 0.418323, 0.359630], + [0.963310, 0.425390, 0.359469], + [0.965549, 0.432519, 0.359529], + [0.967671, 0.439703, 0.359810], + [0.969680, 0.446936, 0.360311], + [0.971582, 0.454210, 0.361030], + [0.973381, 0.461520, 0.361965], + [0.975082, 0.468861, 0.363111], + [0.976690, 0.476226, 0.364466], + [0.978210, 0.483612, 0.366025], + [0.979645, 0.491014, 0.367783], + [0.981000, 0.498428, 
0.369734], + [0.982279, 0.505851, 0.371874], + [0.983485, 0.513280, 0.374198], + [0.984622, 0.520713, 0.376698], + [0.985693, 0.528148, 0.379371], + [0.986700, 0.535582, 0.382210], + [0.987646, 0.543015, 0.385210], + [0.988533, 0.550446, 0.388365], + [0.989363, 0.557873, 0.391671], + [0.990138, 0.565296, 0.395122], + [0.990871, 0.572706, 0.398714], + [0.991558, 0.580107, 0.402441], + [0.992196, 0.587502, 0.406299], + [0.992785, 0.594891, 0.410283], + [0.993326, 0.602275, 0.414390], + [0.993834, 0.609644, 0.418613], + [0.994309, 0.616999, 0.422950], + [0.994738, 0.624350, 0.427397], + [0.995122, 0.631696, 0.431951], + [0.995480, 0.639027, 0.436607], + [0.995810, 0.646344, 0.441361], + [0.996096, 0.653659, 0.446213], + [0.996341, 0.660969, 0.451160], + [0.996580, 0.668256, 0.456192], + [0.996775, 0.675541, 0.461314], + [0.996925, 0.682828, 0.466526], + [0.997077, 0.690088, 0.471811], + [0.997186, 0.697349, 0.477182], + [0.997254, 0.704611, 0.482635], + [0.997325, 0.711848, 0.488154], + [0.997351, 0.719089, 0.493755], + [0.997351, 0.726324, 0.499428], + [0.997341, 0.733545, 0.505167], + [0.997285, 0.740772, 0.510983], + [0.997228, 0.747981, 0.516859], + [0.997138, 0.755190, 0.522806], + [0.997019, 0.762398, 0.528821], + [0.996898, 0.769591, 0.534892], + [0.996727, 0.776795, 0.541039], + [0.996571, 0.783977, 0.547233], + [0.996369, 0.791167, 0.553499], + [0.996162, 0.798348, 0.559820], + [0.995932, 0.805527, 0.566202], + [0.995680, 0.812706, 0.572645], + [0.995424, 0.819875, 0.579140], + [0.995131, 0.827052, 0.585701], + [0.994851, 0.834213, 0.592307], + [0.994524, 0.841387, 0.598983], + [0.994222, 0.848540, 0.605696], + [0.993866, 0.855711, 0.612482], + [0.993545, 0.862859, 0.619299], + [0.993170, 0.870024, 0.626189], + [0.992831, 0.877168, 0.633109], + [0.992440, 0.884330, 0.640099], + [0.992089, 0.891470, 0.647116], + [0.991688, 0.898627, 0.654202], + [0.991332, 0.905763, 0.661309], + [0.990930, 0.912915, 0.668481], + [0.990570, 0.920049, 0.675675], + [0.990175, 
0.927196, 0.682926], + [0.989815, 0.934329, 0.690198], + [0.989434, 0.941470, 0.697519], + [0.989077, 0.948604, 0.704863], + [0.988717, 0.955742, 0.712242], + [0.988367, 0.962878, 0.719649], + [0.988033, 0.970012, 0.727077], + [0.987691, 0.977154, 0.734536], + [0.987387, 0.984288, 0.742002], + [0.987053, 0.991438, 0.749504]] + +_inferno_data = [[0.001462, 0.000466, 0.013866], + [0.002267, 0.001270, 0.018570], + [0.003299, 0.002249, 0.024239], + [0.004547, 0.003392, 0.030909], + [0.006006, 0.004692, 0.038558], + [0.007676, 0.006136, 0.046836], + [0.009561, 0.007713, 0.055143], + [0.011663, 0.009417, 0.063460], + [0.013995, 0.011225, 0.071862], + [0.016561, 0.013136, 0.080282], + [0.019373, 0.015133, 0.088767], + [0.022447, 0.017199, 0.097327], + [0.025793, 0.019331, 0.105930], + [0.029432, 0.021503, 0.114621], + [0.033385, 0.023702, 0.123397], + [0.037668, 0.025921, 0.132232], + [0.042253, 0.028139, 0.141141], + [0.046915, 0.030324, 0.150164], + [0.051644, 0.032474, 0.159254], + [0.056449, 0.034569, 0.168414], + [0.061340, 0.036590, 0.177642], + [0.066331, 0.038504, 0.186962], + [0.071429, 0.040294, 0.196354], + [0.076637, 0.041905, 0.205799], + [0.081962, 0.043328, 0.215289], + [0.087411, 0.044556, 0.224813], + [0.092990, 0.045583, 0.234358], + [0.098702, 0.046402, 0.243904], + [0.104551, 0.047008, 0.253430], + [0.110536, 0.047399, 0.262912], + [0.116656, 0.047574, 0.272321], + [0.122908, 0.047536, 0.281624], + [0.129285, 0.047293, 0.290788], + [0.135778, 0.046856, 0.299776], + [0.142378, 0.046242, 0.308553], + [0.149073, 0.045468, 0.317085], + [0.155850, 0.044559, 0.325338], + [0.162689, 0.043554, 0.333277], + [0.169575, 0.042489, 0.340874], + [0.176493, 0.041402, 0.348111], + [0.183429, 0.040329, 0.354971], + [0.190367, 0.039309, 0.361447], + [0.197297, 0.038400, 0.367535], + [0.204209, 0.037632, 0.373238], + [0.211095, 0.037030, 0.378563], + [0.217949, 0.036615, 0.383522], + [0.224763, 0.036405, 0.388129], + [0.231538, 0.036405, 0.392400], + [0.238273, 0.036621, 
0.396353], + [0.244967, 0.037055, 0.400007], + [0.251620, 0.037705, 0.403378], + [0.258234, 0.038571, 0.406485], + [0.264810, 0.039647, 0.409345], + [0.271347, 0.040922, 0.411976], + [0.277850, 0.042353, 0.414392], + [0.284321, 0.043933, 0.416608], + [0.290763, 0.045644, 0.418637], + [0.297178, 0.047470, 0.420491], + [0.303568, 0.049396, 0.422182], + [0.309935, 0.051407, 0.423721], + [0.316282, 0.053490, 0.425116], + [0.322610, 0.055634, 0.426377], + [0.328921, 0.057827, 0.427511], + [0.335217, 0.060060, 0.428524], + [0.341500, 0.062325, 0.429425], + [0.347771, 0.064616, 0.430217], + [0.354032, 0.066925, 0.430906], + [0.360284, 0.069247, 0.431497], + [0.366529, 0.071579, 0.431994], + [0.372768, 0.073915, 0.432400], + [0.379001, 0.076253, 0.432719], + [0.385228, 0.078591, 0.432955], + [0.391453, 0.080927, 0.433109], + [0.397674, 0.083257, 0.433183], + [0.403894, 0.085580, 0.433179], + [0.410113, 0.087896, 0.433098], + [0.416331, 0.090203, 0.432943], + [0.422549, 0.092501, 0.432714], + [0.428768, 0.094790, 0.432412], + [0.434987, 0.097069, 0.432039], + [0.441207, 0.099338, 0.431594], + [0.447428, 0.101597, 0.431080], + [0.453651, 0.103848, 0.430498], + [0.459875, 0.106089, 0.429846], + [0.466100, 0.108322, 0.429125], + [0.472328, 0.110547, 0.428334], + [0.478558, 0.112764, 0.427475], + [0.484789, 0.114974, 0.426548], + [0.491022, 0.117179, 0.425552], + [0.497257, 0.119379, 0.424488], + [0.503493, 0.121575, 0.423356], + [0.509730, 0.123769, 0.422156], + [0.515967, 0.125960, 0.420887], + [0.522206, 0.128150, 0.419549], + [0.528444, 0.130341, 0.418142], + [0.534683, 0.132534, 0.416667], + [0.540920, 0.134729, 0.415123], + [0.547157, 0.136929, 0.413511], + [0.553392, 0.139134, 0.411829], + [0.559624, 0.141346, 0.410078], + [0.565854, 0.143567, 0.408258], + [0.572081, 0.145797, 0.406369], + [0.578304, 0.148039, 0.404411], + [0.584521, 0.150294, 0.402385], + [0.590734, 0.152563, 0.400290], + [0.596940, 0.154848, 0.398125], + [0.603139, 0.157151, 0.395891], + [0.609330, 
0.159474, 0.393589], + [0.615513, 0.161817, 0.391219], + [0.621685, 0.164184, 0.388781], + [0.627847, 0.166575, 0.386276], + [0.633998, 0.168992, 0.383704], + [0.640135, 0.171438, 0.381065], + [0.646260, 0.173914, 0.378359], + [0.652369, 0.176421, 0.375586], + [0.658463, 0.178962, 0.372748], + [0.664540, 0.181539, 0.369846], + [0.670599, 0.184153, 0.366879], + [0.676638, 0.186807, 0.363849], + [0.682656, 0.189501, 0.360757], + [0.688653, 0.192239, 0.357603], + [0.694627, 0.195021, 0.354388], + [0.700576, 0.197851, 0.351113], + [0.706500, 0.200728, 0.347777], + [0.712396, 0.203656, 0.344383], + [0.718264, 0.206636, 0.340931], + [0.724103, 0.209670, 0.337424], + [0.729909, 0.212759, 0.333861], + [0.735683, 0.215906, 0.330245], + [0.741423, 0.219112, 0.326576], + [0.747127, 0.222378, 0.322856], + [0.752794, 0.225706, 0.319085], + [0.758422, 0.229097, 0.315266], + [0.764010, 0.232554, 0.311399], + [0.769556, 0.236077, 0.307485], + [0.775059, 0.239667, 0.303526], + [0.780517, 0.243327, 0.299523], + [0.785929, 0.247056, 0.295477], + [0.791293, 0.250856, 0.291390], + [0.796607, 0.254728, 0.287264], + [0.801871, 0.258674, 0.283099], + [0.807082, 0.262692, 0.278898], + [0.812239, 0.266786, 0.274661], + [0.817341, 0.270954, 0.270390], + [0.822386, 0.275197, 0.266085], + [0.827372, 0.279517, 0.261750], + [0.832299, 0.283913, 0.257383], + [0.837165, 0.288385, 0.252988], + [0.841969, 0.292933, 0.248564], + [0.846709, 0.297559, 0.244113], + [0.851384, 0.302260, 0.239636], + [0.855992, 0.307038, 0.235133], + [0.860533, 0.311892, 0.230606], + [0.865006, 0.316822, 0.226055], + [0.869409, 0.321827, 0.221482], + [0.873741, 0.326906, 0.216886], + [0.878001, 0.332060, 0.212268], + [0.882188, 0.337287, 0.207628], + [0.886302, 0.342586, 0.202968], + [0.890341, 0.347957, 0.198286], + [0.894305, 0.353399, 0.193584], + [0.898192, 0.358911, 0.188860], + [0.902003, 0.364492, 0.184116], + [0.905735, 0.370140, 0.179350], + [0.909390, 0.375856, 0.174563], + [0.912966, 0.381636, 0.169755], + 
[0.916462, 0.387481, 0.164924], + [0.919879, 0.393389, 0.160070], + [0.923215, 0.399359, 0.155193], + [0.926470, 0.405389, 0.150292], + [0.929644, 0.411479, 0.145367], + [0.932737, 0.417627, 0.140417], + [0.935747, 0.423831, 0.135440], + [0.938675, 0.430091, 0.130438], + [0.941521, 0.436405, 0.125409], + [0.944285, 0.442772, 0.120354], + [0.946965, 0.449191, 0.115272], + [0.949562, 0.455660, 0.110164], + [0.952075, 0.462178, 0.105031], + [0.954506, 0.468744, 0.099874], + [0.956852, 0.475356, 0.094695], + [0.959114, 0.482014, 0.089499], + [0.961293, 0.488716, 0.084289], + [0.963387, 0.495462, 0.079073], + [0.965397, 0.502249, 0.073859], + [0.967322, 0.509078, 0.068659], + [0.969163, 0.515946, 0.063488], + [0.970919, 0.522853, 0.058367], + [0.972590, 0.529798, 0.053324], + [0.974176, 0.536780, 0.048392], + [0.975677, 0.543798, 0.043618], + [0.977092, 0.550850, 0.039050], + [0.978422, 0.557937, 0.034931], + [0.979666, 0.565057, 0.031409], + [0.980824, 0.572209, 0.028508], + [0.981895, 0.579392, 0.026250], + [0.982881, 0.586606, 0.024661], + [0.983779, 0.593849, 0.023770], + [0.984591, 0.601122, 0.023606], + [0.985315, 0.608422, 0.024202], + [0.985952, 0.615750, 0.025592], + [0.986502, 0.623105, 0.027814], + [0.986964, 0.630485, 0.030908], + [0.987337, 0.637890, 0.034916], + [0.987622, 0.645320, 0.039886], + [0.987819, 0.652773, 0.045581], + [0.987926, 0.660250, 0.051750], + [0.987945, 0.667748, 0.058329], + [0.987874, 0.675267, 0.065257], + [0.987714, 0.682807, 0.072489], + [0.987464, 0.690366, 0.079990], + [0.987124, 0.697944, 0.087731], + [0.986694, 0.705540, 0.095694], + [0.986175, 0.713153, 0.103863], + [0.985566, 0.720782, 0.112229], + [0.984865, 0.728427, 0.120785], + [0.984075, 0.736087, 0.129527], + [0.983196, 0.743758, 0.138453], + [0.982228, 0.751442, 0.147565], + [0.981173, 0.759135, 0.156863], + [0.980032, 0.766837, 0.166353], + [0.978806, 0.774545, 0.176037], + [0.977497, 0.782258, 0.185923], + [0.976108, 0.789974, 0.196018], + [0.974638, 0.797692, 
0.206332], + [0.973088, 0.805409, 0.216877], + [0.971468, 0.813122, 0.227658], + [0.969783, 0.820825, 0.238686], + [0.968041, 0.828515, 0.249972], + [0.966243, 0.836191, 0.261534], + [0.964394, 0.843848, 0.273391], + [0.962517, 0.851476, 0.285546], + [0.960626, 0.859069, 0.298010], + [0.958720, 0.866624, 0.310820], + [0.956834, 0.874129, 0.323974], + [0.954997, 0.881569, 0.337475], + [0.953215, 0.888942, 0.351369], + [0.951546, 0.896226, 0.365627], + [0.950018, 0.903409, 0.380271], + [0.948683, 0.910473, 0.395289], + [0.947594, 0.917399, 0.410665], + [0.946809, 0.924168, 0.426373], + [0.946392, 0.930761, 0.442367], + [0.946403, 0.937159, 0.458592], + [0.946903, 0.943348, 0.474970], + [0.947937, 0.949318, 0.491426], + [0.949545, 0.955063, 0.507860], + [0.951740, 0.960587, 0.524203], + [0.954529, 0.965896, 0.540361], + [0.957896, 0.971003, 0.556275], + [0.961812, 0.975924, 0.571925], + [0.966249, 0.980678, 0.587206], + [0.971162, 0.985282, 0.602154], + [0.976511, 0.989753, 0.616760], + [0.982257, 0.994109, 0.631017], + [0.988362, 0.998364, 0.644924]] + +_plasma_data = [[0.050383, 0.029803, 0.527975], + [0.063536, 0.028426, 0.533124], + [0.075353, 0.027206, 0.538007], + [0.086222, 0.026125, 0.542658], + [0.096379, 0.025165, 0.547103], + [0.105980, 0.024309, 0.551368], + [0.115124, 0.023556, 0.555468], + [0.123903, 0.022878, 0.559423], + [0.132381, 0.022258, 0.563250], + [0.140603, 0.021687, 0.566959], + [0.148607, 0.021154, 0.570562], + [0.156421, 0.020651, 0.574065], + [0.164070, 0.020171, 0.577478], + [0.171574, 0.019706, 0.580806], + [0.178950, 0.019252, 0.584054], + [0.186213, 0.018803, 0.587228], + [0.193374, 0.018354, 0.590330], + [0.200445, 0.017902, 0.593364], + [0.207435, 0.017442, 0.596333], + [0.214350, 0.016973, 0.599239], + [0.221197, 0.016497, 0.602083], + [0.227983, 0.016007, 0.604867], + [0.234715, 0.015502, 0.607592], + [0.241396, 0.014979, 0.610259], + [0.248032, 0.014439, 0.612868], + [0.254627, 0.013882, 0.615419], + [0.261183, 0.013308, 0.617911], 
+ [0.267703, 0.012716, 0.620346], + [0.274191, 0.012109, 0.622722], + [0.280648, 0.011488, 0.625038], + [0.287076, 0.010855, 0.627295], + [0.293478, 0.010213, 0.629490], + [0.299855, 0.009561, 0.631624], + [0.306210, 0.008902, 0.633694], + [0.312543, 0.008239, 0.635700], + [0.318856, 0.007576, 0.637640], + [0.325150, 0.006915, 0.639512], + [0.331426, 0.006261, 0.641316], + [0.337683, 0.005618, 0.643049], + [0.343925, 0.004991, 0.644710], + [0.350150, 0.004382, 0.646298], + [0.356359, 0.003798, 0.647810], + [0.362553, 0.003243, 0.649245], + [0.368733, 0.002724, 0.650601], + [0.374897, 0.002245, 0.651876], + [0.381047, 0.001814, 0.653068], + [0.387183, 0.001434, 0.654177], + [0.393304, 0.001114, 0.655199], + [0.399411, 0.000859, 0.656133], + [0.405503, 0.000678, 0.656977], + [0.411580, 0.000577, 0.657730], + [0.417642, 0.000564, 0.658390], + [0.423689, 0.000646, 0.658956], + [0.429719, 0.000831, 0.659425], + [0.435734, 0.001127, 0.659797], + [0.441732, 0.001540, 0.660069], + [0.447714, 0.002080, 0.660240], + [0.453677, 0.002755, 0.660310], + [0.459623, 0.003574, 0.660277], + [0.465550, 0.004545, 0.660139], + [0.471457, 0.005678, 0.659897], + [0.477344, 0.006980, 0.659549], + [0.483210, 0.008460, 0.659095], + [0.489055, 0.010127, 0.658534], + [0.494877, 0.011990, 0.657865], + [0.500678, 0.014055, 0.657088], + [0.506454, 0.016333, 0.656202], + [0.512206, 0.018833, 0.655209], + [0.517933, 0.021563, 0.654109], + [0.523633, 0.024532, 0.652901], + [0.529306, 0.027747, 0.651586], + [0.534952, 0.031217, 0.650165], + [0.540570, 0.034950, 0.648640], + [0.546157, 0.038954, 0.647010], + [0.551715, 0.043136, 0.645277], + [0.557243, 0.047331, 0.643443], + [0.562738, 0.051545, 0.641509], + [0.568201, 0.055778, 0.639477], + [0.573632, 0.060028, 0.637349], + [0.579029, 0.064296, 0.635126], + [0.584391, 0.068579, 0.632812], + [0.589719, 0.072878, 0.630408], + [0.595011, 0.077190, 0.627917], + [0.600266, 0.081516, 0.625342], + [0.605485, 0.085854, 0.622686], + [0.610667, 0.090204, 
0.619951], + [0.615812, 0.094564, 0.617140], + [0.620919, 0.098934, 0.614257], + [0.625987, 0.103312, 0.611305], + [0.631017, 0.107699, 0.608287], + [0.636008, 0.112092, 0.605205], + [0.640959, 0.116492, 0.602065], + [0.645872, 0.120898, 0.598867], + [0.650746, 0.125309, 0.595617], + [0.655580, 0.129725, 0.592317], + [0.660374, 0.134144, 0.588971], + [0.665129, 0.138566, 0.585582], + [0.669845, 0.142992, 0.582154], + [0.674522, 0.147419, 0.578688], + [0.679160, 0.151848, 0.575189], + [0.683758, 0.156278, 0.571660], + [0.688318, 0.160709, 0.568103], + [0.692840, 0.165141, 0.564522], + [0.697324, 0.169573, 0.560919], + [0.701769, 0.174005, 0.557296], + [0.706178, 0.178437, 0.553657], + [0.710549, 0.182868, 0.550004], + [0.714883, 0.187299, 0.546338], + [0.719181, 0.191729, 0.542663], + [0.723444, 0.196158, 0.538981], + [0.727670, 0.200586, 0.535293], + [0.731862, 0.205013, 0.531601], + [0.736019, 0.209439, 0.527908], + [0.740143, 0.213864, 0.524216], + [0.744232, 0.218288, 0.520524], + [0.748289, 0.222711, 0.516834], + [0.752312, 0.227133, 0.513149], + [0.756304, 0.231555, 0.509468], + [0.760264, 0.235976, 0.505794], + [0.764193, 0.240396, 0.502126], + [0.768090, 0.244817, 0.498465], + [0.771958, 0.249237, 0.494813], + [0.775796, 0.253658, 0.491171], + [0.779604, 0.258078, 0.487539], + [0.783383, 0.262500, 0.483918], + [0.787133, 0.266922, 0.480307], + [0.790855, 0.271345, 0.476706], + [0.794549, 0.275770, 0.473117], + [0.798216, 0.280197, 0.469538], + [0.801855, 0.284626, 0.465971], + [0.805467, 0.289057, 0.462415], + [0.809052, 0.293491, 0.458870], + [0.812612, 0.297928, 0.455338], + [0.816144, 0.302368, 0.451816], + [0.819651, 0.306812, 0.448306], + [0.823132, 0.311261, 0.444806], + [0.826588, 0.315714, 0.441316], + [0.830018, 0.320172, 0.437836], + [0.833422, 0.324635, 0.434366], + [0.836801, 0.329105, 0.430905], + [0.840155, 0.333580, 0.427455], + [0.843484, 0.338062, 0.424013], + [0.846788, 0.342551, 0.420579], + [0.850066, 0.347048, 0.417153], + [0.853319, 
0.351553, 0.413734], + [0.856547, 0.356066, 0.410322], + [0.859750, 0.360588, 0.406917], + [0.862927, 0.365119, 0.403519], + [0.866078, 0.369660, 0.400126], + [0.869203, 0.374212, 0.396738], + [0.872303, 0.378774, 0.393355], + [0.875376, 0.383347, 0.389976], + [0.878423, 0.387932, 0.386600], + [0.881443, 0.392529, 0.383229], + [0.884436, 0.397139, 0.379860], + [0.887402, 0.401762, 0.376494], + [0.890340, 0.406398, 0.373130], + [0.893250, 0.411048, 0.369768], + [0.896131, 0.415712, 0.366407], + [0.898984, 0.420392, 0.363047], + [0.901807, 0.425087, 0.359688], + [0.904601, 0.429797, 0.356329], + [0.907365, 0.434524, 0.352970], + [0.910098, 0.439268, 0.349610], + [0.912800, 0.444029, 0.346251], + [0.915471, 0.448807, 0.342890], + [0.918109, 0.453603, 0.339529], + [0.920714, 0.458417, 0.336166], + [0.923287, 0.463251, 0.332801], + [0.925825, 0.468103, 0.329435], + [0.928329, 0.472975, 0.326067], + [0.930798, 0.477867, 0.322697], + [0.933232, 0.482780, 0.319325], + [0.935630, 0.487712, 0.315952], + [0.937990, 0.492667, 0.312575], + [0.940313, 0.497642, 0.309197], + [0.942598, 0.502639, 0.305816], + [0.944844, 0.507658, 0.302433], + [0.947051, 0.512699, 0.299049], + [0.949217, 0.517763, 0.295662], + [0.951344, 0.522850, 0.292275], + [0.953428, 0.527960, 0.288883], + [0.955470, 0.533093, 0.285490], + [0.957469, 0.538250, 0.282096], + [0.959424, 0.543431, 0.278701], + [0.961336, 0.548636, 0.275305], + [0.963203, 0.553865, 0.271909], + [0.965024, 0.559118, 0.268513], + [0.966798, 0.564396, 0.265118], + [0.968526, 0.569700, 0.261721], + [0.970205, 0.575028, 0.258325], + [0.971835, 0.580382, 0.254931], + [0.973416, 0.585761, 0.251540], + [0.974947, 0.591165, 0.248151], + [0.976428, 0.596595, 0.244767], + [0.977856, 0.602051, 0.241387], + [0.979233, 0.607532, 0.238013], + [0.980556, 0.613039, 0.234646], + [0.981826, 0.618572, 0.231287], + [0.983041, 0.624131, 0.227937], + [0.984199, 0.629718, 0.224595], + [0.985301, 0.635330, 0.221265], + [0.986345, 0.640969, 0.217948], + 
[0.987332, 0.646633, 0.214648], + [0.988260, 0.652325, 0.211364], + [0.989128, 0.658043, 0.208100], + [0.989935, 0.663787, 0.204859], + [0.990681, 0.669558, 0.201642], + [0.991365, 0.675355, 0.198453], + [0.991985, 0.681179, 0.195295], + [0.992541, 0.687030, 0.192170], + [0.993032, 0.692907, 0.189084], + [0.993456, 0.698810, 0.186041], + [0.993814, 0.704741, 0.183043], + [0.994103, 0.710698, 0.180097], + [0.994324, 0.716681, 0.177208], + [0.994474, 0.722691, 0.174381], + [0.994553, 0.728728, 0.171622], + [0.994561, 0.734791, 0.168938], + [0.994495, 0.740880, 0.166335], + [0.994355, 0.746995, 0.163821], + [0.994141, 0.753137, 0.161404], + [0.993851, 0.759304, 0.159092], + [0.993482, 0.765499, 0.156891], + [0.993033, 0.771720, 0.154808], + [0.992505, 0.777967, 0.152855], + [0.991897, 0.784239, 0.151042], + [0.991209, 0.790537, 0.149377], + [0.990439, 0.796859, 0.147870], + [0.989587, 0.803205, 0.146529], + [0.988648, 0.809579, 0.145357], + [0.987621, 0.815978, 0.144363], + [0.986509, 0.822401, 0.143557], + [0.985314, 0.828846, 0.142945], + [0.984031, 0.835315, 0.142528], + [0.982653, 0.841812, 0.142303], + [0.981190, 0.848329, 0.142279], + [0.979644, 0.854866, 0.142453], + [0.977995, 0.861432, 0.142808], + [0.976265, 0.868016, 0.143351], + [0.974443, 0.874622, 0.144061], + [0.972530, 0.881250, 0.144923], + [0.970533, 0.887896, 0.145919], + [0.968443, 0.894564, 0.147014], + [0.966271, 0.901249, 0.148180], + [0.964021, 0.907950, 0.149370], + [0.961681, 0.914672, 0.150520], + [0.959276, 0.921407, 0.151566], + [0.956808, 0.928152, 0.152409], + [0.954287, 0.934908, 0.152921], + [0.951726, 0.941671, 0.152925], + [0.949151, 0.948435, 0.152178], + [0.946602, 0.955190, 0.150328], + [0.944152, 0.961916, 0.146861], + [0.941896, 0.968590, 0.140956], + [0.940015, 0.975158, 0.131326]] + +_viridis_data = [[0.267004, 0.004874, 0.329415], + [0.268510, 0.009605, 0.335427], + [0.269944, 0.014625, 0.341379], + [0.271305, 0.019942, 0.347269], + [0.272594, 0.025563, 0.353093], + 
[0.273809, 0.031497, 0.358853], + [0.274952, 0.037752, 0.364543], + [0.276022, 0.044167, 0.370164], + [0.277018, 0.050344, 0.375715], + [0.277941, 0.056324, 0.381191], + [0.278791, 0.062145, 0.386592], + [0.279566, 0.067836, 0.391917], + [0.280267, 0.073417, 0.397163], + [0.280894, 0.078907, 0.402329], + [0.281446, 0.084320, 0.407414], + [0.281924, 0.089666, 0.412415], + [0.282327, 0.094955, 0.417331], + [0.282656, 0.100196, 0.422160], + [0.282910, 0.105393, 0.426902], + [0.283091, 0.110553, 0.431554], + [0.283197, 0.115680, 0.436115], + [0.283229, 0.120777, 0.440584], + [0.283187, 0.125848, 0.444960], + [0.283072, 0.130895, 0.449241], + [0.282884, 0.135920, 0.453427], + [0.282623, 0.140926, 0.457517], + [0.282290, 0.145912, 0.461510], + [0.281887, 0.150881, 0.465405], + [0.281412, 0.155834, 0.469201], + [0.280868, 0.160771, 0.472899], + [0.280255, 0.165693, 0.476498], + [0.279574, 0.170599, 0.479997], + [0.278826, 0.175490, 0.483397], + [0.278012, 0.180367, 0.486697], + [0.277134, 0.185228, 0.489898], + [0.276194, 0.190074, 0.493001], + [0.275191, 0.194905, 0.496005], + [0.274128, 0.199721, 0.498911], + [0.273006, 0.204520, 0.501721], + [0.271828, 0.209303, 0.504434], + [0.270595, 0.214069, 0.507052], + [0.269308, 0.218818, 0.509577], + [0.267968, 0.223549, 0.512008], + [0.266580, 0.228262, 0.514349], + [0.265145, 0.232956, 0.516599], + [0.263663, 0.237631, 0.518762], + [0.262138, 0.242286, 0.520837], + [0.260571, 0.246922, 0.522828], + [0.258965, 0.251537, 0.524736], + [0.257322, 0.256130, 0.526563], + [0.255645, 0.260703, 0.528312], + [0.253935, 0.265254, 0.529983], + [0.252194, 0.269783, 0.531579], + [0.250425, 0.274290, 0.533103], + [0.248629, 0.278775, 0.534556], + [0.246811, 0.283237, 0.535941], + [0.244972, 0.287675, 0.537260], + [0.243113, 0.292092, 0.538516], + [0.241237, 0.296485, 0.539709], + [0.239346, 0.300855, 0.540844], + [0.237441, 0.305202, 0.541921], + [0.235526, 0.309527, 0.542944], + [0.233603, 0.313828, 0.543914], + [0.231674, 0.318106, 
0.544834], + [0.229739, 0.322361, 0.545706], + [0.227802, 0.326594, 0.546532], + [0.225863, 0.330805, 0.547314], + [0.223925, 0.334994, 0.548053], + [0.221989, 0.339161, 0.548752], + [0.220057, 0.343307, 0.549413], + [0.218130, 0.347432, 0.550038], + [0.216210, 0.351535, 0.550627], + [0.214298, 0.355619, 0.551184], + [0.212395, 0.359683, 0.551710], + [0.210503, 0.363727, 0.552206], + [0.208623, 0.367752, 0.552675], + [0.206756, 0.371758, 0.553117], + [0.204903, 0.375746, 0.553533], + [0.203063, 0.379716, 0.553925], + [0.201239, 0.383670, 0.554294], + [0.199430, 0.387607, 0.554642], + [0.197636, 0.391528, 0.554969], + [0.195860, 0.395433, 0.555276], + [0.194100, 0.399323, 0.555565], + [0.192357, 0.403199, 0.555836], + [0.190631, 0.407061, 0.556089], + [0.188923, 0.410910, 0.556326], + [0.187231, 0.414746, 0.556547], + [0.185556, 0.418570, 0.556753], + [0.183898, 0.422383, 0.556944], + [0.182256, 0.426184, 0.557120], + [0.180629, 0.429975, 0.557282], + [0.179019, 0.433756, 0.557430], + [0.177423, 0.437527, 0.557565], + [0.175841, 0.441290, 0.557685], + [0.174274, 0.445044, 0.557792], + [0.172719, 0.448791, 0.557885], + [0.171176, 0.452530, 0.557965], + [0.169646, 0.456262, 0.558030], + [0.168126, 0.459988, 0.558082], + [0.166617, 0.463708, 0.558119], + [0.165117, 0.467423, 0.558141], + [0.163625, 0.471133, 0.558148], + [0.162142, 0.474838, 0.558140], + [0.160665, 0.478540, 0.558115], + [0.159194, 0.482237, 0.558073], + [0.157729, 0.485932, 0.558013], + [0.156270, 0.489624, 0.557936], + [0.154815, 0.493313, 0.557840], + [0.153364, 0.497000, 0.557724], + [0.151918, 0.500685, 0.557587], + [0.150476, 0.504369, 0.557430], + [0.149039, 0.508051, 0.557250], + [0.147607, 0.511733, 0.557049], + [0.146180, 0.515413, 0.556823], + [0.144759, 0.519093, 0.556572], + [0.143343, 0.522773, 0.556295], + [0.141935, 0.526453, 0.555991], + [0.140536, 0.530132, 0.555659], + [0.139147, 0.533812, 0.555298], + [0.137770, 0.537492, 0.554906], + [0.136408, 0.541173, 0.554483], + [0.135066, 
0.544853, 0.554029], + [0.133743, 0.548535, 0.553541], + [0.132444, 0.552216, 0.553018], + [0.131172, 0.555899, 0.552459], + [0.129933, 0.559582, 0.551864], + [0.128729, 0.563265, 0.551229], + [0.127568, 0.566949, 0.550556], + [0.126453, 0.570633, 0.549841], + [0.125394, 0.574318, 0.549086], + [0.124395, 0.578002, 0.548287], + [0.123463, 0.581687, 0.547445], + [0.122606, 0.585371, 0.546557], + [0.121831, 0.589055, 0.545623], + [0.121148, 0.592739, 0.544641], + [0.120565, 0.596422, 0.543611], + [0.120092, 0.600104, 0.542530], + [0.119738, 0.603785, 0.541400], + [0.119512, 0.607464, 0.540218], + [0.119423, 0.611141, 0.538982], + [0.119483, 0.614817, 0.537692], + [0.119699, 0.618490, 0.536347], + [0.120081, 0.622161, 0.534946], + [0.120638, 0.625828, 0.533488], + [0.121380, 0.629492, 0.531973], + [0.122312, 0.633153, 0.530398], + [0.123444, 0.636809, 0.528763], + [0.124780, 0.640461, 0.527068], + [0.126326, 0.644107, 0.525311], + [0.128087, 0.647749, 0.523491], + [0.130067, 0.651384, 0.521608], + [0.132268, 0.655014, 0.519661], + [0.134692, 0.658636, 0.517649], + [0.137339, 0.662252, 0.515571], + [0.140210, 0.665859, 0.513427], + [0.143303, 0.669459, 0.511215], + [0.146616, 0.673050, 0.508936], + [0.150148, 0.676631, 0.506589], + [0.153894, 0.680203, 0.504172], + [0.157851, 0.683765, 0.501686], + [0.162016, 0.687316, 0.499129], + [0.166383, 0.690856, 0.496502], + [0.170948, 0.694384, 0.493803], + [0.175707, 0.697900, 0.491033], + [0.180653, 0.701402, 0.488189], + [0.185783, 0.704891, 0.485273], + [0.191090, 0.708366, 0.482284], + [0.196571, 0.711827, 0.479221], + [0.202219, 0.715272, 0.476084], + [0.208030, 0.718701, 0.472873], + [0.214000, 0.722114, 0.469588], + [0.220124, 0.725509, 0.466226], + [0.226397, 0.728888, 0.462789], + [0.232815, 0.732247, 0.459277], + [0.239374, 0.735588, 0.455688], + [0.246070, 0.738910, 0.452024], + [0.252899, 0.742211, 0.448284], + [0.259857, 0.745492, 0.444467], + [0.266941, 0.748751, 0.440573], + [0.274149, 0.751988, 0.436601], + 
[0.281477, 0.755203, 0.432552], + [0.288921, 0.758394, 0.428426], + [0.296479, 0.761561, 0.424223], + [0.304148, 0.764704, 0.419943], + [0.311925, 0.767822, 0.415586], + [0.319809, 0.770914, 0.411152], + [0.327796, 0.773980, 0.406640], + [0.335885, 0.777018, 0.402049], + [0.344074, 0.780029, 0.397381], + [0.352360, 0.783011, 0.392636], + [0.360741, 0.785964, 0.387814], + [0.369214, 0.788888, 0.382914], + [0.377779, 0.791781, 0.377939], + [0.386433, 0.794644, 0.372886], + [0.395174, 0.797475, 0.367757], + [0.404001, 0.800275, 0.362552], + [0.412913, 0.803041, 0.357269], + [0.421908, 0.805774, 0.351910], + [0.430983, 0.808473, 0.346476], + [0.440137, 0.811138, 0.340967], + [0.449368, 0.813768, 0.335384], + [0.458674, 0.816363, 0.329727], + [0.468053, 0.818921, 0.323998], + [0.477504, 0.821444, 0.318195], + [0.487026, 0.823929, 0.312321], + [0.496615, 0.826376, 0.306377], + [0.506271, 0.828786, 0.300362], + [0.515992, 0.831158, 0.294279], + [0.525776, 0.833491, 0.288127], + [0.535621, 0.835785, 0.281908], + [0.545524, 0.838039, 0.275626], + [0.555484, 0.840254, 0.269281], + [0.565498, 0.842430, 0.262877], + [0.575563, 0.844566, 0.256415], + [0.585678, 0.846661, 0.249897], + [0.595839, 0.848717, 0.243329], + [0.606045, 0.850733, 0.236712], + [0.616293, 0.852709, 0.230052], + [0.626579, 0.854645, 0.223353], + [0.636902, 0.856542, 0.216620], + [0.647257, 0.858400, 0.209861], + [0.657642, 0.860219, 0.203082], + [0.668054, 0.861999, 0.196293], + [0.678489, 0.863742, 0.189503], + [0.688944, 0.865448, 0.182725], + [0.699415, 0.867117, 0.175971], + [0.709898, 0.868751, 0.169257], + [0.720391, 0.870350, 0.162603], + [0.730889, 0.871916, 0.156029], + [0.741388, 0.873449, 0.149561], + [0.751884, 0.874951, 0.143228], + [0.762373, 0.876424, 0.137064], + [0.772852, 0.877868, 0.131109], + [0.783315, 0.879285, 0.125405], + [0.793760, 0.880678, 0.120005], + [0.804182, 0.882046, 0.114965], + [0.814576, 0.883393, 0.110347], + [0.824940, 0.884720, 0.106217], + [0.835270, 0.886029, 
0.102646], + [0.845561, 0.887322, 0.099702], + [0.855810, 0.888601, 0.097452], + [0.866013, 0.889868, 0.095953], + [0.876168, 0.891125, 0.095250], + [0.886271, 0.892374, 0.095374], + [0.896320, 0.893616, 0.096335], + [0.906311, 0.894855, 0.098125], + [0.916242, 0.896091, 0.100717], + [0.926106, 0.897330, 0.104071], + [0.935904, 0.898570, 0.108131], + [0.945636, 0.899815, 0.112838], + [0.955300, 0.901065, 0.118128], + [0.964894, 0.902323, 0.123941], + [0.974417, 0.903590, 0.130215], + [0.983868, 0.904867, 0.136897], + [0.993248, 0.906157, 0.143936]] + +_cividis_data = [[0.000000, 0.135112, 0.304751], + [0.000000, 0.138068, 0.311105], + [0.000000, 0.141013, 0.317579], + [0.000000, 0.143951, 0.323982], + [0.000000, 0.146877, 0.330479], + [0.000000, 0.149791, 0.337065], + [0.000000, 0.152673, 0.343704], + [0.000000, 0.155377, 0.350500], + [0.000000, 0.157932, 0.357521], + [0.000000, 0.160495, 0.364534], + [0.000000, 0.163058, 0.371608], + [0.000000, 0.165621, 0.378769], + [0.000000, 0.168204, 0.385902], + [0.000000, 0.170800, 0.393100], + [0.000000, 0.173420, 0.400353], + [0.000000, 0.176082, 0.407577], + [0.000000, 0.178802, 0.414764], + [0.000000, 0.181610, 0.421859], + [0.000000, 0.184550, 0.428802], + [0.000000, 0.186915, 0.435532], + [0.000000, 0.188769, 0.439563], + [0.000000, 0.190950, 0.441085], + [0.000000, 0.193366, 0.441561], + [0.003602, 0.195911, 0.441564], + [0.017852, 0.198528, 0.441248], + [0.032110, 0.201199, 0.440785], + [0.046205, 0.203903, 0.440196], + [0.058378, 0.206629, 0.439531], + [0.068968, 0.209372, 0.438863], + [0.078624, 0.212122, 0.438105], + [0.087465, 0.214879, 0.437342], + [0.095645, 0.217643, 0.436593], + [0.103401, 0.220406, 0.435790], + [0.110658, 0.223170, 0.435067], + [0.117612, 0.225935, 0.434308], + [0.124291, 0.228697, 0.433547], + [0.130669, 0.231458, 0.432840], + [0.136830, 0.234216, 0.432148], + [0.142852, 0.236972, 0.431404], + [0.148638, 0.239724, 0.430752], + [0.154261, 0.242475, 0.430120], + [0.159733, 0.245221, 
0.429528], + [0.165113, 0.247965, 0.428908], + [0.170362, 0.250707, 0.428325], + [0.175490, 0.253444, 0.427790], + [0.180503, 0.256180, 0.427299], + [0.185453, 0.258914, 0.426788], + [0.190303, 0.261644, 0.426329], + [0.195057, 0.264372, 0.425924], + [0.199764, 0.267099, 0.425497], + [0.204385, 0.269823, 0.425126], + [0.208926, 0.272546, 0.424809], + [0.213431, 0.275266, 0.424480], + [0.217863, 0.277985, 0.424206], + [0.222264, 0.280702, 0.423914], + [0.226598, 0.283419, 0.423678], + [0.230871, 0.286134, 0.423498], + [0.235120, 0.288848, 0.423304], + [0.239312, 0.291562, 0.423167], + [0.243485, 0.294274, 0.423014], + [0.247605, 0.296986, 0.422917], + [0.251675, 0.299698, 0.422873], + [0.255731, 0.302409, 0.422814], + [0.259740, 0.305120, 0.422810], + [0.263738, 0.307831, 0.422789], + [0.267693, 0.310542, 0.422821], + [0.271639, 0.313253, 0.422837], + [0.275513, 0.315965, 0.422979], + [0.279411, 0.318677, 0.423031], + [0.283240, 0.321390, 0.423211], + [0.287065, 0.324103, 0.423373], + [0.290884, 0.326816, 0.423517], + [0.294669, 0.329531, 0.423716], + [0.298421, 0.332247, 0.423973], + [0.302169, 0.334963, 0.424213], + [0.305886, 0.337681, 0.424512], + [0.309601, 0.340399, 0.424790], + [0.313287, 0.343120, 0.425120], + [0.316941, 0.345842, 0.425512], + [0.320595, 0.348565, 0.425889], + [0.324250, 0.351289, 0.426250], + [0.327875, 0.354016, 0.426670], + [0.331474, 0.356744, 0.427144], + [0.335073, 0.359474, 0.427605], + [0.338673, 0.362206, 0.428053], + [0.342246, 0.364939, 0.428559], + [0.345793, 0.367676, 0.429127], + [0.349341, 0.370414, 0.429685], + [0.352892, 0.373153, 0.430226], + [0.356418, 0.375896, 0.430823], + [0.359916, 0.378641, 0.431501], + [0.363446, 0.381388, 0.432075], + [0.366923, 0.384139, 0.432796], + [0.370430, 0.386890, 0.433428], + [0.373884, 0.389646, 0.434209], + [0.377371, 0.392404, 0.434890], + [0.380830, 0.395164, 0.435653], + [0.384268, 0.397928, 0.436475], + [0.387705, 0.400694, 0.437305], + [0.391151, 0.403464, 0.438096], + [0.394568, 
0.406236, 0.438986], + [0.397991, 0.409011, 0.439848], + [0.401418, 0.411790, 0.440708], + [0.404820, 0.414572, 0.441642], + [0.408226, 0.417357, 0.442570], + [0.411607, 0.420145, 0.443577], + [0.414992, 0.422937, 0.444578], + [0.418383, 0.425733, 0.445560], + [0.421748, 0.428531, 0.446640], + [0.425120, 0.431334, 0.447692], + [0.428462, 0.434140, 0.448864], + [0.431817, 0.436950, 0.449982], + [0.435168, 0.439763, 0.451134], + [0.438504, 0.442580, 0.452341], + [0.441810, 0.445402, 0.453659], + [0.445148, 0.448226, 0.454885], + [0.448447, 0.451053, 0.456264], + [0.451759, 0.453887, 0.457582], + [0.455072, 0.456718, 0.458976], + [0.458366, 0.459552, 0.460457], + [0.461616, 0.462405, 0.461969], + [0.464947, 0.465241, 0.463395], + [0.468254, 0.468083, 0.464908], + [0.471501, 0.470960, 0.466357], + [0.474812, 0.473832, 0.467681], + [0.478186, 0.476699, 0.468845], + [0.481622, 0.479573, 0.469767], + [0.485141, 0.482451, 0.470384], + [0.488697, 0.485318, 0.471008], + [0.492278, 0.488198, 0.471453], + [0.495913, 0.491076, 0.471751], + [0.499552, 0.493960, 0.472032], + [0.503185, 0.496851, 0.472305], + [0.506866, 0.499743, 0.472432], + [0.510540, 0.502643, 0.472550], + [0.514226, 0.505546, 0.472640], + [0.517920, 0.508454, 0.472707], + [0.521643, 0.511367, 0.472639], + [0.525348, 0.514285, 0.472660], + [0.529086, 0.517207, 0.472543], + [0.532829, 0.520135, 0.472401], + [0.536553, 0.523067, 0.472352], + [0.540307, 0.526005, 0.472163], + [0.544069, 0.528948, 0.471947], + [0.547840, 0.531895, 0.471704], + [0.551612, 0.534849, 0.471439], + [0.555393, 0.537807, 0.471147], + [0.559181, 0.540771, 0.470829], + [0.562972, 0.543741, 0.470488], + [0.566802, 0.546715, 0.469988], + [0.570607, 0.549695, 0.469593], + [0.574417, 0.552682, 0.469172], + [0.578236, 0.555673, 0.468724], + [0.582087, 0.558670, 0.468118], + [0.585916, 0.561674, 0.467618], + [0.589753, 0.564682, 0.467090], + [0.593622, 0.567697, 0.466401], + [0.597469, 0.570718, 0.465821], + [0.601354, 0.573743, 0.465074], + 
[0.605211, 0.576777, 0.464441], + [0.609105, 0.579816, 0.463638], + [0.612977, 0.582861, 0.462950], + [0.616852, 0.585913, 0.462237], + [0.620765, 0.588970, 0.461351], + [0.624654, 0.592034, 0.460583], + [0.628576, 0.595104, 0.459641], + [0.632506, 0.598180, 0.458668], + [0.636412, 0.601264, 0.457818], + [0.640352, 0.604354, 0.456791], + [0.644270, 0.607450, 0.455886], + [0.648222, 0.610553, 0.454801], + [0.652178, 0.613664, 0.453689], + [0.656114, 0.616780, 0.452702], + [0.660082, 0.619904, 0.451534], + [0.664055, 0.623034, 0.450338], + [0.668008, 0.626171, 0.449270], + [0.671991, 0.629316, 0.448018], + [0.675981, 0.632468, 0.446736], + [0.679979, 0.635626, 0.445424], + [0.683950, 0.638793, 0.444251], + [0.687957, 0.641966, 0.442886], + [0.691971, 0.645145, 0.441491], + [0.695985, 0.648334, 0.440072], + [0.700008, 0.651529, 0.438624], + [0.704037, 0.654731, 0.437147], + [0.708067, 0.657942, 0.435647], + [0.712105, 0.661160, 0.434117], + [0.716177, 0.664384, 0.432386], + [0.720222, 0.667618, 0.430805], + [0.724274, 0.670859, 0.429194], + [0.728334, 0.674107, 0.427554], + [0.732422, 0.677364, 0.425717], + [0.736488, 0.680629, 0.424028], + [0.740589, 0.683900, 0.422131], + [0.744664, 0.687181, 0.420393], + [0.748772, 0.690470, 0.418448], + [0.752886, 0.693766, 0.416472], + [0.756975, 0.697071, 0.414659], + [0.761096, 0.700384, 0.412638], + [0.765223, 0.703705, 0.410587], + [0.769353, 0.707035, 0.408516], + [0.773486, 0.710373, 0.406422], + [0.777651, 0.713719, 0.404112], + [0.781795, 0.717074, 0.401966], + [0.785965, 0.720438, 0.399613], + [0.790116, 0.723810, 0.397423], + [0.794298, 0.727190, 0.395016], + [0.798480, 0.730580, 0.392597], + [0.802667, 0.733978, 0.390153], + [0.806859, 0.737385, 0.387684], + [0.811054, 0.740801, 0.385198], + [0.815274, 0.744226, 0.382504], + [0.819499, 0.747659, 0.379785], + [0.823729, 0.751101, 0.377043], + [0.827959, 0.754553, 0.374292], + [0.832192, 0.758014, 0.371529], + [0.836429, 0.761483, 0.368747], + [0.840693, 0.764962, 
0.365746], + [0.844957, 0.768450, 0.362741], + [0.849223, 0.771947, 0.359729], + [0.853515, 0.775454, 0.356500], + [0.857809, 0.778969, 0.353259], + [0.862105, 0.782494, 0.350011], + [0.866421, 0.786028, 0.346571], + [0.870717, 0.789572, 0.343333], + [0.875057, 0.793125, 0.339685], + [0.879378, 0.796687, 0.336241], + [0.883720, 0.800258, 0.332599], + [0.888081, 0.803839, 0.328770], + [0.892440, 0.807430, 0.324968], + [0.896818, 0.811030, 0.320982], + [0.901195, 0.814639, 0.317021], + [0.905589, 0.818257, 0.312889], + [0.910000, 0.821885, 0.308594], + [0.914407, 0.825522, 0.304348], + [0.918828, 0.829168, 0.299960], + [0.923279, 0.832822, 0.295244], + [0.927724, 0.836486, 0.290611], + [0.932180, 0.840159, 0.285880], + [0.936660, 0.843841, 0.280876], + [0.941147, 0.847530, 0.275815], + [0.945654, 0.851228, 0.270532], + [0.950178, 0.854933, 0.265085], + [0.954725, 0.858646, 0.259365], + [0.959284, 0.862365, 0.253563], + [0.963872, 0.866089, 0.247445], + [0.968469, 0.869819, 0.241310], + [0.973114, 0.873550, 0.234677], + [0.977780, 0.877281, 0.227954], + [0.982497, 0.881008, 0.220878], + [0.987293, 0.884718, 0.213336], + [0.992218, 0.888385, 0.205468], + [0.994847, 0.892954, 0.203445], + [0.995249, 0.898384, 0.207561], + [0.995503, 0.903866, 0.212370], + [0.995737, 0.909344, 0.217772]] + +_twilight_data = [ + [0.88575015840754434, 0.85000924943067835, 0.8879736506427196], + [0.88378520195539056, 0.85072940540310626, 0.88723222096949894], + [0.88172231059285788, 0.85127594077653468, 0.88638056925514819], + [0.8795410528270573, 0.85165675407495722, 0.8854143767924102], + [0.87724880858965482, 0.85187028338870274, 0.88434120381311432], + [0.87485347508575972, 0.85191526123023187, 0.88316926967613829], + [0.87233134085124076, 0.85180165478080894, 0.88189704355001619], + [0.86970474853509816, 0.85152403004797894, 0.88053883390003362], + [0.86696015505333579, 0.8510896085314068, 0.87909766977173343], + [0.86408985081463996, 0.85050391167507788, 0.87757925784892632], + 
[0.86110245436899846, 0.84976754857001258, 0.87599242923439569], + [0.85798259245670372, 0.84888934810281835, 0.87434038553446281], + [0.85472593189256985, 0.84787488124672816, 0.8726282980930582], + [0.85133714570857189, 0.84672735796116472, 0.87086081657350445], + [0.84780710702577922, 0.8454546229209523, 0.86904036783694438], + [0.8441261828674842, 0.84406482711037389, 0.86716973322690072], + [0.84030420805957784, 0.8425605950855084, 0.865250882410458], + [0.83634031809191178, 0.84094796518951942, 0.86328528001070159], + [0.83222705712934408, 0.83923490627754482, 0.86127563500427884], + [0.82796894316013536, 0.83742600751395202, 0.85922399451306786], + [0.82357429680252847, 0.83552487764795436, 0.85713191328514948], + [0.81904654677937527, 0.8335364929949034, 0.85500206287010105], + [0.81438982121143089, 0.83146558694197847, 0.85283759062147024], + [0.8095999819094809, 0.82931896673505456, 0.85064441601050367], + [0.80469164429814577, 0.82709838780560663, 0.84842449296974021], + [0.79967075421267997, 0.82480781812080928, 0.84618210029578533], + [0.79454305089231114, 0.82245116226304615, 0.84392184786827984], + [0.78931445564608915, 0.82003213188702007, 0.8416486380471222], + [0.78399101042764918, 0.81755426400533426, 0.83936747464036732], + [0.77857892008227592, 0.81502089378742548, 0.8370834463093898], + [0.77308416590170936, 0.81243524735466011, 0.83480172950579679], + [0.76751108504417864, 0.8098007598713145, 0.83252816638059668], + [0.76186907937980286, 0.80711949387647486, 0.830266486168872], + [0.75616443584381976, 0.80439408733477935, 0.82802138994719998], + [0.75040346765406696, 0.80162699008965321, 0.82579737851082424], + [0.74459247771890169, 0.79882047719583249, 0.82359867586156521], + [0.73873771700494939, 0.79597665735031009, 0.82142922780433014], + [0.73284543645523459, 0.79309746468844067, 0.81929263384230377], + [0.72692177512829703, 0.7901846863592763, 0.81719217466726379], + [0.72097280665536778, 0.78723995923452639, 0.81513073920879264], + 
[0.71500403076252128, 0.78426487091581187, 0.81311116559949914], + [0.70902078134539304, 0.78126088716070907, 0.81113591855117928], + [0.7030297722540817, 0.77822904973358131, 0.80920618848056969], + [0.6970365443886174, 0.77517050008066057, 0.80732335380063447], + [0.69104641009309098, 0.77208629460678091, 0.80548841690679074], + [0.68506446154395928, 0.7689774029354699, 0.80370206267176914], + [0.67909554499882152, 0.76584472131395898, 0.8019646617300199], + [0.67314422559426212, 0.76268908733890484, 0.80027628545809526], + [0.66721479803752815, 0.7595112803730375, 0.79863674654537764], + [0.6613112930078745, 0.75631202708719025, 0.7970456043491897], + [0.65543692326454717, 0.75309208756768431, 0.79550271129031047], + [0.64959573004253479, 0.74985201221941766, 0.79400674021499107], + [0.6437910831099849, 0.7465923800833657, 0.79255653201306053], + [0.63802586828545982, 0.74331376714033193, 0.79115100459573173], + [0.6323027138710603, 0.74001672160131404, 0.78978892762640429], + [0.62662402022604591, 0.73670175403699445, 0.78846901316334561], + [0.62099193064817548, 0.73336934798923203, 0.78718994624696581], + [0.61540846411770478, 0.73001995232739691, 0.78595022706750484], + [0.60987543176093062, 0.72665398759758293, 0.78474835732694714], + [0.60439434200274855, 0.7232718614323369, 0.78358295593535587], + [0.5989665814482068, 0.71987394892246725, 0.78245259899346642], + [0.59359335696837223, 0.7164606049658685, 0.78135588237640097], + [0.58827579780555495, 0.71303214646458135, 0.78029141405636515], + [0.58301487036932409, 0.70958887676997473, 0.77925781820476592], + [0.5778116438998202, 0.70613106157153982, 0.77825345121025524], + [0.5726668948158774, 0.7026589535425779, 0.77727702680911992], + [0.56758117853861967, 0.69917279302646274, 0.77632748534275298], + [0.56255515357219343, 0.69567278381629649, 0.77540359142309845], + [0.55758940419605174, 0.69215911458254054, 0.7745041337932782], + [0.55268450589347129, 0.68863194515166382, 0.7736279426902245], + 
[0.54784098153018634, 0.68509142218509878, 0.77277386473440868], + [0.54305932424018233, 0.68153767253065878, 0.77194079697835083], + [0.53834015575176275, 0.67797081129095405, 0.77112734439057717], + [0.53368389147728401, 0.67439093705212727, 0.7703325054879735], + [0.529090861832473, 0.67079812302806219, 0.76955552292313134], + [0.52456151470593582, 0.66719242996142225, 0.76879541714230948], + [0.52009627392235558, 0.66357391434030388, 0.76805119403344102], + [0.5156955988596057, 0.65994260812897998, 0.76732191489596169], + [0.51135992541601927, 0.65629853981831865, 0.76660663780645333], + [0.50708969576451657, 0.65264172403146448, 0.76590445660835849], + [0.5028853540415561, 0.64897216734095264, 0.76521446718174913], + [0.49874733661356069, 0.6452898684900934, 0.76453578734180083], + [0.4946761847863938, 0.64159484119504429, 0.76386719002130909], + [0.49067224938561221, 0.63788704858847078, 0.76320812763163837], + [0.4867359599430568, 0.63416646251100506, 0.76255780085924041], + [0.4828677867260272, 0.6304330455306234, 0.76191537149895305], + [0.47906816236197386, 0.62668676251860134, 0.76128000375662419], + [0.47533752394906287, 0.62292757283835809, 0.76065085571817748], + [0.47167629518877091, 0.61915543242884641, 0.76002709227883047], + [0.46808490970531597, 0.61537028695790286, 0.75940789891092741], + [0.46456376716303932, 0.61157208822864151, 0.75879242623025811], + [0.46111326647023881, 0.607760777169989, 0.75817986436807139], + [0.45773377230160567, 0.60393630046586455, 0.75756936901859162], + [0.45442563977552913, 0.60009859503858665, 0.75696013660606487], + [0.45118918687617743, 0.59624762051353541, 0.75635120643246645], + [0.44802470933589172, 0.59238331452146575, 0.75574176474107924], + [0.44493246854215379, 0.5885055998308617, 0.7551311041857901], + [0.44191271766696399, 0.58461441100175571, 0.75451838884410671], + [0.43896563958048396, 0.58070969241098491, 0.75390276208285945], + [0.43609138958356369, 0.57679137998186081, 0.7532834105961016], + 
[0.43329008867358393, 0.57285941625606673, 0.75265946532566674], + [0.43056179073057571, 0.56891374572457176, 0.75203008099312696], + [0.42790652284925834, 0.5649543060909209, 0.75139443521914839], + [0.42532423665011354, 0.56098104959950301, 0.75075164989005116], + [0.42281485675772662, 0.55699392126996583, 0.75010086988227642], + [0.42037822361396326, 0.55299287158108168, 0.7494412559451894], + [0.41801414079233629, 0.54897785421888889, 0.74877193167001121], + [0.4157223260454232, 0.54494882715350401, 0.74809204459000522], + [0.41350245743314729, 0.54090574771098476, 0.74740073297543086], + [0.41135414697304568, 0.53684857765005933, 0.74669712855065784], + [0.4092768899914751, 0.53277730177130322, 0.74598030635707824], + [0.40727018694219069, 0.52869188011057411, 0.74524942637581271], + [0.40533343789303178, 0.52459228174983119, 0.74450365836708132], + [0.40346600333905397, 0.52047847653840029, 0.74374215223567086], + [0.40166714010896104, 0.51635044969688759, 0.7429640345324835], + [0.39993606933454834, 0.51220818143218516, 0.74216844571317986], + [0.3982719152586337, 0.50805166539276136, 0.74135450918099721], + [0.39667374905665609, 0.50388089053847973, 0.74052138580516735], + [0.39514058808207631, 0.49969585326377758, 0.73966820211715711], + [0.39367135736822567, 0.49549655777451179, 0.738794102296364], + [0.39226494876209317, 0.49128300332899261, 0.73789824784475078], + [0.39092017571994903, 0.48705520251223039, 0.73697977133881254], + [0.38963580160340855, 0.48281316715123496, 0.73603782546932739], + [0.38841053300842432, 0.47855691131792805, 0.73507157641157261], + [0.38724301459330251, 0.47428645933635388, 0.73408016787854391], + [0.38613184178892102, 0.4700018340988123, 0.7330627749243106], + [0.38507556793651387, 0.46570306719930193, 0.73201854033690505], + [0.38407269378943537, 0.46139018782416635, 0.73094665432902683], + [0.38312168084402748, 0.45706323581407199, 0.72984626791353258], + [0.38222094988570376, 0.45272225034283325, 0.72871656144003782], + 
[0.38136887930454161, 0.44836727669277859, 0.72755671317141346], + [0.38056380696565623, 0.44399837208633719, 0.72636587045135315], + [0.37980403744848751, 0.43961558821222629, 0.72514323778761092], + [0.37908789283110761, 0.43521897612544935, 0.72388798691323131], + [0.378413635091359, 0.43080859411413064, 0.72259931993061044], + [0.37777949753513729, 0.4263845142616835, 0.72127639993530235], + [0.37718371844251231, 0.42194680223454828, 0.71991841524475775], + [0.37662448930806297, 0.41749553747893614, 0.71852454736176108], + [0.37610001286385814, 0.41303079952477062, 0.71709396919920232], + [0.37560846919442398, 0.40855267638072096, 0.71562585091587549], + [0.37514802505380473, 0.4040612609993941, 0.7141193695725726], + [0.37471686019302231, 0.3995566498711684, 0.71257368516500463], + [0.37431313199312338, 0.39503894828283309, 0.71098796522377461], + [0.37393499330475782, 0.39050827529375831, 0.70936134293478448], + [0.3735806215098284, 0.38596474386057539, 0.70769297607310577], + [0.37324816143326384, 0.38140848555753937, 0.70598200974806036], + [0.37293578646665032, 0.37683963835219841, 0.70422755780589941], + [0.37264166757849604, 0.37225835004836849, 0.7024287314570723], + [0.37236397858465387, 0.36766477862108266, 0.70058463496520773], + [0.37210089702443822, 0.36305909736982378, 0.69869434615073722], + [0.3718506155898596, 0.35844148285875221, 0.69675695810256544], + [0.37161133234400479, 0.3538121372967869, 0.69477149919380887], + [0.37138124223736607, 0.34917126878479027, 0.69273703471928827], + [0.37115856636209105, 0.34451911410230168, 0.69065253586464992], + [0.37094151551337329, 0.33985591488818123, 0.68851703379505125], + [0.37072833279422668, 0.33518193808489577, 0.68632948169606767], + [0.37051738634484427, 0.33049741244307851, 0.68408888788857214], + [0.37030682071842685, 0.32580269697872455, 0.68179411684486679], + [0.37009487130772695, 0.3210981375964933, 0.67944405399056851], + [0.36987980329025361, 0.31638410101153364, 0.67703755438090574], + 
[0.36965987626565955, 0.31166098762951971, 0.67457344743419545], + [0.36943334591276228, 0.30692923551862339, 0.67205052849120617], + [0.36919847837592484, 0.30218932176507068, 0.66946754331614522], + [0.36895355306596778, 0.29744175492366276, 0.66682322089824264], + [0.36869682231895268, 0.29268709856150099, 0.66411625298236909], + [0.36842655638020444, 0.28792596437778462, 0.66134526910944602], + [0.36814101479899719, 0.28315901221182987, 0.65850888806972308], + [0.36783843696531082, 0.27838697181297761, 0.65560566838453704], + [0.36751707094367697, 0.27361063317090978, 0.65263411711618635], + [0.36717513650699446, 0.26883085667326956, 0.64959272297892245], + [0.36681085540107988, 0.26404857724525643, 0.64647991652908243], + [0.36642243251550632, 0.25926481158628106, 0.64329409140765537], + [0.36600853966739794, 0.25448043878086224, 0.64003361803368586], + [0.36556698373538982, 0.24969683475296395, 0.63669675187488584], + [0.36509579845886808, 0.24491536803550484, 0.63328173520055586], + [0.36459308890125008, 0.24013747024823828, 0.62978680155026101], + [0.36405693022088509, 0.23536470386204195, 0.62621013451953023], + [0.36348537610385145, 0.23059876218396419, 0.62254988622392882], + [0.36287643560041027, 0.22584149293287031, 0.61880417410823019], + [0.36222809558295926, 0.22109488427338303, 0.61497112346096128], + [0.36153829010998356, 0.21636111429594002, 0.61104880679640927], + [0.36080493826624654, 0.21164251793458128, 0.60703532172064711], + [0.36002681809096376, 0.20694122817889948, 0.60292845431916875], + [0.35920088560930186, 0.20226037920758122, 0.5987265295935138], + [0.35832489966617809, 0.197602942459778, 0.59442768517501066], + [0.35739663292915563, 0.19297208197842461, 0.59003011251063131], + [0.35641381143126327, 0.18837119869242164, 0.5855320765920552], + [0.35537415306906722, 0.18380392577704466, 0.58093191431832802], + [0.35427534960663759, 0.17927413271618647, 0.57622809660668717], + [0.35311574421123737, 0.17478570377561287, 
0.57141871523555288], + [0.35189248608873791, 0.17034320478524959, 0.56650284911216653], + [0.35060304441931012, 0.16595129984720861, 0.56147964703993225], + [0.34924513554955644, 0.16161477763045118, 0.55634837474163779], + [0.34781653238777782, 0.15733863511152979, 0.55110853452703257], + [0.34631507175793091, 0.15312802296627787, 0.5457599924248665], + [0.34473901574536375, 0.14898820589826409, 0.54030245920406539], + [0.34308600291572294, 0.14492465359918028, 0.53473704282067103], + [0.34135411074506483, 0.1409427920655632, 0.52906500940336754], + [0.33954168752669694, 0.13704801896718169, 0.52328797535085236], + [0.33764732090671112, 0.13324562282438077, 0.51740807573979475], + [0.33566978565015315, 0.12954074251271822, 0.51142807215168951], + [0.33360804901486002, 0.12593818301005921, 0.50535164796654897], + [0.33146154891145124, 0.12244245263391232, 0.49918274588431072], + [0.32923005203231409, 0.11905764321981127, 0.49292595612342666], + [0.3269137124539796, 0.1157873496841953, 0.48658646495697461], + [0.32451307931207785, 0.11263459791730848, 0.48017007211645196], + [0.32202882276069322, 0.10960114111258401, 0.47368494725726878], + [0.31946262395497965, 0.10668879882392659, 0.46713728801395243], + [0.31681648089023501, 0.10389861387653518, 0.46053414662739794], + [0.31409278414755532, 0.10123077676403242, 0.45388335612058467], + [0.31129434479712365, 0.098684771934052201, 0.44719313715161618], + [0.30842444457210105, 0.096259385340577736, 0.44047194882050544], + [0.30548675819945936, 0.093952764840823738, 0.43372849999361113], + [0.30248536364574252, 0.091761187397303601, 0.42697404043749887], + [0.29942483960214772, 0.089682253716750038, 0.42021619665853854], + [0.29631000388905288, 0.087713250960463951, 0.41346259134143476], + [0.29314593096985248, 0.085850656889620708, 0.40672178082365834], + [0.28993792445176608, 0.08409078829085731, 0.40000214725256295], + [0.28669151388283165, 0.082429873848480689, 0.39331182532243375], + [0.28341239797185225, 
0.080864153365499375, 0.38665868550105914], + [0.28010638576975472, 0.079389994802261526, 0.38005028528138707], + [0.27677939615815589, 0.078003941033788216, 0.37349382846504675], + [0.27343739342450812, 0.076702800237496066, 0.36699616136347685], + [0.27008637749114051, 0.075483675584275545, 0.36056376228111864], + [0.26673233211995284, 0.074344018028546205, 0.35420276066240958], + [0.26338121807151404, 0.073281657939897077, 0.34791888996380105], + [0.26003895187439957, 0.072294781043362205, 0.3417175669546984], + [0.25671191651083902, 0.071380106242082242, 0.33560648984600089], + [0.25340685873736807, 0.070533582926851829, 0.3295945757321303], + [0.25012845306199383, 0.069758206429106989, 0.32368100685760637], + [0.24688226237958999, 0.069053639449204451, 0.31786993834254956], + [0.24367372557466271, 0.068419855150922693, 0.31216524050888372], + [0.24050813332295939, 0.067857103814855602, 0.30657054493678321], + [0.23739062429054825, 0.067365888050555517, 0.30108922184065873], + [0.23433055727563878, 0.066935599661639394, 0.29574009929867601], + [0.23132955273021344, 0.066576186939090592, 0.29051361067988485], + [0.2283917709422868, 0.06628997924139618, 0.28541074411068496], + [0.22552164337737857, 0.066078173119395595, 0.28043398847505197], + [0.22272706739121817, 0.065933790675651943, 0.27559714652053702], + [0.22001251100779617, 0.065857918918907604, 0.27090279994325861], + [0.21737845072382705, 0.065859661233562045, 0.26634209349669508], + [0.21482843531473683, 0.065940385613778491, 0.26191675992376573], + [0.21237411048541005, 0.066085024661758446, 0.25765165093569542], + [0.21001214221188125, 0.066308573918947178, 0.2535289048041211], + [0.2077442377448806, 0.06661453200418091, 0.24954644291943817], + [0.20558051999470117, 0.066990462397868739, 0.24572497420147632], + [0.20352007949514977, 0.067444179612424215, 0.24205576625191821], + [0.20156133764129841, 0.067983271026200248, 0.23852974228695395], + [0.19971571438603364, 0.068592710553704722, 
0.23517094067076993], + [0.19794834061899208, 0.069314066071660657, 0.23194647381302336], + [0.1960826032659409, 0.070321227242423623, 0.22874673279569585], + [0.19410351363791453, 0.071608304856891569, 0.22558727307410353], + [0.19199449184606268, 0.073182830649273306, 0.22243385243433622], + [0.18975853639094634, 0.075019861862143766, 0.2193005075652994], + [0.18739228342697645, 0.077102096899588329, 0.21618875376309582], + [0.18488035509396164, 0.079425730279723883, 0.21307651648984993], + [0.18774482037046955, 0.077251588468039312, 0.21387448578597812], + [0.19049578401722037, 0.075311278416787641, 0.2146562337112265], + [0.1931548636579131, 0.073606819040117955, 0.21542362939081539], + [0.19571853588267552, 0.072157781039602742, 0.21617499187076789], + [0.19819343656336558, 0.070974625252738788, 0.21690975060032436], + [0.20058760685133747, 0.070064576149984209, 0.21762721310371608], + [0.20290365333558247, 0.069435248580458964, 0.21833167885096033], + [0.20531725273301316, 0.068919592266397572, 0.21911516689288835], + [0.20785704662965598, 0.068484398797025281, 0.22000133917653536], + [0.21052882914958676, 0.06812195249816172, 0.22098759107715404], + [0.2133313859647627, 0.067830148426026665, 0.22207043213024291], + [0.21625279838647882, 0.067616330270516389, 0.22324568672294431], + [0.21930503925136402, 0.067465786362940039, 0.22451023616807558], + [0.22247308588973624, 0.067388214053092838, 0.22585960379408354], + [0.2257539681670791, 0.067382132300147474, 0.22728984778098055], + [0.22915620278592841, 0.067434730871152565, 0.22879681433956656], + [0.23266299920501882, 0.067557104388479783, 0.23037617493752832], + [0.23627495835774248, 0.06774359820987802, 0.23202360805926608], + [0.23999586188690308, 0.067985029964779953, 0.23373434258507808], + [0.24381149720247919, 0.068289851529011875, 0.23550427698321885], + [0.24772092990501099, 0.068653337909486523, 0.2373288009471749], + [0.25172899728289466, 0.069064630826035506, 0.23920260612763083], + 
[0.25582135547481771, 0.06953231029187984, 0.24112190491594204], + [0.25999463887892144, 0.070053855603861875, 0.24308218808684579], + [0.26425512207060942, 0.070616595622995437, 0.24507758869355967], + [0.26859095948172862, 0.071226716277922458, 0.24710443563450618], + [0.27299701518897301, 0.071883555446163511, 0.24915847093232929], + [0.27747150809142801, 0.072582969899254779, 0.25123493995942769], + [0.28201746297366942, 0.073315693214040967, 0.25332800295084507], + [0.28662309235899847, 0.074088460826808866, 0.25543478673717029], + [0.29128515387578635, 0.074899049847466703, 0.25755101595750435], + [0.2960004726065818, 0.075745336000958424, 0.25967245030364566], + [0.30077276812918691, 0.076617824336164764, 0.26179294097819672], + [0.30559226007249934, 0.077521963107537312, 0.26391006692119662], + [0.31045520848595526, 0.078456871676182177, 0.2660200572779356], + [0.31535870009205808, 0.079420997315243186, 0.26811904076941961], + [0.32029986557994061, 0.080412994737554838, 0.27020322893039511], + [0.32527888860401261, 0.081428390076546092, 0.27226772884656186], + [0.33029174471181438, 0.08246763389003825, 0.27430929404579435], + [0.33533353224455448, 0.083532434119003962, 0.27632534356790039], + [0.34040164359597463, 0.084622236191702671, 0.27831254595259397], + [0.34549355713871799, 0.085736654965126335, 0.28026769921081435], + [0.35060678246032478, 0.08687555176033529, 0.28218770540182386], + [0.35573889947341125, 0.088038974350243354, 0.2840695897279818], + [0.36088752387578377, 0.089227194362745205, 0.28591050458531014], + [0.36605031412464006, 0.090440685427697898, 0.2877077458811747], + [0.37122508431309342, 0.091679997480262732, 0.28945865397633169], + [0.3764103053221462, 0.092945198093777909, 0.29116024157313919], + [0.38160247377467543, 0.094238731263712183, 0.29281107506269488], + [0.38679939079544168, 0.09556181960083443, 0.29440901248173756], + [0.39199887556812907, 0.09691583650296684, 0.29595212005509081], + [0.39719876876325577, 
0.098302320968278623, 0.29743856476285779], + [0.40239692379737496, 0.099722930314950553, 0.29886674369733968], + [0.40759120392688708, 0.10117945586419633, 0.30023519507728602], + [0.41277985630360303, 0.1026734006932461, 0.30154226437468967], + [0.41796105205173684, 0.10420644885760968, 0.30278652039631843], + [0.42313214269556043, 0.10578120994917611, 0.3039675809469457], + [0.42829101315789753, 0.1073997763055258, 0.30508479060294547], + [0.4334355841041439, 0.1090642347484701, 0.30613767928289148], + [0.43856378187931538, 0.11077667828375456, 0.30712600062348083], + [0.44367358645071275, 0.11253912421257944, 0.30804973095465449], + [0.44876299173174822, 0.11435355574622549, 0.30890905921943196], + [0.45383005086999889, 0.11622183788331528, 0.30970441249844921], + [0.45887288947308297, 0.11814571137706886, 0.31043636979038808], + [0.46389102840284874, 0.12012561256850712, 0.31110343446582983], + [0.46888111384598413, 0.12216445576414045, 0.31170911458932665], + [0.473841437035254, 0.12426354237989065, 0.31225470169927194], + [0.47877034239726296, 0.12642401401409453, 0.31274172735821959], + [0.48366628618847957, 0.12864679022013889, 0.31317188565991266], + [0.48852847371852987, 0.13093210934893723, 0.31354553695453014], + [0.49335504375145617, 0.13328091630401023, 0.31386561956734976], + [0.49814435462074153, 0.13569380302451714, 0.314135190862664], + [0.50289524974970612, 0.13817086581280427, 0.31435662153833671], + [0.50760681181053691, 0.14071192654913128, 0.31453200120082569], + [0.51227835105321762, 0.14331656120063752, 0.3146630922831542], + [0.51690848800544464, 0.14598463068714407, 0.31475407592280041], + [0.52149652863229956, 0.14871544765633712, 0.31480767954534428], + [0.52604189625477482, 0.15150818660835483, 0.31482653406646727], + [0.53054420489856446, 0.15436183633886777, 0.31481299789187128], + [0.5350027976174474, 0.15727540775107324, 0.31477085207396532], + [0.53941736649199057, 0.16024769309971934, 0.31470295028655965], + 
[0.54378771313608565, 0.16327738551419116, 0.31461204226295625], + [0.54811370033467621, 0.1663630904279047, 0.31450102990914708], + [0.55239521572711914, 0.16950338809328983, 0.31437291554615371], + [0.55663229034969341, 0.17269677158182117, 0.31423043195101424], + [0.56082499039117173, 0.17594170887918095, 0.31407639883970623], + [0.56497343529017696, 0.17923664950367169, 0.3139136046337036], + [0.56907784784011428, 0.18258004462335425, 0.31374440956796529], + [0.57313845754107873, 0.18597036007065024, 0.31357126868520002], + [0.57715550812992045, 0.18940601489760422, 0.31339704333572083], + [0.58112932761586555, 0.19288548904692518, 0.31322399394183942], + [0.58506024396466882, 0.19640737049066315, 0.31305401163732732], + [0.58894861935544707, 0.19997020971775276, 0.31288922211590126], + [0.59279480536520257, 0.20357251410079796, 0.31273234839304942], + [0.59659918109122367, 0.207212956082026, 0.31258523031121233], + [0.60036213010411577, 0.21089030138947745, 0.31244934410414688], + [0.60408401696732739, 0.21460331490206347, 0.31232652641170694], + [0.60776523994818654, 0.21835070166659282, 0.31221903291870201], + [0.6114062072731884, 0.22213124697023234, 0.31212881396435238], + [0.61500723236391375, 0.22594402043981826, 0.31205680685765741], + [0.61856865258877192, 0.22978799249179921, 0.31200463838728931], + [0.62209079821082613, 0.2336621873300741, 0.31197383273627388], + [0.62557416500434959, 0.23756535071152696, 0.31196698314912269], + [0.62901892016985872, 0.24149689191922535, 0.31198447195645718], + [0.63242534854210275, 0.24545598775548677, 0.31202765974624452], + [0.6357937104834237, 0.24944185818822678, 0.31209793953300591], + [0.6391243387840212, 0.25345365461983138, 0.31219689612063978], + [0.642417577481186, 0.257490519876798, 0.31232631707560987], + [0.64567349382645434, 0.26155203161615281, 0.31248673753935263], + [0.64889230169458245, 0.26563755336209077, 0.31267941819570189], + [0.65207417290277303, 0.26974650525236699, 0.31290560605819168], + 
[0.65521932609327127, 0.27387826652410152, 0.3131666792687211], + [0.6583280801134499, 0.27803210957665631, 0.3134643447952643], + [0.66140037532601781, 0.28220778870555907, 0.31379912926498488], + [0.66443632469878844, 0.28640483614256179, 0.31417223403606975], + [0.66743603766369131, 0.29062280081258873, 0.31458483752056837], + [0.67039959547676198, 0.29486126309253047, 0.31503813956872212], + [0.67332725564817331, 0.29911962764489264, 0.31553372323982209], + [0.67621897924409746, 0.30339762792450425, 0.3160724937230589], + [0.67907474028157344, 0.30769497879760166, 0.31665545668946665], + [0.68189457150944521, 0.31201133280550686, 0.31728380489244951], + [0.68467850942494535, 0.31634634821222207, 0.31795870784057567], + [0.68742656435169625, 0.32069970535138104, 0.31868137622277692], + [0.6901389321505248, 0.32507091815606004, 0.31945332332898302], + [0.69281544846764931, 0.32945984647042675, 0.3202754315314667], + [0.69545608346891119, 0.33386622163232865, 0.32114884306985791], + [0.6980608153581771, 0.33828976326048621, 0.32207478855218091], + [0.70062962477242097, 0.34273019305341756, 0.32305449047765694], + [0.70316249458814151, 0.34718723719597999, 0.32408913679491225], + [0.70565951122610093, 0.35166052978120937, 0.32518014084085567], + [0.70812059568420482, 0.35614985523380299, 0.32632861885644465], + [0.7105456546582587, 0.36065500290840113, 0.32753574162788762], + [0.71293466839773467, 0.36517570519856757, 0.3288027427038317], + [0.71528760614847287, 0.36971170225223449, 0.3301308728723546], + [0.71760444908133847, 0.37426272710686193, 0.33152138620958932], + [0.71988521490549851, 0.37882848839337313, 0.33297555200245399], + [0.7221299918421461, 0.38340864508963057, 0.33449469983585844], + [0.72433865647781592, 0.38800301593162145, 0.33607995965691828], + [0.72651122900227549, 0.3926113126792577, 0.3377325942005665], + [0.72864773856716547, 0.39723324476747235, 0.33945384341064017], + [0.73074820754845171, 0.401868526884681, 0.3412449533046818], + 
[0.73281270506268747, 0.4065168468778026, 0.34310715173410822], + [0.73484133598564938, 0.41117787004519513, 0.34504169470809071], + [0.73683422173585866, 0.41585125850290111, 0.34704978520758401], + [0.73879140024599266, 0.42053672992315327, 0.34913260148542435], + [0.74071301619506091, 0.4252339389526239, 0.35129130890802607], + [0.7425992159973317, 0.42994254036133867, 0.35352709245374592], + [0.74445018676570673, 0.43466217184617112, 0.35584108091122535], + [0.74626615789163442, 0.43939245044973502, 0.35823439142300639], + [0.74804739275559562, 0.44413297780351974, 0.36070813602540136], + [0.74979420547170472, 0.44888333481548809, 0.36326337558360278], + [0.75150685045891663, 0.45364314496866825, 0.36590112443835765], + [0.75318566369046569, 0.45841199172949604, 0.36862236642234769], + [0.75483105066959544, 0.46318942799460555, 0.3714280448394211], + [0.75644341577140706, 0.46797501437948458, 0.37431909037543515], + [0.75802325538455839, 0.4727682731566229, 0.37729635531096678], + [0.75957111105340058, 0.47756871222057079, 0.380360657784311], + [0.7610876378057071, 0.48237579130289127, 0.38351275723852291], + [0.76257333554052609, 0.48718906673415824, 0.38675335037837993], + [0.76402885609288662, 0.49200802533379656, 0.39008308392311997], + [0.76545492593330511, 0.49683212909727231, 0.39350254000115381], + [0.76685228950643891, 0.5016608471009063, 0.39701221751773474], + [0.76822176599735303, 0.50649362371287909, 0.40061257089416885], + [0.7695642334401418, 0.5113298901696085, 0.40430398069682483], + [0.77088091962302474, 0.51616892643469103, 0.40808667584648967], + [0.77217257229605551, 0.5210102658711383, 0.41196089987122869], + [0.77344021829889886, 0.52585332093451564, 0.41592679539764366], + [0.77468494746063199, 0.53069749384776732, 0.41998440356963762], + [0.77590790730685699, 0.53554217882461186, 0.42413367909988375], + [0.7771103295521099, 0.54038674910561235, 0.42837450371258479], + [0.77829345807633121, 0.54523059488426595, 0.432706647838971], + 
[0.77945862731506643, 0.55007308413977274, 0.43712979856444761], + [0.78060774749483774, 0.55491335744890613, 0.44164332426364639], + [0.78174180478981836, 0.55975098052594863, 0.44624687186865436], + [0.78286225264440912, 0.56458533111166875, 0.45093985823706345], + [0.78397060836414478, 0.56941578326710418, 0.45572154742892063], + [0.78506845019606841, 0.5742417003617839, 0.46059116206904965], + [0.78615737132332963, 0.5790624629815756, 0.46554778281918402], + [0.78723904108188347, 0.58387743744557208, 0.47059039582133383], + [0.78831514045623963, 0.58868600173562435, 0.47571791879076081], + [0.78938737766251943, 0.5934875421745599, 0.48092913815357724], + [0.79045776847727878, 0.59828134277062461, 0.48622257801969754], + [0.79152832843475607, 0.60306670593147205, 0.49159667021646397], + [0.79260034304237448, 0.60784322087037024, 0.49705020621532009], + [0.79367559698664958, 0.61261029334072192, 0.50258161291269432], + [0.79475585972654039, 0.61736734400220705, 0.50818921213102985], + [0.79584292379583765, 0.62211378808451145, 0.51387124091909786], + [0.79693854719951607, 0.62684905679296699, 0.5196258425240281], + [0.79804447815136637, 0.63157258225089552, 0.52545108144834785], + [0.7991624518501963, 0.63628379372029187, 0.53134495942561433], + [0.80029415389753977, 0.64098213306749863, 0.53730535185141037], + [0.80144124292560048, 0.64566703459218766, 0.5433300863249918], + [0.80260531146112946, 0.65033793748103852, 0.54941691584603647], + [0.80378792531077625, 0.65499426549472628, 0.55556350867083815], + [0.80499054790810298, 0.65963545027564163, 0.56176745110546977], + [0.80621460526927058, 0.66426089585282289, 0.56802629178649788], + [0.8074614045096935, 0.6688700095398864, 0.57433746373459582], + [0.80873219170089694, 0.67346216702194517, 0.58069834805576737], + [0.81002809466520687, 0.67803672673971815, 0.58710626908082753], + [0.81135014011763329, 0.68259301546243389, 0.59355848909050757], + [0.81269922039881493, 0.68713033714618876, 0.60005214820435104], 
+ [0.81407611046993344, 0.69164794791482131, 0.6065843782630862], + [0.81548146627279483, 0.69614505508308089, 0.61315221209322646], + [0.81691575775055891, 0.70062083014783982, 0.61975260637257923], + [0.81837931164498223, 0.70507438189635097, 0.62638245478933297], + [0.81987230650455289, 0.70950474978787481, 0.63303857040067113], + [0.8213947205565636, 0.7139109141951604, 0.63971766697672761], + [0.82294635110428427, 0.71829177331290062, 0.6464164243818421], + [0.8245268129450285, 0.72264614312088882, 0.65313137915422603], + [0.82613549710580259, 0.72697275518238258, 0.65985900156216504], + [0.8277716072353446, 0.73127023324078089, 0.66659570204682972], + [0.82943407816481474, 0.7355371221572935, 0.67333772009301907], + [0.83112163529096306, 0.73977184647638616, 0.68008125203631464], + [0.83283277185777982, 0.74397271817459876, 0.68682235874648545], + [0.8345656905566583, 0.7481379479992134, 0.69355697649863846], + [0.83631898844737929, 0.75226548952875261, 0.70027999028864962], + [0.83809123476131964, 0.75635314860808633, 0.70698561390212977], + [0.83987839884120874, 0.76039907199779677, 0.71367147811129228], + [0.84167750766845151, 0.76440101200982946, 0.72033299387284622], + [0.84348529222933699, 0.76835660399870176, 0.72696536998972039], + [0.84529810731955113, 0.77226338601044719, 0.73356368240541492], + [0.84711195507965098, 0.77611880236047159, 0.74012275762807056], + [0.84892245563117641, 0.77992021407650147, 0.74663719293664366], + [0.85072697023178789, 0.78366457342383888, 0.7530974636118285], + [0.85251907207708444, 0.78734936133548439, 0.7594994148789691], + [0.85429219611470464, 0.79097196777091994, 0.76583801477914104], + [0.85604022314725403, 0.79452963601550608, 0.77210610037674143], + [0.85775662943504905, 0.79801963142713928, 0.77829571667247499], + [0.8594346370300241, 0.8014392309950078, 0.78439788751383921], + [0.86107117027565516, 0.80478517909812231, 0.79039529663736285], + [0.86265601051127572, 0.80805523804261525, 0.796282666437655], + 
[0.86418343723941027, 0.81124644224653542, 0.80204612696863953], + [0.86564934325605325, 0.81435544067514909, 0.80766972324164554], + [0.86705314907048503, 0.81737804041911244, 0.81313419626911398], + [0.86839954695818633, 0.82030875512181523, 0.81841638963128993], + [0.86969131502613806, 0.82314158859569164, 0.82350476683173168], + [0.87093846717297507, 0.82586857889438514, 0.82838497261149613], + [0.87215331978454325, 0.82848052823709672, 0.8330486712880828], + [0.87335171360916275, 0.83096715251272624, 0.83748851001197089], + [0.87453793320260187, 0.83331972948645461, 0.84171925358069011], + [0.87571458709961403, 0.8355302318472394, 0.84575537519027078], + [0.87687848451614692, 0.83759238071186537, 0.84961373549150254], + [0.87802298436649007, 0.83950165618540074, 0.85330645352458923], + [0.87913244240792765, 0.84125554884475906, 0.85685572291039636], + [0.88019293315695812, 0.84285224824778615, 0.86027399927156634], + [0.88119169871341951, 0.84429066717717349, 0.86356595168669881], + [0.88211542489401606, 0.84557007254559347, 0.86673765046233331], + [0.88295168595448525, 0.84668970275699273, 0.86979617048190971], + [0.88369127145898041, 0.84764891761519268, 0.87274147101441557], + [0.88432713054113543, 0.84844741572055415, 0.87556785228242973], + [0.88485138159908572, 0.84908426422893801, 0.87828235285372469], + [0.88525897972630474, 0.84955892810989209, 0.88088414794024839], + [0.88554714811952384, 0.84987174283631584, 0.88336206121170946], + [0.88571155122845646, 0.85002186115856315, 0.88572538990087124]] + +_twilight_shifted_data = (_twilight_data[len(_twilight_data)//2:] + + _twilight_data[:len(_twilight_data)//2]) +_twilight_shifted_data.reverse() +_turbo_data = [[0.18995, 0.07176, 0.23217], + [0.19483, 0.08339, 0.26149], + [0.19956, 0.09498, 0.29024], + [0.20415, 0.10652, 0.31844], + [0.20860, 0.11802, 0.34607], + [0.21291, 0.12947, 0.37314], + [0.21708, 0.14087, 0.39964], + [0.22111, 0.15223, 0.42558], + [0.22500, 0.16354, 0.45096], + [0.22875, 0.17481, 
0.47578], + [0.23236, 0.18603, 0.50004], + [0.23582, 0.19720, 0.52373], + [0.23915, 0.20833, 0.54686], + [0.24234, 0.21941, 0.56942], + [0.24539, 0.23044, 0.59142], + [0.24830, 0.24143, 0.61286], + [0.25107, 0.25237, 0.63374], + [0.25369, 0.26327, 0.65406], + [0.25618, 0.27412, 0.67381], + [0.25853, 0.28492, 0.69300], + [0.26074, 0.29568, 0.71162], + [0.26280, 0.30639, 0.72968], + [0.26473, 0.31706, 0.74718], + [0.26652, 0.32768, 0.76412], + [0.26816, 0.33825, 0.78050], + [0.26967, 0.34878, 0.79631], + [0.27103, 0.35926, 0.81156], + [0.27226, 0.36970, 0.82624], + [0.27334, 0.38008, 0.84037], + [0.27429, 0.39043, 0.85393], + [0.27509, 0.40072, 0.86692], + [0.27576, 0.41097, 0.87936], + [0.27628, 0.42118, 0.89123], + [0.27667, 0.43134, 0.90254], + [0.27691, 0.44145, 0.91328], + [0.27701, 0.45152, 0.92347], + [0.27698, 0.46153, 0.93309], + [0.27680, 0.47151, 0.94214], + [0.27648, 0.48144, 0.95064], + [0.27603, 0.49132, 0.95857], + [0.27543, 0.50115, 0.96594], + [0.27469, 0.51094, 0.97275], + [0.27381, 0.52069, 0.97899], + [0.27273, 0.53040, 0.98461], + [0.27106, 0.54015, 0.98930], + [0.26878, 0.54995, 0.99303], + [0.26592, 0.55979, 0.99583], + [0.26252, 0.56967, 0.99773], + [0.25862, 0.57958, 0.99876], + [0.25425, 0.58950, 0.99896], + [0.24946, 0.59943, 0.99835], + [0.24427, 0.60937, 0.99697], + [0.23874, 0.61931, 0.99485], + [0.23288, 0.62923, 0.99202], + [0.22676, 0.63913, 0.98851], + [0.22039, 0.64901, 0.98436], + [0.21382, 0.65886, 0.97959], + [0.20708, 0.66866, 0.97423], + [0.20021, 0.67842, 0.96833], + [0.19326, 0.68812, 0.96190], + [0.18625, 0.69775, 0.95498], + [0.17923, 0.70732, 0.94761], + [0.17223, 0.71680, 0.93981], + [0.16529, 0.72620, 0.93161], + [0.15844, 0.73551, 0.92305], + [0.15173, 0.74472, 0.91416], + [0.14519, 0.75381, 0.90496], + [0.13886, 0.76279, 0.89550], + [0.13278, 0.77165, 0.88580], + [0.12698, 0.78037, 0.87590], + [0.12151, 0.78896, 0.86581], + [0.11639, 0.79740, 0.85559], + [0.11167, 0.80569, 0.84525], + [0.10738, 0.81381, 0.83484], + 
[0.10357, 0.82177, 0.82437], + [0.10026, 0.82955, 0.81389], + [0.09750, 0.83714, 0.80342], + [0.09532, 0.84455, 0.79299], + [0.09377, 0.85175, 0.78264], + [0.09287, 0.85875, 0.77240], + [0.09267, 0.86554, 0.76230], + [0.09320, 0.87211, 0.75237], + [0.09451, 0.87844, 0.74265], + [0.09662, 0.88454, 0.73316], + [0.09958, 0.89040, 0.72393], + [0.10342, 0.89600, 0.71500], + [0.10815, 0.90142, 0.70599], + [0.11374, 0.90673, 0.69651], + [0.12014, 0.91193, 0.68660], + [0.12733, 0.91701, 0.67627], + [0.13526, 0.92197, 0.66556], + [0.14391, 0.92680, 0.65448], + [0.15323, 0.93151, 0.64308], + [0.16319, 0.93609, 0.63137], + [0.17377, 0.94053, 0.61938], + [0.18491, 0.94484, 0.60713], + [0.19659, 0.94901, 0.59466], + [0.20877, 0.95304, 0.58199], + [0.22142, 0.95692, 0.56914], + [0.23449, 0.96065, 0.55614], + [0.24797, 0.96423, 0.54303], + [0.26180, 0.96765, 0.52981], + [0.27597, 0.97092, 0.51653], + [0.29042, 0.97403, 0.50321], + [0.30513, 0.97697, 0.48987], + [0.32006, 0.97974, 0.47654], + [0.33517, 0.98234, 0.46325], + [0.35043, 0.98477, 0.45002], + [0.36581, 0.98702, 0.43688], + [0.38127, 0.98909, 0.42386], + [0.39678, 0.99098, 0.41098], + [0.41229, 0.99268, 0.39826], + [0.42778, 0.99419, 0.38575], + [0.44321, 0.99551, 0.37345], + [0.45854, 0.99663, 0.36140], + [0.47375, 0.99755, 0.34963], + [0.48879, 0.99828, 0.33816], + [0.50362, 0.99879, 0.32701], + [0.51822, 0.99910, 0.31622], + [0.53255, 0.99919, 0.30581], + [0.54658, 0.99907, 0.29581], + [0.56026, 0.99873, 0.28623], + [0.57357, 0.99817, 0.27712], + [0.58646, 0.99739, 0.26849], + [0.59891, 0.99638, 0.26038], + [0.61088, 0.99514, 0.25280], + [0.62233, 0.99366, 0.24579], + [0.63323, 0.99195, 0.23937], + [0.64362, 0.98999, 0.23356], + [0.65394, 0.98775, 0.22835], + [0.66428, 0.98524, 0.22370], + [0.67462, 0.98246, 0.21960], + [0.68494, 0.97941, 0.21602], + [0.69525, 0.97610, 0.21294], + [0.70553, 0.97255, 0.21032], + [0.71577, 0.96875, 0.20815], + [0.72596, 0.96470, 0.20640], + [0.73610, 0.96043, 0.20504], + [0.74617, 
0.95593, 0.20406], + [0.75617, 0.95121, 0.20343], + [0.76608, 0.94627, 0.20311], + [0.77591, 0.94113, 0.20310], + [0.78563, 0.93579, 0.20336], + [0.79524, 0.93025, 0.20386], + [0.80473, 0.92452, 0.20459], + [0.81410, 0.91861, 0.20552], + [0.82333, 0.91253, 0.20663], + [0.83241, 0.90627, 0.20788], + [0.84133, 0.89986, 0.20926], + [0.85010, 0.89328, 0.21074], + [0.85868, 0.88655, 0.21230], + [0.86709, 0.87968, 0.21391], + [0.87530, 0.87267, 0.21555], + [0.88331, 0.86553, 0.21719], + [0.89112, 0.85826, 0.21880], + [0.89870, 0.85087, 0.22038], + [0.90605, 0.84337, 0.22188], + [0.91317, 0.83576, 0.22328], + [0.92004, 0.82806, 0.22456], + [0.92666, 0.82025, 0.22570], + [0.93301, 0.81236, 0.22667], + [0.93909, 0.80439, 0.22744], + [0.94489, 0.79634, 0.22800], + [0.95039, 0.78823, 0.22831], + [0.95560, 0.78005, 0.22836], + [0.96049, 0.77181, 0.22811], + [0.96507, 0.76352, 0.22754], + [0.96931, 0.75519, 0.22663], + [0.97323, 0.74682, 0.22536], + [0.97679, 0.73842, 0.22369], + [0.98000, 0.73000, 0.22161], + [0.98289, 0.72140, 0.21918], + [0.98549, 0.71250, 0.21650], + [0.98781, 0.70330, 0.21358], + [0.98986, 0.69382, 0.21043], + [0.99163, 0.68408, 0.20706], + [0.99314, 0.67408, 0.20348], + [0.99438, 0.66386, 0.19971], + [0.99535, 0.65341, 0.19577], + [0.99607, 0.64277, 0.19165], + [0.99654, 0.63193, 0.18738], + [0.99675, 0.62093, 0.18297], + [0.99672, 0.60977, 0.17842], + [0.99644, 0.59846, 0.17376], + [0.99593, 0.58703, 0.16899], + [0.99517, 0.57549, 0.16412], + [0.99419, 0.56386, 0.15918], + [0.99297, 0.55214, 0.15417], + [0.99153, 0.54036, 0.14910], + [0.98987, 0.52854, 0.14398], + [0.98799, 0.51667, 0.13883], + [0.98590, 0.50479, 0.13367], + [0.98360, 0.49291, 0.12849], + [0.98108, 0.48104, 0.12332], + [0.97837, 0.46920, 0.11817], + [0.97545, 0.45740, 0.11305], + [0.97234, 0.44565, 0.10797], + [0.96904, 0.43399, 0.10294], + [0.96555, 0.42241, 0.09798], + [0.96187, 0.41093, 0.09310], + [0.95801, 0.39958, 0.08831], + [0.95398, 0.38836, 0.08362], + [0.94977, 0.37729, 
0.07905], + [0.94538, 0.36638, 0.07461], + [0.94084, 0.35566, 0.07031], + [0.93612, 0.34513, 0.06616], + [0.93125, 0.33482, 0.06218], + [0.92623, 0.32473, 0.05837], + [0.92105, 0.31489, 0.05475], + [0.91572, 0.30530, 0.05134], + [0.91024, 0.29599, 0.04814], + [0.90463, 0.28696, 0.04516], + [0.89888, 0.27824, 0.04243], + [0.89298, 0.26981, 0.03993], + [0.88691, 0.26152, 0.03753], + [0.88066, 0.25334, 0.03521], + [0.87422, 0.24526, 0.03297], + [0.86760, 0.23730, 0.03082], + [0.86079, 0.22945, 0.02875], + [0.85380, 0.22170, 0.02677], + [0.84662, 0.21407, 0.02487], + [0.83926, 0.20654, 0.02305], + [0.83172, 0.19912, 0.02131], + [0.82399, 0.19182, 0.01966], + [0.81608, 0.18462, 0.01809], + [0.80799, 0.17753, 0.01660], + [0.79971, 0.17055, 0.01520], + [0.79125, 0.16368, 0.01387], + [0.78260, 0.15693, 0.01264], + [0.77377, 0.15028, 0.01148], + [0.76476, 0.14374, 0.01041], + [0.75556, 0.13731, 0.00942], + [0.74617, 0.13098, 0.00851], + [0.73661, 0.12477, 0.00769], + [0.72686, 0.11867, 0.00695], + [0.71692, 0.11268, 0.00629], + [0.70680, 0.10680, 0.00571], + [0.69650, 0.10102, 0.00522], + [0.68602, 0.09536, 0.00481], + [0.67535, 0.08980, 0.00449], + [0.66449, 0.08436, 0.00424], + [0.65345, 0.07902, 0.00408], + [0.64223, 0.07380, 0.00401], + [0.63082, 0.06868, 0.00401], + [0.61923, 0.06367, 0.00410], + [0.60746, 0.05878, 0.00427], + [0.59550, 0.05399, 0.00453], + [0.58336, 0.04931, 0.00486], + [0.57103, 0.04474, 0.00529], + [0.55852, 0.04028, 0.00579], + [0.54583, 0.03593, 0.00638], + [0.53295, 0.03169, 0.00705], + [0.51989, 0.02756, 0.00780], + [0.50664, 0.02354, 0.00863], + [0.49321, 0.01963, 0.00955], + [0.47960, 0.01583, 0.01055]] + + +cmaps = { + name: ListedColormap(data, name=name) for name, data in [ + ('magma', _magma_data), + ('inferno', _inferno_data), + ('plasma', _plasma_data), + ('viridis', _viridis_data), + ('cividis', _cividis_data), + ('twilight', _twilight_data), + ('twilight_shifted', _twilight_shifted_data), + ('turbo', _turbo_data), + ]} diff --git 
a/venv/Lib/site-packages/matplotlib/_color_data.py b/venv/Lib/site-packages/matplotlib/_color_data.py new file mode 100644 index 000000000..e50998b18 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_color_data.py @@ -0,0 +1,1147 @@ +from collections import OrderedDict + + +BASE_COLORS = { + 'b': (0, 0, 1), # blue + 'g': (0, 0.5, 0), # green + 'r': (1, 0, 0), # red + 'c': (0, 0.75, 0.75), # cyan + 'm': (0.75, 0, 0.75), # magenta + 'y': (0.75, 0.75, 0), # yellow + 'k': (0, 0, 0), # black + 'w': (1, 1, 1), # white +} + + +# These colors are from Tableau +TABLEAU_COLORS = ( + ('blue', '#1f77b4'), + ('orange', '#ff7f0e'), + ('green', '#2ca02c'), + ('red', '#d62728'), + ('purple', '#9467bd'), + ('brown', '#8c564b'), + ('pink', '#e377c2'), + ('gray', '#7f7f7f'), + ('olive', '#bcbd22'), + ('cyan', '#17becf'), +) + +# Normalize name to "tab:" to avoid name collisions. +TABLEAU_COLORS = OrderedDict( + ('tab:' + name, value) for name, value in TABLEAU_COLORS) + +# This mapping of color names -> hex values is taken from +# a survey run by Randall Munroe see: +# https://blog.xkcd.com/2010/05/03/color-survey-results/ +# for more details. 
The results are hosted at +# https://xkcd.com/color/rgb +# and also available as a text file at +# https://xkcd.com/color/rgb.txt +# +# License: http://creativecommons.org/publicdomain/zero/1.0/ +XKCD_COLORS = { + 'cloudy blue': '#acc2d9', + 'dark pastel green': '#56ae57', + 'dust': '#b2996e', + 'electric lime': '#a8ff04', + 'fresh green': '#69d84f', + 'light eggplant': '#894585', + 'nasty green': '#70b23f', + 'really light blue': '#d4ffff', + 'tea': '#65ab7c', + 'warm purple': '#952e8f', + 'yellowish tan': '#fcfc81', + 'cement': '#a5a391', + 'dark grass green': '#388004', + 'dusty teal': '#4c9085', + 'grey teal': '#5e9b8a', + 'macaroni and cheese': '#efb435', + 'pinkish tan': '#d99b82', + 'spruce': '#0a5f38', + 'strong blue': '#0c06f7', + 'toxic green': '#61de2a', + 'windows blue': '#3778bf', + 'blue blue': '#2242c7', + 'blue with a hint of purple': '#533cc6', + 'booger': '#9bb53c', + 'bright sea green': '#05ffa6', + 'dark green blue': '#1f6357', + 'deep turquoise': '#017374', + 'green teal': '#0cb577', + 'strong pink': '#ff0789', + 'bland': '#afa88b', + 'deep aqua': '#08787f', + 'lavender pink': '#dd85d7', + 'light moss green': '#a6c875', + 'light seafoam green': '#a7ffb5', + 'olive yellow': '#c2b709', + 'pig pink': '#e78ea5', + 'deep lilac': '#966ebd', + 'desert': '#ccad60', + 'dusty lavender': '#ac86a8', + 'purpley grey': '#947e94', + 'purply': '#983fb2', + 'candy pink': '#ff63e9', + 'light pastel green': '#b2fba5', + 'boring green': '#63b365', + 'kiwi green': '#8ee53f', + 'light grey green': '#b7e1a1', + 'orange pink': '#ff6f52', + 'tea green': '#bdf8a3', + 'very light brown': '#d3b683', + 'egg shell': '#fffcc4', + 'eggplant purple': '#430541', + 'powder pink': '#ffb2d0', + 'reddish grey': '#997570', + 'baby shit brown': '#ad900d', + 'liliac': '#c48efd', + 'stormy blue': '#507b9c', + 'ugly brown': '#7d7103', + 'custard': '#fffd78', + 'darkish pink': '#da467d', + 'deep brown': '#410200', + 'greenish beige': '#c9d179', + 'manilla': '#fffa86', + 'off blue': 
'#5684ae', + 'battleship grey': '#6b7c85', + 'browny green': '#6f6c0a', + 'bruise': '#7e4071', + 'kelley green': '#009337', + 'sickly yellow': '#d0e429', + 'sunny yellow': '#fff917', + 'azul': '#1d5dec', + 'darkgreen': '#054907', + 'green/yellow': '#b5ce08', + 'lichen': '#8fb67b', + 'light light green': '#c8ffb0', + 'pale gold': '#fdde6c', + 'sun yellow': '#ffdf22', + 'tan green': '#a9be70', + 'burple': '#6832e3', + 'butterscotch': '#fdb147', + 'toupe': '#c7ac7d', + 'dark cream': '#fff39a', + 'indian red': '#850e04', + 'light lavendar': '#efc0fe', + 'poison green': '#40fd14', + 'baby puke green': '#b6c406', + 'bright yellow green': '#9dff00', + 'charcoal grey': '#3c4142', + 'squash': '#f2ab15', + 'cinnamon': '#ac4f06', + 'light pea green': '#c4fe82', + 'radioactive green': '#2cfa1f', + 'raw sienna': '#9a6200', + 'baby purple': '#ca9bf7', + 'cocoa': '#875f42', + 'light royal blue': '#3a2efe', + 'orangeish': '#fd8d49', + 'rust brown': '#8b3103', + 'sand brown': '#cba560', + 'swamp': '#698339', + 'tealish green': '#0cdc73', + 'burnt siena': '#b75203', + 'camo': '#7f8f4e', + 'dusk blue': '#26538d', + 'fern': '#63a950', + 'old rose': '#c87f89', + 'pale light green': '#b1fc99', + 'peachy pink': '#ff9a8a', + 'rosy pink': '#f6688e', + 'light bluish green': '#76fda8', + 'light bright green': '#53fe5c', + 'light neon green': '#4efd54', + 'light seafoam': '#a0febf', + 'tiffany blue': '#7bf2da', + 'washed out green': '#bcf5a6', + 'browny orange': '#ca6b02', + 'nice blue': '#107ab0', + 'sapphire': '#2138ab', + 'greyish teal': '#719f91', + 'orangey yellow': '#fdb915', + 'parchment': '#fefcaf', + 'straw': '#fcf679', + 'very dark brown': '#1d0200', + 'terracota': '#cb6843', + 'ugly blue': '#31668a', + 'clear blue': '#247afd', + 'creme': '#ffffb6', + 'foam green': '#90fda9', + 'grey/green': '#86a17d', + 'light gold': '#fddc5c', + 'seafoam blue': '#78d1b6', + 'topaz': '#13bbaf', + 'violet pink': '#fb5ffc', + 'wintergreen': '#20f986', + 'yellow tan': '#ffe36e', + 'dark fuchsia': 
'#9d0759', + 'indigo blue': '#3a18b1', + 'light yellowish green': '#c2ff89', + 'pale magenta': '#d767ad', + 'rich purple': '#720058', + 'sunflower yellow': '#ffda03', + 'green/blue': '#01c08d', + 'leather': '#ac7434', + 'racing green': '#014600', + 'vivid purple': '#9900fa', + 'dark royal blue': '#02066f', + 'hazel': '#8e7618', + 'muted pink': '#d1768f', + 'booger green': '#96b403', + 'canary': '#fdff63', + 'cool grey': '#95a3a6', + 'dark taupe': '#7f684e', + 'darkish purple': '#751973', + 'true green': '#089404', + 'coral pink': '#ff6163', + 'dark sage': '#598556', + 'dark slate blue': '#214761', + 'flat blue': '#3c73a8', + 'mushroom': '#ba9e88', + 'rich blue': '#021bf9', + 'dirty purple': '#734a65', + 'greenblue': '#23c48b', + 'icky green': '#8fae22', + 'light khaki': '#e6f2a2', + 'warm blue': '#4b57db', + 'dark hot pink': '#d90166', + 'deep sea blue': '#015482', + 'carmine': '#9d0216', + 'dark yellow green': '#728f02', + 'pale peach': '#ffe5ad', + 'plum purple': '#4e0550', + 'golden rod': '#f9bc08', + 'neon red': '#ff073a', + 'old pink': '#c77986', + 'very pale blue': '#d6fffe', + 'blood orange': '#fe4b03', + 'grapefruit': '#fd5956', + 'sand yellow': '#fce166', + 'clay brown': '#b2713d', + 'dark blue grey': '#1f3b4d', + 'flat green': '#699d4c', + 'light green blue': '#56fca2', + 'warm pink': '#fb5581', + 'dodger blue': '#3e82fc', + 'gross green': '#a0bf16', + 'ice': '#d6fffa', + 'metallic blue': '#4f738e', + 'pale salmon': '#ffb19a', + 'sap green': '#5c8b15', + 'algae': '#54ac68', + 'bluey grey': '#89a0b0', + 'greeny grey': '#7ea07a', + 'highlighter green': '#1bfc06', + 'light light blue': '#cafffb', + 'light mint': '#b6ffbb', + 'raw umber': '#a75e09', + 'vivid blue': '#152eff', + 'deep lavender': '#8d5eb7', + 'dull teal': '#5f9e8f', + 'light greenish blue': '#63f7b4', + 'mud green': '#606602', + 'pinky': '#fc86aa', + 'red wine': '#8c0034', + 'shit green': '#758000', + 'tan brown': '#ab7e4c', + 'darkblue': '#030764', + 'rosa': '#fe86a4', + 'lipstick': '#d5174e', 
+ 'pale mauve': '#fed0fc', + 'claret': '#680018', + 'dandelion': '#fedf08', + 'orangered': '#fe420f', + 'poop green': '#6f7c00', + 'ruby': '#ca0147', + 'dark': '#1b2431', + 'greenish turquoise': '#00fbb0', + 'pastel red': '#db5856', + 'piss yellow': '#ddd618', + 'bright cyan': '#41fdfe', + 'dark coral': '#cf524e', + 'algae green': '#21c36f', + 'darkish red': '#a90308', + 'reddy brown': '#6e1005', + 'blush pink': '#fe828c', + 'camouflage green': '#4b6113', + 'lawn green': '#4da409', + 'putty': '#beae8a', + 'vibrant blue': '#0339f8', + 'dark sand': '#a88f59', + 'purple/blue': '#5d21d0', + 'saffron': '#feb209', + 'twilight': '#4e518b', + 'warm brown': '#964e02', + 'bluegrey': '#85a3b2', + 'bubble gum pink': '#ff69af', + 'duck egg blue': '#c3fbf4', + 'greenish cyan': '#2afeb7', + 'petrol': '#005f6a', + 'royal': '#0c1793', + 'butter': '#ffff81', + 'dusty orange': '#f0833a', + 'off yellow': '#f1f33f', + 'pale olive green': '#b1d27b', + 'orangish': '#fc824a', + 'leaf': '#71aa34', + 'light blue grey': '#b7c9e2', + 'dried blood': '#4b0101', + 'lightish purple': '#a552e6', + 'rusty red': '#af2f0d', + 'lavender blue': '#8b88f8', + 'light grass green': '#9af764', + 'light mint green': '#a6fbb2', + 'sunflower': '#ffc512', + 'velvet': '#750851', + 'brick orange': '#c14a09', + 'lightish red': '#fe2f4a', + 'pure blue': '#0203e2', + 'twilight blue': '#0a437a', + 'violet red': '#a50055', + 'yellowy brown': '#ae8b0c', + 'carnation': '#fd798f', + 'muddy yellow': '#bfac05', + 'dark seafoam green': '#3eaf76', + 'deep rose': '#c74767', + 'dusty red': '#b9484e', + 'grey/blue': '#647d8e', + 'lemon lime': '#bffe28', + 'purple/pink': '#d725de', + 'brown yellow': '#b29705', + 'purple brown': '#673a3f', + 'wisteria': '#a87dc2', + 'banana yellow': '#fafe4b', + 'lipstick red': '#c0022f', + 'water blue': '#0e87cc', + 'brown grey': '#8d8468', + 'vibrant purple': '#ad03de', + 'baby green': '#8cff9e', + 'barf green': '#94ac02', + 'eggshell blue': '#c4fff7', + 'sandy yellow': '#fdee73', + 'cool 
green': '#33b864', + 'pale': '#fff9d0', + 'blue/grey': '#758da3', + 'hot magenta': '#f504c9', + 'greyblue': '#77a1b5', + 'purpley': '#8756e4', + 'baby shit green': '#889717', + 'brownish pink': '#c27e79', + 'dark aquamarine': '#017371', + 'diarrhea': '#9f8303', + 'light mustard': '#f7d560', + 'pale sky blue': '#bdf6fe', + 'turtle green': '#75b84f', + 'bright olive': '#9cbb04', + 'dark grey blue': '#29465b', + 'greeny brown': '#696006', + 'lemon green': '#adf802', + 'light periwinkle': '#c1c6fc', + 'seaweed green': '#35ad6b', + 'sunshine yellow': '#fffd37', + 'ugly purple': '#a442a0', + 'medium pink': '#f36196', + 'puke brown': '#947706', + 'very light pink': '#fff4f2', + 'viridian': '#1e9167', + 'bile': '#b5c306', + 'faded yellow': '#feff7f', + 'very pale green': '#cffdbc', + 'vibrant green': '#0add08', + 'bright lime': '#87fd05', + 'spearmint': '#1ef876', + 'light aquamarine': '#7bfdc7', + 'light sage': '#bcecac', + 'yellowgreen': '#bbf90f', + 'baby poo': '#ab9004', + 'dark seafoam': '#1fb57a', + 'deep teal': '#00555a', + 'heather': '#a484ac', + 'rust orange': '#c45508', + 'dirty blue': '#3f829d', + 'fern green': '#548d44', + 'bright lilac': '#c95efb', + 'weird green': '#3ae57f', + 'peacock blue': '#016795', + 'avocado green': '#87a922', + 'faded orange': '#f0944d', + 'grape purple': '#5d1451', + 'hot green': '#25ff29', + 'lime yellow': '#d0fe1d', + 'mango': '#ffa62b', + 'shamrock': '#01b44c', + 'bubblegum': '#ff6cb5', + 'purplish brown': '#6b4247', + 'vomit yellow': '#c7c10c', + 'pale cyan': '#b7fffa', + 'key lime': '#aeff6e', + 'tomato red': '#ec2d01', + 'lightgreen': '#76ff7b', + 'merlot': '#730039', + 'night blue': '#040348', + 'purpleish pink': '#df4ec8', + 'apple': '#6ecb3c', + 'baby poop green': '#8f9805', + 'green apple': '#5edc1f', + 'heliotrope': '#d94ff5', + 'yellow/green': '#c8fd3d', + 'almost black': '#070d0d', + 'cool blue': '#4984b8', + 'leafy green': '#51b73b', + 'mustard brown': '#ac7e04', + 'dusk': '#4e5481', + 'dull brown': '#876e4b', + 'frog 
green': '#58bc08', + 'vivid green': '#2fef10', + 'bright light green': '#2dfe54', + 'fluro green': '#0aff02', + 'kiwi': '#9cef43', + 'seaweed': '#18d17b', + 'navy green': '#35530a', + 'ultramarine blue': '#1805db', + 'iris': '#6258c4', + 'pastel orange': '#ff964f', + 'yellowish orange': '#ffab0f', + 'perrywinkle': '#8f8ce7', + 'tealish': '#24bca8', + 'dark plum': '#3f012c', + 'pear': '#cbf85f', + 'pinkish orange': '#ff724c', + 'midnight purple': '#280137', + 'light urple': '#b36ff6', + 'dark mint': '#48c072', + 'greenish tan': '#bccb7a', + 'light burgundy': '#a8415b', + 'turquoise blue': '#06b1c4', + 'ugly pink': '#cd7584', + 'sandy': '#f1da7a', + 'electric pink': '#ff0490', + 'muted purple': '#805b87', + 'mid green': '#50a747', + 'greyish': '#a8a495', + 'neon yellow': '#cfff04', + 'banana': '#ffff7e', + 'carnation pink': '#ff7fa7', + 'tomato': '#ef4026', + 'sea': '#3c9992', + 'muddy brown': '#886806', + 'turquoise green': '#04f489', + 'buff': '#fef69e', + 'fawn': '#cfaf7b', + 'muted blue': '#3b719f', + 'pale rose': '#fdc1c5', + 'dark mint green': '#20c073', + 'amethyst': '#9b5fc0', + 'blue/green': '#0f9b8e', + 'chestnut': '#742802', + 'sick green': '#9db92c', + 'pea': '#a4bf20', + 'rusty orange': '#cd5909', + 'stone': '#ada587', + 'rose red': '#be013c', + 'pale aqua': '#b8ffeb', + 'deep orange': '#dc4d01', + 'earth': '#a2653e', + 'mossy green': '#638b27', + 'grassy green': '#419c03', + 'pale lime green': '#b1ff65', + 'light grey blue': '#9dbcd4', + 'pale grey': '#fdfdfe', + 'asparagus': '#77ab56', + 'blueberry': '#464196', + 'purple red': '#990147', + 'pale lime': '#befd73', + 'greenish teal': '#32bf84', + 'caramel': '#af6f09', + 'deep magenta': '#a0025c', + 'light peach': '#ffd8b1', + 'milk chocolate': '#7f4e1e', + 'ocher': '#bf9b0c', + 'off green': '#6ba353', + 'purply pink': '#f075e6', + 'lightblue': '#7bc8f6', + 'dusky blue': '#475f94', + 'golden': '#f5bf03', + 'light beige': '#fffeb6', + 'butter yellow': '#fffd74', + 'dusky purple': '#895b7b', + 'french 
blue': '#436bad', + 'ugly yellow': '#d0c101', + 'greeny yellow': '#c6f808', + 'orangish red': '#f43605', + 'shamrock green': '#02c14d', + 'orangish brown': '#b25f03', + 'tree green': '#2a7e19', + 'deep violet': '#490648', + 'gunmetal': '#536267', + 'blue/purple': '#5a06ef', + 'cherry': '#cf0234', + 'sandy brown': '#c4a661', + 'warm grey': '#978a84', + 'dark indigo': '#1f0954', + 'midnight': '#03012d', + 'bluey green': '#2bb179', + 'grey pink': '#c3909b', + 'soft purple': '#a66fb5', + 'blood': '#770001', + 'brown red': '#922b05', + 'medium grey': '#7d7f7c', + 'berry': '#990f4b', + 'poo': '#8f7303', + 'purpley pink': '#c83cb9', + 'light salmon': '#fea993', + 'snot': '#acbb0d', + 'easter purple': '#c071fe', + 'light yellow green': '#ccfd7f', + 'dark navy blue': '#00022e', + 'drab': '#828344', + 'light rose': '#ffc5cb', + 'rouge': '#ab1239', + 'purplish red': '#b0054b', + 'slime green': '#99cc04', + 'baby poop': '#937c00', + 'irish green': '#019529', + 'pink/purple': '#ef1de7', + 'dark navy': '#000435', + 'greeny blue': '#42b395', + 'light plum': '#9d5783', + 'pinkish grey': '#c8aca9', + 'dirty orange': '#c87606', + 'rust red': '#aa2704', + 'pale lilac': '#e4cbff', + 'orangey red': '#fa4224', + 'primary blue': '#0804f9', + 'kermit green': '#5cb200', + 'brownish purple': '#76424e', + 'murky green': '#6c7a0e', + 'wheat': '#fbdd7e', + 'very dark purple': '#2a0134', + 'bottle green': '#044a05', + 'watermelon': '#fd4659', + 'deep sky blue': '#0d75f8', + 'fire engine red': '#fe0002', + 'yellow ochre': '#cb9d06', + 'pumpkin orange': '#fb7d07', + 'pale olive': '#b9cc81', + 'light lilac': '#edc8ff', + 'lightish green': '#61e160', + 'carolina blue': '#8ab8fe', + 'mulberry': '#920a4e', + 'shocking pink': '#fe02a2', + 'auburn': '#9a3001', + 'bright lime green': '#65fe08', + 'celadon': '#befdb7', + 'pinkish brown': '#b17261', + 'poo brown': '#885f01', + 'bright sky blue': '#02ccfe', + 'celery': '#c1fd95', + 'dirt brown': '#836539', + 'strawberry': '#fb2943', + 'dark lime': 
'#84b701', + 'copper': '#b66325', + 'medium brown': '#7f5112', + 'muted green': '#5fa052', + "robin's egg": '#6dedfd', + 'bright aqua': '#0bf9ea', + 'bright lavender': '#c760ff', + 'ivory': '#ffffcb', + 'very light purple': '#f6cefc', + 'light navy': '#155084', + 'pink red': '#f5054f', + 'olive brown': '#645403', + 'poop brown': '#7a5901', + 'mustard green': '#a8b504', + 'ocean green': '#3d9973', + 'very dark blue': '#000133', + 'dusty green': '#76a973', + 'light navy blue': '#2e5a88', + 'minty green': '#0bf77d', + 'adobe': '#bd6c48', + 'barney': '#ac1db8', + 'jade green': '#2baf6a', + 'bright light blue': '#26f7fd', + 'light lime': '#aefd6c', + 'dark khaki': '#9b8f55', + 'orange yellow': '#ffad01', + 'ocre': '#c69c04', + 'maize': '#f4d054', + 'faded pink': '#de9dac', + 'british racing green': '#05480d', + 'sandstone': '#c9ae74', + 'mud brown': '#60460f', + 'light sea green': '#98f6b0', + 'robin egg blue': '#8af1fe', + 'aqua marine': '#2ee8bb', + 'dark sea green': '#11875d', + 'soft pink': '#fdb0c0', + 'orangey brown': '#b16002', + 'cherry red': '#f7022a', + 'burnt yellow': '#d5ab09', + 'brownish grey': '#86775f', + 'camel': '#c69f59', + 'purplish grey': '#7a687f', + 'marine': '#042e60', + 'greyish pink': '#c88d94', + 'pale turquoise': '#a5fbd5', + 'pastel yellow': '#fffe71', + 'bluey purple': '#6241c7', + 'canary yellow': '#fffe40', + 'faded red': '#d3494e', + 'sepia': '#985e2b', + 'coffee': '#a6814c', + 'bright magenta': '#ff08e8', + 'mocha': '#9d7651', + 'ecru': '#feffca', + 'purpleish': '#98568d', + 'cranberry': '#9e003a', + 'darkish green': '#287c37', + 'brown orange': '#b96902', + 'dusky rose': '#ba6873', + 'melon': '#ff7855', + 'sickly green': '#94b21c', + 'silver': '#c5c9c7', + 'purply blue': '#661aee', + 'purpleish blue': '#6140ef', + 'hospital green': '#9be5aa', + 'shit brown': '#7b5804', + 'mid blue': '#276ab3', + 'amber': '#feb308', + 'easter green': '#8cfd7e', + 'soft blue': '#6488ea', + 'cerulean blue': '#056eee', + 'golden brown': '#b27a01', + 
'bright turquoise': '#0ffef9', + 'red pink': '#fa2a55', + 'red purple': '#820747', + 'greyish brown': '#7a6a4f', + 'vermillion': '#f4320c', + 'russet': '#a13905', + 'steel grey': '#6f828a', + 'lighter purple': '#a55af4', + 'bright violet': '#ad0afd', + 'prussian blue': '#004577', + 'slate green': '#658d6d', + 'dirty pink': '#ca7b80', + 'dark blue green': '#005249', + 'pine': '#2b5d34', + 'yellowy green': '#bff128', + 'dark gold': '#b59410', + 'bluish': '#2976bb', + 'darkish blue': '#014182', + 'dull red': '#bb3f3f', + 'pinky red': '#fc2647', + 'bronze': '#a87900', + 'pale teal': '#82cbb2', + 'military green': '#667c3e', + 'barbie pink': '#fe46a5', + 'bubblegum pink': '#fe83cc', + 'pea soup green': '#94a617', + 'dark mustard': '#a88905', + 'shit': '#7f5f00', + 'medium purple': '#9e43a2', + 'very dark green': '#062e03', + 'dirt': '#8a6e45', + 'dusky pink': '#cc7a8b', + 'red violet': '#9e0168', + 'lemon yellow': '#fdff38', + 'pistachio': '#c0fa8b', + 'dull yellow': '#eedc5b', + 'dark lime green': '#7ebd01', + 'denim blue': '#3b5b92', + 'teal blue': '#01889f', + 'lightish blue': '#3d7afd', + 'purpley blue': '#5f34e7', + 'light indigo': '#6d5acf', + 'swamp green': '#748500', + 'brown green': '#706c11', + 'dark maroon': '#3c0008', + 'hot purple': '#cb00f5', + 'dark forest green': '#002d04', + 'faded blue': '#658cbb', + 'drab green': '#749551', + 'light lime green': '#b9ff66', + 'snot green': '#9dc100', + 'yellowish': '#faee66', + 'light blue green': '#7efbb3', + 'bordeaux': '#7b002c', + 'light mauve': '#c292a1', + 'ocean': '#017b92', + 'marigold': '#fcc006', + 'muddy green': '#657432', + 'dull orange': '#d8863b', + 'steel': '#738595', + 'electric purple': '#aa23ff', + 'fluorescent green': '#08ff08', + 'yellowish brown': '#9b7a01', + 'blush': '#f29e8e', + 'soft green': '#6fc276', + 'bright orange': '#ff5b00', + 'lemon': '#fdff52', + 'purple grey': '#866f85', + 'acid green': '#8ffe09', + 'pale lavender': '#eecffe', + 'violet blue': '#510ac9', + 'light forest green': 
'#4f9153', + 'burnt red': '#9f2305', + 'khaki green': '#728639', + 'cerise': '#de0c62', + 'faded purple': '#916e99', + 'apricot': '#ffb16d', + 'dark olive green': '#3c4d03', + 'grey brown': '#7f7053', + 'green grey': '#77926f', + 'true blue': '#010fcc', + 'pale violet': '#ceaefa', + 'periwinkle blue': '#8f99fb', + 'light sky blue': '#c6fcff', + 'blurple': '#5539cc', + 'green brown': '#544e03', + 'bluegreen': '#017a79', + 'bright teal': '#01f9c6', + 'brownish yellow': '#c9b003', + 'pea soup': '#929901', + 'forest': '#0b5509', + 'barney purple': '#a00498', + 'ultramarine': '#2000b1', + 'purplish': '#94568c', + 'puke yellow': '#c2be0e', + 'bluish grey': '#748b97', + 'dark periwinkle': '#665fd1', + 'dark lilac': '#9c6da5', + 'reddish': '#c44240', + 'light maroon': '#a24857', + 'dusty purple': '#825f87', + 'terra cotta': '#c9643b', + 'avocado': '#90b134', + 'marine blue': '#01386a', + 'teal green': '#25a36f', + 'slate grey': '#59656d', + 'lighter green': '#75fd63', + 'electric green': '#21fc0d', + 'dusty blue': '#5a86ad', + 'golden yellow': '#fec615', + 'bright yellow': '#fffd01', + 'light lavender': '#dfc5fe', + 'umber': '#b26400', + 'poop': '#7f5e00', + 'dark peach': '#de7e5d', + 'jungle green': '#048243', + 'eggshell': '#ffffd4', + 'denim': '#3b638c', + 'yellow brown': '#b79400', + 'dull purple': '#84597e', + 'chocolate brown': '#411900', + 'wine red': '#7b0323', + 'neon blue': '#04d9ff', + 'dirty green': '#667e2c', + 'light tan': '#fbeeac', + 'ice blue': '#d7fffe', + 'cadet blue': '#4e7496', + 'dark mauve': '#874c62', + 'very light blue': '#d5ffff', + 'grey purple': '#826d8c', + 'pastel pink': '#ffbacd', + 'very light green': '#d1ffbd', + 'dark sky blue': '#448ee4', + 'evergreen': '#05472a', + 'dull pink': '#d5869d', + 'aubergine': '#3d0734', + 'mahogany': '#4a0100', + 'reddish orange': '#f8481c', + 'deep green': '#02590f', + 'vomit green': '#89a203', + 'purple pink': '#e03fd8', + 'dusty pink': '#d58a94', + 'faded green': '#7bb274', + 'camo green': '#526525', + 
'pinky purple': '#c94cbe', + 'pink purple': '#db4bda', + 'brownish red': '#9e3623', + 'dark rose': '#b5485d', + 'mud': '#735c12', + 'brownish': '#9c6d57', + 'emerald green': '#028f1e', + 'pale brown': '#b1916e', + 'dull blue': '#49759c', + 'burnt umber': '#a0450e', + 'medium green': '#39ad48', + 'clay': '#b66a50', + 'light aqua': '#8cffdb', + 'light olive green': '#a4be5c', + 'brownish orange': '#cb7723', + 'dark aqua': '#05696b', + 'purplish pink': '#ce5dae', + 'dark salmon': '#c85a53', + 'greenish grey': '#96ae8d', + 'jade': '#1fa774', + 'ugly green': '#7a9703', + 'dark beige': '#ac9362', + 'emerald': '#01a049', + 'pale red': '#d9544d', + 'light magenta': '#fa5ff7', + 'sky': '#82cafc', + 'light cyan': '#acfffc', + 'yellow orange': '#fcb001', + 'reddish purple': '#910951', + 'reddish pink': '#fe2c54', + 'orchid': '#c875c4', + 'dirty yellow': '#cdc50a', + 'orange red': '#fd411e', + 'deep red': '#9a0200', + 'orange brown': '#be6400', + 'cobalt blue': '#030aa7', + 'neon pink': '#fe019a', + 'rose pink': '#f7879a', + 'greyish purple': '#887191', + 'raspberry': '#b00149', + 'aqua green': '#12e193', + 'salmon pink': '#fe7b7c', + 'tangerine': '#ff9408', + 'brownish green': '#6a6e09', + 'red brown': '#8b2e16', + 'greenish brown': '#696112', + 'pumpkin': '#e17701', + 'pine green': '#0a481e', + 'charcoal': '#343837', + 'baby pink': '#ffb7ce', + 'cornflower': '#6a79f7', + 'blue violet': '#5d06e9', + 'chocolate': '#3d1c02', + 'greyish green': '#82a67d', + 'scarlet': '#be0119', + 'green yellow': '#c9ff27', + 'dark olive': '#373e02', + 'sienna': '#a9561e', + 'pastel purple': '#caa0ff', + 'terracotta': '#ca6641', + 'aqua blue': '#02d8e9', + 'sage green': '#88b378', + 'blood red': '#980002', + 'deep pink': '#cb0162', + 'grass': '#5cac2d', + 'moss': '#769958', + 'pastel blue': '#a2bffe', + 'bluish green': '#10a674', + 'green blue': '#06b48b', + 'dark tan': '#af884a', + 'greenish blue': '#0b8b87', + 'pale orange': '#ffa756', + 'vomit': '#a2a415', + 'forrest green': '#154406', + 
'dark lavender': '#856798', + 'dark violet': '#34013f', + 'purple blue': '#632de9', + 'dark cyan': '#0a888a', + 'olive drab': '#6f7632', + 'pinkish': '#d46a7e', + 'cobalt': '#1e488f', + 'neon purple': '#bc13fe', + 'light turquoise': '#7ef4cc', + 'apple green': '#76cd26', + 'dull green': '#74a662', + 'wine': '#80013f', + 'powder blue': '#b1d1fc', + 'off white': '#ffffe4', + 'electric blue': '#0652ff', + 'dark turquoise': '#045c5a', + 'blue purple': '#5729ce', + 'azure': '#069af3', + 'bright red': '#ff000d', + 'pinkish red': '#f10c45', + 'cornflower blue': '#5170d7', + 'light olive': '#acbf69', + 'grape': '#6c3461', + 'greyish blue': '#5e819d', + 'purplish blue': '#601ef9', + 'yellowish green': '#b0dd16', + 'greenish yellow': '#cdfd02', + 'medium blue': '#2c6fbb', + 'dusty rose': '#c0737a', + 'light violet': '#d6b4fc', + 'midnight blue': '#020035', + 'bluish purple': '#703be7', + 'red orange': '#fd3c06', + 'dark magenta': '#960056', + 'greenish': '#40a368', + 'ocean blue': '#03719c', + 'coral': '#fc5a50', + 'cream': '#ffffc2', + 'reddish brown': '#7f2b0a', + 'burnt sienna': '#b04e0f', + 'brick': '#a03623', + 'sage': '#87ae73', + 'grey green': '#789b73', + 'white': '#ffffff', + "robin's egg blue": '#98eff9', + 'moss green': '#658b38', + 'steel blue': '#5a7d9a', + 'eggplant': '#380835', + 'light yellow': '#fffe7a', + 'leaf green': '#5ca904', + 'light grey': '#d8dcd6', + 'puke': '#a5a502', + 'pinkish purple': '#d648d7', + 'sea blue': '#047495', + 'pale purple': '#b790d4', + 'slate blue': '#5b7c99', + 'blue grey': '#607c8e', + 'hunter green': '#0b4008', + 'fuchsia': '#ed0dd9', + 'crimson': '#8c000f', + 'pale yellow': '#ffff84', + 'ochre': '#bf9005', + 'mustard yellow': '#d2bd0a', + 'light red': '#ff474c', + 'cerulean': '#0485d1', + 'pale pink': '#ffcfdc', + 'deep blue': '#040273', + 'rust': '#a83c09', + 'light teal': '#90e4c1', + 'slate': '#516572', + 'goldenrod': '#fac205', + 'dark yellow': '#d5b60a', + 'dark grey': '#363737', + 'army green': '#4b5d16', + 'grey blue': 
'#6b8ba4', + 'seafoam': '#80f9ad', + 'puce': '#a57e52', + 'spring green': '#a9f971', + 'dark orange': '#c65102', + 'sand': '#e2ca76', + 'pastel green': '#b0ff9d', + 'mint': '#9ffeb0', + 'light orange': '#fdaa48', + 'bright pink': '#fe01b1', + 'chartreuse': '#c1f80a', + 'deep purple': '#36013f', + 'dark brown': '#341c02', + 'taupe': '#b9a281', + 'pea green': '#8eab12', + 'puke green': '#9aae07', + 'kelly green': '#02ab2e', + 'seafoam green': '#7af9ab', + 'blue green': '#137e6d', + 'khaki': '#aaa662', + 'burgundy': '#610023', + 'dark teal': '#014d4e', + 'brick red': '#8f1402', + 'royal purple': '#4b006e', + 'plum': '#580f41', + 'mint green': '#8fff9f', + 'gold': '#dbb40c', + 'baby blue': '#a2cffe', + 'yellow green': '#c0fb2d', + 'bright purple': '#be03fd', + 'dark red': '#840000', + 'pale blue': '#d0fefe', + 'grass green': '#3f9b0b', + 'navy': '#01153e', + 'aquamarine': '#04d8b2', + 'burnt orange': '#c04e01', + 'neon green': '#0cff0c', + 'bright blue': '#0165fc', + 'rose': '#cf6275', + 'light pink': '#ffd1df', + 'mustard': '#ceb301', + 'indigo': '#380282', + 'lime': '#aaff32', + 'sea green': '#53fca1', + 'periwinkle': '#8e82fe', + 'dark pink': '#cb416b', + 'olive green': '#677a04', + 'peach': '#ffb07c', + 'pale green': '#c7fdb5', + 'light brown': '#ad8150', + 'hot pink': '#ff028d', + 'black': '#000000', + 'lilac': '#cea2fd', + 'navy blue': '#001146', + 'royal blue': '#0504aa', + 'beige': '#e6daa6', + 'salmon': '#ff796c', + 'olive': '#6e750e', + 'maroon': '#650021', + 'bright green': '#01ff07', + 'dark purple': '#35063e', + 'mauve': '#ae7181', + 'forest green': '#06470c', + 'aqua': '#13eac9', + 'cyan': '#00ffff', + 'tan': '#d1b26f', + 'dark blue': '#00035b', + 'lavender': '#c79fef', + 'turquoise': '#06c2ac', + 'dark green': '#033500', + 'violet': '#9a0eea', + 'light purple': '#bf77f6', + 'lime green': '#89fe05', + 'grey': '#929591', + 'sky blue': '#75bbfd', + 'yellow': '#ffff14', + 'magenta': '#c20078', + 'light green': '#96f97b', + 'orange': '#f97306', + 'teal': 
'#029386', + 'light blue': '#95d0fc', + 'red': '#e50000', + 'brown': '#653700', + 'pink': '#ff81c0', + 'blue': '#0343df', + 'green': '#15b01a', + 'purple': '#7e1e9c'} + +# Normalize name to "xkcd:" to avoid name collisions. +XKCD_COLORS = {'xkcd:' + name: value for name, value in XKCD_COLORS.items()} + + +# https://drafts.csswg.org/css-color-4/#named-colors +CSS4_COLORS = { + 'aliceblue': '#F0F8FF', + 'antiquewhite': '#FAEBD7', + 'aqua': '#00FFFF', + 'aquamarine': '#7FFFD4', + 'azure': '#F0FFFF', + 'beige': '#F5F5DC', + 'bisque': '#FFE4C4', + 'black': '#000000', + 'blanchedalmond': '#FFEBCD', + 'blue': '#0000FF', + 'blueviolet': '#8A2BE2', + 'brown': '#A52A2A', + 'burlywood': '#DEB887', + 'cadetblue': '#5F9EA0', + 'chartreuse': '#7FFF00', + 'chocolate': '#D2691E', + 'coral': '#FF7F50', + 'cornflowerblue': '#6495ED', + 'cornsilk': '#FFF8DC', + 'crimson': '#DC143C', + 'cyan': '#00FFFF', + 'darkblue': '#00008B', + 'darkcyan': '#008B8B', + 'darkgoldenrod': '#B8860B', + 'darkgray': '#A9A9A9', + 'darkgreen': '#006400', + 'darkgrey': '#A9A9A9', + 'darkkhaki': '#BDB76B', + 'darkmagenta': '#8B008B', + 'darkolivegreen': '#556B2F', + 'darkorange': '#FF8C00', + 'darkorchid': '#9932CC', + 'darkred': '#8B0000', + 'darksalmon': '#E9967A', + 'darkseagreen': '#8FBC8F', + 'darkslateblue': '#483D8B', + 'darkslategray': '#2F4F4F', + 'darkslategrey': '#2F4F4F', + 'darkturquoise': '#00CED1', + 'darkviolet': '#9400D3', + 'deeppink': '#FF1493', + 'deepskyblue': '#00BFFF', + 'dimgray': '#696969', + 'dimgrey': '#696969', + 'dodgerblue': '#1E90FF', + 'firebrick': '#B22222', + 'floralwhite': '#FFFAF0', + 'forestgreen': '#228B22', + 'fuchsia': '#FF00FF', + 'gainsboro': '#DCDCDC', + 'ghostwhite': '#F8F8FF', + 'gold': '#FFD700', + 'goldenrod': '#DAA520', + 'gray': '#808080', + 'green': '#008000', + 'greenyellow': '#ADFF2F', + 'grey': '#808080', + 'honeydew': '#F0FFF0', + 'hotpink': '#FF69B4', + 'indianred': '#CD5C5C', + 'indigo': '#4B0082', + 'ivory': '#FFFFF0', + 'khaki': '#F0E68C', + 
'lavender': '#E6E6FA', + 'lavenderblush': '#FFF0F5', + 'lawngreen': '#7CFC00', + 'lemonchiffon': '#FFFACD', + 'lightblue': '#ADD8E6', + 'lightcoral': '#F08080', + 'lightcyan': '#E0FFFF', + 'lightgoldenrodyellow': '#FAFAD2', + 'lightgray': '#D3D3D3', + 'lightgreen': '#90EE90', + 'lightgrey': '#D3D3D3', + 'lightpink': '#FFB6C1', + 'lightsalmon': '#FFA07A', + 'lightseagreen': '#20B2AA', + 'lightskyblue': '#87CEFA', + 'lightslategray': '#778899', + 'lightslategrey': '#778899', + 'lightsteelblue': '#B0C4DE', + 'lightyellow': '#FFFFE0', + 'lime': '#00FF00', + 'limegreen': '#32CD32', + 'linen': '#FAF0E6', + 'magenta': '#FF00FF', + 'maroon': '#800000', + 'mediumaquamarine': '#66CDAA', + 'mediumblue': '#0000CD', + 'mediumorchid': '#BA55D3', + 'mediumpurple': '#9370DB', + 'mediumseagreen': '#3CB371', + 'mediumslateblue': '#7B68EE', + 'mediumspringgreen': '#00FA9A', + 'mediumturquoise': '#48D1CC', + 'mediumvioletred': '#C71585', + 'midnightblue': '#191970', + 'mintcream': '#F5FFFA', + 'mistyrose': '#FFE4E1', + 'moccasin': '#FFE4B5', + 'navajowhite': '#FFDEAD', + 'navy': '#000080', + 'oldlace': '#FDF5E6', + 'olive': '#808000', + 'olivedrab': '#6B8E23', + 'orange': '#FFA500', + 'orangered': '#FF4500', + 'orchid': '#DA70D6', + 'palegoldenrod': '#EEE8AA', + 'palegreen': '#98FB98', + 'paleturquoise': '#AFEEEE', + 'palevioletred': '#DB7093', + 'papayawhip': '#FFEFD5', + 'peachpuff': '#FFDAB9', + 'peru': '#CD853F', + 'pink': '#FFC0CB', + 'plum': '#DDA0DD', + 'powderblue': '#B0E0E6', + 'purple': '#800080', + 'rebeccapurple': '#663399', + 'red': '#FF0000', + 'rosybrown': '#BC8F8F', + 'royalblue': '#4169E1', + 'saddlebrown': '#8B4513', + 'salmon': '#FA8072', + 'sandybrown': '#F4A460', + 'seagreen': '#2E8B57', + 'seashell': '#FFF5EE', + 'sienna': '#A0522D', + 'silver': '#C0C0C0', + 'skyblue': '#87CEEB', + 'slateblue': '#6A5ACD', + 'slategray': '#708090', + 'slategrey': '#708090', + 'snow': '#FFFAFA', + 'springgreen': '#00FF7F', + 'steelblue': '#4682B4', + 'tan': '#D2B48C', + 'teal': 
'#008080', + 'thistle': '#D8BFD8', + 'tomato': '#FF6347', + 'turquoise': '#40E0D0', + 'violet': '#EE82EE', + 'wheat': '#F5DEB3', + 'white': '#FFFFFF', + 'whitesmoke': '#F5F5F5', + 'yellow': '#FFFF00', + 'yellowgreen': '#9ACD32'} diff --git a/venv/Lib/site-packages/matplotlib/_constrained_layout.py b/venv/Lib/site-packages/matplotlib/_constrained_layout.py new file mode 100644 index 000000000..90faebc15 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_constrained_layout.py @@ -0,0 +1,662 @@ +""" +Adjust subplot layouts so that there are no overlapping axes or axes +decorations. All axes decorations are dealt with (labels, ticks, titles, +ticklabels) and some dependent artists are also dealt with (colorbar, suptitle, +legend). + +Layout is done via `~matplotlib.gridspec`, with one constraint per gridspec, +so it is possible to have overlapping axes if the gridspecs overlap (i.e. +using `~matplotlib.gridspec.GridSpecFromSubplotSpec`). Axes placed using +``figure.subplots()`` or ``figure.add_subplots()`` will participate in the +layout. Axes manually placed via ``figure.add_axes()`` will not. + +See Tutorial: :doc:`/tutorials/intermediate/constrainedlayout_guide` +""" + +# Development Notes: + +# What gets a layoutbox: +# - figure +# - gridspec +# - subplotspec +# EITHER: +# - axes + pos for the axes (i.e. the total area taken by axis and +# the actual "position" argument that needs to be sent to +# ax.set_position.) +# - The axes layout box will also encompass the legend, and that is +# how legends get included (axes legends, not figure legends) +# - colorbars are siblings of the axes if they are single-axes +# colorbars +# OR: +# - a gridspec can be inside a subplotspec. +# - subplotspec +# EITHER: +# - axes... +# OR: +# - gridspec... with arbitrary nesting... +# - colorbars are siblings of the subplotspecs if they are multi-axes +# colorbars. +# - suptitle: +# - right now suptitles are just stacked atop everything else in figure. 
+# Could imagine suptitles being gridspec suptitles, but not implemented +# +# Todo: AnchoredOffsetbox connected to gridspecs or axes. This would +# be more general way to add extra-axes annotations. + +import logging + +import numpy as np + +import matplotlib.cbook as cbook +import matplotlib._layoutbox as layoutbox + +_log = logging.getLogger(__name__) + + +def _spans_overlap(span0, span1): + return span0.start in span1 or span1.start in span0 + + +def _axes_all_finite_sized(fig): + """Return whether all axes in the figure have a finite width and height.""" + for ax in fig.axes: + if ax._layoutbox is not None: + newpos = ax._poslayoutbox.get_rect() + if newpos[2] <= 0 or newpos[3] <= 0: + return False + return True + + +###################################################### +def do_constrained_layout(fig, renderer, h_pad, w_pad, + hspace=None, wspace=None): + """ + Do the constrained_layout. Called at draw time in + ``figure.constrained_layout()`` + + Parameters + ---------- + fig : Figure + is the ``figure`` instance to do the layout in. + + renderer : Renderer + the renderer to use. + + h_pad, w_pad : float + are in figure-normalized units, and are a padding around the axes + elements. + + hspace, wspace : float + are in fractions of the subplot sizes. + + """ + + # Steps: + # + # 1. get a list of unique gridspecs in this figure. Each gridspec will be + # constrained separately. + # 2. Check for gaps in the gridspecs. i.e. if not every axes slot in the + # gridspec has been filled. If empty, add a ghost axis that is made so + # that it cannot be seen (though visible=True). This is needed to make + # a blank spot in the layout. + # 3. Compare the tight_bbox of each axes to its `position`, and assume that + # the difference is the space needed by the elements around the edge of + # the axes (decorations) like the title, ticklabels, x-labels, etc. This + # can include legends who overspill the axes boundaries. + # 4. 
Constrain gridspec elements to line up: + # a) if colnum0 != colnumC, the two subplotspecs are stacked next to + # each other, with the appropriate order. + # b) if colnum0 == colnumC, line up the left or right side of the + # _poslayoutbox (depending if it is the min or max num that is equal). + # c) do the same for rows... + # 5. The above doesn't constrain relative sizes of the _poslayoutboxes + # at all, and indeed zero-size is a solution that the solver often finds + # more convenient than expanding the sizes. Right now the solution is to + # compare subplotspec sizes (i.e. drowsC and drows0) and constrain the + # larger _poslayoutbox to be larger than the ratio of the sizes. i.e. if + # drows0 > drowsC, then ax._poslayoutbox > axc._poslayoutbox*drowsC/drows0. + # This works fine *if* the decorations are similar between the axes. + # If the larger subplotspec has much larger axes decorations, then the + # constraint above is incorrect. + # + # We need the greater than in the above, in general, rather than an equals + # sign. Consider the case of the left column having 2 rows, and the right + # column having 1 row. We want the top and bottom of the _poslayoutboxes + # to line up. So that means if there are decorations on the left column + # axes they will be smaller than half as large as the right hand axis. + # + # This can break down if the decoration size for the right hand axis (the + # margins) is very large. There must be a math way to check for this case. + + invTransFig = fig.transFigure.inverted().transform_bbox + + # list of unique gridspecs that contain child axes: + gss = set() + for ax in fig.axes: + if hasattr(ax, 'get_subplotspec'): + gs = ax.get_subplotspec().get_gridspec() + if gs._layoutbox is not None: + gss.add(gs) + if len(gss) == 0: + cbook._warn_external('There are no gridspecs with layoutboxes. 
' + 'Possibly did not call parent GridSpec with the' + ' figure= keyword') + + if fig._layoutbox.constrained_layout_called < 1: + for gs in gss: + # fill in any empty gridspec slots w/ ghost axes... + _make_ghost_gridspec_slots(fig, gs) + + for _ in range(2): + # do the algorithm twice. This has to be done because decorators + # change size after the first re-position (i.e. x/yticklabels get + # larger/smaller). This second reposition tends to be much milder, + # so doing twice makes things work OK. + for ax in fig.axes: + _log.debug(ax._layoutbox) + if ax._layoutbox is not None: + # make margins for each layout box based on the size of + # the decorators. + _make_layout_margins(ax, renderer, h_pad, w_pad) + + # do layout for suptitle. + suptitle = fig._suptitle + do_suptitle = (suptitle is not None and + suptitle._layoutbox is not None and + suptitle.get_in_layout()) + if do_suptitle: + bbox = invTransFig( + suptitle.get_window_extent(renderer=renderer)) + height = bbox.height + if np.isfinite(height): + # reserve at top of figure include an h_pad above and below + suptitle._layoutbox.edit_height(height + h_pad * 2) + + # OK, the above lines up ax._poslayoutbox with ax._layoutbox + # now we need to + # 1) arrange the subplotspecs. We do it at this level because + # the subplotspecs are meant to contain other dependent axes + # like colorbars or legends. + # 2) line up the right and left side of the ax._poslayoutbox + # that have the same subplotspec maxes. + + if fig._layoutbox.constrained_layout_called < 1: + # arrange the subplotspecs... This is all done relative to each + # other. Some subplotspecs contain axes, and others contain + # gridspecs the ones that contain gridspecs are a set proportion + # of their parent gridspec. The ones that contain axes are + # not so constrained. + figlb = fig._layoutbox + for child in figlb.children: + if child._is_gridspec_layoutbox(): + # This routine makes all the subplot spec containers + # have the correct arrangement. 
It just stacks the + # subplot layoutboxes in the correct order... + _arrange_subplotspecs(child, hspace=hspace, wspace=wspace) + + for gs in gss: + _align_spines(fig, gs) + + fig._layoutbox.constrained_layout_called += 1 + fig._layoutbox.update_variables() + + # check if any axes collapsed to zero. If not, don't change positions: + if _axes_all_finite_sized(fig): + # Now set the position of the axes... + for ax in fig.axes: + if ax._layoutbox is not None: + newpos = ax._poslayoutbox.get_rect() + # Now set the new position. + # ax.set_position will zero out the layout for + # this axis, allowing users to hard-code the position, + # so this does the same w/o zeroing layout. + ax._set_position(newpos, which='original') + if do_suptitle: + newpos = suptitle._layoutbox.get_rect() + suptitle.set_y(1.0 - h_pad) + else: + if suptitle is not None and suptitle._layoutbox is not None: + suptitle._layoutbox.edit_height(0) + else: + cbook._warn_external('constrained_layout not applied. At least ' + 'one axes collapsed to zero width or height.') + + +def _make_ghost_gridspec_slots(fig, gs): + """ + Check for unoccupied gridspec slots and make ghost axes for these + slots... Do for each gs separately. This is a pretty big kludge + but shouldn't have too much ill effect. The worst is that + someone querying the figure will wonder why there are more + axes than they thought. + """ + nrows, ncols = gs.get_geometry() + hassubplotspec = np.zeros(nrows * ncols, dtype=bool) + axs = [] + for ax in fig.axes: + if (hasattr(ax, 'get_subplotspec') + and ax._layoutbox is not None + and ax.get_subplotspec().get_gridspec() == gs): + axs += [ax] + for ax in axs: + ss0 = ax.get_subplotspec() + hassubplotspec[ss0.num1:(ss0.num2 + 1)] = True + for nn, hss in enumerate(hassubplotspec): + if not hss: + # this gridspec slot doesn't have an axis so we + # make a "ghost". 
+ ax = fig.add_subplot(gs[nn]) + ax.set_visible(False) + + +def _make_layout_margins(ax, renderer, h_pad, w_pad): + """ + For each axes, make a margin between the *pos* layoutbox and the + *axes* layoutbox be a minimum size that can accommodate the + decorations on the axis. + """ + fig = ax.figure + invTransFig = fig.transFigure.inverted().transform_bbox + pos = ax.get_position(original=True) + try: + tightbbox = ax.get_tightbbox(renderer=renderer, for_layout_only=True) + except TypeError: + tightbbox = ax.get_tightbbox(renderer=renderer) + + if tightbbox is None: + bbox = pos + else: + bbox = invTransFig(tightbbox) + + # this can go wrong: + if not (np.isfinite(bbox.width) and np.isfinite(bbox.height)): + # just abort, this is likely a bad set of coordinates that + # is transitory... + return + # use stored h_pad if it exists + h_padt = ax._poslayoutbox.h_pad + if h_padt is None: + h_padt = h_pad + w_padt = ax._poslayoutbox.w_pad + if w_padt is None: + w_padt = w_pad + ax._poslayoutbox.edit_left_margin_min(-bbox.x0 + pos.x0 + w_padt) + ax._poslayoutbox.edit_right_margin_min(bbox.x1 - pos.x1 + w_padt) + ax._poslayoutbox.edit_bottom_margin_min(-bbox.y0 + pos.y0 + h_padt) + ax._poslayoutbox.edit_top_margin_min(bbox.y1-pos.y1+h_padt) + _log.debug('left %f', (-bbox.x0 + pos.x0 + w_pad)) + _log.debug('right %f', (bbox.x1 - pos.x1 + w_pad)) + _log.debug('bottom %f', (-bbox.y0 + pos.y0 + h_padt)) + _log.debug('bbox.y0 %f', bbox.y0) + _log.debug('pos.y0 %f', pos.y0) + # Sometimes its possible for the solver to collapse + # rather than expand axes, so they all have zero height + # or width. This stops that... It *should* have been + # taken into account w/ pref_width... 
+ if fig._layoutbox.constrained_layout_called < 1: + ax._poslayoutbox.constrain_height_min(20, strength='weak') + ax._poslayoutbox.constrain_width_min(20, strength='weak') + ax._layoutbox.constrain_height_min(20, strength='weak') + ax._layoutbox.constrain_width_min(20, strength='weak') + ax._poslayoutbox.constrain_top_margin(0, strength='weak') + ax._poslayoutbox.constrain_bottom_margin(0, strength='weak') + ax._poslayoutbox.constrain_right_margin(0, strength='weak') + ax._poslayoutbox.constrain_left_margin(0, strength='weak') + + +def _align_spines(fig, gs): + """ + - Align right/left and bottom/top spines of appropriate subplots. + - Compare size of subplotspec including height and width ratios + and make sure that the axes spines are at least as large + as they should be. + """ + # for each gridspec... + nrows, ncols = gs.get_geometry() + width_ratios = gs.get_width_ratios() + height_ratios = gs.get_height_ratios() + if width_ratios is None: + width_ratios = np.ones(ncols) + if height_ratios is None: + height_ratios = np.ones(nrows) + + # get axes in this gridspec.... + axs = [ax for ax in fig.axes + if (hasattr(ax, 'get_subplotspec') + and ax._layoutbox is not None + and ax.get_subplotspec().get_gridspec() == gs)] + rowspans = [] + colspans = [] + heights = [] + widths = [] + + for ax in axs: + ss0 = ax.get_subplotspec() + rowspan = ss0.rowspan + colspan = ss0.colspan + rowspans.append(rowspan) + colspans.append(colspan) + heights.append(sum(height_ratios[rowspan.start:rowspan.stop])) + widths.append(sum(width_ratios[colspan.start:colspan.stop])) + + for idx0, ax0 in enumerate(axs): + # Compare ax to all other axs: If the subplotspecs start (/stop) at + # the same column, then line up their left (/right) sides; likewise + # for rows/top/bottom. 
+ rowspan0 = rowspans[idx0] + colspan0 = colspans[idx0] + height0 = heights[idx0] + width0 = widths[idx0] + alignleft = False + alignright = False + alignbot = False + aligntop = False + alignheight = False + alignwidth = False + for idx1 in range(idx0 + 1, len(axs)): + ax1 = axs[idx1] + rowspan1 = rowspans[idx1] + colspan1 = colspans[idx1] + width1 = widths[idx1] + height1 = heights[idx1] + # Horizontally align axes spines if they have the same min or max: + if not alignleft and colspan0.start == colspan1.start: + _log.debug('same start columns; line up layoutbox lefts') + layoutbox.align([ax0._poslayoutbox, ax1._poslayoutbox], + 'left') + alignleft = True + if not alignright and colspan0.stop == colspan1.stop: + _log.debug('same stop columns; line up layoutbox rights') + layoutbox.align([ax0._poslayoutbox, ax1._poslayoutbox], + 'right') + alignright = True + # Vertically align axes spines if they have the same min or max: + if not aligntop and rowspan0.start == rowspan1.start: + _log.debug('same start rows; line up layoutbox tops') + layoutbox.align([ax0._poslayoutbox, ax1._poslayoutbox], + 'top') + aligntop = True + if not alignbot and rowspan0.stop == rowspan1.stop: + _log.debug('same stop rows; line up layoutbox bottoms') + layoutbox.align([ax0._poslayoutbox, ax1._poslayoutbox], + 'bottom') + alignbot = True + + # Now we make the widths and heights of position boxes + # similar. (i.e the spine locations) + # This allows vertically stacked subplots to have different sizes + # if they occupy different amounts of the gridspec, e.g. if + # gs = gridspec.GridSpec(3, 1) + # ax0 = gs[0, :] + # ax1 = gs[1:, :] + # then len(rowspan0) = 1, and len(rowspan1) = 2, + # and ax1 should be at least twice as large as ax0. + # But it can be more than twice as large because + # it needs less room for the labeling. + + # For heights, do it if the subplots share a column. 
+ if not alignheight and len(rowspan0) == len(rowspan1): + ax0._poslayoutbox.constrain_height( + ax1._poslayoutbox.height * height0 / height1) + alignheight = True + elif _spans_overlap(colspan0, colspan1): + if height0 > height1: + ax0._poslayoutbox.constrain_height_min( + ax1._poslayoutbox.height * height0 / height1) + elif height0 < height1: + ax1._poslayoutbox.constrain_height_min( + ax0._poslayoutbox.height * height1 / height0) + # For widths, do it if the subplots share a row. + if not alignwidth and len(colspan0) == len(colspan1): + ax0._poslayoutbox.constrain_width( + ax1._poslayoutbox.width * width0 / width1) + alignwidth = True + elif _spans_overlap(rowspan0, rowspan1): + if width0 > width1: + ax0._poslayoutbox.constrain_width_min( + ax1._poslayoutbox.width * width0 / width1) + elif width0 < width1: + ax1._poslayoutbox.constrain_width_min( + ax0._poslayoutbox.width * width1 / width0) + + +def _arrange_subplotspecs(gs, hspace=0, wspace=0): + """Recursively arrange the subplotspec children of the given gridspec.""" + sschildren = [] + for child in gs.children: + if child._is_subplotspec_layoutbox(): + for child2 in child.children: + # check for gridspec children... + if child2._is_gridspec_layoutbox(): + _arrange_subplotspecs(child2, hspace=hspace, wspace=wspace) + sschildren += [child] + # now arrange the subplots... + for child0 in sschildren: + ss0 = child0.artist + nrows, ncols = ss0.get_gridspec().get_geometry() + rowspan0 = ss0.rowspan + colspan0 = ss0.colspan + sschildren = sschildren[1:] + for child1 in sschildren: + ss1 = child1.artist + rowspan1 = ss1.rowspan + colspan1 = ss1.colspan + # OK, this tells us the relative layout of child0 with child1. 
+ pad = wspace / ncols + if colspan0.stop <= colspan1.start: + layoutbox.hstack([ss0._layoutbox, ss1._layoutbox], padding=pad) + if colspan1.stop <= colspan0.start: + layoutbox.hstack([ss1._layoutbox, ss0._layoutbox], padding=pad) + # vertical alignment + pad = hspace / nrows + if rowspan0.stop <= rowspan1.start: + layoutbox.vstack([ss0._layoutbox, ss1._layoutbox], padding=pad) + if rowspan1.stop <= rowspan0.start: + layoutbox.vstack([ss1._layoutbox, ss0._layoutbox], padding=pad) + + +def layoutcolorbarsingle(ax, cax, shrink, aspect, location, pad=0.05): + """ + Do the layout for a colorbar, to not overly pollute colorbar.py + + *pad* is in fraction of the original axis size. + """ + axlb = ax._layoutbox + axpos = ax._poslayoutbox + axsslb = ax.get_subplotspec()._layoutbox + lb = layoutbox.LayoutBox( + parent=axsslb, + name=axsslb.name + '.cbar', + artist=cax) + + if location in ('left', 'right'): + lbpos = layoutbox.LayoutBox( + parent=lb, + name=lb.name + '.pos', + tightwidth=False, + pos=True, + subplot=False, + artist=cax) + + if location == 'right': + # arrange to right of parent axis + layoutbox.hstack([axlb, lb], padding=pad * axlb.width, + strength='strong') + else: + layoutbox.hstack([lb, axlb], padding=pad * axlb.width) + # constrain the height and center... + layoutbox.match_heights([axpos, lbpos], [1, shrink]) + layoutbox.align([axpos, lbpos], 'v_center') + # set the width of the pos box + lbpos.constrain_width(shrink * axpos.height * (1/aspect), + strength='strong') + elif location in ('bottom', 'top'): + lbpos = layoutbox.LayoutBox( + parent=lb, + name=lb.name + '.pos', + tightheight=True, + pos=True, + subplot=False, + artist=cax) + + if location == 'bottom': + layoutbox.vstack([axlb, lb], padding=pad * axlb.height) + else: + layoutbox.vstack([lb, axlb], padding=pad * axlb.height) + # constrain the height and center... 
+ layoutbox.match_widths([axpos, lbpos], + [1, shrink], strength='strong') + layoutbox.align([axpos, lbpos], 'h_center') + # set the height of the pos box + lbpos.constrain_height(axpos.width * aspect * shrink, + strength='medium') + + return lb, lbpos + + +def _getmaxminrowcolumn(axs): + """ + Find axes covering the first and last rows and columns of a list of axes. + """ + startrow = startcol = np.inf + stoprow = stopcol = -np.inf + startax_row = startax_col = stopax_row = stopax_col = None + for ax in axs: + subspec = ax.get_subplotspec() + if subspec.rowspan.start < startrow: + startrow = subspec.rowspan.start + startax_row = ax + if subspec.rowspan.stop > stoprow: + stoprow = subspec.rowspan.stop + stopax_row = ax + if subspec.colspan.start < startcol: + startcol = subspec.colspan.start + startax_col = ax + if subspec.colspan.stop > stopcol: + stopcol = subspec.colspan.stop + stopax_col = ax + return (startrow, stoprow - 1, startax_row, stopax_row, + startcol, stopcol - 1, startax_col, stopax_col) + + +def layoutcolorbargridspec(parents, cax, shrink, aspect, location, pad=0.05): + """ + Do the layout for a colorbar, to not overly pollute colorbar.py + + *pad* is in fraction of the original axis size. + """ + + gs = parents[0].get_subplotspec().get_gridspec() + # parent layout box.... + gslb = gs._layoutbox + + lb = layoutbox.LayoutBox(parent=gslb.parent, + name=gslb.parent.name + '.cbar', + artist=cax) + # figure out the row and column extent of the parents. + (minrow, maxrow, minax_row, maxax_row, + mincol, maxcol, minax_col, maxax_col) = _getmaxminrowcolumn(parents) + + if location in ('left', 'right'): + lbpos = layoutbox.LayoutBox( + parent=lb, + name=lb.name + '.pos', + tightwidth=False, + pos=True, + subplot=False, + artist=cax) + for ax in parents: + if location == 'right': + order = [ax._layoutbox, lb] + else: + order = [lb, ax._layoutbox] + layoutbox.hstack(order, padding=pad * gslb.width, + strength='strong') + # constrain the height and center... 
+ # This isn't quite right. We'd like the colorbar + # pos to line up w/ the axes poss, not the size of the + # gs. + + # Horizontal Layout: need to check all the axes in this gridspec + for ch in gslb.children: + subspec = ch.artist + if location == 'right': + if subspec.colspan.stop - 1 <= maxcol: + order = [subspec._layoutbox, lb] + # arrange to right of the parents + elif subspec.colspan.start > maxcol: + order = [lb, subspec._layoutbox] + elif location == 'left': + if subspec.colspan.start >= mincol: + order = [lb, subspec._layoutbox] + elif subspec.colspan.stop - 1 < mincol: + order = [subspec._layoutbox, lb] + layoutbox.hstack(order, padding=pad * gslb.width, + strength='strong') + + # Vertical layout: + maxposlb = minax_row._poslayoutbox + minposlb = maxax_row._poslayoutbox + # now we want the height of the colorbar pos to be + # set by the top and bottom of the min/max axes... + # bottom top + # b t + # h = (top-bottom)*shrink + # b = bottom + (top-bottom - h) / 2. + lbpos.constrain_height( + (maxposlb.top - minposlb.bottom) * + shrink, strength='strong') + lbpos.constrain_bottom( + (maxposlb.top - minposlb.bottom) * + (1 - shrink)/2 + minposlb.bottom, + strength='strong') + + # set the width of the pos box + lbpos.constrain_width(lbpos.height * (shrink / aspect), + strength='strong') + elif location in ('bottom', 'top'): + lbpos = layoutbox.LayoutBox( + parent=lb, + name=lb.name + '.pos', + tightheight=True, + pos=True, + subplot=False, + artist=cax) + + for ax in parents: + if location == 'bottom': + order = [ax._layoutbox, lb] + else: + order = [lb, ax._layoutbox] + layoutbox.vstack(order, padding=pad * gslb.width, + strength='strong') + + # Vertical Layout: need to check all the axes in this gridspec + for ch in gslb.children: + subspec = ch.artist + if location == 'bottom': + if subspec.rowspan.stop - 1 <= minrow: + order = [subspec._layoutbox, lb] + elif subspec.rowspan.start > maxrow: + order = [lb, subspec._layoutbox] + elif location == 'top': + if 
subspec.rowspan.stop - 1 < minrow: + order = [subspec._layoutbox, lb] + elif subspec.rowspan.start >= maxrow: + order = [lb, subspec._layoutbox] + layoutbox.vstack(order, padding=pad * gslb.width, + strength='strong') + + # Do horizontal layout... + maxposlb = maxax_col._poslayoutbox + minposlb = minax_col._poslayoutbox + lbpos.constrain_width((maxposlb.right - minposlb.left) * + shrink) + lbpos.constrain_left( + (maxposlb.right - minposlb.left) * + (1-shrink)/2 + minposlb.left) + # set the height of the pos box + lbpos.constrain_height(lbpos.width * shrink * aspect, + strength='medium') + + return lb, lbpos diff --git a/venv/Lib/site-packages/matplotlib/_contour.cp36-win32.pyd b/venv/Lib/site-packages/matplotlib/_contour.cp36-win32.pyd new file mode 100644 index 000000000..de80ca16e Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/_contour.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/matplotlib/_image.cp36-win32.pyd b/venv/Lib/site-packages/matplotlib/_image.cp36-win32.pyd new file mode 100644 index 000000000..d9e275cba Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/_image.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/matplotlib/_internal_utils.py b/venv/Lib/site-packages/matplotlib/_internal_utils.py new file mode 100644 index 000000000..0223aa593 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_internal_utils.py @@ -0,0 +1,64 @@ +""" +Internal debugging utilities, that are not expected to be used in the rest of +the codebase. + +WARNING: Code in this module may change without prior notice! +""" + +from io import StringIO +from pathlib import Path +import subprocess + +from matplotlib.transforms import TransformNode + + +def graphviz_dump_transform(transform, dest, *, highlight=None): + """ + Generate a graphical representation of the transform tree for *transform* + using the :program:`dot` program (which this function depends on). The + output format (png, dot, etc.) 
is determined from the suffix of *dest*. + + Parameters + ---------- + transform : `~matplotlib.transform.Transform` + The represented transform. + dest : str + Output filename. The extension must be one of the formats supported + by :program:`dot`, e.g. png, svg, dot, ... + (see https://www.graphviz.org/doc/info/output.html). + highlight : list of `~matplotlib.transform.Transform` or None + The transforms in the tree to be drawn in bold. + If *None*, *transform* is highlighted. + """ + + if highlight is None: + highlight = [transform] + seen = set() + + def recurse(root, buf): + if id(root) in seen: + return + seen.add(id(root)) + props = {} + label = type(root).__name__ + if root._invalid: + label = f'[{label}]' + if root in highlight: + props['style'] = 'bold' + props['shape'] = 'box' + props['label'] = '"%s"' % label + props = ' '.join(map('{0[0]}={0[1]}'.format, props.items())) + buf.write(f'{id(root)} [{props}];\n') + for key, val in vars(root).items(): + if isinstance(val, TransformNode) and id(root) in val._parents: + buf.write(f'"{id(root)}" -> "{id(val)}" ' + f'[label="{key}", fontsize=10];\n') + recurse(val, buf) + + buf = StringIO() + buf.write('digraph G {\n') + recurse(transform, buf) + buf.write('}\n') + subprocess.run( + ['dot', '-T', Path(dest).suffix[1:], '-o', dest], + input=buf.getvalue().encode('utf-8'), check=True) diff --git a/venv/Lib/site-packages/matplotlib/_layoutbox.py b/venv/Lib/site-packages/matplotlib/_layoutbox.py new file mode 100644 index 000000000..0afa2e482 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_layoutbox.py @@ -0,0 +1,695 @@ +""" + +Conventions: + +"constrain_x" means to constrain the variable with either +another kiwisolver variable, or a float. i.e. `constrain_width(0.2)` +will set a constraint that the width has to be 0.2 and this constraint is +permanent - i.e. it will not be removed if it becomes obsolete. + +"edit_x" means to set x to a value (just a float), and that this value can +change. 
So `edit_width(0.2)` will set width to be 0.2, but `edit_width(0.3)` +will allow it to change to 0.3 later. Note that these values are still just +"suggestions" in `kiwisolver` parlance, and could be over-ridden by +other constrains. + +""" + +import itertools +import kiwisolver as kiwi +import logging +import numpy as np + + +_log = logging.getLogger(__name__) + + +# renderers can be complicated +def get_renderer(fig): + if fig._cachedRenderer: + renderer = fig._cachedRenderer + else: + canvas = fig.canvas + if canvas and hasattr(canvas, "get_renderer"): + renderer = canvas.get_renderer() + else: + # not sure if this can happen + # seems to with PDF... + _log.info("constrained_layout : falling back to Agg renderer") + from matplotlib.backends.backend_agg import FigureCanvasAgg + canvas = FigureCanvasAgg(fig) + renderer = canvas.get_renderer() + + return renderer + + +class LayoutBox: + """ + Basic rectangle representation using kiwi solver variables + """ + + def __init__(self, parent=None, name='', tightwidth=False, + tightheight=False, artist=None, + lower_left=(0, 0), upper_right=(1, 1), pos=False, + subplot=False, h_pad=None, w_pad=None): + Variable = kiwi.Variable + self.parent = parent + self.name = name + sn = self.name + '_' + if parent is None: + self.solver = kiwi.Solver() + self.constrained_layout_called = 0 + else: + self.solver = parent.solver + self.constrained_layout_called = None + # parent wants to know about this child! + parent.add_child(self) + # keep track of artist associated w/ this layout. Can be none + self.artist = artist + # keep track if this box is supposed to be a pos that is constrained + # by the parent. + self.pos = pos + # keep track of whether we need to match this subplot up with others. 
+ self.subplot = subplot + + self.top = Variable(sn + 'top') + self.bottom = Variable(sn + 'bottom') + self.left = Variable(sn + 'left') + self.right = Variable(sn + 'right') + + self.width = Variable(sn + 'width') + self.height = Variable(sn + 'height') + self.h_center = Variable(sn + 'h_center') + self.v_center = Variable(sn + 'v_center') + + self.min_width = Variable(sn + 'min_width') + self.min_height = Variable(sn + 'min_height') + self.pref_width = Variable(sn + 'pref_width') + self.pref_height = Variable(sn + 'pref_height') + # margins are only used for axes-position layout boxes. maybe should + # be a separate subclass: + self.left_margin = Variable(sn + 'left_margin') + self.right_margin = Variable(sn + 'right_margin') + self.bottom_margin = Variable(sn + 'bottom_margin') + self.top_margin = Variable(sn + 'top_margin') + # mins + self.left_margin_min = Variable(sn + 'left_margin_min') + self.right_margin_min = Variable(sn + 'right_margin_min') + self.bottom_margin_min = Variable(sn + 'bottom_margin_min') + self.top_margin_min = Variable(sn + 'top_margin_min') + + right, top = upper_right + left, bottom = lower_left + self.tightheight = tightheight + self.tightwidth = tightwidth + self.add_constraints() + self.children = [] + self.subplotspec = None + if self.pos: + self.constrain_margins() + self.h_pad = h_pad + self.w_pad = w_pad + + def constrain_margins(self): + """ + Only do this for pos. This sets a variable distance + margin between the position of the axes and the outer edge of + the axes. + + Margins are variable because they change with the figure size. + + Margin minimums are set to make room for axes decorations. However, + the margins can be larger if we are mathicng the position size to + other axes. 
+ """ + sol = self.solver + + # left + if not sol.hasEditVariable(self.left_margin_min): + sol.addEditVariable(self.left_margin_min, 'strong') + sol.suggestValue(self.left_margin_min, 0.0001) + c = (self.left_margin == self.left - self.parent.left) + self.solver.addConstraint(c | 'required') + c = (self.left_margin >= self.left_margin_min) + self.solver.addConstraint(c | 'strong') + + # right + if not sol.hasEditVariable(self.right_margin_min): + sol.addEditVariable(self.right_margin_min, 'strong') + sol.suggestValue(self.right_margin_min, 0.0001) + c = (self.right_margin == self.parent.right - self.right) + self.solver.addConstraint(c | 'required') + c = (self.right_margin >= self.right_margin_min) + self.solver.addConstraint(c | 'required') + # bottom + if not sol.hasEditVariable(self.bottom_margin_min): + sol.addEditVariable(self.bottom_margin_min, 'strong') + sol.suggestValue(self.bottom_margin_min, 0.0001) + c = (self.bottom_margin == self.bottom - self.parent.bottom) + self.solver.addConstraint(c | 'required') + c = (self.bottom_margin >= self.bottom_margin_min) + self.solver.addConstraint(c | 'required') + # top + if not sol.hasEditVariable(self.top_margin_min): + sol.addEditVariable(self.top_margin_min, 'strong') + sol.suggestValue(self.top_margin_min, 0.0001) + c = (self.top_margin == self.parent.top - self.top) + self.solver.addConstraint(c | 'required') + c = (self.top_margin >= self.top_margin_min) + self.solver.addConstraint(c | 'required') + + def add_child(self, child): + self.children += [child] + + def remove_child(self, child): + try: + self.children.remove(child) + except ValueError: + _log.info("Tried to remove child that doesn't belong to parent") + + def add_constraints(self): + sol = self.solver + # never let width and height go negative. 
+ for i in [self.min_width, self.min_height]: + sol.addEditVariable(i, 1e9) + sol.suggestValue(i, 0.0) + # define relation ships between things thing width and right and left + self.hard_constraints() + # self.soft_constraints() + if self.parent: + self.parent_constrain() + # sol.updateVariables() + + def parent_constrain(self): + parent = self.parent + hc = [self.left >= parent.left, + self.bottom >= parent.bottom, + self.top <= parent.top, + self.right <= parent.right] + for c in hc: + self.solver.addConstraint(c | 'required') + + def hard_constraints(self): + hc = [self.width == self.right - self.left, + self.height == self.top - self.bottom, + self.h_center == (self.left + self.right) * 0.5, + self.v_center == (self.top + self.bottom) * 0.5, + self.width >= self.min_width, + self.height >= self.min_height] + for c in hc: + self.solver.addConstraint(c | 'required') + + def soft_constraints(self): + sol = self.solver + if self.tightwidth: + suggest = 0. + else: + suggest = 20. + c = (self.pref_width == suggest) + for i in c: + sol.addConstraint(i | 'required') + if self.tightheight: + suggest = 0. + else: + suggest = 20. 
+ c = (self.pref_height == suggest) + for i in c: + sol.addConstraint(i | 'required') + + c = [(self.width >= suggest), + (self.height >= suggest)] + for i in c: + sol.addConstraint(i | 150000) + + def set_parent(self, parent): + """Replace the parent of this with the new parent.""" + self.parent = parent + self.parent_constrain() + + def constrain_geometry(self, left, bottom, right, top, strength='strong'): + hc = [self.left == left, + self.right == right, + self.bottom == bottom, + self.top == top] + for c in hc: + self.solver.addConstraint(c | strength) + # self.solver.updateVariables() + + def constrain_same(self, other, strength='strong'): + """ + Make the layoutbox have same position as other layoutbox + """ + hc = [self.left == other.left, + self.right == other.right, + self.bottom == other.bottom, + self.top == other.top] + for c in hc: + self.solver.addConstraint(c | strength) + + def constrain_left_margin(self, margin, strength='strong'): + c = (self.left == self.parent.left + margin) + self.solver.addConstraint(c | strength) + + def edit_left_margin_min(self, margin): + self.solver.suggestValue(self.left_margin_min, margin) + + def constrain_right_margin(self, margin, strength='strong'): + c = (self.right == self.parent.right - margin) + self.solver.addConstraint(c | strength) + + def edit_right_margin_min(self, margin): + self.solver.suggestValue(self.right_margin_min, margin) + + def constrain_bottom_margin(self, margin, strength='strong'): + c = (self.bottom == self.parent.bottom + margin) + self.solver.addConstraint(c | strength) + + def edit_bottom_margin_min(self, margin): + self.solver.suggestValue(self.bottom_margin_min, margin) + + def constrain_top_margin(self, margin, strength='strong'): + c = (self.top == self.parent.top - margin) + self.solver.addConstraint(c | strength) + + def edit_top_margin_min(self, margin): + self.solver.suggestValue(self.top_margin_min, margin) + + def get_rect(self): + return (self.left.value(), self.bottom.value(), 
+ self.width.value(), self.height.value()) + + def update_variables(self): + """ + Update *all* the variables that are part of the solver this LayoutBox + is created with. + """ + self.solver.updateVariables() + + def edit_height(self, height, strength='strong'): + """ + Set the height of the layout box. + + This is done as an editable variable so that the value can change + due to resizing. + """ + sol = self.solver + for i in [self.height]: + if not sol.hasEditVariable(i): + sol.addEditVariable(i, strength) + sol.suggestValue(self.height, height) + + def constrain_height(self, height, strength='strong'): + """ + Constrain the height of the layout box. height is + either a float or a layoutbox.height. + """ + c = (self.height == height) + self.solver.addConstraint(c | strength) + + def constrain_height_min(self, height, strength='strong'): + c = (self.height >= height) + self.solver.addConstraint(c | strength) + + def edit_width(self, width, strength='strong'): + sol = self.solver + for i in [self.width]: + if not sol.hasEditVariable(i): + sol.addEditVariable(i, strength) + sol.suggestValue(self.width, width) + + def constrain_width(self, width, strength='strong'): + """ + Constrain the width of the layout box. *width* is + either a float or a layoutbox.width. 
+ """ + c = (self.width == width) + self.solver.addConstraint(c | strength) + + def constrain_width_min(self, width, strength='strong'): + c = (self.width >= width) + self.solver.addConstraint(c | strength) + + def constrain_left(self, left, strength='strong'): + c = (self.left == left) + self.solver.addConstraint(c | strength) + + def constrain_bottom(self, bottom, strength='strong'): + c = (self.bottom == bottom) + self.solver.addConstraint(c | strength) + + def constrain_right(self, right, strength='strong'): + c = (self.right == right) + self.solver.addConstraint(c | strength) + + def constrain_top(self, top, strength='strong'): + c = (self.top == top) + self.solver.addConstraint(c | strength) + + def _is_subplotspec_layoutbox(self): + """ + Helper to check if this layoutbox is the layoutbox of a subplotspec. + """ + name = self.name.split('.')[-1] + return name[:2] == 'ss' + + def _is_gridspec_layoutbox(self): + """ + Helper to check if this layoutbox is the layoutbox of a gridspec. + """ + name = self.name.split('.')[-1] + return name[:8] == 'gridspec' + + def find_child_subplots(self): + """ + Find children of this layout box that are subplots. We want to line + poss up, and this is an easy way to find them all. + """ + if self.subplot: + subplots = [self] + else: + subplots = [] + for child in self.children: + subplots += child.find_child_subplots() + return subplots + + def layout_from_subplotspec(self, subspec, + name='', artist=None, pos=False): + """ + Make a layout box from a subplotspec. The layout box is + constrained to be a fraction of the width/height of the parent, + and be a fraction of the parent width/height from the left/bottom + of the parent. Therefore the parent can move around and the + layout for the subplot spec should move with it. + + The parent is *usually* the gridspec that made the subplotspec.?? 
+ """ + lb = LayoutBox(parent=self, name=name, artist=artist, pos=pos) + gs = subspec.get_gridspec() + nrows, ncols = gs.get_geometry() + parent = self.parent + + # OK, now, we want to set the position of this subplotspec + # based on its subplotspec parameters. The new gridspec will inherit + # from gridspec. prob should be new method in gridspec + left = 0.0 + right = 1.0 + bottom = 0.0 + top = 1.0 + totWidth = right-left + totHeight = top-bottom + hspace = 0. + wspace = 0. + + # calculate accumulated heights of columns + cellH = totHeight / (nrows + hspace * (nrows - 1)) + sepH = hspace * cellH + + if gs._row_height_ratios is not None: + netHeight = cellH * nrows + tr = sum(gs._row_height_ratios) + cellHeights = [netHeight * r / tr for r in gs._row_height_ratios] + else: + cellHeights = [cellH] * nrows + + sepHeights = [0] + ([sepH] * (nrows - 1)) + cellHs = np.cumsum(np.column_stack([sepHeights, cellHeights]).flat) + + # calculate accumulated widths of rows + cellW = totWidth / (ncols + wspace * (ncols - 1)) + sepW = wspace * cellW + + if gs._col_width_ratios is not None: + netWidth = cellW * ncols + tr = sum(gs._col_width_ratios) + cellWidths = [netWidth * r / tr for r in gs._col_width_ratios] + else: + cellWidths = [cellW] * ncols + + sepWidths = [0] + ([sepW] * (ncols - 1)) + cellWs = np.cumsum(np.column_stack([sepWidths, cellWidths]).flat) + + figTops = [top - cellHs[2 * rowNum] for rowNum in range(nrows)] + figBottoms = [top - cellHs[2 * rowNum + 1] for rowNum in range(nrows)] + figLefts = [left + cellWs[2 * colNum] for colNum in range(ncols)] + figRights = [left + cellWs[2 * colNum + 1] for colNum in range(ncols)] + + rowNum1, colNum1 = divmod(subspec.num1, ncols) + rowNum2, colNum2 = divmod(subspec.num2, ncols) + figBottom = min(figBottoms[rowNum1], figBottoms[rowNum2]) + figTop = max(figTops[rowNum1], figTops[rowNum2]) + figLeft = min(figLefts[colNum1], figLefts[colNum2]) + figRight = max(figRights[colNum1], figRights[colNum2]) + + # These are numbers 
relative to (0, 0, 1, 1). Need to constrain + # relative to parent. + + width = figRight - figLeft + height = figTop - figBottom + parent = self.parent + cs = [self.left == parent.left + parent.width * figLeft, + self.bottom == parent.bottom + parent.height * figBottom, + self.width == parent.width * width, + self.height == parent.height * height] + for c in cs: + self.solver.addConstraint(c | 'required') + + return lb + + def __repr__(self): + return (f'LayoutBox: {self.name:25s}, ' + f'(left: {self.left.value():1.3f}) ' + f'(bot: {self.bottom.value():1.3f}) ' + f'(right: {self.right.value():1.3f}) ' + f'(top: {self.top.value():1.3f})') + + +# Utility functions that act on layoutboxes... +def hstack(boxes, padding=0, strength='strong'): + """ + Stack LayoutBox instances from left to right. + *padding* is in figure-relative units. + """ + + for i in range(1, len(boxes)): + c = (boxes[i-1].right + padding <= boxes[i].left) + boxes[i].solver.addConstraint(c | strength) + + +def hpack(boxes, padding=0, strength='strong'): + """Stack LayoutBox instances from left to right.""" + + for i in range(1, len(boxes)): + c = (boxes[i-1].right + padding == boxes[i].left) + boxes[i].solver.addConstraint(c | strength) + + +def vstack(boxes, padding=0, strength='strong'): + """Stack LayoutBox instances from top to bottom.""" + + for i in range(1, len(boxes)): + c = (boxes[i-1].bottom - padding >= boxes[i].top) + boxes[i].solver.addConstraint(c | strength) + + +def vpack(boxes, padding=0, strength='strong'): + """Stack LayoutBox instances from top to bottom.""" + + for i in range(1, len(boxes)): + c = (boxes[i-1].bottom - padding >= boxes[i].top) + boxes[i].solver.addConstraint(c | strength) + + +def match_heights(boxes, height_ratios=None, strength='medium'): + """Stack LayoutBox instances from top to bottom.""" + + if height_ratios is None: + height_ratios = np.ones(len(boxes)) + for i in range(1, len(boxes)): + c = (boxes[i-1].height == + 
boxes[i].height*height_ratios[i-1]/height_ratios[i]) + boxes[i].solver.addConstraint(c | strength) + + +def match_widths(boxes, width_ratios=None, strength='medium'): + """Stack LayoutBox instances from top to bottom.""" + + if width_ratios is None: + width_ratios = np.ones(len(boxes)) + for i in range(1, len(boxes)): + c = (boxes[i-1].width == + boxes[i].width*width_ratios[i-1]/width_ratios[i]) + boxes[i].solver.addConstraint(c | strength) + + +def vstackeq(boxes, padding=0, height_ratios=None): + vstack(boxes, padding=padding) + match_heights(boxes, height_ratios=height_ratios) + + +def hstackeq(boxes, padding=0, width_ratios=None): + hstack(boxes, padding=padding) + match_widths(boxes, width_ratios=width_ratios) + + +def align(boxes, attr, strength='strong'): + cons = [] + for box in boxes[1:]: + cons = (getattr(boxes[0], attr) == getattr(box, attr)) + boxes[0].solver.addConstraint(cons | strength) + + +def match_top_margins(boxes, levels=1): + box0 = boxes[0] + top0 = box0 + for n in range(levels): + top0 = top0.parent + for box in boxes[1:]: + topb = box + for n in range(levels): + topb = topb.parent + c = (box0.top-top0.top == box.top-topb.top) + box0.solver.addConstraint(c | 'strong') + + +def match_bottom_margins(boxes, levels=1): + box0 = boxes[0] + top0 = box0 + for n in range(levels): + top0 = top0.parent + for box in boxes[1:]: + topb = box + for n in range(levels): + topb = topb.parent + c = (box0.bottom-top0.bottom == box.bottom-topb.bottom) + box0.solver.addConstraint(c | 'strong') + + +def match_left_margins(boxes, levels=1): + box0 = boxes[0] + top0 = box0 + for n in range(levels): + top0 = top0.parent + for box in boxes[1:]: + topb = box + for n in range(levels): + topb = topb.parent + c = (box0.left-top0.left == box.left-topb.left) + box0.solver.addConstraint(c | 'strong') + + +def match_right_margins(boxes, levels=1): + box0 = boxes[0] + top0 = box0 + for n in range(levels): + top0 = top0.parent + for box in boxes[1:]: + topb = box + for n in 
range(levels): + topb = topb.parent + c = (box0.right-top0.right == box.right-topb.right) + box0.solver.addConstraint(c | 'strong') + + +def match_width_margins(boxes, levels=1): + match_left_margins(boxes, levels=levels) + match_right_margins(boxes, levels=levels) + + +def match_height_margins(boxes, levels=1): + match_top_margins(boxes, levels=levels) + match_bottom_margins(boxes, levels=levels) + + +def match_margins(boxes, levels=1): + match_width_margins(boxes, levels=levels) + match_height_margins(boxes, levels=levels) + + +_layoutboxobjnum = itertools.count() + + +def seq_id(): + """Generate a short sequential id for layoutbox objects.""" + return '%06d' % next(_layoutboxobjnum) + + +def print_children(lb): + """Print the children of the layoutbox.""" + print(lb) + for child in lb.children: + print_children(child) + + +def nonetree(lb): + """ + Make all elements in this tree None, signalling not to do any more layout. + """ + if lb is not None: + if lb.parent is None: + # Clear the solver. Hopefully this garbage collects. 
+ lb.solver.reset() + nonechildren(lb) + else: + nonetree(lb.parent) + + +def nonechildren(lb): + for child in lb.children: + nonechildren(child) + lb.artist._layoutbox = None + lb = None + + +def print_tree(lb): + """Print the tree of layoutboxes.""" + + if lb.parent is None: + print('LayoutBox Tree\n') + print('==============\n') + print_children(lb) + print('\n') + else: + print_tree(lb.parent) + + +def plot_children(fig, box, level=0, printit=True): + """Simple plotting to show where boxes are.""" + import matplotlib + import matplotlib.pyplot as plt + + if isinstance(fig, matplotlib.figure.Figure): + ax = fig.add_axes([0., 0., 1., 1.]) + ax.set_facecolor([1., 1., 1., 0.7]) + ax.set_alpha(0.3) + fig.draw(fig.canvas.get_renderer()) + else: + ax = fig + + import matplotlib.patches as patches + colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] + if printit: + print("Level:", level) + for child in box.children: + if printit: + print(child) + ax.add_patch( + patches.Rectangle( + (child.left.value(), child.bottom.value()), # (x, y) + child.width.value(), # width + child.height.value(), # height + fc='none', + alpha=0.8, + ec=colors[level] + ) + ) + if level > 0: + name = child.name.split('.')[-1] + if level % 2 == 0: + ax.text(child.left.value(), child.bottom.value(), name, + size=12-level, color=colors[level]) + else: + ax.text(child.right.value(), child.top.value(), name, + ha='right', va='top', size=12-level, + color=colors[level]) + + plot_children(ax, child, level=level+1, printit=printit) diff --git a/venv/Lib/site-packages/matplotlib/_mathtext_data.py b/venv/Lib/site-packages/matplotlib/_mathtext_data.py new file mode 100644 index 000000000..8c8a2d0d3 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_mathtext_data.py @@ -0,0 +1,1397 @@ +""" +font data tables for truetype and afm computer modern fonts +""" + +latex_to_bakoma = { + '\\__sqrt__' : ('cmex10', 0x70), + '\\bigcap' : ('cmex10', 0x5c), + '\\bigcup' : ('cmex10', 0x5b), + '\\bigodot' : 
('cmex10', 0x4b), + '\\bigoplus' : ('cmex10', 0x4d), + '\\bigotimes' : ('cmex10', 0x4f), + '\\biguplus' : ('cmex10', 0x5d), + '\\bigvee' : ('cmex10', 0x5f), + '\\bigwedge' : ('cmex10', 0x5e), + '\\coprod' : ('cmex10', 0x61), + '\\int' : ('cmex10', 0x5a), + '\\langle' : ('cmex10', 0xad), + '\\leftangle' : ('cmex10', 0xad), + '\\leftbrace' : ('cmex10', 0xa9), + '\\oint' : ('cmex10', 0x49), + '\\prod' : ('cmex10', 0x59), + '\\rangle' : ('cmex10', 0xae), + '\\rightangle' : ('cmex10', 0xae), + '\\rightbrace' : ('cmex10', 0xaa), + '\\sum' : ('cmex10', 0x58), + '\\widehat' : ('cmex10', 0x62), + '\\widetilde' : ('cmex10', 0x65), + '\\{' : ('cmex10', 0xa9), + '\\}' : ('cmex10', 0xaa), + '{' : ('cmex10', 0xa9), + '}' : ('cmex10', 0xaa), + + ',' : ('cmmi10', 0x3b), + '.' : ('cmmi10', 0x3a), + '/' : ('cmmi10', 0x3d), + '<' : ('cmmi10', 0x3c), + '>' : ('cmmi10', 0x3e), + '\\alpha' : ('cmmi10', 0xae), + '\\beta' : ('cmmi10', 0xaf), + '\\chi' : ('cmmi10', 0xc2), + '\\combiningrightarrowabove' : ('cmmi10', 0x7e), + '\\delta' : ('cmmi10', 0xb1), + '\\ell' : ('cmmi10', 0x60), + '\\epsilon' : ('cmmi10', 0xb2), + '\\eta' : ('cmmi10', 0xb4), + '\\flat' : ('cmmi10', 0x5b), + '\\frown' : ('cmmi10', 0x5f), + '\\gamma' : ('cmmi10', 0xb0), + '\\imath' : ('cmmi10', 0x7b), + '\\iota' : ('cmmi10', 0xb6), + '\\jmath' : ('cmmi10', 0x7c), + '\\kappa' : ('cmmi10', 0x2219), + '\\lambda' : ('cmmi10', 0xb8), + '\\leftharpoondown' : ('cmmi10', 0x29), + '\\leftharpoonup' : ('cmmi10', 0x28), + '\\mu' : ('cmmi10', 0xb9), + '\\natural' : ('cmmi10', 0x5c), + '\\nu' : ('cmmi10', 0xba), + '\\omega' : ('cmmi10', 0x21), + '\\phi' : ('cmmi10', 0xc1), + '\\pi' : ('cmmi10', 0xbc), + '\\psi' : ('cmmi10', 0xc3), + '\\rho' : ('cmmi10', 0xbd), + '\\rightharpoondown' : ('cmmi10', 0x2b), + '\\rightharpoonup' : ('cmmi10', 0x2a), + '\\sharp' : ('cmmi10', 0x5d), + '\\sigma' : ('cmmi10', 0xbe), + '\\smile' : ('cmmi10', 0x5e), + '\\tau' : ('cmmi10', 0xbf), + '\\theta' : ('cmmi10', 0xb5), + '\\triangleleft' : ('cmmi10', 
0x2f), + '\\triangleright' : ('cmmi10', 0x2e), + '\\upsilon' : ('cmmi10', 0xc0), + '\\varepsilon' : ('cmmi10', 0x22), + '\\varphi' : ('cmmi10', 0x27), + '\\varrho' : ('cmmi10', 0x25), + '\\varsigma' : ('cmmi10', 0x26), + '\\vartheta' : ('cmmi10', 0x23), + '\\wp' : ('cmmi10', 0x7d), + '\\xi' : ('cmmi10', 0xbb), + '\\zeta' : ('cmmi10', 0xb3), + + '!' : ('cmr10', 0x21), + '%' : ('cmr10', 0x25), + '&' : ('cmr10', 0x26), + '(' : ('cmr10', 0x28), + ')' : ('cmr10', 0x29), + '+' : ('cmr10', 0x2b), + '0' : ('cmr10', 0x30), + '1' : ('cmr10', 0x31), + '2' : ('cmr10', 0x32), + '3' : ('cmr10', 0x33), + '4' : ('cmr10', 0x34), + '5' : ('cmr10', 0x35), + '6' : ('cmr10', 0x36), + '7' : ('cmr10', 0x37), + '8' : ('cmr10', 0x38), + '9' : ('cmr10', 0x39), + ':' : ('cmr10', 0x3a), + ';' : ('cmr10', 0x3b), + '=' : ('cmr10', 0x3d), + '?' : ('cmr10', 0x3f), + '@' : ('cmr10', 0x40), + '[' : ('cmr10', 0x5b), + '\\#' : ('cmr10', 0x23), + '\\$' : ('cmr10', 0x24), + '\\%' : ('cmr10', 0x25), + '\\Delta' : ('cmr10', 0xa2), + '\\Gamma' : ('cmr10', 0xa1), + '\\Lambda' : ('cmr10', 0xa4), + '\\Omega' : ('cmr10', 0xad), + '\\Phi' : ('cmr10', 0xa9), + '\\Pi' : ('cmr10', 0xa6), + '\\Psi' : ('cmr10', 0xaa), + '\\Sigma' : ('cmr10', 0xa7), + '\\Theta' : ('cmr10', 0xa3), + '\\Upsilon' : ('cmr10', 0xa8), + '\\Xi' : ('cmr10', 0xa5), + '\\circumflexaccent' : ('cmr10', 0x5e), + '\\combiningacuteaccent' : ('cmr10', 0xb6), + '\\combiningbreve' : ('cmr10', 0xb8), + '\\combiningdiaeresis' : ('cmr10', 0xc4), + '\\combiningdotabove' : ('cmr10', 0x5f), + '\\combininggraveaccent' : ('cmr10', 0xb5), + '\\combiningoverline' : ('cmr10', 0xb9), + '\\combiningtilde' : ('cmr10', 0x7e), + '\\leftbracket' : ('cmr10', 0x5b), + '\\leftparen' : ('cmr10', 0x28), + '\\rightbracket' : ('cmr10', 0x5d), + '\\rightparen' : ('cmr10', 0x29), + '\\widebar' : ('cmr10', 0xb9), + ']' : ('cmr10', 0x5d), + + '*' : ('cmsy10', 0xa4), + '-' : ('cmsy10', 0xa1), + '\\Downarrow' : ('cmsy10', 0x2b), + '\\Im' : ('cmsy10', 0x3d), + '\\Leftarrow' : 
('cmsy10', 0x28), + '\\Leftrightarrow' : ('cmsy10', 0x2c), + '\\P' : ('cmsy10', 0x7b), + '\\Re' : ('cmsy10', 0x3c), + '\\Rightarrow' : ('cmsy10', 0x29), + '\\S' : ('cmsy10', 0x78), + '\\Uparrow' : ('cmsy10', 0x2a), + '\\Updownarrow' : ('cmsy10', 0x6d), + '\\Vert' : ('cmsy10', 0x6b), + '\\aleph' : ('cmsy10', 0x40), + '\\approx' : ('cmsy10', 0xbc), + '\\ast' : ('cmsy10', 0xa4), + '\\asymp' : ('cmsy10', 0xb3), + '\\backslash' : ('cmsy10', 0x6e), + '\\bigcirc' : ('cmsy10', 0xb0), + '\\bigtriangledown' : ('cmsy10', 0x35), + '\\bigtriangleup' : ('cmsy10', 0x34), + '\\bot' : ('cmsy10', 0x3f), + '\\bullet' : ('cmsy10', 0xb2), + '\\cap' : ('cmsy10', 0x5c), + '\\cdot' : ('cmsy10', 0xa2), + '\\circ' : ('cmsy10', 0xb1), + '\\clubsuit' : ('cmsy10', 0x7c), + '\\cup' : ('cmsy10', 0x5b), + '\\dag' : ('cmsy10', 0x79), + '\\dashv' : ('cmsy10', 0x61), + '\\ddag' : ('cmsy10', 0x7a), + '\\diamond' : ('cmsy10', 0xa6), + '\\diamondsuit' : ('cmsy10', 0x7d), + '\\div' : ('cmsy10', 0xa5), + '\\downarrow' : ('cmsy10', 0x23), + '\\emptyset' : ('cmsy10', 0x3b), + '\\equiv' : ('cmsy10', 0xb4), + '\\exists' : ('cmsy10', 0x39), + '\\forall' : ('cmsy10', 0x38), + '\\geq' : ('cmsy10', 0xb8), + '\\gg' : ('cmsy10', 0xc0), + '\\heartsuit' : ('cmsy10', 0x7e), + '\\in' : ('cmsy10', 0x32), + '\\infty' : ('cmsy10', 0x31), + '\\lbrace' : ('cmsy10', 0x66), + '\\lceil' : ('cmsy10', 0x64), + '\\leftarrow' : ('cmsy10', 0xc3), + '\\leftrightarrow' : ('cmsy10', 0x24), + '\\leq' : ('cmsy10', 0x2219), + '\\lfloor' : ('cmsy10', 0x62), + '\\ll' : ('cmsy10', 0xbf), + '\\mid' : ('cmsy10', 0x6a), + '\\mp' : ('cmsy10', 0xa8), + '\\nabla' : ('cmsy10', 0x72), + '\\nearrow' : ('cmsy10', 0x25), + '\\neg' : ('cmsy10', 0x3a), + '\\ni' : ('cmsy10', 0x33), + '\\nwarrow' : ('cmsy10', 0x2d), + '\\odot' : ('cmsy10', 0xaf), + '\\ominus' : ('cmsy10', 0xaa), + '\\oplus' : ('cmsy10', 0xa9), + '\\oslash' : ('cmsy10', 0xae), + '\\otimes' : ('cmsy10', 0xad), + '\\pm' : ('cmsy10', 0xa7), + '\\prec' : ('cmsy10', 0xc1), + '\\preceq' : 
('cmsy10', 0xb9), + '\\prime' : ('cmsy10', 0x30), + '\\propto' : ('cmsy10', 0x2f), + '\\rbrace' : ('cmsy10', 0x67), + '\\rceil' : ('cmsy10', 0x65), + '\\rfloor' : ('cmsy10', 0x63), + '\\rightarrow' : ('cmsy10', 0x21), + '\\searrow' : ('cmsy10', 0x26), + '\\sim' : ('cmsy10', 0xbb), + '\\simeq' : ('cmsy10', 0x27), + '\\slash' : ('cmsy10', 0x36), + '\\spadesuit' : ('cmsy10', 0xc4), + '\\sqcap' : ('cmsy10', 0x75), + '\\sqcup' : ('cmsy10', 0x74), + '\\sqsubseteq' : ('cmsy10', 0x76), + '\\sqsupseteq' : ('cmsy10', 0x77), + '\\subset' : ('cmsy10', 0xbd), + '\\subseteq' : ('cmsy10', 0xb5), + '\\succ' : ('cmsy10', 0xc2), + '\\succeq' : ('cmsy10', 0xba), + '\\supset' : ('cmsy10', 0xbe), + '\\supseteq' : ('cmsy10', 0xb6), + '\\swarrow' : ('cmsy10', 0x2e), + '\\times' : ('cmsy10', 0xa3), + '\\to' : ('cmsy10', 0x21), + '\\top' : ('cmsy10', 0x3e), + '\\uparrow' : ('cmsy10', 0x22), + '\\updownarrow' : ('cmsy10', 0x6c), + '\\uplus' : ('cmsy10', 0x5d), + '\\vdash' : ('cmsy10', 0x60), + '\\vee' : ('cmsy10', 0x5f), + '\\vert' : ('cmsy10', 0x6a), + '\\wedge' : ('cmsy10', 0x5e), + '\\wr' : ('cmsy10', 0x6f), + '\\|' : ('cmsy10', 0x6b), + '|' : ('cmsy10', 0x6a), + + '\\_' : ('cmtt10', 0x5f) +} + +latex_to_cmex = { + r'\__sqrt__' : 112, + r'\bigcap' : 92, + r'\bigcup' : 91, + r'\bigodot' : 75, + r'\bigoplus' : 77, + r'\bigotimes' : 79, + r'\biguplus' : 93, + r'\bigvee' : 95, + r'\bigwedge' : 94, + r'\coprod' : 97, + r'\int' : 90, + r'\leftangle' : 173, + r'\leftbrace' : 169, + r'\oint' : 73, + r'\prod' : 89, + r'\rightangle' : 174, + r'\rightbrace' : 170, + r'\sum' : 88, + r'\widehat' : 98, + r'\widetilde' : 101, +} + +latex_to_standard = { + r'\cong' : ('psyr', 64), + r'\Delta' : ('psyr', 68), + r'\Phi' : ('psyr', 70), + r'\Gamma' : ('psyr', 89), + r'\alpha' : ('psyr', 97), + r'\beta' : ('psyr', 98), + r'\chi' : ('psyr', 99), + r'\delta' : ('psyr', 100), + r'\varepsilon' : ('psyr', 101), + r'\phi' : ('psyr', 102), + r'\gamma' : ('psyr', 103), + r'\eta' : ('psyr', 104), + r'\iota' : 
('psyr', 105), + r'\varpsi' : ('psyr', 106), + r'\kappa' : ('psyr', 108), + r'\nu' : ('psyr', 110), + r'\pi' : ('psyr', 112), + r'\theta' : ('psyr', 113), + r'\rho' : ('psyr', 114), + r'\sigma' : ('psyr', 115), + r'\tau' : ('psyr', 116), + r'\upsilon' : ('psyr', 117), + r'\varpi' : ('psyr', 118), + r'\omega' : ('psyr', 119), + r'\xi' : ('psyr', 120), + r'\psi' : ('psyr', 121), + r'\zeta' : ('psyr', 122), + r'\sim' : ('psyr', 126), + r'\leq' : ('psyr', 163), + r'\infty' : ('psyr', 165), + r'\clubsuit' : ('psyr', 167), + r'\diamondsuit' : ('psyr', 168), + r'\heartsuit' : ('psyr', 169), + r'\spadesuit' : ('psyr', 170), + r'\leftrightarrow' : ('psyr', 171), + r'\leftarrow' : ('psyr', 172), + r'\uparrow' : ('psyr', 173), + r'\rightarrow' : ('psyr', 174), + r'\downarrow' : ('psyr', 175), + r'\pm' : ('psyr', 176), + r'\geq' : ('psyr', 179), + r'\times' : ('psyr', 180), + r'\propto' : ('psyr', 181), + r'\partial' : ('psyr', 182), + r'\bullet' : ('psyr', 183), + r'\div' : ('psyr', 184), + r'\neq' : ('psyr', 185), + r'\equiv' : ('psyr', 186), + r'\approx' : ('psyr', 187), + r'\ldots' : ('psyr', 188), + r'\aleph' : ('psyr', 192), + r'\Im' : ('psyr', 193), + r'\Re' : ('psyr', 194), + r'\wp' : ('psyr', 195), + r'\otimes' : ('psyr', 196), + r'\oplus' : ('psyr', 197), + r'\oslash' : ('psyr', 198), + r'\cap' : ('psyr', 199), + r'\cup' : ('psyr', 200), + r'\supset' : ('psyr', 201), + r'\supseteq' : ('psyr', 202), + r'\subset' : ('psyr', 204), + r'\subseteq' : ('psyr', 205), + r'\in' : ('psyr', 206), + r'\notin' : ('psyr', 207), + r'\angle' : ('psyr', 208), + r'\nabla' : ('psyr', 209), + r'\textregistered' : ('psyr', 210), + r'\copyright' : ('psyr', 211), + r'\texttrademark' : ('psyr', 212), + r'\Pi' : ('psyr', 213), + r'\prod' : ('psyr', 213), + r'\surd' : ('psyr', 214), + r'\__sqrt__' : ('psyr', 214), + r'\cdot' : ('psyr', 215), + r'\urcorner' : ('psyr', 216), + r'\vee' : ('psyr', 217), + r'\wedge' : ('psyr', 218), + r'\Leftrightarrow' : ('psyr', 219), + r'\Leftarrow' : ('psyr', 
220), + r'\Uparrow' : ('psyr', 221), + r'\Rightarrow' : ('psyr', 222), + r'\Downarrow' : ('psyr', 223), + r'\Diamond' : ('psyr', 224), + r'\Sigma' : ('psyr', 229), + r'\sum' : ('psyr', 229), + r'\forall' : ('psyr', 34), + r'\exists' : ('psyr', 36), + r'\lceil' : ('psyr', 233), + r'\lbrace' : ('psyr', 123), + r'\Psi' : ('psyr', 89), + r'\bot' : ('psyr', 0o136), + r'\Omega' : ('psyr', 0o127), + r'\leftbracket' : ('psyr', 0o133), + r'\rightbracket' : ('psyr', 0o135), + r'\leftbrace' : ('psyr', 123), + r'\leftparen' : ('psyr', 0o50), + r'\prime' : ('psyr', 0o242), + r'\sharp' : ('psyr', 0o43), + r'\slash' : ('psyr', 0o57), + r'\Lamda' : ('psyr', 0o114), + r'\neg' : ('psyr', 0o330), + r'\Upsilon' : ('psyr', 0o241), + r'\rightbrace' : ('psyr', 0o175), + r'\rfloor' : ('psyr', 0o373), + r'\lambda' : ('psyr', 0o154), + r'\to' : ('psyr', 0o256), + r'\Xi' : ('psyr', 0o130), + r'\emptyset' : ('psyr', 0o306), + r'\lfloor' : ('psyr', 0o353), + r'\rightparen' : ('psyr', 0o51), + r'\rceil' : ('psyr', 0o371), + r'\ni' : ('psyr', 0o47), + r'\epsilon' : ('psyr', 0o145), + r'\Theta' : ('psyr', 0o121), + r'\langle' : ('psyr', 0o341), + r'\leftangle' : ('psyr', 0o341), + r'\rangle' : ('psyr', 0o361), + r'\rightangle' : ('psyr', 0o361), + r'\rbrace' : ('psyr', 0o175), + r'\circ' : ('psyr', 0o260), + r'\diamond' : ('psyr', 0o340), + r'\mu' : ('psyr', 0o155), + r'\mid' : ('psyr', 0o352), + r'\imath' : ('pncri8a', 105), + r'\%' : ('pncr8a', 37), + r'\$' : ('pncr8a', 36), + r'\{' : ('pncr8a', 123), + r'\}' : ('pncr8a', 125), + r'\backslash' : ('pncr8a', 92), + r'\ast' : ('pncr8a', 42), + r'\#' : ('pncr8a', 35), + + r'\circumflexaccent' : ('pncri8a', 124), # for \hat + r'\combiningbreve' : ('pncri8a', 81), # for \breve + r'\combininggraveaccent' : ('pncri8a', 114), # for \grave + r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute + r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot + r'\combiningtilde' : ('pncri8a', 75), # for \tilde + r'\combiningrightarrowabove' : ('pncri8a', 
110), # for \vec + r'\combiningdotabove' : ('pncri8a', 26), # for \dot +} + +# Automatically generated. + +type12uni = { + 'aring' : 229, + 'quotedblright' : 8221, + 'V' : 86, + 'dollar' : 36, + 'four' : 52, + 'Yacute' : 221, + 'P' : 80, + 'underscore' : 95, + 'p' : 112, + 'Otilde' : 213, + 'perthousand' : 8240, + 'zero' : 48, + 'dotlessi' : 305, + 'Scaron' : 352, + 'zcaron' : 382, + 'egrave' : 232, + 'section' : 167, + 'Icircumflex' : 206, + 'ntilde' : 241, + 'ampersand' : 38, + 'dotaccent' : 729, + 'degree' : 176, + 'K' : 75, + 'acircumflex' : 226, + 'Aring' : 197, + 'k' : 107, + 'smalltilde' : 732, + 'Agrave' : 192, + 'divide' : 247, + 'ocircumflex' : 244, + 'asciitilde' : 126, + 'two' : 50, + 'E' : 69, + 'scaron' : 353, + 'F' : 70, + 'bracketleft' : 91, + 'asciicircum' : 94, + 'f' : 102, + 'ordmasculine' : 186, + 'mu' : 181, + 'paragraph' : 182, + 'nine' : 57, + 'v' : 118, + 'guilsinglleft' : 8249, + 'backslash' : 92, + 'six' : 54, + 'A' : 65, + 'icircumflex' : 238, + 'a' : 97, + 'ogonek' : 731, + 'q' : 113, + 'oacute' : 243, + 'ograve' : 242, + 'edieresis' : 235, + 'comma' : 44, + 'otilde' : 245, + 'guillemotright' : 187, + 'ecircumflex' : 234, + 'greater' : 62, + 'uacute' : 250, + 'L' : 76, + 'bullet' : 8226, + 'cedilla' : 184, + 'ydieresis' : 255, + 'l' : 108, + 'logicalnot' : 172, + 'exclamdown' : 161, + 'endash' : 8211, + 'agrave' : 224, + 'Adieresis' : 196, + 'germandbls' : 223, + 'Odieresis' : 214, + 'space' : 32, + 'quoteright' : 8217, + 'ucircumflex' : 251, + 'G' : 71, + 'quoteleft' : 8216, + 'W' : 87, + 'Q' : 81, + 'g' : 103, + 'w' : 119, + 'question' : 63, + 'one' : 49, + 'ring' : 730, + 'figuredash' : 8210, + 'B' : 66, + 'iacute' : 237, + 'Ydieresis' : 376, + 'R' : 82, + 'b' : 98, + 'r' : 114, + 'Ccedilla' : 199, + 'minus' : 8722, + 'Lslash' : 321, + 'Uacute' : 218, + 'yacute' : 253, + 'Ucircumflex' : 219, + 'quotedbl' : 34, + 'onehalf' : 189, + 'Thorn' : 222, + 'M' : 77, + 'eight' : 56, + 'multiply' : 215, + 'grave' : 96, + 'Ocircumflex' : 212, + 
'm' : 109, + 'Ugrave' : 217, + 'guilsinglright' : 8250, + 'Ntilde' : 209, + 'questiondown' : 191, + 'Atilde' : 195, + 'ccedilla' : 231, + 'Z' : 90, + 'copyright' : 169, + 'yen' : 165, + 'Eacute' : 201, + 'H' : 72, + 'X' : 88, + 'Idieresis' : 207, + 'bar' : 124, + 'h' : 104, + 'x' : 120, + 'udieresis' : 252, + 'ordfeminine' : 170, + 'braceleft' : 123, + 'macron' : 175, + 'atilde' : 227, + 'Acircumflex' : 194, + 'Oslash' : 216, + 'C' : 67, + 'quotedblleft' : 8220, + 'S' : 83, + 'exclam' : 33, + 'Zcaron' : 381, + 'equal' : 61, + 's' : 115, + 'eth' : 240, + 'Egrave' : 200, + 'hyphen' : 45, + 'period' : 46, + 'igrave' : 236, + 'colon' : 58, + 'Ecircumflex' : 202, + 'trademark' : 8482, + 'Aacute' : 193, + 'cent' : 162, + 'lslash' : 322, + 'c' : 99, + 'N' : 78, + 'breve' : 728, + 'Oacute' : 211, + 'guillemotleft' : 171, + 'n' : 110, + 'idieresis' : 239, + 'braceright' : 125, + 'seven' : 55, + 'brokenbar' : 166, + 'ugrave' : 249, + 'periodcentered' : 183, + 'sterling' : 163, + 'I' : 73, + 'Y' : 89, + 'Eth' : 208, + 'emdash' : 8212, + 'i' : 105, + 'daggerdbl' : 8225, + 'y' : 121, + 'plusminus' : 177, + 'less' : 60, + 'Udieresis' : 220, + 'D' : 68, + 'five' : 53, + 'T' : 84, + 'oslash' : 248, + 'acute' : 180, + 'd' : 100, + 'OE' : 338, + 'Igrave' : 204, + 't' : 116, + 'parenright' : 41, + 'adieresis' : 228, + 'quotesingle' : 39, + 'twodotenleader' : 8229, + 'slash' : 47, + 'ellipsis' : 8230, + 'numbersign' : 35, + 'odieresis' : 246, + 'O' : 79, + 'oe' : 339, + 'o' : 111, + 'Edieresis' : 203, + 'plus' : 43, + 'dagger' : 8224, + 'three' : 51, + 'hungarumlaut' : 733, + 'parenleft' : 40, + 'fraction' : 8260, + 'registered' : 174, + 'J' : 74, + 'dieresis' : 168, + 'Ograve' : 210, + 'j' : 106, + 'z' : 122, + 'ae' : 230, + 'semicolon' : 59, + 'at' : 64, + 'Iacute' : 205, + 'percent' : 37, + 'bracketright' : 93, + 'AE' : 198, + 'asterisk' : 42, + 'aacute' : 225, + 'U' : 85, + 'eacute' : 233, + 'e' : 101, + 'thorn' : 254, + 'u' : 117, +} + +uni2type1 = {v: k for k, v in 
type12uni.items()} + +tex2uni = { + 'widehat' : 0x0302, + 'widetilde' : 0x0303, + 'widebar' : 0x0305, + 'langle' : 0x27e8, + 'rangle' : 0x27e9, + 'perp' : 0x27c2, + 'neq' : 0x2260, + 'Join' : 0x2a1d, + 'leqslant' : 0x2a7d, + 'geqslant' : 0x2a7e, + 'lessapprox' : 0x2a85, + 'gtrapprox' : 0x2a86, + 'lesseqqgtr' : 0x2a8b, + 'gtreqqless' : 0x2a8c, + 'triangleeq' : 0x225c, + 'eqslantless' : 0x2a95, + 'eqslantgtr' : 0x2a96, + 'backepsilon' : 0x03f6, + 'precapprox' : 0x2ab7, + 'succapprox' : 0x2ab8, + 'fallingdotseq' : 0x2252, + 'subseteqq' : 0x2ac5, + 'supseteqq' : 0x2ac6, + 'varpropto' : 0x221d, + 'precnapprox' : 0x2ab9, + 'succnapprox' : 0x2aba, + 'subsetneqq' : 0x2acb, + 'supsetneqq' : 0x2acc, + 'lnapprox' : 0x2ab9, + 'gnapprox' : 0x2aba, + 'longleftarrow' : 0x27f5, + 'longrightarrow' : 0x27f6, + 'longleftrightarrow' : 0x27f7, + 'Longleftarrow' : 0x27f8, + 'Longrightarrow' : 0x27f9, + 'Longleftrightarrow' : 0x27fa, + 'longmapsto' : 0x27fc, + 'leadsto' : 0x21dd, + 'dashleftarrow' : 0x290e, + 'dashrightarrow' : 0x290f, + 'circlearrowleft' : 0x21ba, + 'circlearrowright' : 0x21bb, + 'leftrightsquigarrow' : 0x21ad, + 'leftsquigarrow' : 0x219c, + 'rightsquigarrow' : 0x219d, + 'Game' : 0x2141, + 'hbar' : 0x0127, + 'hslash' : 0x210f, + 'ldots' : 0x2026, + 'vdots' : 0x22ee, + 'doteqdot' : 0x2251, + 'doteq' : 8784, + 'partial' : 8706, + 'gg' : 8811, + 'asymp' : 8781, + 'blacktriangledown' : 9662, + 'otimes' : 8855, + 'nearrow' : 8599, + 'varpi' : 982, + 'vee' : 8744, + 'vec' : 8407, + 'smile' : 8995, + 'succnsim' : 8937, + 'gimel' : 8503, + 'vert' : 124, + '|' : 124, + 'varrho' : 1009, + 'P' : 182, + 'approxident' : 8779, + 'Swarrow' : 8665, + 'textasciicircum' : 94, + 'imageof' : 8887, + 'ntriangleleft' : 8938, + 'nleq' : 8816, + 'div' : 247, + 'nparallel' : 8742, + 'Leftarrow' : 8656, + 'lll' : 8920, + 'oiint' : 8751, + 'ngeq' : 8817, + 'Theta' : 920, + 'origof' : 8886, + 'blacksquare' : 9632, + 'solbar' : 9023, + 'neg' : 172, + 'sum' : 8721, + 'Vdash' : 8873, + 'coloneq' : 
8788, + 'degree' : 176, + 'bowtie' : 8904, + 'blacktriangleright' : 9654, + 'varsigma' : 962, + 'leq' : 8804, + 'ggg' : 8921, + 'lneqq' : 8808, + 'scurel' : 8881, + 'stareq' : 8795, + 'BbbN' : 8469, + 'nLeftarrow' : 8653, + 'nLeftrightarrow' : 8654, + 'k' : 808, + 'bot' : 8869, + 'BbbC' : 8450, + 'Lsh' : 8624, + 'leftleftarrows' : 8647, + 'BbbZ' : 8484, + 'digamma' : 989, + 'BbbR' : 8477, + 'BbbP' : 8473, + 'BbbQ' : 8474, + 'vartriangleright' : 8883, + 'succsim' : 8831, + 'wedge' : 8743, + 'lessgtr' : 8822, + 'veebar' : 8891, + 'mapsdown' : 8615, + 'Rsh' : 8625, + 'chi' : 967, + 'prec' : 8826, + 'nsubseteq' : 8840, + 'therefore' : 8756, + 'eqcirc' : 8790, + 'textexclamdown' : 161, + 'nRightarrow' : 8655, + 'flat' : 9837, + 'notin' : 8713, + 'llcorner' : 8990, + 'varepsilon' : 949, + 'bigtriangleup' : 9651, + 'aleph' : 8501, + 'dotminus' : 8760, + 'upsilon' : 965, + 'Lambda' : 923, + 'cap' : 8745, + 'barleftarrow' : 8676, + 'mu' : 956, + 'boxplus' : 8862, + 'mp' : 8723, + 'circledast' : 8859, + 'tau' : 964, + 'in' : 8712, + 'backslash' : 92, + 'varnothing' : 8709, + 'sharp' : 9839, + 'eqsim' : 8770, + 'gnsim' : 8935, + 'Searrow' : 8664, + 'updownarrows' : 8645, + 'heartsuit' : 9825, + 'trianglelefteq' : 8884, + 'ddag' : 8225, + 'sqsubseteq' : 8849, + 'mapsfrom' : 8612, + 'boxbar' : 9707, + 'sim' : 8764, + 'Nwarrow' : 8662, + 'nequiv' : 8802, + 'succ' : 8827, + 'vdash' : 8866, + 'Leftrightarrow' : 8660, + 'parallel' : 8741, + 'invnot' : 8976, + 'natural' : 9838, + 'ss' : 223, + 'uparrow' : 8593, + 'nsim' : 8769, + 'hookrightarrow' : 8618, + 'Equiv' : 8803, + 'approx' : 8776, + 'Vvdash' : 8874, + 'nsucc' : 8833, + 'leftrightharpoons' : 8651, + 'Re' : 8476, + 'boxminus' : 8863, + 'equiv' : 8801, + 'Lleftarrow' : 8666, + 'll' : 8810, + 'Cup' : 8915, + 'measeq' : 8798, + 'upharpoonleft' : 8639, + 'lq' : 8216, + 'Upsilon' : 933, + 'subsetneq' : 8842, + 'greater' : 62, + 'supsetneq' : 8843, + 'Cap' : 8914, + 'L' : 321, + 'spadesuit' : 9824, + 'lrcorner' : 8991, + 'not' : 
824, + 'bar' : 772, + 'rightharpoonaccent' : 8401, + 'boxdot' : 8865, + 'l' : 322, + 'leftharpoondown' : 8637, + 'bigcup' : 8899, + 'iint' : 8748, + 'bigwedge' : 8896, + 'downharpoonleft' : 8643, + 'textasciitilde' : 126, + 'subset' : 8834, + 'leqq' : 8806, + 'mapsup' : 8613, + 'nvDash' : 8877, + 'looparrowleft' : 8619, + 'nless' : 8814, + 'rightarrowbar' : 8677, + 'Vert' : 8214, + 'downdownarrows' : 8650, + 'uplus' : 8846, + 'simeq' : 8771, + 'napprox' : 8777, + 'ast' : 8727, + 'twoheaduparrow' : 8607, + 'doublebarwedge' : 8966, + 'Sigma' : 931, + 'leftharpoonaccent' : 8400, + 'ntrianglelefteq' : 8940, + 'nexists' : 8708, + 'times' : 215, + 'measuredangle' : 8737, + 'bumpeq' : 8783, + 'carriagereturn' : 8629, + 'adots' : 8944, + 'checkmark' : 10003, + 'lambda' : 955, + 'xi' : 958, + 'rbrace' : 125, + 'rbrack' : 93, + 'Nearrow' : 8663, + 'maltese' : 10016, + 'clubsuit' : 9827, + 'top' : 8868, + 'overarc' : 785, + 'varphi' : 966, + 'Delta' : 916, + 'iota' : 953, + 'nleftarrow' : 8602, + 'candra' : 784, + 'supset' : 8835, + 'triangleleft' : 9665, + 'gtreqless' : 8923, + 'ntrianglerighteq' : 8941, + 'quad' : 8195, + 'Xi' : 926, + 'gtrdot' : 8919, + 'leftthreetimes' : 8907, + 'minus' : 8722, + 'preccurlyeq' : 8828, + 'nleftrightarrow' : 8622, + 'lambdabar' : 411, + 'blacktriangle' : 9652, + 'kernelcontraction' : 8763, + 'Phi' : 934, + 'angle' : 8736, + 'spadesuitopen' : 9828, + 'eqless' : 8924, + 'mid' : 8739, + 'varkappa' : 1008, + 'Ldsh' : 8626, + 'updownarrow' : 8597, + 'beta' : 946, + 'textquotedblleft' : 8220, + 'rho' : 961, + 'alpha' : 945, + 'intercal' : 8890, + 'beth' : 8502, + 'grave' : 768, + 'acwopencirclearrow' : 8634, + 'nmid' : 8740, + 'nsupset' : 8837, + 'sigma' : 963, + 'dot' : 775, + 'Rightarrow' : 8658, + 'turnednot' : 8985, + 'backsimeq' : 8909, + 'leftarrowtail' : 8610, + 'approxeq' : 8778, + 'curlyeqsucc' : 8927, + 'rightarrowtail' : 8611, + 'Psi' : 936, + 'copyright' : 169, + 'yen' : 165, + 'vartriangleleft' : 8882, + 'rasp' : 700, + 
'triangleright' : 9655, + 'precsim' : 8830, + 'infty' : 8734, + 'geq' : 8805, + 'updownarrowbar' : 8616, + 'precnsim' : 8936, + 'H' : 779, + 'ulcorner' : 8988, + 'looparrowright' : 8620, + 'ncong' : 8775, + 'downarrow' : 8595, + 'circeq' : 8791, + 'subseteq' : 8838, + 'bigstar' : 9733, + 'prime' : 8242, + 'lceil' : 8968, + 'Rrightarrow' : 8667, + 'oiiint' : 8752, + 'curlywedge' : 8911, + 'vDash' : 8872, + 'lfloor' : 8970, + 'ddots' : 8945, + 'exists' : 8707, + 'underbar' : 817, + 'Pi' : 928, + 'leftrightarrows' : 8646, + 'sphericalangle' : 8738, + 'coprod' : 8720, + 'circledcirc' : 8858, + 'gtrsim' : 8819, + 'gneqq' : 8809, + 'between' : 8812, + 'theta' : 952, + 'complement' : 8705, + 'arceq' : 8792, + 'nVdash' : 8878, + 'S' : 167, + 'wr' : 8768, + 'wp' : 8472, + 'backcong' : 8780, + 'lasp' : 701, + 'c' : 807, + 'nabla' : 8711, + 'dotplus' : 8724, + 'eta' : 951, + 'forall' : 8704, + 'eth' : 240, + 'colon' : 58, + 'sqcup' : 8852, + 'rightrightarrows' : 8649, + 'sqsupset' : 8848, + 'mapsto' : 8614, + 'bigtriangledown' : 9661, + 'sqsupseteq' : 8850, + 'propto' : 8733, + 'pi' : 960, + 'pm' : 177, + 'dots' : 0x2026, + 'nrightarrow' : 8603, + 'textasciiacute' : 180, + 'Doteq' : 8785, + 'breve' : 774, + 'sqcap' : 8851, + 'twoheadrightarrow' : 8608, + 'kappa' : 954, + 'vartriangle' : 9653, + 'diamondsuit' : 9826, + 'pitchfork' : 8916, + 'blacktriangleleft' : 9664, + 'nprec' : 8832, + 'curvearrowright' : 8631, + 'barwedge' : 8892, + 'multimap' : 8888, + 'textquestiondown' : 191, + 'cong' : 8773, + 'rtimes' : 8906, + 'rightzigzagarrow' : 8669, + 'rightarrow' : 8594, + 'leftarrow' : 8592, + '__sqrt__' : 8730, + 'twoheaddownarrow' : 8609, + 'oint' : 8750, + 'bigvee' : 8897, + 'eqdef' : 8797, + 'sterling' : 163, + 'phi' : 981, + 'Updownarrow' : 8661, + 'backprime' : 8245, + 'emdash' : 8212, + 'Gamma' : 915, + 'i' : 305, + 'rceil' : 8969, + 'leftharpoonup' : 8636, + 'Im' : 8465, + 'curvearrowleft' : 8630, + 'wedgeq' : 8793, + 'curlyeqprec' : 8926, + 'questeq' : 8799, + 'less' : 
60, + 'upuparrows' : 8648, + 'tilde' : 771, + 'textasciigrave' : 96, + 'smallsetminus' : 8726, + 'ell' : 8467, + 'cup' : 8746, + 'danger' : 9761, + 'nVDash' : 8879, + 'cdotp' : 183, + 'cdots' : 8943, + 'hat' : 770, + 'eqgtr' : 8925, + 'psi' : 968, + 'frown' : 8994, + 'acute' : 769, + 'downzigzagarrow' : 8623, + 'ntriangleright' : 8939, + 'cupdot' : 8845, + 'circleddash' : 8861, + 'oslash' : 8856, + 'mho' : 8487, + 'd' : 803, + 'sqsubset' : 8847, + 'cdot' : 8901, + 'Omega' : 937, + 'OE' : 338, + 'veeeq' : 8794, + 'Finv' : 8498, + 't' : 865, + 'leftrightarrow' : 8596, + 'swarrow' : 8601, + 'rightthreetimes' : 8908, + 'rightleftharpoons' : 8652, + 'lesssim' : 8818, + 'searrow' : 8600, + 'because' : 8757, + 'gtrless' : 8823, + 'star' : 8902, + 'nsubset' : 8836, + 'zeta' : 950, + 'dddot' : 8411, + 'bigcirc' : 9675, + 'Supset' : 8913, + 'circ' : 8728, + 'slash' : 8725, + 'ocirc' : 778, + 'prod' : 8719, + 'twoheadleftarrow' : 8606, + 'daleth' : 8504, + 'upharpoonright' : 8638, + 'odot' : 8857, + 'Uparrow' : 8657, + 'O' : 216, + 'hookleftarrow' : 8617, + 'trianglerighteq' : 8885, + 'nsime' : 8772, + 'oe' : 339, + 'nwarrow' : 8598, + 'o' : 248, + 'ddddot' : 8412, + 'downharpoonright' : 8642, + 'succcurlyeq' : 8829, + 'gamma' : 947, + 'scrR' : 8475, + 'dag' : 8224, + 'thickspace' : 8197, + 'frakZ' : 8488, + 'lessdot' : 8918, + 'triangledown' : 9663, + 'ltimes' : 8905, + 'scrB' : 8492, + 'endash' : 8211, + 'scrE' : 8496, + 'scrF' : 8497, + 'scrH' : 8459, + 'scrI' : 8464, + 'rightharpoondown' : 8641, + 'scrL' : 8466, + 'scrM' : 8499, + 'frakC' : 8493, + 'nsupseteq' : 8841, + 'circledR' : 174, + 'circledS' : 9416, + 'ngtr' : 8815, + 'bigcap' : 8898, + 'scre' : 8495, + 'Downarrow' : 8659, + 'scrg' : 8458, + 'overleftrightarrow' : 8417, + 'scro' : 8500, + 'lnsim' : 8934, + 'eqcolon' : 8789, + 'curlyvee' : 8910, + 'urcorner' : 8989, + 'lbrace' : 123, + 'Bumpeq' : 8782, + 'delta' : 948, + 'boxtimes' : 8864, + 'overleftarrow' : 8406, + 'prurel' : 8880, + 'clubsuitopen' : 9831, + 
'cwopencirclearrow' : 8635, + 'geqq' : 8807, + 'rightleftarrows' : 8644, + 'ac' : 8766, + 'ae' : 230, + 'int' : 8747, + 'rfloor' : 8971, + 'risingdotseq' : 8787, + 'nvdash' : 8876, + 'diamond' : 8900, + 'ddot' : 776, + 'backsim' : 8765, + 'oplus' : 8853, + 'triangleq' : 8796, + 'check' : 780, + 'ni' : 8715, + 'iiint' : 8749, + 'ne' : 8800, + 'lesseqgtr' : 8922, + 'obar' : 9021, + 'supseteq' : 8839, + 'nu' : 957, + 'AA' : 197, + 'AE' : 198, + 'models' : 8871, + 'ominus' : 8854, + 'dashv' : 8867, + 'omega' : 969, + 'rq' : 8217, + 'Subset' : 8912, + 'rightharpoonup' : 8640, + 'Rdsh' : 8627, + 'bullet' : 8729, + 'divideontimes' : 8903, + 'lbrack' : 91, + 'textquotedblright' : 8221, + 'Colon' : 8759, + '%' : 37, + '$' : 36, + '{' : 123, + '}' : 125, + '_' : 95, + '#' : 35, + 'imath' : 0x131, + 'circumflexaccent' : 770, + 'combiningbreve' : 774, + 'combiningoverline' : 772, + 'combininggraveaccent' : 768, + 'combiningacuteaccent' : 769, + 'combiningdiaeresis' : 776, + 'combiningtilde' : 771, + 'combiningrightarrowabove' : 8407, + 'combiningdotabove' : 775, + 'to' : 8594, + 'succeq' : 8829, + 'emptyset' : 8709, + 'leftparen' : 40, + 'rightparen' : 41, + 'bigoplus' : 10753, + 'leftangle' : 10216, + 'rightangle' : 10217, + 'leftbrace' : 124, + 'rightbrace' : 125, + 'jmath' : 567, + 'bigodot' : 10752, + 'preceq' : 8828, + 'biguplus' : 10756, + 'epsilon' : 949, + 'vartheta' : 977, + 'bigotimes' : 10754, + 'guillemotleft' : 171, + 'ring' : 730, + 'Thorn' : 222, + 'guilsinglright' : 8250, + 'perthousand' : 8240, + 'macron' : 175, + 'cent' : 162, + 'guillemotright' : 187, + 'equal' : 61, + 'asterisk' : 42, + 'guilsinglleft' : 8249, + 'plus' : 43, + 'thorn' : 254, + 'dagger' : 8224 +} + +# Each element is a 4-tuple of the form: +# src_start, src_end, dst_font, dst_start +# +stix_virtual_fonts = { + 'bb': + { + 'rm': + [ + (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 + (0x0041, 0x0042, 'rm', 0x1d538), # A-B + (0x0043, 0x0043, 'rm', 0x2102), # C + (0x0044, 0x0047, 'rm', 0x1d53b), # D-G + 
(0x0048, 0x0048, 'rm', 0x210d), # H + (0x0049, 0x004d, 'rm', 0x1d540), # I-M + (0x004e, 0x004e, 'rm', 0x2115), # N + (0x004f, 0x004f, 'rm', 0x1d546), # O + (0x0050, 0x0051, 'rm', 0x2119), # P-Q + (0x0052, 0x0052, 'rm', 0x211d), # R + (0x0053, 0x0059, 'rm', 0x1d54a), # S-Y + (0x005a, 0x005a, 'rm', 0x2124), # Z + (0x0061, 0x007a, 'rm', 0x1d552), # a-z + (0x0393, 0x0393, 'rm', 0x213e), # \Gamma + (0x03a0, 0x03a0, 'rm', 0x213f), # \Pi + (0x03a3, 0x03a3, 'rm', 0x2140), # \Sigma + (0x03b3, 0x03b3, 'rm', 0x213d), # \gamma + (0x03c0, 0x03c0, 'rm', 0x213c), # \pi + ], + 'it': + [ + (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 + (0x0041, 0x0042, 'it', 0xe154), # A-B + (0x0043, 0x0043, 'it', 0x2102), # C + (0x0044, 0x0044, 'it', 0x2145), # D + (0x0045, 0x0047, 'it', 0xe156), # E-G + (0x0048, 0x0048, 'it', 0x210d), # H + (0x0049, 0x004d, 'it', 0xe159), # I-M + (0x004e, 0x004e, 'it', 0x2115), # N + (0x004f, 0x004f, 'it', 0xe15e), # O + (0x0050, 0x0051, 'it', 0x2119), # P-Q + (0x0052, 0x0052, 'it', 0x211d), # R + (0x0053, 0x0059, 'it', 0xe15f), # S-Y + (0x005a, 0x005a, 'it', 0x2124), # Z + (0x0061, 0x0063, 'it', 0xe166), # a-c + (0x0064, 0x0065, 'it', 0x2146), # d-e + (0x0066, 0x0068, 'it', 0xe169), # f-h + (0x0069, 0x006a, 'it', 0x2148), # i-j + (0x006b, 0x007a, 'it', 0xe16c), # k-z + (0x0393, 0x0393, 'it', 0x213e), # \Gamma (not in beta STIX fonts) + (0x03a0, 0x03a0, 'it', 0x213f), # \Pi + (0x03a3, 0x03a3, 'it', 0x2140), # \Sigma (not in beta STIX fonts) + (0x03b3, 0x03b3, 'it', 0x213d), # \gamma (not in beta STIX fonts) + (0x03c0, 0x03c0, 'it', 0x213c), # \pi + ], + 'bf': + [ + (0x0030, 0x0039, 'rm', 0x1d7d8), # 0-9 + (0x0041, 0x0042, 'bf', 0xe38a), # A-B + (0x0043, 0x0043, 'bf', 0x2102), # C + (0x0044, 0x0044, 'bf', 0x2145), # D + (0x0045, 0x0047, 'bf', 0xe38d), # E-G + (0x0048, 0x0048, 'bf', 0x210d), # H + (0x0049, 0x004d, 'bf', 0xe390), # I-M + (0x004e, 0x004e, 'bf', 0x2115), # N + (0x004f, 0x004f, 'bf', 0xe395), # O + (0x0050, 0x0051, 'bf', 0x2119), # P-Q + (0x0052, 0x0052, 
'bf', 0x211d), # R + (0x0053, 0x0059, 'bf', 0xe396), # S-Y + (0x005a, 0x005a, 'bf', 0x2124), # Z + (0x0061, 0x0063, 'bf', 0xe39d), # a-c + (0x0064, 0x0065, 'bf', 0x2146), # d-e + (0x0066, 0x0068, 'bf', 0xe3a2), # f-h + (0x0069, 0x006a, 'bf', 0x2148), # i-j + (0x006b, 0x007a, 'bf', 0xe3a7), # k-z + (0x0393, 0x0393, 'bf', 0x213e), # \Gamma + (0x03a0, 0x03a0, 'bf', 0x213f), # \Pi + (0x03a3, 0x03a3, 'bf', 0x2140), # \Sigma + (0x03b3, 0x03b3, 'bf', 0x213d), # \gamma + (0x03c0, 0x03c0, 'bf', 0x213c), # \pi + ], + }, + 'cal': + [ + (0x0041, 0x005a, 'it', 0xe22d), # A-Z + ], + 'frak': + { + 'rm': + [ + (0x0041, 0x0042, 'rm', 0x1d504), # A-B + (0x0043, 0x0043, 'rm', 0x212d), # C + (0x0044, 0x0047, 'rm', 0x1d507), # D-G + (0x0048, 0x0048, 'rm', 0x210c), # H + (0x0049, 0x0049, 'rm', 0x2111), # I + (0x004a, 0x0051, 'rm', 0x1d50d), # J-Q + (0x0052, 0x0052, 'rm', 0x211c), # R + (0x0053, 0x0059, 'rm', 0x1d516), # S-Y + (0x005a, 0x005a, 'rm', 0x2128), # Z + (0x0061, 0x007a, 'rm', 0x1d51e), # a-z + ], + 'it': + [ + (0x0041, 0x0042, 'rm', 0x1d504), # A-B + (0x0043, 0x0043, 'rm', 0x212d), # C + (0x0044, 0x0047, 'rm', 0x1d507), # D-G + (0x0048, 0x0048, 'rm', 0x210c), # H + (0x0049, 0x0049, 'rm', 0x2111), # I + (0x004a, 0x0051, 'rm', 0x1d50d), # J-Q + (0x0052, 0x0052, 'rm', 0x211c), # R + (0x0053, 0x0059, 'rm', 0x1d516), # S-Y + (0x005a, 0x005a, 'rm', 0x2128), # Z + (0x0061, 0x007a, 'rm', 0x1d51e), # a-z + ], + 'bf': + [ + (0x0041, 0x005a, 'bf', 0x1d56c), # A-Z + (0x0061, 0x007a, 'bf', 0x1d586), # a-z + ], + }, + 'scr': + [ + (0x0041, 0x0041, 'it', 0x1d49c), # A + (0x0042, 0x0042, 'it', 0x212c), # B + (0x0043, 0x0044, 'it', 0x1d49e), # C-D + (0x0045, 0x0046, 'it', 0x2130), # E-F + (0x0047, 0x0047, 'it', 0x1d4a2), # G + (0x0048, 0x0048, 'it', 0x210b), # H + (0x0049, 0x0049, 'it', 0x2110), # I + (0x004a, 0x004b, 'it', 0x1d4a5), # J-K + (0x004c, 0x004c, 'it', 0x2112), # L + (0x004d, 0x004d, 'it', 0x2133), # M + (0x004e, 0x0051, 'it', 0x1d4a9), # N-Q + (0x0052, 0x0052, 'it', 0x211b), # R + 
(0x0053, 0x005a, 'it', 0x1d4ae), # S-Z + (0x0061, 0x0064, 'it', 0x1d4b6), # a-d + (0x0065, 0x0065, 'it', 0x212f), # e + (0x0066, 0x0066, 'it', 0x1d4bb), # f + (0x0067, 0x0067, 'it', 0x210a), # g + (0x0068, 0x006e, 'it', 0x1d4bd), # h-n + (0x006f, 0x006f, 'it', 0x2134), # o + (0x0070, 0x007a, 'it', 0x1d4c5), # p-z + ], + 'sf': + { + 'rm': + [ + (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9 + (0x0041, 0x005a, 'rm', 0x1d5a0), # A-Z + (0x0061, 0x007a, 'rm', 0x1d5ba), # a-z + (0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega + (0x03b1, 0x03c9, 'rm', 0xe196), # \alpha-\omega + (0x03d1, 0x03d1, 'rm', 0xe1b0), # theta variant + (0x03d5, 0x03d5, 'rm', 0xe1b1), # phi variant + (0x03d6, 0x03d6, 'rm', 0xe1b3), # pi variant + (0x03f1, 0x03f1, 'rm', 0xe1b2), # rho variant + (0x03f5, 0x03f5, 'rm', 0xe1af), # lunate epsilon + (0x2202, 0x2202, 'rm', 0xe17c), # partial differential + ], + 'it': + [ + # These numerals are actually upright. We don't actually + # want italic numerals ever. + (0x0030, 0x0039, 'rm', 0x1d7e2), # 0-9 + (0x0041, 0x005a, 'it', 0x1d608), # A-Z + (0x0061, 0x007a, 'it', 0x1d622), # a-z + (0x0391, 0x03a9, 'rm', 0xe17d), # \Alpha-\Omega + (0x03b1, 0x03c9, 'it', 0xe1d8), # \alpha-\omega + (0x03d1, 0x03d1, 'it', 0xe1f2), # theta variant + (0x03d5, 0x03d5, 'it', 0xe1f3), # phi variant + (0x03d6, 0x03d6, 'it', 0xe1f5), # pi variant + (0x03f1, 0x03f1, 'it', 0xe1f4), # rho variant + (0x03f5, 0x03f5, 'it', 0xe1f1), # lunate epsilon + ], + 'bf': + [ + (0x0030, 0x0039, 'bf', 0x1d7ec), # 0-9 + (0x0041, 0x005a, 'bf', 0x1d5d4), # A-Z + (0x0061, 0x007a, 'bf', 0x1d5ee), # a-z + (0x0391, 0x03a9, 'bf', 0x1d756), # \Alpha-\Omega + (0x03b1, 0x03c9, 'bf', 0x1d770), # \alpha-\omega + (0x03d1, 0x03d1, 'bf', 0x1d78b), # theta variant + (0x03d5, 0x03d5, 'bf', 0x1d78d), # phi variant + (0x03d6, 0x03d6, 'bf', 0x1d78f), # pi variant + (0x03f0, 0x03f0, 'bf', 0x1d78c), # kappa variant + (0x03f1, 0x03f1, 'bf', 0x1d78e), # rho variant + (0x03f5, 0x03f5, 'bf', 0x1d78a), # lunate epsilon + (0x2202, 
0x2202, 'bf', 0x1d789), # partial differential + (0x2207, 0x2207, 'bf', 0x1d76f), # \Nabla + ], + }, + 'tt': + [ + (0x0030, 0x0039, 'rm', 0x1d7f6), # 0-9 + (0x0041, 0x005a, 'rm', 0x1d670), # A-Z + (0x0061, 0x007a, 'rm', 0x1d68a) # a-z + ], + } diff --git a/venv/Lib/site-packages/matplotlib/_path.cp36-win32.pyd b/venv/Lib/site-packages/matplotlib/_path.cp36-win32.pyd new file mode 100644 index 000000000..578de187f Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/_path.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/matplotlib/_pylab_helpers.py b/venv/Lib/site-packages/matplotlib/_pylab_helpers.py new file mode 100644 index 000000000..26ba7a171 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_pylab_helpers.py @@ -0,0 +1,141 @@ +""" +Manage figures for the pyplot interface. +""" + +import atexit +from collections import OrderedDict +import gc + + +class Gcf: + """ + Singleton to maintain the relation between figures and their managers, and + keep track of and "active" figure and manager. + + The canvas of a figure created through pyplot is associated with a figure + manager, which handles the interaction between the figure and the backend. + pyplot keeps track of figure managers using an identifier, the "figure + number" or "manager number" (which can actually be any hashable value); + this number is available as the :attr:`number` attribute of the manager. + + This class is never instantiated; it consists of an `OrderedDict` mapping + figure/manager numbers to managers, and a set of class methods that + manipulate this `OrderedDict`. + + Attributes + ---------- + figs : OrderedDict + `OrderedDict` mapping numbers to managers; the active manager is at the + end. + """ + + figs = OrderedDict() + + @classmethod + def get_fig_manager(cls, num): + """ + If manager number *num* exists, make it the active one and return it; + otherwise return *None*. 
+ """ + manager = cls.figs.get(num, None) + if manager is not None: + cls.set_active(manager) + return manager + + @classmethod + def destroy(cls, num): + """ + Destroy manager *num* -- either a manager instance or a manager number. + + In the interactive backends, this is bound to the window "destroy" and + "delete" events. + + It is recommended to pass a manager instance, to avoid confusion when + two managers share the same number. + """ + if all(hasattr(num, attr) for attr in ["num", "_cidgcf", "destroy"]): + manager = num + if cls.figs.get(manager.num) is manager: + cls.figs.pop(manager.num) + else: + return + else: + try: + manager = cls.figs.pop(num) + except KeyError: + return + manager.canvas.mpl_disconnect(manager._cidgcf) + manager.destroy() + gc.collect(1) + + @classmethod + def destroy_fig(cls, fig): + """Destroy figure *fig*.""" + num = next((manager.num for manager in cls.figs.values() + if manager.canvas.figure == fig), None) + if num is not None: + cls.destroy(num) + + @classmethod + def destroy_all(cls): + """Destroy all figures.""" + # Reimport gc in case the module globals have already been removed + # during interpreter shutdown. 
+ import gc + for manager in list(cls.figs.values()): + manager.canvas.mpl_disconnect(manager._cidgcf) + manager.destroy() + cls.figs.clear() + gc.collect(1) + + @classmethod + def has_fignum(cls, num): + """Return whether figure number *num* exists.""" + return num in cls.figs + + @classmethod + def get_all_fig_managers(cls): + """Return a list of figure managers.""" + return list(cls.figs.values()) + + @classmethod + def get_num_fig_managers(cls): + """Return the number of figures being managed.""" + return len(cls.figs) + + @classmethod + def get_active(cls): + """Return the active manager, or *None* if there is no manager.""" + return next(reversed(cls.figs.values())) if cls.figs else None + + @classmethod + def _set_new_active_manager(cls, manager): + """Adopt *manager* into pyplot and make it the active manager.""" + if not hasattr(manager, "_cidgcf"): + manager._cidgcf = manager.canvas.mpl_connect( + "button_press_event", lambda event: cls.set_active(manager)) + fig = manager.canvas.figure + fig.number = manager.num + label = fig.get_label() + if label: + manager.set_window_title(label) + cls.set_active(manager) + + @classmethod + def set_active(cls, manager): + """Make *manager* the active manager.""" + cls.figs[manager.num] = manager + cls.figs.move_to_end(manager.num) + + @classmethod + def draw_all(cls, force=False): + """ + Redraw all stale managed figures, or, if *force* is True, all managed + figures. 
+ """ + for manager in cls.get_all_fig_managers(): + if force or manager.canvas.figure.stale: + manager.canvas.draw_idle() + + +atexit.register(Gcf.destroy_all) diff --git a/venv/Lib/site-packages/matplotlib/_qhull.cp36-win32.pyd b/venv/Lib/site-packages/matplotlib/_qhull.cp36-win32.pyd new file mode 100644 index 000000000..52d13bb98 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/_qhull.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/matplotlib/_text_layout.py b/venv/Lib/site-packages/matplotlib/_text_layout.py new file mode 100644 index 000000000..e9fed1316 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_text_layout.py @@ -0,0 +1,38 @@ +""" +Text layouting utilities. +""" + +from .ft2font import KERNING_DEFAULT, LOAD_NO_HINTING + + +def layout(string, font, *, kern_mode=KERNING_DEFAULT): + """ + Render *string* with *font*. For each character in *string*, yield a + (glyph-index, x-position) pair. When such a pair is yielded, the font's + glyph is set to the corresponding character. + + Parameters + ---------- + string : str + The string to be rendered. + font : FT2Font + The font. + kern_mode : int + A FreeType kerning mode. 
+ + Yields + ------ + glyph_index : int + x_position : float + """ + x = 0 + last_glyph_idx = None + for char in string: + glyph_idx = font.get_char_index(ord(char)) + kern = (font.get_kerning(last_glyph_idx, glyph_idx, kern_mode) + if last_glyph_idx is not None else 0) / 64 + x += kern + glyph = font.load_glyph(glyph_idx, flags=LOAD_NO_HINTING) + yield glyph_idx, x + x += glyph.linearHoriAdvance / 65536 + last_glyph_idx = glyph_idx diff --git a/venv/Lib/site-packages/matplotlib/_tri.cp36-win32.pyd b/venv/Lib/site-packages/matplotlib/_tri.cp36-win32.pyd new file mode 100644 index 000000000..fb9cd6327 Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/_tri.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/matplotlib/_ttconv.cp36-win32.pyd b/venv/Lib/site-packages/matplotlib/_ttconv.cp36-win32.pyd new file mode 100644 index 000000000..bef50143b Binary files /dev/null and b/venv/Lib/site-packages/matplotlib/_ttconv.cp36-win32.pyd differ diff --git a/venv/Lib/site-packages/matplotlib/_version.py b/venv/Lib/site-packages/matplotlib/_version.py new file mode 100644 index 000000000..d9f85368a --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/_version.py @@ -0,0 +1,21 @@ + +# This file was generated by 'versioneer.py' (0.15) from +# revision-control system data, or from the parent directory name of an +# unpacked source archive. Distribution tarballs contain a pre-generated copy +# of this file. + +import json +import sys + +version_json = ''' +{ + "dirty": false, + "error": null, + "full-revisionid": "6e4d72c663c9930115720ac469341ed56a9505ec", + "version": "3.3.2" +} +''' # END VERSION_JSON + + +def get_versions(): + return json.loads(version_json) diff --git a/venv/Lib/site-packages/matplotlib/afm.py b/venv/Lib/site-packages/matplotlib/afm.py new file mode 100644 index 000000000..ad3e41c08 --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/afm.py @@ -0,0 +1,528 @@ +""" +A python interface to Adobe Font Metrics Files. 
+ +Although a number of other python implementations exist, and may be more +complete than this, it was decided not to go with them because they were +either: + +1) copyrighted or used a non-BSD compatible license +2) had too many dependencies and a free standing lib was needed +3) did more than needed and it was easier to write afresh rather than + figure out how to get just what was needed. + +It is pretty easy to use, and has no external dependencies: + +>>> import matplotlib as mpl +>>> from pathlib import Path +>>> afm_path = Path(mpl.get_data_path(), 'fonts', 'afm', 'ptmr8a.afm') +>>> +>>> from matplotlib.afm import AFM +>>> with afm_path.open('rb') as fh: +... afm = AFM(fh) +>>> afm.string_width_height('What the heck?') +(6220.0, 694) +>>> afm.get_fontname() +'Times-Roman' +>>> afm.get_kern_dist('A', 'f') +0 +>>> afm.get_kern_dist('A', 'y') +-92.0 +>>> afm.get_bbox_char('!') +[130, -9, 238, 676] + +As in the Adobe Font Metrics File Format Specification, all dimensions +are given in units of 1/1000 of the scale factor (point size) of the font +being used. +""" + +from collections import namedtuple +import logging +import re + +from ._mathtext_data import uni2type1 + + +_log = logging.getLogger(__name__) + + +def _to_int(x): + # Some AFM files have floats where we are expecting ints -- there is + # probably a better way to handle this (support floats, round rather than + # truncate). But I don't know what the best approach is now and this + # change to _to_int should at least prevent Matplotlib from crashing on + # these. JDH (2009-11-06) + return int(float(x)) + + +def _to_float(x): + # Some AFM files use "," instead of "." as decimal separator -- this + # shouldn't be ambiguous (unless someone is wicked enough to use "," as + # thousands separator...). + if isinstance(x, bytes): + # Encoding doesn't really matter -- if we have codepoints >127 the call + # to float() will error anyways. 
+ x = x.decode('latin-1') + return float(x.replace(',', '.')) + + +def _to_str(x): + return x.decode('utf8') + + +def _to_list_of_ints(s): + s = s.replace(b',', b' ') + return [_to_int(val) for val in s.split()] + + +def _to_list_of_floats(s): + return [_to_float(val) for val in s.split()] + + +def _to_bool(s): + if s.lower().strip() in (b'false', b'0', b'no'): + return False + else: + return True + + +def _parse_header(fh): + """ + Read the font metrics header (up to the char metrics) and returns + a dictionary mapping *key* to *val*. *val* will be converted to the + appropriate python type as necessary; e.g.: + + * 'False'->False + * '0'->0 + * '-168 -218 1000 898'-> [-168, -218, 1000, 898] + + Dictionary keys are + + StartFontMetrics, FontName, FullName, FamilyName, Weight, + ItalicAngle, IsFixedPitch, FontBBox, UnderlinePosition, + UnderlineThickness, Version, Notice, EncodingScheme, CapHeight, + XHeight, Ascender, Descender, StartCharMetrics + """ + header_converters = { + b'StartFontMetrics': _to_float, + b'FontName': _to_str, + b'FullName': _to_str, + b'FamilyName': _to_str, + b'Weight': _to_str, + b'ItalicAngle': _to_float, + b'IsFixedPitch': _to_bool, + b'FontBBox': _to_list_of_ints, + b'UnderlinePosition': _to_float, + b'UnderlineThickness': _to_float, + b'Version': _to_str, + # Some AFM files have non-ASCII characters (which are not allowed by + # the spec). Given that there is actually no public API to even access + # this field, just return it as straight bytes. + b'Notice': lambda x: x, + b'EncodingScheme': _to_str, + b'CapHeight': _to_float, # Is the second version a mistake, or + b'Capheight': _to_float, # do some AFM files contain 'Capheight'? 
-JKS + b'XHeight': _to_float, + b'Ascender': _to_float, + b'Descender': _to_float, + b'StdHW': _to_float, + b'StdVW': _to_float, + b'StartCharMetrics': _to_int, + b'CharacterSet': _to_str, + b'Characters': _to_int, + } + d = {} + first_line = True + for line in fh: + line = line.rstrip() + if line.startswith(b'Comment'): + continue + lst = line.split(b' ', 1) + key = lst[0] + if first_line: + # AFM spec, Section 4: The StartFontMetrics keyword + # [followed by a version number] must be the first line in + # the file, and the EndFontMetrics keyword must be the + # last non-empty line in the file. We just check the + # first header entry. + if key != b'StartFontMetrics': + raise RuntimeError('Not an AFM file') + first_line = False + if len(lst) == 2: + val = lst[1] + else: + val = b'' + try: + converter = header_converters[key] + except KeyError: + _log.error('Found an unknown keyword in AFM header (was %r)' % key) + continue + try: + d[key] = converter(val) + except ValueError: + _log.error('Value error parsing header in AFM: %s, %s', key, val) + continue + if key == b'StartCharMetrics': + break + else: + raise RuntimeError('Bad parse') + return d + + +CharMetrics = namedtuple('CharMetrics', 'width, name, bbox') +CharMetrics.__doc__ = """ + Represents the character metrics of a single character. + + Notes + ----- + The fields do currently only describe a subset of character metrics + information defined in the AFM standard. + """ +CharMetrics.width.__doc__ = """The character width (WX).""" +CharMetrics.name.__doc__ = """The character name (N).""" +CharMetrics.bbox.__doc__ = """ + The bbox of the character (B) as a tuple (*llx*, *lly*, *urx*, *ury*).""" + + +def _parse_char_metrics(fh): + """ + Parse the given filehandle for character metrics information and return + the information as dicts. + + It is assumed that the file cursor is on the line behind + 'StartCharMetrics'. 
+ + Returns + ------- + ascii_d : dict + A mapping "ASCII num of the character" to `.CharMetrics`. + name_d : dict + A mapping "character name" to `.CharMetrics`. + + Notes + ----- + This function is incomplete per the standard, but thus far parses + all the sample afm files tried. + """ + required_keys = {'C', 'WX', 'N', 'B'} + + ascii_d = {} + name_d = {} + for line in fh: + # We are defensively letting values be utf8. The spec requires + # ascii, but there are non-compliant fonts in circulation + line = _to_str(line.rstrip()) # Convert from byte-literal + if line.startswith('EndCharMetrics'): + return ascii_d, name_d + # Split the metric line into a dictionary, keyed by metric identifiers + vals = dict(s.strip().split(' ', 1) for s in line.split(';') if s) + # There may be other metrics present, but only these are needed + if not required_keys.issubset(vals): + raise RuntimeError('Bad char metrics line: %s' % line) + num = _to_int(vals['C']) + wx = _to_float(vals['WX']) + name = vals['N'] + bbox = _to_list_of_floats(vals['B']) + bbox = list(map(int, bbox)) + metrics = CharMetrics(wx, name, bbox) + # Workaround: If the character name is 'Euro', give it the + # corresponding character code, according to WinAnsiEncoding (see PDF + # Reference). + if name == 'Euro': + num = 128 + elif name == 'minus': + num = ord("\N{MINUS SIGN}") # 0x2212 + if num != -1: + ascii_d[num] = metrics + name_d[name] = metrics + raise RuntimeError('Bad parse') + + +def _parse_kern_pairs(fh): + """ + Return a kern pairs dictionary; keys are (*char1*, *char2*) tuples and + values are the kern pair value. 
For example, a kern pairs line like + ``KPX A y -50`` + + will be represented as:: + + d[ ('A', 'y') ] = -50 + + """ + + line = next(fh) + if not line.startswith(b'StartKernPairs'): + raise RuntimeError('Bad start of kern pairs data: %s' % line) + + d = {} + for line in fh: + line = line.rstrip() + if not line: + continue + if line.startswith(b'EndKernPairs'): + next(fh) # EndKernData + return d + vals = line.split() + if len(vals) != 4 or vals[0] != b'KPX': + raise RuntimeError('Bad kern pairs line: %s' % line) + c1, c2, val = _to_str(vals[1]), _to_str(vals[2]), _to_float(vals[3]) + d[(c1, c2)] = val + raise RuntimeError('Bad kern pairs parse') + + +CompositePart = namedtuple('CompositePart', 'name, dx, dy') +CompositePart.__doc__ = """ + Represents the information on a composite element of a composite char.""" +CompositePart.name.__doc__ = """Name of the part, e.g. 'acute'.""" +CompositePart.dx.__doc__ = """x-displacement of the part from the origin.""" +CompositePart.dy.__doc__ = """y-displacement of the part from the origin.""" + + +def _parse_composites(fh): + """ + Parse the given filehandle for composites information return them as a + dict. + + It is assumed that the file cursor is on the line behind 'StartComposites'. + + Returns + ------- + dict + A dict mapping composite character names to a parts list. The parts + list is a list of `.CompositePart` entries describing the parts of + the composite. 
+ + Examples + -------- + A composite definition line:: + + CC Aacute 2 ; PCC A 0 0 ; PCC acute 160 170 ; + + will be represented as:: + + composites['Aacute'] = [CompositePart(name='A', dx=0, dy=0), + CompositePart(name='acute', dx=160, dy=170)] + + """ + composites = {} + for line in fh: + line = line.rstrip() + if not line: + continue + if line.startswith(b'EndComposites'): + return composites + vals = line.split(b';') + cc = vals[0].split() + name, numParts = cc[1], _to_int(cc[2]) + pccParts = [] + for s in vals[1:-1]: + pcc = s.split() + part = CompositePart(pcc[1], _to_float(pcc[2]), _to_float(pcc[3])) + pccParts.append(part) + composites[name] = pccParts + + raise RuntimeError('Bad composites parse') + + +def _parse_optional(fh): + """ + Parse the optional fields for kern pair data and composites. + + Returns + ------- + kern_data : dict + A dict containing kerning information. May be empty. + See `._parse_kern_pairs`. + composites : dict + A dict containing composite information. May be empty. + See `._parse_composites`. + """ + optional = { + b'StartKernData': _parse_kern_pairs, + b'StartComposites': _parse_composites, + } + + d = {b'StartKernData': {}, + b'StartComposites': {}} + for line in fh: + line = line.rstrip() + if not line: + continue + key = line.split()[0] + + if key in optional: + d[key] = optional[key](fh) + + return d[b'StartKernData'], d[b'StartComposites'] + + +class AFM: + + def __init__(self, fh): + """Parse the AFM file in file object *fh*.""" + self._header = _parse_header(fh) + self._metrics, self._metrics_by_name = _parse_char_metrics(fh) + self._kern, self._composite = _parse_optional(fh) + + def get_bbox_char(self, c, isord=False): + if not isord: + c = ord(c) + return self._metrics[c].bbox + + def string_width_height(self, s): + """ + Return the string width (including kerning) and string height + as a (*w*, *h*) tuple. 
+ """ + if not len(s): + return 0, 0 + total_width = 0 + namelast = None + miny = 1e9 + maxy = 0 + for c in s: + if c == '\n': + continue + wx, name, bbox = self._metrics[ord(c)] + + total_width += wx + self._kern.get((namelast, name), 0) + l, b, w, h = bbox + miny = min(miny, b) + maxy = max(maxy, b + h) + + namelast = name + + return total_width, maxy - miny + + def get_str_bbox_and_descent(self, s): + """Return the string bounding box and the maximal descent.""" + if not len(s): + return 0, 0, 0, 0, 0 + total_width = 0 + namelast = None + miny = 1e9 + maxy = 0 + left = 0 + if not isinstance(s, str): + s = _to_str(s) + for c in s: + if c == '\n': + continue + name = uni2type1.get(ord(c), f"uni{ord(c):04X}") + try: + wx, _, bbox = self._metrics_by_name[name] + except KeyError: + name = 'question' + wx, _, bbox = self._metrics_by_name[name] + total_width += wx + self._kern.get((namelast, name), 0) + l, b, w, h = bbox + left = min(left, l) + miny = min(miny, b) + maxy = max(maxy, b + h) + + namelast = name + + return left, miny, total_width, maxy - miny, -miny + + def get_str_bbox(self, s): + """Return the string bounding box.""" + return self.get_str_bbox_and_descent(s)[:4] + + def get_name_char(self, c, isord=False): + """Get the name of the character, i.e., ';' is 'semicolon'.""" + if not isord: + c = ord(c) + return self._metrics[c].name + + def get_width_char(self, c, isord=False): + """ + Get the width of the character from the character metric WX field. 
+ """ + if not isord: + c = ord(c) + return self._metrics[c].width + + def get_width_from_char_name(self, name): + """Get the width of the character from a type1 character name.""" + return self._metrics_by_name[name].width + + def get_height_char(self, c, isord=False): + """Get the bounding box (ink) height of character *c* (space is 0).""" + if not isord: + c = ord(c) + return self._metrics[c].bbox[-1] + + def get_kern_dist(self, c1, c2): + """ + Return the kerning pair distance (possibly 0) for chars *c1* and *c2*. + """ + name1, name2 = self.get_name_char(c1), self.get_name_char(c2) + return self.get_kern_dist_from_name(name1, name2) + + def get_kern_dist_from_name(self, name1, name2): + """ + Return the kerning pair distance (possibly 0) for chars + *name1* and *name2*. + """ + return self._kern.get((name1, name2), 0) + + def get_fontname(self): + """Return the font name, e.g., 'Times-Roman'.""" + return self._header[b'FontName'] + + def get_fullname(self): + """Return the font full name, e.g., 'Times-Roman'.""" + name = self._header.get(b'FullName') + if name is None: # use FontName as a substitute + name = self._header[b'FontName'] + return name + + def get_familyname(self): + """Return the font family name, e.g., 'Times'.""" + name = self._header.get(b'FamilyName') + if name is not None: + return name + + # FamilyName not specified so we'll make a guess + name = self.get_fullname() + extras = (r'(?i)([ -](regular|plain|italic|oblique|bold|semibold|' + r'light|ultralight|extra|condensed))+$') + return re.sub(extras, '', name) + + @property + def family_name(self): + """The font family name, e.g., 'Times'.""" + return self.get_familyname() + + def get_weight(self): + """Return the font weight, e.g., 'Bold' or 'Roman'.""" + return self._header[b'Weight'] + + def get_angle(self): + """Return the fontangle as float.""" + return self._header[b'ItalicAngle'] + + def get_capheight(self): + """Return the cap height as float.""" + return self._header[b'CapHeight'] + 
+ def get_xheight(self): + """Return the xheight as float.""" + return self._header[b'XHeight'] + + def get_underline_thickness(self): + """Return the underline thickness as float.""" + return self._header[b'UnderlineThickness'] + + def get_horizontal_stem_width(self): + """ + Return the standard horizontal stem width as float, or *None* if + not specified in AFM file. + """ + return self._header.get(b'StdHW', None) + + def get_vertical_stem_width(self): + """ + Return the standard vertical stem width as float, or *None* if + not specified in AFM file. + """ + return self._header.get(b'StdVW', None) diff --git a/venv/Lib/site-packages/matplotlib/animation.py b/venv/Lib/site-packages/matplotlib/animation.py new file mode 100644 index 000000000..2568d455d --- /dev/null +++ b/venv/Lib/site-packages/matplotlib/animation.py @@ -0,0 +1,1735 @@ +# TODO: +# * Documentation -- this will need a new section of the User's Guide. +# Both for Animations and just timers. +# - Also need to update http://www.scipy.org/Cookbook/Matplotlib/Animations +# * Blit +# * Currently broken with Qt4 for widgets that don't start on screen +# * Still a few edge cases that aren't working correctly +# * Can this integrate better with existing matplotlib animation artist flag? +# - If animated removes from default draw(), perhaps we could use this to +# simplify initial draw. +# * Example +# * Frameless animation - pure procedural with no loop +# * Need example that uses something like inotify or subprocess +# * Complex syncing examples +# * Movies +# * Can blit be enabled for movies? 
+# * Need to consider event sources to allow clicking through multiple figures + +import abc +import base64 +import contextlib +from io import BytesIO, TextIOWrapper +import itertools +import logging +from pathlib import Path +import shutil +import subprocess +import sys +from tempfile import TemporaryDirectory +import uuid + +import numpy as np + +import matplotlib as mpl +from matplotlib._animation_data import ( + DISPLAY_TEMPLATE, INCLUDED_FRAMES, JS_INCLUDE, STYLE_INCLUDE) +from matplotlib import cbook + + +_log = logging.getLogger(__name__) + +# Process creation flag for subprocess to prevent it raising a terminal +# window. See for example: +# https://stackoverflow.com/questions/24130623/using-python-subprocess-popen-cant-prevent-exe-stopped-working-prompt +if sys.platform == 'win32': + subprocess_creation_flags = CREATE_NO_WINDOW = 0x08000000 +else: + # Apparently None won't work here + subprocess_creation_flags = 0 + +# Other potential writing methods: +# * http://pymedia.org/ +# * libming (produces swf) python wrappers: https://github.com/libming/libming +# * Wrap x264 API: + +# (http://stackoverflow.com/questions/2940671/ +# how-to-encode-series-of-images-into-h264-using-x264-api-c-c ) + + +def adjusted_figsize(w, h, dpi, n): + """ + Compute figure size so that pixels are a multiple of n. + + Parameters + ---------- + w, h : float + Size in inches. + + dpi : float + The dpi. + + n : int + The target multiple. + + Returns + ------- + wnew, hnew : float + The new figure size in inches. 
+ """ + + # this maybe simplified if / when we adopt consistent rounding for + # pixel size across the whole library + def correct_roundoff(x, dpi, n): + if int(x*dpi) % n != 0: + if int(np.nextafter(x, np.inf)*dpi) % n == 0: + x = np.nextafter(x, np.inf) + elif int(np.nextafter(x, -np.inf)*dpi) % n == 0: + x = np.nextafter(x, -np.inf) + return x + + wnew = int(w * dpi / n) * n / dpi + hnew = int(h * dpi / n) * n / dpi + return correct_roundoff(wnew, dpi, n), correct_roundoff(hnew, dpi, n) + + +class MovieWriterRegistry: + """Registry of available writer classes by human readable name.""" + + def __init__(self): + self._registered = dict() + + @cbook.deprecated("3.2") + def set_dirty(self): + """Sets a flag to re-setup the writers.""" + + def register(self, name): + """ + Decorator for registering a class under a name. + + Example use:: + + @registry.register(name) + class Foo: + pass + """ + def wrapper(writer_cls): + self._registered[name] = writer_cls + return writer_cls + return wrapper + + @cbook.deprecated("3.2") + def ensure_not_dirty(self): + """If dirty, reasks the writers if they are available""" + + @cbook.deprecated("3.2") + def reset_available_writers(self): + """Reset the available state of all registered writers""" + + @cbook.deprecated("3.2") + @property + def avail(self): + return {name: self._registered[name] for name in self.list()} + + def is_available(self, name): + """ + Check if given writer is available by name. 
+ + Parameters + ---------- + name : str + + Returns + ------- + bool + """ + try: + cls = self._registered[name] + except KeyError: + return False + return cls.isAvailable() + + def __iter__(self): + """Iterate over names of available writer class.""" + for name in self._registered: + if self.is_available(name): + yield name + + def list(self): + """Get a list of available MovieWriters.""" + return [*self] + + def __getitem__(self, name): + """Get an available writer class from its name.""" + if self.is_available(name): + return self._registered[name] + raise RuntimeError(f"Requested MovieWriter ({name}) not available") + + +writers = MovieWriterRegistry() + + +class AbstractMovieWriter(abc.ABC): + """ + Abstract base class for writing movies. Fundamentally, what a MovieWriter + does is provide is a way to grab frames by calling grab_frame(). + + setup() is called to start the process and finish() is called afterwards. + + This class is set up to provide for writing movie frame data to a pipe. + saving() is provided as a context manager to facilitate this process as:: + + with moviewriter.saving(fig, outfile='myfile.mp4', dpi=100): + # Iterate over frames + moviewriter.grab_frame(**savefig_kwargs) + + The use of the context manager ensures that setup() and finish() are + performed as necessary. + + An instance of a concrete subclass of this class can be given as the + ``writer`` argument of `Animation.save()`. + """ + + def __init__(self, fps=5, metadata=None, codec=None, bitrate=None): + self.fps = fps + self.metadata = metadata if metadata is not None else {} + self.codec = ( + mpl.rcParams['animation.codec'] if codec is None else codec) + self.bitrate = ( + mpl.rcParams['animation.bitrate'] if bitrate is None else bitrate) + + @abc.abstractmethod + def setup(self, fig, outfile, dpi=None): + """ + Setup for writing the movie file. + + Parameters + ---------- + fig : `~matplotlib.figure.Figure` + The figure object that contains the information for frames. 
+ outfile : str + The filename of the resulting movie file. + dpi : float, default: ``fig.dpi`` + The DPI (or resolution) for the file. This controls the size + in pixels of the resulting movie file. + """ + self.outfile = outfile + self.fig = fig + if dpi is None: + dpi = self.fig.dpi + self.dpi = dpi + + @property + def frame_size(self): + """A tuple ``(width, height)`` in pixels of a movie frame.""" + w, h = self.fig.get_size_inches() + return int(w * self.dpi), int(h * self.dpi) + + @abc.abstractmethod + def grab_frame(self, **savefig_kwargs): + """ + Grab the image information from the figure and save as a movie frame. + + All keyword arguments in *savefig_kwargs* are passed on to the + `~.Figure.savefig` call that saves the figure. + """ + + @abc.abstractmethod + def finish(self): + """Finish any processing for writing the movie.""" + + @contextlib.contextmanager + def saving(self, fig, outfile, dpi, *args, **kwargs): + """ + Context manager to facilitate writing the movie file. + + ``*args, **kw`` are any parameters that should be passed to `setup`. + """ + # This particular sequence is what contextlib.contextmanager wants + self.setup(fig, outfile, dpi, *args, **kwargs) + try: + yield self + finally: + self.finish() + + +class MovieWriter(AbstractMovieWriter): + """ + Base class for writing movies. + + This is a base class for MovieWriter subclasses that write a movie frame + data to a pipe. You cannot instantiate this class directly. + See examples for how to use its subclasses. + + Attributes + ---------- + frame_format : str + The format used in writing frame data, defaults to 'rgba'. + fig : `~matplotlib.figure.Figure` + The figure to capture data from. + This must be provided by the sub-classes. + """ + + # Builtin writer subclasses additionally define the _exec_key and _args_key + # attributes, which indicate the rcParams entries where the path to the + # executable and additional command-line arguments to the executable are + # stored. 
Third-party writers cannot meaningfully set these as they cannot + # extend rcParams with new keys. + + exec_key = cbook._deprecate_privatize_attribute("3.3") + args_key = cbook._deprecate_privatize_attribute("3.3") + + def __init__(self, fps=5, codec=None, bitrate=None, extra_args=None, + metadata=None): + """ + Parameters + ---------- + fps : int, default: 5 + Movie frame rate (per second). + codec : str or None, default: :rc:`animation.codec` + The codec to use. + bitrate : int, default: :rc:`animation.bitrate` + The bitrate of the movie, in kilobits per second. Higher values + means higher quality movies, but increase the file size. A value + of -1 lets the underlying movie encoder select the bitrate. + extra_args : list of str or None, optional + Extra command-line arguments passed to the underlying movie + encoder. The default, None, means to use + :rc:`animation.[name-of-encoder]_args` for the builtin writers. + metadata : Dict[str, str], default: {} + A dictionary of keys and values for metadata to include in the + output file. Some keys that may be of use include: + title, artist, genre, subject, copyright, srcform, comment. + """ + if type(self) is MovieWriter: + # TODO MovieWriter is still an abstract class and needs to be + # extended with a mixin. This should be clearer in naming + # and description. For now, just give a reasonable error + # message to users. + raise TypeError( + 'MovieWriter cannot be instantiated directly. 
Please use one ' + 'of its subclasses.') + + super().__init__(fps=fps, metadata=metadata, codec=codec, + bitrate=bitrate) + + self.frame_format = 'rgba' + self.extra_args = extra_args + + def _adjust_frame_size(self): + if self.codec == 'h264': + wo, ho = self.fig.get_size_inches() + w, h = adjusted_figsize(wo, ho, self.dpi, 2) + if (wo, ho) != (w, h): + self.fig.set_size_inches(w, h, forward=True) + _log.info('figure size in inches has been adjusted ' + 'from %s x %s to %s x %s', wo, ho, w, h) + else: + w, h = self.fig.get_size_inches() + _log.debug('frame size in pixels is %s x %s', *self.frame_size) + return w, h + + def setup(self, fig, outfile, dpi=None): + # docstring inherited + super().setup(fig, outfile, dpi=dpi) + self._w, self._h = self._adjust_frame_size() + # Run here so that grab_frame() can write the data to a pipe. This + # eliminates the need for temp files. + self._run() + + def _run(self): + # Uses subprocess to call the program for assembling frames into a + # movie file. *args* returns the sequence of command line arguments + # from a few configuration options. + command = self._args() + _log.info('MovieWriter._run: running command: %s', + cbook._pformat_subprocess(command)) + PIPE = subprocess.PIPE + self._proc = subprocess.Popen( + command, stdin=PIPE, stdout=PIPE, stderr=PIPE, + creationflags=subprocess_creation_flags) + + def finish(self): + """Finish any processing for writing the movie.""" + self.cleanup() + + def grab_frame(self, **savefig_kwargs): + # docstring inherited + _log.debug('MovieWriter.grab_frame: Grabbing frame.') + # Readjust the figure size in case it has been changed by the user. + # All frames must have the same size to save the movie correctly. + self.fig.set_size_inches(self._w, self._h) + # Save the figure data to the sink, using the frame format and dpi. 
+ self.fig.savefig(self._frame_sink(), format=self.frame_format, + dpi=self.dpi, **savefig_kwargs) + + def _frame_sink(self): + """Return the place to which frames should be written.""" + return self._proc.stdin + + def _args(self): + """Assemble list of encoder-specific command-line arguments.""" + return NotImplementedError("args needs to be implemented by subclass.") + + def cleanup(self): + """Clean-up and collect the process used to write the movie file.""" + out, err = self._proc.communicate() + self._frame_sink().close() + # Use the encoding/errors that universal_newlines would use. + out = TextIOWrapper(BytesIO(out)).read() + err = TextIOWrapper(BytesIO(err)).read() + if out: + _log.log( + logging.WARNING if self._proc.returncode else logging.DEBUG, + "MovieWriter stdout:\n%s", out) + if err: + _log.log( + logging.WARNING if self._proc.returncode else logging.DEBUG, + "MovieWriter stderr:\n%s", err) + if self._proc.returncode: + raise subprocess.CalledProcessError( + self._proc.returncode, self._proc.args, out, err) + + @classmethod + def bin_path(cls): + """ + Return the binary path to the commandline tool used by a specific + subclass. This is a class method so that the tool can be looked for + before making a particular MovieWriter subclass available. + """ + return str(mpl.rcParams[cls._exec_key]) + + @classmethod + def isAvailable(cls): + """Return whether a MovieWriter subclass is actually available.""" + return shutil.which(cls.bin_path()) is not None + + +class FileMovieWriter(MovieWriter): + """ + `MovieWriter` for writing to individual files and stitching at the end. + + This must be sub-classed to be useful. + """ + def __init__(self, *args, **kwargs): + MovieWriter.__init__(self, *args, **kwargs) + self.frame_format = mpl.rcParams['animation.frame_format'] + + @cbook._delete_parameter("3.3", "clear_temp") + def setup(self, fig, outfile, dpi=None, frame_prefix=None, + clear_temp=True): + """ + Setup for writing the movie file. 
+ + Parameters + ---------- + fig : `~matplotlib.figure.Figure` + The figure to grab the rendered frames from. + outfile : str + The filename of the resulting movie file. + dpi : float, optional + The dpi of the output file. This, with the figure size, + controls the size in pixels of the resulting movie file. + Default is ``fig.dpi``. + frame_prefix : str, optional + The filename prefix to use for temporary files. If None (the + default), files are written to a temporary directory which is + deleted by `cleanup` (regardless of the value of *clear_temp*). + clear_temp : bool, optional + If the temporary files should be deleted after stitching + the final result. Setting this to ``False`` can be useful for + debugging. Defaults to ``True``. + """ + self.fig = fig + self.outfile = outfile + if dpi is None: + dpi = self.fig.dpi + self.dpi = dpi + self._adjust_frame_size() + + if frame_prefix is None: + self._tmpdir = TemporaryDirectory() + self.temp_prefix = str(Path(self._tmpdir.name, 'tmp')) + else: + self._tmpdir = None + self.temp_prefix = frame_prefix + self._clear_temp = clear_temp + self._frame_counter = 0 # used for generating sequential file names + self._temp_paths = list() + self.fname_format_str = '%s%%07d.%s' + + @cbook.deprecated("3.3") + @property + def clear_temp(self): + return self._clear_temp + + @clear_temp.setter + def clear_temp(self, value): + self._clear_temp = value + + @property + def frame_format(self): + """ + Format (png, jpeg, etc.) to use for saving the frames, which can be + decided by the individual subclasses. + """ + return self._frame_format + + @frame_format.setter + def frame_format(self, frame_format): + if frame_format in self.supported_formats: + self._frame_format = frame_format + else: + self._frame_format = self.supported_formats[0] + + def _base_temp_name(self): + # Generates a template name (without number) given the frame format + # for extension and the prefix. 
+ return self.fname_format_str % (self.temp_prefix, self.frame_format) + + def _frame_sink(self): + # Creates a filename for saving using the basename and the current + # counter. + path = Path(self._base_temp_name() % self._frame_counter) + + # Save the filename so we can delete it later if necessary + self._temp_paths.append(path) + _log.debug('FileMovieWriter.frame_sink: saving frame %d to path=%s', + self._frame_counter, path) + self._frame_counter += 1 # Ensures each created name is 'unique' + + # This file returned here will be closed once it's used by savefig() + # because it will no longer be referenced and will be gc-ed. + return open(path, 'wb') + + def grab_frame(self, **savefig_kwargs): + # docstring inherited + # Overloaded to explicitly close temp file. + _log.debug('MovieWriter.grab_frame: Grabbing frame.') + # Tell the figure to save its data to the sink, using the + # frame format and dpi. + with self._frame_sink() as myframesink: + self.fig.savefig(myframesink, format=self.frame_format, + dpi=self.dpi, **savefig_kwargs) + + def finish(self): + # Call run here now that all frame grabbing is done. All temp files + # are available to be assembled. 
+ self._run() + MovieWriter.finish(self) # Will call clean-up + + def cleanup(self): + MovieWriter.cleanup(self) + if self._tmpdir: + _log.debug('MovieWriter: clearing temporary path=%s', self._tmpdir) + self._tmpdir.cleanup() + else: + if self._clear_temp: + _log.debug('MovieWriter: clearing temporary paths=%s', + self._temp_paths) + for path in self._temp_paths: + path.unlink() + + +@writers.register('pillow') +class PillowWriter(AbstractMovieWriter): + @classmethod + def isAvailable(cls): + return True + + def setup(self, fig, outfile, dpi=None): + super().setup(fig, outfile, dpi=dpi) + self._frames = [] + + def grab_frame(self, **savefig_kwargs): + from PIL import Image + buf = BytesIO() + self.fig.savefig( + buf, **{**savefig_kwargs, "format": "rgba", "dpi": self.dpi}) + renderer = self.fig.canvas.get_renderer() + self._frames.append(Image.frombuffer( + "RGBA", self.frame_size, buf.getbuffer(), "raw", "RGBA", 0, 1)) + + def finish(self): + self._frames[0].save( + self.outfile, save_all=True, append_images=self._frames[1:], + duration=int(1000 / self.fps), loop=0) + + +# Base class of ffmpeg information. Has the config keys and the common set +# of arguments that controls the *output* side of things. +class FFMpegBase: + """ + Mixin class for FFMpeg output. + + To be useful this must be multiply-inherited from with a + `MovieWriterBase` sub-class. + """ + + _exec_key = 'animation.ffmpeg_path' + _args_key = 'animation.ffmpeg_args' + + @property + def output_args(self): + args = [] + if Path(self.outfile).suffix == '.gif': + self.codec = 'gif' + else: + args.extend(['-vcodec', self.codec]) + extra_args = (self.extra_args if self.extra_args is not None + else mpl.rcParams[self._args_key]) + # For h264, the default format is yuv444p, which is not compatible + # with quicktime (and others). Specifying yuv420p fixes playback on + # iOS, as well as HTML5 video in firefox and safari (on both Win and + # OSX). Also fixes internet explorer. This is as of 2015/10/29. 
# Base class of ffmpeg information.  Holds the rcParam config keys and the
# shared arguments that control the *output* side of the pipeline.
class FFMpegBase:
    """
    Mixin class for FFMpeg output.

    To be useful this must be multiply-inherited from with a
    `MovieWriterBase` sub-class.
    """

    _exec_key = 'animation.ffmpeg_path'
    _args_key = 'animation.ffmpeg_args'

    @property
    def output_args(self):
        """Build the output-side command-line arguments for ffmpeg."""
        args = []
        # GIF output is produced via a filter graph instead of -vcodec.
        if Path(self.outfile).suffix == '.gif':
            self.codec = 'gif'
        else:
            args.extend(['-vcodec', self.codec])
        extra_args = (self.extra_args if self.extra_args is not None
                      else mpl.rcParams[self._args_key])
        if self.codec == 'h264' and '-pix_fmt' not in extra_args:
            # h264 defaults to yuv444p, which QuickTime, IE, and some HTML5
            # players cannot display; yuv420p plays everywhere
            # (observation as of 2015/10/29).
            args.extend(['-pix_fmt', 'yuv420p'])
        elif self.codec == 'gif' and '-filter_complex' not in extra_args:
            # Split the stream, generate a palette from one copy, then
            # encode the other copy with that palette.
            args.extend(['-filter_complex',
                         'split [a][b];[a] palettegen [p];[b][p] paletteuse'])
        if self.bitrate > 0:
            args.extend(['-b', '%dk' % self.bitrate])  # %dk: bitrate in kbps.
        args.extend(extra_args)
        for key, value in self.metadata.items():
            args.extend(['-metadata', '%s=%s' % (key, value)])
        # -y: overwrite the output file without prompting.
        return args + ['-y', self.outfile]

    @classmethod
    def isAvailable(cls):
        if not super().isAvailable():
            return False
        # Ubuntu 12.04 ships a broken ffmpeg binary which we shouldn't use.
        # NOTE: when removed, remove the same method in AVConvBase.
        probe = subprocess.run(
            [cls.bin_path()], creationflags=subprocess_creation_flags,
            stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
            stderr=subprocess.PIPE)
        return b'LibAv' not in probe.stderr
# Combine FFMpeg options with pipe-based writing
@writers.register('ffmpeg')
class FFMpegWriter(FFMpegBase, MovieWriter):
    """
    Pipe-based ffmpeg writer.

    Frames are streamed directly to ffmpeg via a pipe and written in a single
    pass.
    """

    def _args(self):
        """Assemble the ffmpeg command line for raw-video stdin input."""
        cmd = [self.bin_path(), '-f', 'rawvideo', '-vcodec', 'rawvideo',
               '-s', '%dx%d' % self.frame_size, '-pix_fmt', self.frame_format,
               '-r', str(self.fps)]
        # Quiet ffmpeg unless debugging: subprocess.PIPE has a limited
        # buffer, and verbose output over many frames can overrun it.
        if _log.getEffectiveLevel() > logging.DEBUG:
            cmd += ['-loglevel', 'error']
        return cmd + ['-i', 'pipe:'] + self.output_args


# Combine FFMpeg options with temp file-based writing
@writers.register('ffmpeg_file')
class FFMpegFileWriter(FFMpegBase, FileMovieWriter):
    """
    File-based ffmpeg writer.

    Frames are written to temporary files on disk and then stitched
    together at the end.
    """

    supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
                         'pbm', 'raw', 'rgba']

    def _args(self):
        """Assemble the ffmpeg command line over the numbered temp images."""
        return [self.bin_path(), '-r', str(self.fps),
                '-i', self._base_temp_name(),
                '-vframes', str(self._frame_counter)] + self.output_args


# Base class of avconv information.  AVConv has identical arguments to FFMpeg.
@cbook.deprecated('3.3')
class AVConvBase(FFMpegBase):
    """
    Mixin class for avconv output.

    To be useful this must be multiply-inherited from with a
    `MovieWriterBase` sub-class.
    """

    _exec_key = 'animation.avconv_path'
    _args_key = 'animation.avconv_args'

    # NOTE : should be removed when the same method is removed in FFMpegBase.
    isAvailable = classmethod(MovieWriter.isAvailable.__func__)


# Combine AVConv options with pipe-based writing
@writers.register('avconv')
class AVConvWriter(AVConvBase, FFMpegWriter):
    """
    Pipe-based avconv writer.

    Frames are streamed directly to avconv via a pipe and written in a single
    pass.
    """


# Combine AVConv options with file-based writing
@writers.register('avconv_file')
class AVConvFileWriter(AVConvBase, FFMpegFileWriter):
    """
    File-based avconv writer.

    Frames are written to temporary files on disk and then stitched
    together at the end.
    """
# Base class for animated GIFs with ImageMagick
class ImageMagickBase:
    """
    Mixin class for ImageMagick output.

    To be useful this must be multiply-inherited from with a
    `MovieWriterBase` sub-class.
    """

    _exec_key = 'animation.convert_path'
    _args_key = 'animation.convert_args'

    @property
    def delay(self):
        """Inter-frame delay in ImageMagick ticks (1/100 s)."""
        return 100. / self.fps

    @property
    def output_args(self):
        """The configured extra arguments followed by the output filename."""
        extra_args = (self.extra_args if self.extra_args is not None
                      else mpl.rcParams[self._args_key])
        return [*extra_args, self.outfile]

    @classmethod
    def bin_path(cls):
        binpath = super().bin_path()
        if binpath == 'convert':
            # Resolve the generic 'convert' name to the real ImageMagick
            # executable via matplotlib's executable lookup.
            binpath = mpl._get_executable_info('magick').executable
        return binpath

    @classmethod
    def isAvailable(cls):
        try:
            return super().isAvailable()
        except mpl.ExecutableNotFoundError as _enf:
            # May be raised by get_executable_info.
            _log.debug('ImageMagick unavailable due to: %s', _enf)
            return False


# Combine ImageMagick options with pipe-based writing
@writers.register('imagemagick')
class ImageMagickWriter(ImageMagickBase, MovieWriter):
    """
    Pipe-based animated gif.

    Frames are streamed directly to ImageMagick via a pipe and written
    in a single pass.

    """

    def _args(self):
        # Frames arrive on stdin ('-') in frame_format at our pixel size.
        return ([self.bin_path(),
                 '-size', '%ix%i' % self.frame_size, '-depth', '8',
                 '-delay', str(self.delay), '-loop', '0',
                 '%s:-' % self.frame_format]
                + self.output_args)


# Combine ImageMagick options with temp file-based writing
@writers.register('imagemagick_file')
class ImageMagickFileWriter(ImageMagickBase, FileMovieWriter):
    """
    File-based animated gif writer.

    Frames are written to temporary files on disk and then stitched
    together at the end.

    """

    supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp',
                         'pbm', 'raw', 'rgba']

    def _args(self):
        # Glob over the numbered temp frames written by FileMovieWriter.
        return ([self.bin_path(), '-delay', str(self.delay), '-loop', '0',
                 '%s*.%s' % (self.temp_prefix, self.frame_format)]
                + self.output_args)
+ + """ + + supported_formats = ['png', 'jpeg', 'ppm', 'tiff', 'sgi', 'bmp', + 'pbm', 'raw', 'rgba'] + + def _args(self): + return ([self.bin_path(), '-delay', str(self.delay), '-loop', '0', + '%s*.%s' % (self.temp_prefix, self.frame_format)] + + self.output_args) + + +# Taken directly from jakevdp's JSAnimation package at +# http://github.com/jakevdp/JSAnimation +def _included_frames(paths, frame_format): + """paths should be a list of Paths""" + return INCLUDED_FRAMES.format(Nframes=len(paths), + frame_dir=paths[0].parent, + frame_format=frame_format) + + +def _embedded_frames(frame_list, frame_format): + """frame_list should be a list of base64-encoded png files""" + template = ' frames[{0}] = "data:image/{1};base64,{2}"\n' + return "\n" + "".join( + template.format(i, frame_format, frame_data.replace('\n', '\\\n')) + for i, frame_data in enumerate(frame_list)) + + +@writers.register('html') +class HTMLWriter(FileMovieWriter): + """Writer for JavaScript-based HTML movies.""" + + supported_formats = ['png', 'jpeg', 'tiff', 'svg'] + + @cbook.deprecated("3.3") + @property + def args_key(self): + return 'animation.html_args' + + @classmethod + def isAvailable(cls): + return True + + def __init__(self, fps=30, codec=None, bitrate=None, extra_args=None, + metadata=None, embed_frames=False, default_mode='loop', + embed_limit=None): + + if extra_args: + _log.warning("HTMLWriter ignores 'extra_args'") + extra_args = () # Don't lookup nonexistent rcParam[args_key]. 
@writers.register('html')
class HTMLWriter(FileMovieWriter):
    """Writer for JavaScript-based HTML movies."""

    supported_formats = ['png', 'jpeg', 'tiff', 'svg']

    @cbook.deprecated("3.3")
    @property
    def args_key(self):
        return 'animation.html_args'

    @classmethod
    def isAvailable(cls):
        # Writing HTML requires no external binary.
        return True

    def __init__(self, fps=30, codec=None, bitrate=None, extra_args=None,
                 metadata=None, embed_frames=False, default_mode='loop',
                 embed_limit=None):
        if extra_args:
            _log.warning("HTMLWriter ignores 'extra_args'")
        extra_args = ()  # Don't lookup nonexistent rcParam[args_key].
        self.embed_frames = embed_frames
        self.default_mode = default_mode.lower()
        cbook._check_in_list(['loop', 'once', 'reflect'],
                             default_mode=self.default_mode)

        # The embed limit is given in MB; keep it internally in bytes.
        if embed_limit is None:
            self._bytes_limit = mpl.rcParams['animation.embed_limit']
        else:
            self._bytes_limit = embed_limit
        self._bytes_limit *= 1024 * 1024

        super().__init__(fps, codec, bitrate, extra_args, metadata)

    def setup(self, fig, outfile, dpi, frame_dir=None):
        outfile = Path(outfile)
        cbook._check_in_list(['.html', '.htm'],
                             outfile_extension=outfile.suffix)

        self._saved_frames = []
        self._total_bytes = 0
        self._hit_limit = False

        if not self.embed_frames:
            # Frames are written as files next to the output page.
            if frame_dir is None:
                frame_dir = outfile.with_name(outfile.stem + '_frames')
            frame_dir.mkdir(parents=True, exist_ok=True)
            frame_prefix = frame_dir / 'frame'
        else:
            frame_prefix = None

        super().setup(fig, outfile, dpi, frame_prefix)
        # The HTML page references the frame files, so never delete them.
        self._clear_temp = False

    def grab_frame(self, **savefig_kwargs):
        if not self.embed_frames:
            return super().grab_frame(**savefig_kwargs)
        # Once the size limit is hit, silently drop further frames.
        if self._hit_limit:
            return
        buf = BytesIO()
        self.fig.savefig(buf, format=self.frame_format,
                         dpi=self.dpi, **savefig_kwargs)
        imgdata64 = base64.encodebytes(buf.getvalue()).decode('ascii')
        self._total_bytes += len(imgdata64)
        if self._total_bytes >= self._bytes_limit:
            _log.warning(
                "Animation size has reached %s bytes, exceeding the limit "
                "of %s. If you're sure you want a larger animation "
                "embedded, set the animation.embed_limit rc parameter to "
                "a larger value (in MB). This and further frames will be "
                "dropped.", self._total_bytes, self._bytes_limit)
            self._hit_limit = True
        else:
            self._saved_frames.append(imgdata64)

    def finish(self):
        # Assemble the frame data and write the stand-alone HTML page.
        if self.embed_frames:
            fill_frames = _embedded_frames(self._saved_frames,
                                           self.frame_format)
            Nframes = len(self._saved_frames)
        else:
            # _temp_paths is filled in by FileMovieWriter.
            fill_frames = _included_frames(self._temp_paths,
                                           self.frame_format)
            Nframes = len(self._temp_paths)
        mode_dict = dict(once_checked='',
                         loop_checked='',
                         reflect_checked='')
        mode_dict[self.default_mode + '_checked'] = 'checked'

        interval = 1000 // self.fps

        with open(self.outfile, 'w') as of:
            of.write(JS_INCLUDE + STYLE_INCLUDE)
            of.write(DISPLAY_TEMPLATE.format(id=uuid.uuid4().hex,
                                             Nframes=Nframes,
                                             fill_frames=fill_frames,
                                             interval=interval,
                                             **mode_dict))
class Animation:
    """
    A base class for Animations.

    This class is not usable as is, and should be subclassed to provide needed
    behavior.

    Parameters
    ----------
    fig : `~matplotlib.figure.Figure`
        The figure object used to get needed events, such as draw or resize.

    event_source : object, optional
        A class that can run a callback when desired events
        are generated, as well as be stopped and started.

        Examples include timers (see `TimedAnimation`) and file
        system notifications.

    blit : bool, default: False
        Whether blitting is used to optimize drawing.

    See Also
    --------
    FuncAnimation, ArtistAnimation
    """

    def __init__(self, fig, event_source=None, blit=False):
        self._fig = fig
        # Fall back to non-blitted drawing on backends without blit support,
        # so a user request for blitting never breaks the animation.
        self._blit = blit and fig.canvas.supports_blit

        # The frame sequence carries per-frame information (interpreted by
        # subclasses); the event source fires the events that advance it.
        self.frame_seq = self.new_frame_seq()
        self.event_source = event_source

        # Don't start the event source yet: wait for the figure's first
        # draw_event so nothing steps before the figure is on screen.
        self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)

        # Stop firing events when the figure closes, lest we keep trying to
        # draw to a deleted figure.
        self._close_id = self._fig.canvas.mpl_connect('close_event',
                                                      self._stop)
        if self._blit:
            self._setup_blit()

    def _start(self, *args):
        """
        Starts interactive animation. Adds the draw frame command to the GUI
        handler, calls show to start the event loop.
        """
        # Do not start the event source while the canvas is saving().
        if self._fig.canvas.is_saving():
            return
        # This one-shot draw handler has done its job; disconnect it.
        self._fig.canvas.mpl_disconnect(self._first_draw_id)

        # Perform the initial draw, then start stepping frames.
        self._init_draw()
        self.event_source.add_callback(self._step)
        self.event_source.start()

    def _stop(self, *args):
        # On stop we disconnect all of our events.
        if self._blit:
            self._fig.canvas.mpl_disconnect(self._resize_id)
        self._fig.canvas.mpl_disconnect(self._close_id)
        self.event_source.remove_callback(self._step)
        self.event_source = None
+ + writer : `MovieWriter` or str, default: :rc:`animation.writer` + A `MovieWriter` instance to use or a key that identifies a + class to use, such as 'ffmpeg'. + + fps : int, optional + Movie frame rate (per second). If not set, the frame rate from the + animation's frame interval. + + dpi : float, default: :rc:`savefig.dpi` + Controls the dots per inch for the movie frames. Together with + the figure's size in inches, this controls the size of the movie. + + codec : str, default: :rc:`animation.codec`. + The video codec to use. Not all codecs are supported by a given + `MovieWriter`. + + bitrate : int, default: :rc:`animation.bitrate` + The bitrate of the movie, in kilobits per second. Higher values + means higher quality movies, but increase the file size. A value + of -1 lets the underlying movie encoder select the bitrate. + + extra_args : list of str or None, optional + Extra command-line arguments passed to the underlying movie + encoder. The default, None, means to use + :rc:`animation.[name-of-encoder]_args` for the builtin writers. + + metadata : Dict[str, str], default {} + Dictionary of keys and values for metadata to include in + the output file. Some keys that may be of use include: + title, artist, genre, subject, copyright, srcform, comment. + + extra_anim : list, default: [] + Additional `Animation` objects that should be included + in the saved movie file. These need to be from the same + `matplotlib.figure.Figure` instance. Also, animation frames will + just be simply combined, so there should be a 1:1 correspondence + between the frames from the different animations. + + savefig_kwargs : dict, default: {} + Keyword arguments passed to each `~.Figure.savefig` call used to + save the individual frames. + + progress_callback : function, optional + A callback function that will be called for every frame to notify + the saving progress. 
It must have the signature :: + + def func(current_frame: int, total_frames: int) -> Any + + where *current_frame* is the current frame number and + *total_frames* is the total number of frames to be saved. + *total_frames* is set to None, if the total number of frames can + not be determined. Return values may exist but are ignored. + + Example code to write the progress to stdout:: + + progress_callback =\ + lambda i, n: print(f'Saving frame {i} of {n}') + + Notes + ----- + *fps*, *codec*, *bitrate*, *extra_args* and *metadata* are used to + construct a `.MovieWriter` instance and can only be passed if + *writer* is a string. If they are passed as non-*None* and *writer* + is a `.MovieWriter`, a `RuntimeError` will be raised. + """ + + if writer is None: + writer = mpl.rcParams['animation.writer'] + elif (not isinstance(writer, str) and + any(arg is not None + for arg in (fps, codec, bitrate, extra_args, metadata))): + raise RuntimeError('Passing in values for arguments ' + 'fps, codec, bitrate, extra_args, or metadata ' + 'is not supported when writer is an existing ' + 'MovieWriter instance. These should instead be ' + 'passed as arguments when creating the ' + 'MovieWriter instance.') + + if savefig_kwargs is None: + savefig_kwargs = {} + + if fps is None and hasattr(self, '_interval'): + # Convert interval in ms to frames per second + fps = 1000. 
/ self._interval + + # Re-use the savefig DPI for ours if none is given + if dpi is None: + dpi = mpl.rcParams['savefig.dpi'] + if dpi == 'figure': + dpi = self._fig.dpi + + writer_kwargs = {} + if codec is not None: + writer_kwargs['codec'] = codec + if bitrate is not None: + writer_kwargs['bitrate'] = bitrate + if extra_args is not None: + writer_kwargs['extra_args'] = extra_args + if metadata is not None: + writer_kwargs['metadata'] = metadata + + all_anim = [self] + if extra_anim is not None: + all_anim.extend(anim + for anim + in extra_anim if anim._fig is self._fig) + + # If we have the name of a writer, instantiate an instance of the + # registered class. + if isinstance(writer, str): + try: + writer_cls = writers[writer] + except RuntimeError: # Raised if not available. + writer_cls = PillowWriter # Always available. + _log.warning("MovieWriter %s unavailable; using Pillow " + "instead.", writer) + writer = writer_cls(fps, **writer_kwargs) + _log.info('Animation.save using %s', type(writer)) + + if 'bbox_inches' in savefig_kwargs: + _log.warning("Warning: discarding the 'bbox_inches' argument in " + "'savefig_kwargs' as it may cause frame size " + "to vary, which is inappropriate for animation.") + savefig_kwargs.pop('bbox_inches') + + # Create a new sequence of frames for saved data. This is different + # from new_frame_seq() to give the ability to save 'live' generated + # frame information to be saved later. + # TODO: Right now, after closing the figure, saving a movie won't work + # since GUI widgets are gone. Either need to remove extra code to + # allow for this non-existent use case or find a way to make it work. 
+ if mpl.rcParams['savefig.bbox'] == 'tight': + _log.info("Disabling savefig.bbox = 'tight', as it may cause " + "frame size to vary, which is inappropriate for " + "animation.") + # canvas._is_saving = True makes the draw_event animation-starting + # callback a no-op; canvas.manager = None prevents resizing the GUI + # widget (both are likewise done in savefig()). + with mpl.rc_context({'savefig.bbox': None}), \ + writer.saving(self._fig, filename, dpi), \ + cbook._setattr_cm(self._fig.canvas, + _is_saving=True, manager=None): + for anim in all_anim: + anim._init_draw() # Clear the initial frame + frame_number = 0 + # TODO: Currently only FuncAnimation has a save_count + # attribute. Can we generalize this to all Animations? + save_count_list = [getattr(a, 'save_count', None) + for a in all_anim] + if None in save_count_list: + total_frames = None + else: + total_frames = sum(save_count_list) + for data in zip(*[a.new_saved_frame_seq() for a in all_anim]): + for anim, d in zip(all_anim, data): + # TODO: See if turning off blit is really necessary + anim._draw_next_frame(d, blit=False) + if progress_callback is not None: + progress_callback(frame_number, total_frames) + frame_number += 1 + writer.grab_frame(**savefig_kwargs) + + def _step(self, *args): + """ + Handler for getting events. By default, gets the next frame in the + sequence and hands the data off to be drawn. + """ + # Returns True to indicate that the event source should continue to + # call _step, until the frame sequence reaches the end of iteration, + # at which point False will be returned. 
+ try: + framedata = next(self.frame_seq) + self._draw_next_frame(framedata, self._blit) + return True + except StopIteration: + return False + + def new_frame_seq(self): + """Return a new sequence of frame information.""" + # Default implementation is just an iterator over self._framedata + return iter(self._framedata) + + def new_saved_frame_seq(self): + """Return a new sequence of saved/cached frame information.""" + # Default is the same as the regular frame sequence + return self.new_frame_seq() + + def _draw_next_frame(self, framedata, blit): + # Breaks down the drawing of the next frame into steps of pre- and + # post- draw, as well as the drawing of the frame itself. + self._pre_draw(framedata, blit) + self._draw_frame(framedata) + self._post_draw(framedata, blit) + + def _init_draw(self): + # Initial draw to clear the frame. Also used by the blitting code + # when a clean base is required. + pass + + def _pre_draw(self, framedata, blit): + # Perform any cleaning or whatnot before the drawing of the frame. + # This default implementation allows blit to clear the frame. + if blit: + self._blit_clear(self._drawn_artists) + + def _draw_frame(self, framedata): + # Performs actual drawing of the frame. + raise NotImplementedError('Needs to be implemented by subclasses to' + ' actually make an animation.') + + def _post_draw(self, framedata, blit): + # After the frame is rendered, this handles the actual flushing of + # the draw, which can be a direct draw_idle() or make use of the + # blitting. + if blit and self._drawn_artists: + self._blit_draw(self._drawn_artists) + else: + self._fig.canvas.draw_idle() + + # The rest of the code in this class is to facilitate easy blitting + def _blit_draw(self, artists): + # Handles blitted drawing, which renders only the artists given instead + # of the entire figure. + updated_ax = {a.axes for a in artists} + # Enumerate artists to cache axes' backgrounds. 
We do not draw + # artists yet to not cache foreground from plots with shared axes + for ax in updated_ax: + # If we haven't cached the background for the current view of this + # axes object, do so now. This might not always be reliable, but + # it's an attempt to automate the process. + cur_view = ax._get_view() + view, bg = self._blit_cache.get(ax, (object(), None)) + if cur_view != view: + self._blit_cache[ax] = ( + cur_view, ax.figure.canvas.copy_from_bbox(ax.bbox)) + # Make a separate pass to draw foreground. + for a in artists: + a.axes.draw_artist(a) + # After rendering all the needed artists, blit each axes individually. + for ax in updated_ax: + ax.figure.canvas.blit(ax.bbox) + + def _blit_clear(self, artists): + # Get a list of the axes that need clearing from the artists that + # have been drawn. Grab the appropriate saved background from the + # cache and restore. + axes = {a.axes for a in artists} + for ax in axes: + try: + view, bg = self._blit_cache[ax] + except KeyError: + continue + if ax._get_view() == view: + ax.figure.canvas.restore_region(bg) + else: + self._blit_cache.pop(ax) + + def _setup_blit(self): + # Setting up the blit requires: a cache of the background for the + # axes + self._blit_cache = dict() + self._drawn_artists = [] + self._resize_id = self._fig.canvas.mpl_connect('resize_event', + self._on_resize) + self._post_draw(None, self._blit) + + def _on_resize(self, event): + # On resize, we need to disable the resize event handling so we don't + # get too many events. Also stop the animation events, so that + # we're paused. Reset the cache and re-init. Set up an event handler + # to catch once the draw has actually taken place. 
+ self._fig.canvas.mpl_disconnect(self._resize_id) + self.event_source.stop() + self._blit_cache.clear() + self._init_draw() + self._resize_id = self._fig.canvas.mpl_connect('draw_event', + self._end_redraw) + + def _end_redraw(self, event): + # Now that the redraw has happened, do the post draw flushing and + # blit handling. Then re-enable all of the original events. + self._post_draw(None, False) + self.event_source.start() + self._fig.canvas.mpl_disconnect(self._resize_id) + self._resize_id = self._fig.canvas.mpl_connect('resize_event', + self._on_resize) + + def to_html5_video(self, embed_limit=None): + """ + Convert the animation to an HTML5 ``