use system PIL and simplejson on Linux
parent 523ae0fd51
commit 93422b0274
179 changed files with 1 addition and 33421 deletions
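A minimal sketch of the import pattern such a switch typically relies on: prefer the distribution-provided simplejson and fall back to the standard-library json module when it is missing. The stdlib fallback and the names used here are assumptions for illustration, not code from this repository.

try:
    # Prefer the system-wide simplejson package (e.g. python-simplejson on Linux).
    import simplejson as json
except ImportError:
    # Fall back to the standard-library json module; it lacks a few simplejson
    # extras such as use_decimal, but covers the common dumps/loads calls.
    import json

print(json.dumps({"status": "ok"}))
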
@@ -1,54 +0,0 @@
Metadata-Version: 1.1
Name: simplejson
Version: 3.6.4
Summary: Simple, fast, extensible JSON encoder/decoder for Python
Home-page: http://github.com/simplejson/simplejson
Author: Bob Ippolito
Author-email: bob@redivi.com
License: MIT License
Description: simplejson is a simple, fast, complete, correct and extensible
JSON <http://json.org> encoder and decoder for Python 2.5+
and Python 3.3+. It is pure Python code with no dependencies,
but includes an optional C extension for a serious speed boost.

The latest documentation for simplejson can be read online here:
http://simplejson.readthedocs.org/

simplejson is the externally maintained development version of the
json library included with Python 2.6 and Python 3.0, but maintains
backwards compatibility with Python 2.5.

The encoder can be specialized to provide serialization in any kind of
situation, without any special support by the objects to be serialized
(somewhat like pickle). This is best done with the ``default`` kwarg
to dumps.

The decoder can handle incoming JSON strings of any specified encoding
(UTF-8 by default). It can also be specialized to post-process JSON
objects with the ``object_hook`` or ``object_pairs_hook`` kwargs. This
is particularly useful for implementing protocols such as JSON-RPC
that have a richer type system than JSON itself.

For those of you that have legacy systems to maintain, there is a
very old fork of simplejson in the `python2.2`_ branch that supports
Python 2.2. This is based off of a very old version of simplejson,
is not maintained, and should only be used as a last resort.

.. _python2.2: https://github.com/simplejson/simplejson/tree/python2.2

Platform: any
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: License :: OSI Approved :: Academic Free License (AFL)
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Software Development :: Libraries :: Python Modules
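The package description above points at the ``default`` kwarg for encoding and ``object_hook`` for decoding; a minimal round-trip sketch (the set-handling convention here is an illustrative assumption, not part of simplejson):

import simplejson as json

def encode_default(obj):
    # Called for objects simplejson cannot serialize on its own.
    if isinstance(obj, set):
        return {"__set__": sorted(obj)}
    raise TypeError(repr(obj) + " is not JSON serializable")

def decode_hook(dct):
    # Called with every decoded JSON object; reverses encode_default.
    if "__set__" in dct:
        return set(dct["__set__"])
    return dct

text = json.dumps({"tags": {"json", "rpc"}}, default=encode_default)
assert json.loads(text, object_hook=decode_hook) == {"tags": {"json", "rpc"}}
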
@@ -1,48 +0,0 @@
CHANGES.txt
LICENSE.txt
MANIFEST.in
README.rst
conf.py
index.rst
setup.cfg
setup.py
scripts/make_docs.py
simplejson/__init__.py
simplejson/_speedups.c
simplejson/compat.py
simplejson/decoder.py
simplejson/encoder.py
simplejson/ordered_dict.py
simplejson/scanner.py
simplejson/tool.py
simplejson.egg-info/PKG-INFO
simplejson.egg-info/SOURCES.txt
simplejson.egg-info/dependency_links.txt
simplejson.egg-info/top_level.txt
simplejson/tests/__init__.py
simplejson/tests/test_bigint_as_string.py
simplejson/tests/test_bitsize_int_as_string.py
simplejson/tests/test_check_circular.py
simplejson/tests/test_decimal.py
simplejson/tests/test_decode.py
simplejson/tests/test_default.py
simplejson/tests/test_dump.py
simplejson/tests/test_encode_basestring_ascii.py
simplejson/tests/test_encode_for_html.py
simplejson/tests/test_errors.py
simplejson/tests/test_fail.py
simplejson/tests/test_float.py
simplejson/tests/test_for_json.py
simplejson/tests/test_indent.py
simplejson/tests/test_item_sort_key.py
simplejson/tests/test_namedtuple.py
simplejson/tests/test_pass1.py
simplejson/tests/test_pass2.py
simplejson/tests/test_pass3.py
simplejson/tests/test_recursion.py
simplejson/tests/test_scanstring.py
simplejson/tests/test_separators.py
simplejson/tests/test_speedups.py
simplejson/tests/test_tool.py
simplejson/tests/test_tuple.py
simplejson/tests/test_unicode.py
@@ -1 +0,0 @@

@@ -1,74 +0,0 @@
../simplejson/scanner.py
../simplejson/compat.py
../simplejson/__init__.py
../simplejson/encoder.py
../simplejson/decoder.py
../simplejson/tool.py
../simplejson/ordered_dict.py
../simplejson/tests/test_namedtuple.py
../simplejson/tests/test_recursion.py
../simplejson/tests/test_encode_for_html.py
../simplejson/tests/test_for_json.py
../simplejson/tests/test_bigint_as_string.py
../simplejson/tests/test_item_sort_key.py
../simplejson/tests/__init__.py
../simplejson/tests/test_scanstring.py
../simplejson/tests/test_speedups.py
../simplejson/tests/test_pass3.py
../simplejson/tests/test_tool.py
../simplejson/tests/test_tuple.py
../simplejson/tests/test_errors.py
../simplejson/tests/test_unicode.py
../simplejson/tests/test_fail.py
../simplejson/tests/test_separators.py
../simplejson/tests/test_encode_basestring_ascii.py
../simplejson/tests/test_check_circular.py
../simplejson/tests/test_decimal.py
../simplejson/tests/test_pass2.py
../simplejson/tests/test_decode.py
../simplejson/tests/test_indent.py
../simplejson/tests/test_bitsize_int_as_string.py
../simplejson/tests/test_pass1.py
../simplejson/tests/test_dump.py
../simplejson/tests/test_float.py
../simplejson/tests/test_default.py
../simplejson/__pycache__/scanner.cpython-34.pyc
../simplejson/__pycache__/compat.cpython-34.pyc
../simplejson/__pycache__/__init__.cpython-34.pyc
../simplejson/__pycache__/encoder.cpython-34.pyc
../simplejson/__pycache__/decoder.cpython-34.pyc
../simplejson/__pycache__/tool.cpython-34.pyc
../simplejson/__pycache__/ordered_dict.cpython-34.pyc
../simplejson/tests/__pycache__/test_namedtuple.cpython-34.pyc
../simplejson/tests/__pycache__/test_recursion.cpython-34.pyc
../simplejson/tests/__pycache__/test_encode_for_html.cpython-34.pyc
../simplejson/tests/__pycache__/test_for_json.cpython-34.pyc
../simplejson/tests/__pycache__/test_bigint_as_string.cpython-34.pyc
../simplejson/tests/__pycache__/test_item_sort_key.cpython-34.pyc
../simplejson/tests/__pycache__/__init__.cpython-34.pyc
../simplejson/tests/__pycache__/test_scanstring.cpython-34.pyc
../simplejson/tests/__pycache__/test_speedups.cpython-34.pyc
../simplejson/tests/__pycache__/test_pass3.cpython-34.pyc
../simplejson/tests/__pycache__/test_tool.cpython-34.pyc
../simplejson/tests/__pycache__/test_tuple.cpython-34.pyc
../simplejson/tests/__pycache__/test_errors.cpython-34.pyc
../simplejson/tests/__pycache__/test_unicode.cpython-34.pyc
../simplejson/tests/__pycache__/test_fail.cpython-34.pyc
../simplejson/tests/__pycache__/test_separators.cpython-34.pyc
../simplejson/tests/__pycache__/test_encode_basestring_ascii.cpython-34.pyc
../simplejson/tests/__pycache__/test_check_circular.cpython-34.pyc
../simplejson/tests/__pycache__/test_decimal.cpython-34.pyc
../simplejson/tests/__pycache__/test_pass2.cpython-34.pyc
../simplejson/tests/__pycache__/test_decode.cpython-34.pyc
../simplejson/tests/__pycache__/test_indent.cpython-34.pyc
../simplejson/tests/__pycache__/test_bitsize_int_as_string.cpython-34.pyc
../simplejson/tests/__pycache__/test_pass1.cpython-34.pyc
../simplejson/tests/__pycache__/test_dump.cpython-34.pyc
../simplejson/tests/__pycache__/test_float.cpython-34.pyc
../simplejson/tests/__pycache__/test_default.cpython-34.pyc
../simplejson/_speedups.cpython-34m.so
./
dependency_links.txt
PKG-INFO
SOURCES.txt
top_level.txt
@@ -1 +0,0 @@
simplejson
@@ -1,564 +0,0 @@
|
|||
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
|
||||
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
|
||||
interchange format.
|
||||
|
||||
:mod:`simplejson` exposes an API familiar to users of the standard library
|
||||
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
|
||||
version of the :mod:`json` library contained in Python 2.6, but maintains
|
||||
compatibility with Python 2.4 and Python 2.5 and (currently) has
|
||||
significant performance advantages, even without using the optional C
|
||||
extension for speedups.
|
||||
|
||||
Encoding basic Python object hierarchies::
|
||||
|
||||
>>> import simplejson as json
|
||||
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
|
||||
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
|
||||
>>> print(json.dumps("\"foo\bar"))
|
||||
"\"foo\bar"
|
||||
>>> print(json.dumps(u'\u1234'))
|
||||
"\u1234"
|
||||
>>> print(json.dumps('\\'))
|
||||
"\\"
|
||||
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
|
||||
{"a": 0, "b": 0, "c": 0}
|
||||
>>> from simplejson.compat import StringIO
|
||||
>>> io = StringIO()
|
||||
>>> json.dump(['streaming API'], io)
|
||||
>>> io.getvalue()
|
||||
'["streaming API"]'
|
||||
|
||||
Compact encoding::
|
||||
|
||||
>>> import simplejson as json
|
||||
>>> obj = [1,2,3,{'4': 5, '6': 7}]
|
||||
>>> json.dumps(obj, separators=(',',':'), sort_keys=True)
|
||||
'[1,2,3,{"4":5,"6":7}]'
|
||||
|
||||
Pretty printing::
|
||||
|
||||
>>> import simplejson as json
|
||||
>>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' '))
|
||||
{
|
||||
"4": 5,
|
||||
"6": 7
|
||||
}
|
||||
|
||||
Decoding JSON::
|
||||
|
||||
>>> import simplejson as json
|
||||
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
|
||||
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
|
||||
True
|
||||
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
|
||||
True
|
||||
>>> from simplejson.compat import StringIO
|
||||
>>> io = StringIO('["streaming API"]')
|
||||
>>> json.load(io)[0] == 'streaming API'
|
||||
True
|
||||
|
||||
Specializing JSON object decoding::
|
||||
|
||||
>>> import simplejson as json
|
||||
>>> def as_complex(dct):
|
||||
... if '__complex__' in dct:
|
||||
... return complex(dct['real'], dct['imag'])
|
||||
... return dct
|
||||
...
|
||||
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
|
||||
... object_hook=as_complex)
|
||||
(1+2j)
|
||||
>>> from decimal import Decimal
|
||||
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
|
||||
True
|
||||
|
||||
Specializing JSON object encoding::
|
||||
|
||||
>>> import simplejson as json
|
||||
>>> def encode_complex(obj):
|
||||
... if isinstance(obj, complex):
|
||||
... return [obj.real, obj.imag]
|
||||
... raise TypeError(repr(obj) + " is not JSON serializable")
|
||||
...
|
||||
>>> json.dumps(2 + 1j, default=encode_complex)
|
||||
'[2.0, 1.0]'
|
||||
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
|
||||
'[2.0, 1.0]'
|
||||
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
|
||||
'[2.0, 1.0]'
|
||||
|
||||
|
||||
Using simplejson.tool from the shell to validate and pretty-print::
|
||||
|
||||
$ echo '{"json":"obj"}' | python -m simplejson.tool
|
||||
{
|
||||
"json": "obj"
|
||||
}
|
||||
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
|
||||
Expecting property name: line 1 column 3 (char 2)
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
__version__ = '3.6.4'
|
||||
__all__ = [
|
||||
'dump', 'dumps', 'load', 'loads',
|
||||
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
|
||||
'OrderedDict', 'simple_first',
|
||||
]
|
||||
|
||||
__author__ = 'Bob Ippolito <bob@redivi.com>'
|
||||
|
||||
from decimal import Decimal
|
||||
|
||||
from .scanner import JSONDecodeError
|
||||
from .decoder import JSONDecoder
|
||||
from .encoder import JSONEncoder, JSONEncoderForHTML
|
||||
def _import_OrderedDict():
|
||||
import collections
|
||||
try:
|
||||
return collections.OrderedDict
|
||||
except AttributeError:
|
||||
from . import ordered_dict
|
||||
return ordered_dict.OrderedDict
|
||||
OrderedDict = _import_OrderedDict()
|
||||
|
||||
def _import_c_make_encoder():
|
||||
try:
|
||||
from ._speedups import make_encoder
|
||||
return make_encoder
|
||||
except ImportError:
|
||||
return None
|
||||
|
||||
_default_encoder = JSONEncoder(
|
||||
skipkeys=False,
|
||||
ensure_ascii=True,
|
||||
check_circular=True,
|
||||
allow_nan=True,
|
||||
indent=None,
|
||||
separators=None,
|
||||
encoding='utf-8',
|
||||
default=None,
|
||||
use_decimal=True,
|
||||
namedtuple_as_object=True,
|
||||
tuple_as_array=True,
|
||||
bigint_as_string=False,
|
||||
item_sort_key=None,
|
||||
for_json=False,
|
||||
ignore_nan=False,
|
||||
int_as_string_bitcount=None,
|
||||
)
|
||||
|
||||
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
|
||||
allow_nan=True, cls=None, indent=None, separators=None,
|
||||
encoding='utf-8', default=None, use_decimal=True,
|
||||
namedtuple_as_object=True, tuple_as_array=True,
|
||||
bigint_as_string=False, sort_keys=False, item_sort_key=None,
|
||||
for_json=False, ignore_nan=False, int_as_string_bitcount=None, **kw):
|
||||
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
|
||||
``.write()``-supporting file-like object).
|
||||
|
||||
If *skipkeys* is true then ``dict`` keys that are not basic types
|
||||
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
|
||||
will be skipped instead of raising a ``TypeError``.
|
||||
|
||||
If *ensure_ascii* is false, then some chunks written to ``fp``
|
||||
may be ``unicode`` instances, subject to normal Python ``str`` to
|
||||
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
|
||||
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
|
||||
to cause an error.
|
||||
|
||||
If *check_circular* is false, then the circular reference check
|
||||
for container types will be skipped and a circular reference will
|
||||
result in an ``OverflowError`` (or worse).
|
||||
|
||||
If *allow_nan* is false, then it will be a ``ValueError`` to
|
||||
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
|
||||
in strict compliance with the original JSON specification, instead of using
|
||||
the JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). See
|
||||
*ignore_nan* for ECMA-262 compliant behavior.
|
||||
|
||||
If *indent* is a string, then JSON array elements and object members
|
||||
will be pretty-printed with a newline followed by that string repeated
|
||||
for each level of nesting. ``None`` (the default) selects the most compact
|
||||
representation without any newlines. For backwards compatibility with
|
||||
versions of simplejson earlier than 2.1.0, an integer is also accepted
|
||||
and is converted to a string with that many spaces.
|
||||
|
||||
If specified, *separators* should be an
|
||||
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
|
||||
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
|
||||
compact JSON representation, you should specify ``(',', ':')`` to eliminate
|
||||
whitespace.
|
||||
|
||||
*encoding* is the character encoding for str instances, default is UTF-8.
|
||||
|
||||
*default(obj)* is a function that should return a serializable version
|
||||
of obj or raise ``TypeError``. The default simply raises ``TypeError``.
|
||||
|
||||
If *use_decimal* is true (default: ``True``) then decimal.Decimal
|
||||
will be natively serialized to JSON with full precision.
|
||||
|
||||
If *namedtuple_as_object* is true (default: ``True``),
|
||||
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
|
||||
as JSON objects.
|
||||
|
||||
If *tuple_as_array* is true (default: ``True``),
|
||||
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
|
||||
|
||||
If *bigint_as_string* is true (default: ``False``), ints 2**53 and higher
|
||||
or lower than -2**53 will be encoded as strings. This is to avoid the
|
||||
rounding that happens in Javascript otherwise. Note that this is still a
|
||||
lossy operation that will not round-trip correctly and should be used
|
||||
sparingly.
|
||||
|
||||
If *int_as_string_bitcount* is a positive number (n), then ints of size
|
||||
greater than or equal to 2**n or lower than or equal to -2**n will be
|
||||
encoded as strings.
|
||||
|
||||
If specified, *item_sort_key* is a callable used to sort the items in
|
||||
each dictionary. This is useful if you want to sort items other than
|
||||
in alphabetical order by key. This option takes precedence over
|
||||
*sort_keys*.
|
||||
|
||||
If *sort_keys* is true (default: ``False``), the output of dictionaries
|
||||
will be sorted by item.
|
||||
|
||||
If *for_json* is true (default: ``False``), objects with a ``for_json()``
|
||||
method will use the return value of that method for encoding as JSON
|
||||
instead of the object.
|
||||
|
||||
If *ignore_nan* is true (default: ``False``), then out of range
|
||||
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
|
||||
``null`` in compliance with the ECMA-262 specification. If true, this will
|
||||
override *allow_nan*.
|
||||
|
||||
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
|
||||
``.default()`` method to serialize additional types), specify it with
|
||||
the ``cls`` kwarg. NOTE: You should use *default* or *for_json* instead
|
||||
of subclassing whenever possible.
|
||||
|
||||
"""
|
||||
# cached encoder
|
||||
if (not skipkeys and ensure_ascii and
|
||||
check_circular and allow_nan and
|
||||
cls is None and indent is None and separators is None and
|
||||
encoding == 'utf-8' and default is None and use_decimal
|
||||
and namedtuple_as_object and tuple_as_array
|
||||
and not bigint_as_string and not sort_keys
|
||||
and not item_sort_key and not for_json
|
||||
and not ignore_nan and int_as_string_bitcount is None
|
||||
and not kw
|
||||
):
|
||||
iterable = _default_encoder.iterencode(obj)
|
||||
else:
|
||||
if cls is None:
|
||||
cls = JSONEncoder
|
||||
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
|
||||
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
|
||||
separators=separators, encoding=encoding,
|
||||
default=default, use_decimal=use_decimal,
|
||||
namedtuple_as_object=namedtuple_as_object,
|
||||
tuple_as_array=tuple_as_array,
|
||||
bigint_as_string=bigint_as_string,
|
||||
sort_keys=sort_keys,
|
||||
item_sort_key=item_sort_key,
|
||||
for_json=for_json,
|
||||
ignore_nan=ignore_nan,
|
||||
int_as_string_bitcount=int_as_string_bitcount,
|
||||
**kw).iterencode(obj)
|
||||
# could accelerate with writelines in some versions of Python, at
|
||||
# a debuggability cost
|
||||
for chunk in iterable:
|
||||
fp.write(chunk)
|
||||
|
||||
|
||||
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
|
||||
allow_nan=True, cls=None, indent=None, separators=None,
|
||||
encoding='utf-8', default=None, use_decimal=True,
|
||||
namedtuple_as_object=True, tuple_as_array=True,
|
||||
bigint_as_string=False, sort_keys=False, item_sort_key=None,
|
||||
for_json=False, ignore_nan=False, int_as_string_bitcount=None, **kw):
|
||||
"""Serialize ``obj`` to a JSON formatted ``str``.
|
||||
|
||||
If ``skipkeys`` is true then ``dict`` keys that are not basic types
|
||||
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
|
||||
will be skipped instead of raising a ``TypeError``.
|
||||
|
||||
If ``ensure_ascii`` is false, then the return value will be a
|
||||
``unicode`` instance subject to normal Python ``str`` to ``unicode``
|
||||
coercion rules instead of being escaped to an ASCII ``str``.
|
||||
|
||||
If ``check_circular`` is false, then the circular reference check
|
||||
for container types will be skipped and a circular reference will
|
||||
result in an ``OverflowError`` (or worse).
|
||||
|
||||
If ``allow_nan`` is false, then it will be a ``ValueError`` to
|
||||
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
|
||||
strict compliance with the JSON specification, instead of using the
|
||||
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
|
||||
|
||||
If ``indent`` is a string, then JSON array elements and object members
|
||||
will be pretty-printed with a newline followed by that string repeated
|
||||
for each level of nesting. ``None`` (the default) selects the most compact
|
||||
representation without any newlines. For backwards compatibility with
|
||||
versions of simplejson earlier than 2.1.0, an integer is also accepted
|
||||
and is converted to a string with that many spaces.
|
||||
|
||||
If specified, ``separators`` should be an
|
||||
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
|
||||
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
|
||||
compact JSON representation, you should specify ``(',', ':')`` to eliminate
|
||||
whitespace.
|
||||
|
||||
``encoding`` is the character encoding for str instances, default is UTF-8.
|
||||
|
||||
``default(obj)`` is a function that should return a serializable version
|
||||
of obj or raise TypeError. The default simply raises TypeError.
|
||||
|
||||
If *use_decimal* is true (default: ``True``) then decimal.Decimal
|
||||
will be natively serialized to JSON with full precision.
|
||||
|
||||
If *namedtuple_as_object* is true (default: ``True``),
|
||||
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
|
||||
as JSON objects.
|
||||
|
||||
If *tuple_as_array* is true (default: ``True``),
|
||||
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
|
||||
|
||||
If *bigint_as_string* is true (not the default), ints 2**53 and higher
|
||||
or lower than -2**53 will be encoded as strings. This is to avoid the
|
||||
rounding that happens in Javascript otherwise.
|
||||
|
||||
If *int_as_string_bitcount* is a positive number (n), then ints of size
|
||||
greater than or equal to 2**n or lower than or equal to -2**n will be
|
||||
encoded as strings.
|
||||
|
||||
If specified, *item_sort_key* is a callable used to sort the items in
|
||||
each dictionary. This is useful if you want to sort items other than
|
||||
in alphabetical order by key. This option takes precedence over
|
||||
*sort_keys*.
|
||||
|
||||
If *sort_keys* is true (default: ``False``), the output of dictionaries
|
||||
will be sorted by item.
|
||||
|
||||
If *for_json* is true (default: ``False``), objects with a ``for_json()``
|
||||
method will use the return value of that method for encoding as JSON
|
||||
instead of the object.
|
||||
|
||||
If *ignore_nan* is true (default: ``False``), then out of range
|
||||
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
|
||||
``null`` in compliance with the ECMA-262 specification. If true, this will
|
||||
override *allow_nan*.
|
||||
|
||||
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
|
||||
``.default()`` method to serialize additional types), specify it with
|
||||
the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing
|
||||
whenever possible.
|
||||
|
||||
"""
|
||||
# cached encoder
|
||||
if (
|
||||
not skipkeys and ensure_ascii and
|
||||
check_circular and allow_nan and
|
||||
cls is None and indent is None and separators is None and
|
||||
encoding == 'utf-8' and default is None and use_decimal
|
||||
and namedtuple_as_object and tuple_as_array
|
||||
and not bigint_as_string and not sort_keys
|
||||
and not item_sort_key and not for_json
|
||||
and not ignore_nan and int_as_string_bitcount is None
|
||||
and not kw
|
||||
):
|
||||
return _default_encoder.encode(obj)
|
||||
if cls is None:
|
||||
cls = JSONEncoder
|
||||
return cls(
|
||||
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
|
||||
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
|
||||
separators=separators, encoding=encoding, default=default,
|
||||
use_decimal=use_decimal,
|
||||
namedtuple_as_object=namedtuple_as_object,
|
||||
tuple_as_array=tuple_as_array,
|
||||
bigint_as_string=bigint_as_string,
|
||||
sort_keys=sort_keys,
|
||||
item_sort_key=item_sort_key,
|
||||
for_json=for_json,
|
||||
ignore_nan=ignore_nan,
|
||||
int_as_string_bitcount=int_as_string_bitcount,
|
||||
**kw).encode(obj)
|
||||
|
||||
|
||||
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
|
||||
object_pairs_hook=None)
|
||||
|
||||
|
||||
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
|
||||
parse_int=None, parse_constant=None, object_pairs_hook=None,
|
||||
use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
|
||||
**kw):
|
||||
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
|
||||
a JSON document) to a Python object.
|
||||
|
||||
*encoding* determines the encoding used to interpret any
|
||||
:class:`str` objects decoded by this instance (``'utf-8'`` by
|
||||
default). It has no effect when decoding :class:`unicode` objects.
|
||||
|
||||
Note that currently only encodings that are a superset of ASCII work;
|
||||
strings of other encodings should be passed in as :class:`unicode`.
|
||||
|
||||
*object_hook*, if specified, will be called with the result of every
|
||||
JSON object decoded and its return value will be used in place of the
|
||||
given :class:`dict`. This can be used to provide custom
|
||||
deserializations (e.g. to support JSON-RPC class hinting).
|
||||
|
||||
*object_pairs_hook* is an optional function that will be called with
|
||||
the result of any object literal decode with an ordered list of pairs.
|
||||
The return value of *object_pairs_hook* will be used instead of the
|
||||
:class:`dict`. This feature can be used to implement custom decoders
|
||||
that rely on the order that the key and value pairs are decoded (for
|
||||
example, :func:`collections.OrderedDict` will remember the order of
|
||||
insertion). If *object_hook* is also defined, the *object_pairs_hook*
|
||||
takes priority.
|
||||
|
||||
*parse_float*, if specified, will be called with the string of every
|
||||
JSON float to be decoded. By default, this is equivalent to
|
||||
``float(num_str)``. This can be used to use another datatype or parser
|
||||
for JSON floats (e.g. :class:`decimal.Decimal`).
|
||||
|
||||
*parse_int*, if specified, will be called with the string of every
|
||||
JSON int to be decoded. By default, this is equivalent to
|
||||
``int(num_str)``. This can be used to use another datatype or parser
|
||||
for JSON integers (e.g. :class:`float`).
|
||||
|
||||
*parse_constant*, if specified, will be called with one of the
|
||||
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
|
||||
can be used to raise an exception if invalid JSON numbers are
|
||||
encountered.
|
||||
|
||||
If *use_decimal* is true (default: ``False``) then it implies
|
||||
parse_float=decimal.Decimal for parity with ``dump``.
|
||||
|
||||
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
|
||||
kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead
|
||||
of subclassing whenever possible.
|
||||
|
||||
"""
|
||||
return loads(fp.read(),
|
||||
encoding=encoding, cls=cls, object_hook=object_hook,
|
||||
parse_float=parse_float, parse_int=parse_int,
|
||||
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
|
||||
use_decimal=use_decimal, **kw)
|
||||
|
||||
|
||||
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
|
||||
parse_int=None, parse_constant=None, object_pairs_hook=None,
|
||||
use_decimal=False, **kw):
|
||||
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
|
||||
document) to a Python object.
|
||||
|
||||
*encoding* determines the encoding used to interpret any
|
||||
:class:`str` objects decoded by this instance (``'utf-8'`` by
|
||||
default). It has no effect when decoding :class:`unicode` objects.
|
||||
|
||||
Note that currently only encodings that are a superset of ASCII work;
|
||||
strings of other encodings should be passed in as :class:`unicode`.
|
||||
|
||||
*object_hook*, if specified, will be called with the result of every
|
||||
JSON object decoded and its return value will be used in place of the
|
||||
given :class:`dict`. This can be used to provide custom
|
||||
deserializations (e.g. to support JSON-RPC class hinting).
|
||||
|
||||
*object_pairs_hook* is an optional function that will be called with
|
||||
the result of any object literal decode with an ordered list of pairs.
|
||||
The return value of *object_pairs_hook* will be used instead of the
|
||||
:class:`dict`. This feature can be used to implement custom decoders
|
||||
that rely on the order that the key and value pairs are decoded (for
|
||||
example, :func:`collections.OrderedDict` will remember the order of
|
||||
insertion). If *object_hook* is also defined, the *object_pairs_hook*
|
||||
takes priority.
|
||||
|
||||
*parse_float*, if specified, will be called with the string of every
|
||||
JSON float to be decoded. By default, this is equivalent to
|
||||
``float(num_str)``. This can be used to use another datatype or parser
|
||||
for JSON floats (e.g. :class:`decimal.Decimal`).
|
||||
|
||||
*parse_int*, if specified, will be called with the string of every
|
||||
JSON int to be decoded. By default, this is equivalent to
|
||||
``int(num_str)``. This can be used to use another datatype or parser
|
||||
for JSON integers (e.g. :class:`float`).
|
||||
|
||||
*parse_constant*, if specified, will be called with one of the
|
||||
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
|
||||
can be used to raise an exception if invalid JSON numbers are
|
||||
encountered.
|
||||
|
||||
If *use_decimal* is true (default: ``False``) then it implies
|
||||
parse_float=decimal.Decimal for parity with ``dump``.
|
||||
|
||||
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
|
||||
kwarg. NOTE: You should use *object_hook* or *object_pairs_hook* instead
|
||||
of subclassing whenever possible.
|
||||
|
||||
"""
|
||||
if (cls is None and encoding is None and object_hook is None and
|
||||
parse_int is None and parse_float is None and
|
||||
parse_constant is None and object_pairs_hook is None
|
||||
and not use_decimal and not kw):
|
||||
return _default_decoder.decode(s)
|
||||
if cls is None:
|
||||
cls = JSONDecoder
|
||||
if object_hook is not None:
|
||||
kw['object_hook'] = object_hook
|
||||
if object_pairs_hook is not None:
|
||||
kw['object_pairs_hook'] = object_pairs_hook
|
||||
if parse_float is not None:
|
||||
kw['parse_float'] = parse_float
|
||||
if parse_int is not None:
|
||||
kw['parse_int'] = parse_int
|
||||
if parse_constant is not None:
|
||||
kw['parse_constant'] = parse_constant
|
||||
if use_decimal:
|
||||
if parse_float is not None:
|
||||
raise TypeError("use_decimal=True implies parse_float=Decimal")
|
||||
kw['parse_float'] = Decimal
|
||||
return cls(encoding=encoding, **kw).decode(s)
|
||||
|
||||
|
||||
def _toggle_speedups(enabled):
|
||||
from . import decoder as dec
|
||||
from . import encoder as enc
|
||||
from . import scanner as scan
|
||||
c_make_encoder = _import_c_make_encoder()
|
||||
if enabled:
|
||||
dec.scanstring = dec.c_scanstring or dec.py_scanstring
|
||||
enc.c_make_encoder = c_make_encoder
|
||||
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
|
||||
enc.py_encode_basestring_ascii)
|
||||
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
|
||||
else:
|
||||
dec.scanstring = dec.py_scanstring
|
||||
enc.c_make_encoder = None
|
||||
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
|
||||
scan.make_scanner = scan.py_make_scanner
|
||||
dec.make_scanner = scan.make_scanner
|
||||
global _default_decoder
|
||||
_default_decoder = JSONDecoder(
|
||||
encoding=None,
|
||||
object_hook=None,
|
||||
object_pairs_hook=None,
|
||||
)
|
||||
global _default_encoder
|
||||
_default_encoder = JSONEncoder(
|
||||
skipkeys=False,
|
||||
ensure_ascii=True,
|
||||
check_circular=True,
|
||||
allow_nan=True,
|
||||
indent=None,
|
||||
separators=None,
|
||||
encoding='utf-8',
|
||||
default=None,
|
||||
)
|
||||
|
||||
def simple_first(kv):
|
||||
"""Helper function to pass to item_sort_key to sort simple
|
||||
elements to the top, then container elements.
|
||||
"""
|
||||
return (isinstance(kv[1], (list, dict, tuple)), kv[0])
|
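The ``simple_first`` helper defined above pairs with the ``item_sort_key`` option; a brief usage sketch (the sample document is made up):

import simplejson as json

doc = {"name": "widget", "tags": ["a", "b"], "meta": {"rev": 2}, "id": 7}
# simple_first returns (is_container, key) pairs, so scalar values sort
# ahead of lists/dicts/tuples, with ties broken alphabetically by key.
print(json.dumps(doc, item_sort_key=json.simple_first))
# {"id": 7, "name": "widget", "meta": {"rev": 2}, "tags": ["a", "b"]}
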
Binary file not shown.
@@ -1,46 +0,0 @@
"""Python 3 compatibility shims
"""
import sys
if sys.version_info[0] < 3:
    PY3 = False
    def b(s):
        return s
    def u(s):
        return unicode(s, 'unicode_escape')
    import cStringIO as StringIO
    StringIO = BytesIO = StringIO.StringIO
    text_type = unicode
    binary_type = str
    string_types = (basestring,)
    integer_types = (int, long)
    unichr = unichr
    reload_module = reload
    def fromhex(s):
        return s.decode('hex')

else:
    PY3 = True
    if sys.version_info[:2] >= (3, 4):
        from importlib import reload as reload_module
    else:
        from imp import reload as reload_module
    import codecs
    def b(s):
        return codecs.latin_1_encode(s)[0]
    def u(s):
        return s
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    text_type = str
    binary_type = bytes
    string_types = (str,)
    integer_types = (int,)

    def unichr(s):
        return u(chr(s))

    def fromhex(s):
        return bytes.fromhex(s)

long_type = integer_types[-1]
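A short usage sketch of the shims above, assuming the bundled package is importable as ``simplejson`` (the sample strings are arbitrary):

from simplejson.compat import PY3, StringIO, b, u, text_type

buf = StringIO()                # io.StringIO on Python 3, cStringIO on Python 2
buf.write(u('streaming API'))   # u() yields a text string on both major versions
assert isinstance(buf.getvalue(), text_type)
payload = b('raw payload')      # bytes on Python 3, str on Python 2
print(PY3, payload)
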
@@ -1,400 +0,0 @@
|
|||
"""Implementation of JSONDecoder
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
import re
|
||||
import sys
|
||||
import struct
|
||||
from .compat import fromhex, b, u, text_type, binary_type, PY3, unichr
|
||||
from .scanner import make_scanner, JSONDecodeError
|
||||
|
||||
def _import_c_scanstring():
|
||||
try:
|
||||
from ._speedups import scanstring
|
||||
return scanstring
|
||||
except ImportError:
|
||||
return None
|
||||
c_scanstring = _import_c_scanstring()
|
||||
|
||||
# NOTE (3.1.0): JSONDecodeError may still be imported from this module for
|
||||
# compatibility, but it was never in the __all__
|
||||
__all__ = ['JSONDecoder']
|
||||
|
||||
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
|
||||
|
||||
def _floatconstants():
|
||||
_BYTES = fromhex('7FF80000000000007FF0000000000000')
|
||||
# The struct module in Python 2.4 would get frexp() out of range here
|
||||
# when an endian is specified in the format string. Fixed in Python 2.5+
|
||||
if sys.byteorder != 'big':
|
||||
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
|
||||
nan, inf = struct.unpack('dd', _BYTES)
|
||||
return nan, inf, -inf
|
||||
|
||||
NaN, PosInf, NegInf = _floatconstants()
|
||||
|
||||
_CONSTANTS = {
|
||||
'-Infinity': NegInf,
|
||||
'Infinity': PosInf,
|
||||
'NaN': NaN,
|
||||
}
|
||||
|
||||
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
|
||||
BACKSLASH = {
|
||||
'"': u('"'), '\\': u('\u005c'), '/': u('/'),
|
||||
'b': u('\b'), 'f': u('\f'), 'n': u('\n'), 'r': u('\r'), 't': u('\t'),
|
||||
}
|
||||
|
||||
DEFAULT_ENCODING = "utf-8"
|
||||
|
||||
def py_scanstring(s, end, encoding=None, strict=True,
|
||||
_b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
|
||||
_PY3=PY3, _maxunicode=sys.maxunicode):
|
||||
"""Scan the string s for a JSON string. End is the index of the
|
||||
character in s after the quote that started the JSON string.
|
||||
Unescapes all valid JSON string escape sequences and raises ValueError
|
||||
on attempt to decode an invalid string. If strict is False then literal
|
||||
control characters are allowed in the string.
|
||||
|
||||
Returns a tuple of the decoded string and the index of the character in s
|
||||
after the end quote."""
|
||||
if encoding is None:
|
||||
encoding = DEFAULT_ENCODING
|
||||
chunks = []
|
||||
_append = chunks.append
|
||||
begin = end - 1
|
||||
while 1:
|
||||
chunk = _m(s, end)
|
||||
if chunk is None:
|
||||
raise JSONDecodeError(
|
||||
"Unterminated string starting at", s, begin)
|
||||
end = chunk.end()
|
||||
content, terminator = chunk.groups()
|
||||
# Content contains zero or more unescaped string characters
|
||||
if content:
|
||||
if not _PY3 and not isinstance(content, text_type):
|
||||
content = text_type(content, encoding)
|
||||
_append(content)
|
||||
# Terminator is the end of string, a literal control character,
|
||||
# or a backslash denoting that an escape sequence follows
|
||||
if terminator == '"':
|
||||
break
|
||||
elif terminator != '\\':
|
||||
if strict:
|
||||
msg = "Invalid control character %r at"
|
||||
raise JSONDecodeError(msg, s, end)
|
||||
else:
|
||||
_append(terminator)
|
||||
continue
|
||||
try:
|
||||
esc = s[end]
|
||||
except IndexError:
|
||||
raise JSONDecodeError(
|
||||
"Unterminated string starting at", s, begin)
|
||||
# If not a unicode escape sequence, must be in the lookup table
|
||||
if esc != 'u':
|
||||
try:
|
||||
char = _b[esc]
|
||||
except KeyError:
|
||||
msg = "Invalid \\X escape sequence %r"
|
||||
raise JSONDecodeError(msg, s, end)
|
||||
end += 1
|
||||
else:
|
||||
# Unicode escape sequence
|
||||
msg = "Invalid \\uXXXX escape sequence"
|
||||
esc = s[end + 1:end + 5]
|
||||
escX = esc[1:2]
|
||||
if len(esc) != 4 or escX == 'x' or escX == 'X':
|
||||
raise JSONDecodeError(msg, s, end - 1)
|
||||
try:
|
||||
uni = int(esc, 16)
|
||||
except ValueError:
|
||||
raise JSONDecodeError(msg, s, end - 1)
|
||||
end += 5
|
||||
# Check for surrogate pair on UCS-4 systems
|
||||
# Note that this will join high/low surrogate pairs
|
||||
# but will also pass unpaired surrogates through
|
||||
if (_maxunicode > 65535 and
|
||||
uni & 0xfc00 == 0xd800 and
|
||||
s[end:end + 2] == '\\u'):
|
||||
esc2 = s[end + 2:end + 6]
|
||||
escX = esc2[1:2]
|
||||
if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
|
||||
try:
|
||||
uni2 = int(esc2, 16)
|
||||
except ValueError:
|
||||
raise JSONDecodeError(msg, s, end)
|
||||
if uni2 & 0xfc00 == 0xdc00:
|
||||
uni = 0x10000 + (((uni - 0xd800) << 10) |
|
||||
(uni2 - 0xdc00))
|
||||
end += 6
|
||||
char = unichr(uni)
|
||||
# Append the unescaped character
|
||||
_append(char)
|
||||
return _join(chunks), end
|
||||
|
||||
|
||||
# Use speedup if available
|
||||
scanstring = c_scanstring or py_scanstring
|
||||
|
||||
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
|
||||
WHITESPACE_STR = ' \t\n\r'
|
||||
|
||||
def JSONObject(state, encoding, strict, scan_once, object_hook,
|
||||
object_pairs_hook, memo=None,
|
||||
_w=WHITESPACE.match, _ws=WHITESPACE_STR):
|
||||
(s, end) = state
|
||||
# Backwards compatibility
|
||||
if memo is None:
|
||||
memo = {}
|
||||
memo_get = memo.setdefault
|
||||
pairs = []
|
||||
# Use a slice to prevent IndexError from being raised, the following
|
||||
# check will raise a more specific ValueError if the string is empty
|
||||
nextchar = s[end:end + 1]
|
||||
# Normally we expect nextchar == '"'
|
||||
if nextchar != '"':
|
||||
if nextchar in _ws:
|
||||
end = _w(s, end).end()
|
||||
nextchar = s[end:end + 1]
|
||||
# Trivial empty object
|
||||
if nextchar == '}':
|
||||
if object_pairs_hook is not None:
|
||||
result = object_pairs_hook(pairs)
|
||||
return result, end + 1
|
||||
pairs = {}
|
||||
if object_hook is not None:
|
||||
pairs = object_hook(pairs)
|
||||
return pairs, end + 1
|
||||
elif nextchar != '"':
|
||||
raise JSONDecodeError(
|
||||
"Expecting property name enclosed in double quotes",
|
||||
s, end)
|
||||
end += 1
|
||||
while True:
|
||||
key, end = scanstring(s, end, encoding, strict)
|
||||
key = memo_get(key, key)
|
||||
|
||||
# To skip some function call overhead we optimize the fast paths where
|
||||
# the JSON key separator is ": " or just ":".
|
||||
if s[end:end + 1] != ':':
|
||||
end = _w(s, end).end()
|
||||
if s[end:end + 1] != ':':
|
||||
raise JSONDecodeError("Expecting ':' delimiter", s, end)
|
||||
|
||||
end += 1
|
||||
|
||||
try:
|
||||
if s[end] in _ws:
|
||||
end += 1
|
||||
if s[end] in _ws:
|
||||
end = _w(s, end + 1).end()
|
||||
except IndexError:
|
||||
pass
|
||||
|
||||
value, end = scan_once(s, end)
|
||||
pairs.append((key, value))
|
||||
|
||||
try:
|
||||
nextchar = s[end]
|
||||
if nextchar in _ws:
|
||||
end = _w(s, end + 1).end()
|
||||
nextchar = s[end]
|
||||
except IndexError:
|
||||
nextchar = ''
|
||||
end += 1
|
||||
|
||||
if nextchar == '}':
|
||||
break
|
||||
elif nextchar != ',':
|
||||
raise JSONDecodeError("Expecting ',' delimiter or '}'", s, end - 1)
|
||||
|
||||
try:
|
||||
nextchar = s[end]
|
||||
if nextchar in _ws:
|
||||
end += 1
|
||||
nextchar = s[end]
|
||||
if nextchar in _ws:
|
||||
end = _w(s, end + 1).end()
|
||||
nextchar = s[end]
|
||||
except IndexError:
|
||||
nextchar = ''
|
||||
|
||||
end += 1
|
||||
if nextchar != '"':
|
||||
raise JSONDecodeError(
|
||||
"Expecting property name enclosed in double quotes",
|
||||
s, end - 1)
|
||||
|
||||
if object_pairs_hook is not None:
|
||||
result = object_pairs_hook(pairs)
|
||||
return result, end
|
||||
pairs = dict(pairs)
|
||||
if object_hook is not None:
|
||||
pairs = object_hook(pairs)
|
||||
return pairs, end
|
||||
|
||||
def JSONArray(state, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
|
||||
(s, end) = state
|
||||
values = []
|
||||
nextchar = s[end:end + 1]
|
||||
if nextchar in _ws:
|
||||
end = _w(s, end + 1).end()
|
||||
nextchar = s[end:end + 1]
|
||||
# Look-ahead for trivial empty array
|
||||
if nextchar == ']':
|
||||
return values, end + 1
|
||||
elif nextchar == '':
|
||||
raise JSONDecodeError("Expecting value or ']'", s, end)
|
||||
_append = values.append
|
||||
while True:
|
||||
value, end = scan_once(s, end)
|
||||
_append(value)
|
||||
nextchar = s[end:end + 1]
|
||||
if nextchar in _ws:
|
||||
end = _w(s, end + 1).end()
|
||||
nextchar = s[end:end + 1]
|
||||
end += 1
|
||||
if nextchar == ']':
|
||||
break
|
||||
elif nextchar != ',':
|
||||
raise JSONDecodeError("Expecting ',' delimiter or ']'", s, end - 1)
|
||||
|
||||
try:
|
||||
if s[end] in _ws:
|
||||
end += 1
|
||||
if s[end] in _ws:
|
||||
end = _w(s, end + 1).end()
|
||||
except IndexError:
|
||||
pass
|
||||
|
||||
return values, end
|
||||
|
||||
class JSONDecoder(object):
|
||||
"""Simple JSON <http://json.org> decoder
|
||||
|
||||
Performs the following translations in decoding by default:
|
||||
|
||||
+---------------+-------------------+
|
||||
| JSON | Python |
|
||||
+===============+===================+
|
||||
| object | dict |
|
||||
+---------------+-------------------+
|
||||
| array | list |
|
||||
+---------------+-------------------+
|
||||
| string | str, unicode |
|
||||
+---------------+-------------------+
|
||||
| number (int) | int, long |
|
||||
+---------------+-------------------+
|
||||
| number (real) | float |
|
||||
+---------------+-------------------+
|
||||
| true | True |
|
||||
+---------------+-------------------+
|
||||
| false | False |
|
||||
+---------------+-------------------+
|
||||
| null | None |
|
||||
+---------------+-------------------+
|
||||
|
||||
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
|
||||
their corresponding ``float`` values, which is outside the JSON spec.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, encoding=None, object_hook=None, parse_float=None,
|
||||
parse_int=None, parse_constant=None, strict=True,
|
||||
object_pairs_hook=None):
|
||||
"""
|
||||
*encoding* determines the encoding used to interpret any
|
||||
:class:`str` objects decoded by this instance (``'utf-8'`` by
|
||||
default). It has no effect when decoding :class:`unicode` objects.
|
||||
|
||||
Note that currently only encodings that are a superset of ASCII work;
|
||||
strings of other encodings should be passed in as :class:`unicode`.
|
||||
|
||||
*object_hook*, if specified, will be called with the result of every
|
||||
JSON object decoded and its return value will be used in place of the
|
||||
given :class:`dict`. This can be used to provide custom
|
||||
deserializations (e.g. to support JSON-RPC class hinting).
|
||||
|
||||
*object_pairs_hook* is an optional function that will be called with
|
||||
the result of any object literal decode with an ordered list of pairs.
|
||||
The return value of *object_pairs_hook* will be used instead of the
|
||||
:class:`dict`. This feature can be used to implement custom decoders
|
||||
that rely on the order that the key and value pairs are decoded (for
|
||||
example, :func:`collections.OrderedDict` will remember the order of
|
||||
insertion). If *object_hook* is also defined, the *object_pairs_hook*
|
||||
takes priority.
|
||||
|
||||
*parse_float*, if specified, will be called with the string of every
|
||||
JSON float to be decoded. By default, this is equivalent to
|
||||
``float(num_str)``. This can be used to use another datatype or parser
|
||||
for JSON floats (e.g. :class:`decimal.Decimal`).
|
||||
|
||||
*parse_int*, if specified, will be called with the string of every
|
||||
JSON int to be decoded. By default, this is equivalent to
|
||||
``int(num_str)``. This can be used to use another datatype or parser
|
||||
for JSON integers (e.g. :class:`float`).
|
||||
|
||||
*parse_constant*, if specified, will be called with one of the
|
||||
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
|
||||
can be used to raise an exception if invalid JSON numbers are
|
||||
encountered.
|
||||
|
||||
*strict* controls the parser's behavior when it encounters an
|
||||
invalid control character in a string. The default setting of
|
||||
``True`` means that unescaped control characters are parse errors; if
|
||||
``False`` then control characters will be allowed in strings.
|
||||
|
||||
"""
|
||||
if encoding is None:
|
||||
encoding = DEFAULT_ENCODING
|
||||
self.encoding = encoding
|
||||
self.object_hook = object_hook
|
||||
self.object_pairs_hook = object_pairs_hook
|
||||
self.parse_float = parse_float or float
|
||||
self.parse_int = parse_int or int
|
||||
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
|
||||
self.strict = strict
|
||||
self.parse_object = JSONObject
|
||||
self.parse_array = JSONArray
|
||||
self.parse_string = scanstring
|
||||
self.memo = {}
|
||||
self.scan_once = make_scanner(self)
|
||||
|
||||
def decode(self, s, _w=WHITESPACE.match, _PY3=PY3):
|
||||
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
|
||||
instance containing a JSON document)
|
||||
|
||||
"""
|
||||
if _PY3 and isinstance(s, binary_type):
|
||||
s = s.decode(self.encoding)
|
||||
obj, end = self.raw_decode(s)
|
||||
end = _w(s, end).end()
|
||||
if end != len(s):
|
||||
raise JSONDecodeError("Extra data", s, end, len(s))
|
||||
return obj
|
||||
|
||||
def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3):
|
||||
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode``
|
||||
beginning with a JSON document) and return a 2-tuple of the Python
|
||||
representation and the index in ``s`` where the document ended.
|
||||
Optionally, ``idx`` can be used to specify an offset in ``s`` where
|
||||
the JSON document begins.
|
||||
|
||||
This can be used to decode a JSON document from a string that may
|
||||
have extraneous data at the end.
|
||||
|
||||
"""
|
||||
if idx < 0:
|
||||
# Ensure that raw_decode bails on negative indexes, the regex
|
||||
# would otherwise mask this behavior. #98
|
||||
raise JSONDecodeError('Expecting value', s, idx)
|
||||
if _PY3 and not isinstance(s, text_type):
|
||||
raise TypeError("Input string must be text, not bytes")
|
||||
# strip UTF-8 bom
|
||||
if len(s) > idx:
|
||||
ord0 = ord(s[idx])
|
||||
if ord0 == 0xfeff:
|
||||
idx += 1
|
||||
elif ord0 == 0xef and s[idx:idx + 3] == '\xef\xbb\xbf':
|
||||
idx += 3
|
||||
return self.scan_once(s, idx=_w(s, idx).end())
|
|
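The ``raw_decode`` method documented above is the hook for input that carries extra data after the JSON document; a brief sketch (the payload string is made up):

import simplejson

decoder = simplejson.JSONDecoder()
payload = '{"id": 1, "ok": true} -- trailing log text'
obj, end = decoder.raw_decode(payload)   # stops at the end of the JSON document
print(obj)                               # {'id': 1, 'ok': True}
print(payload[end:].strip())             # -- trailing log text
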
@@ -1,648 +0,0 @@
|
|||
"""Implementation of JSONEncoder
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
import re
|
||||
from operator import itemgetter
|
||||
from decimal import Decimal
|
||||
from .compat import u, unichr, binary_type, string_types, integer_types, PY3
|
||||
def _import_speedups():
|
||||
try:
|
||||
from . import _speedups
|
||||
return _speedups.encode_basestring_ascii, _speedups.make_encoder
|
||||
except ImportError:
|
||||
return None, None
|
||||
c_encode_basestring_ascii, c_make_encoder = _import_speedups()
|
||||
|
||||
from simplejson.decoder import PosInf
|
||||
|
||||
#ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
|
||||
# This is required because u() will mangle the string and ur'' isn't valid
|
||||
# python3 syntax
|
||||
ESCAPE = re.compile(u'[\\x00-\\x1f\\\\"\\b\\f\\n\\r\\t\u2028\u2029]')
|
||||
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
|
||||
HAS_UTF8 = re.compile(r'[\x80-\xff]')
|
||||
ESCAPE_DCT = {
|
||||
'\\': '\\\\',
|
||||
'"': '\\"',
|
||||
'\b': '\\b',
|
||||
'\f': '\\f',
|
||||
'\n': '\\n',
|
||||
'\r': '\\r',
|
||||
'\t': '\\t',
|
||||
}
|
||||
for i in range(0x20):
|
||||
#ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
|
||||
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
|
||||
for i in [0x2028, 0x2029]:
|
||||
ESCAPE_DCT.setdefault(unichr(i), '\\u%04x' % (i,))
|
||||
|
||||
FLOAT_REPR = repr
|
||||
|
||||
def encode_basestring(s, _PY3=PY3, _q=u('"')):
|
||||
"""Return a JSON representation of a Python string
|
||||
|
||||
"""
|
||||
if _PY3:
|
||||
if isinstance(s, binary_type):
|
||||
s = s.decode('utf-8')
|
||||
else:
|
||||
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
|
||||
s = s.decode('utf-8')
|
||||
def replace(match):
|
||||
return ESCAPE_DCT[match.group(0)]
|
||||
return _q + ESCAPE.sub(replace, s) + _q
|
||||
|
||||
|
||||
def py_encode_basestring_ascii(s, _PY3=PY3):
|
||||
"""Return an ASCII-only JSON representation of a Python string
|
||||
|
||||
"""
|
||||
if _PY3:
|
||||
if isinstance(s, binary_type):
|
||||
s = s.decode('utf-8')
|
||||
else:
|
||||
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
|
||||
s = s.decode('utf-8')
|
||||
def replace(match):
|
||||
s = match.group(0)
|
||||
try:
|
||||
return ESCAPE_DCT[s]
|
||||
except KeyError:
|
||||
n = ord(s)
|
||||
if n < 0x10000:
|
||||
#return '\\u{0:04x}'.format(n)
|
||||
return '\\u%04x' % (n,)
|
||||
else:
|
||||
# surrogate pair
|
||||
n -= 0x10000
|
||||
s1 = 0xd800 | ((n >> 10) & 0x3ff)
|
||||
s2 = 0xdc00 | (n & 0x3ff)
|
||||
#return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
|
||||
return '\\u%04x\\u%04x' % (s1, s2)
|
||||
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
|
||||
|
||||
|
||||
encode_basestring_ascii = (
|
||||
c_encode_basestring_ascii or py_encode_basestring_ascii)
|
||||
|
||||
class JSONEncoder(object):
|
||||
"""Extensible JSON <http://json.org> encoder for Python data structures.
|
||||
|
||||
Supports the following objects and types by default:
|
||||
|
||||
+-------------------+---------------+
|
||||
| Python | JSON |
|
||||
+===================+===============+
|
||||
| dict, namedtuple | object |
|
||||
+-------------------+---------------+
|
||||
| list, tuple | array |
|
||||
+-------------------+---------------+
|
||||
| str, unicode | string |
|
||||
+-------------------+---------------+
|
||||
| int, long, float | number |
|
||||
+-------------------+---------------+
|
||||
| True | true |
|
||||
+-------------------+---------------+
|
||||
| False | false |
|
||||
+-------------------+---------------+
|
||||
| None | null |
|
||||
+-------------------+---------------+
|
||||
|
||||
To extend this to recognize other objects, subclass and implement a
|
||||
``.default()`` method that returns a serializable
|
||||
object for ``o`` if possible, otherwise it should call the superclass
|
||||
implementation (to raise ``TypeError``).
|
||||
|
||||
"""
|
||||
item_separator = ', '
|
||||
key_separator = ': '
|
||||
|
||||
def __init__(self, skipkeys=False, ensure_ascii=True,
|
||||
check_circular=True, allow_nan=True, sort_keys=False,
|
||||
indent=None, separators=None, encoding='utf-8', default=None,
|
||||
use_decimal=True, namedtuple_as_object=True,
|
||||
tuple_as_array=True, bigint_as_string=False,
|
||||
item_sort_key=None, for_json=False, ignore_nan=False,
|
||||
int_as_string_bitcount=None):
|
||||
"""Constructor for JSONEncoder, with sensible defaults.
|
||||
|
||||
If skipkeys is false, then it is a TypeError to attempt
|
||||
encoding of keys that are not str, int, long, float or None. If
|
||||
skipkeys is True, such items are simply skipped.
|
||||
|
||||
If ensure_ascii is true, the output is guaranteed to be str
|
||||
objects with all incoming unicode characters escaped. If
|
||||
ensure_ascii is false, the output will be a unicode object.
|
||||
|
||||
If check_circular is true, then lists, dicts, and custom encoded
|
||||
objects will be checked for circular references during encoding to
|
||||
prevent an infinite recursion (which would cause an OverflowError).
|
||||
Otherwise, no such check takes place.
|
||||
|
||||
If allow_nan is true, then NaN, Infinity, and -Infinity will be
|
||||
encoded as such. This behavior is not JSON specification compliant,
|
||||
but is consistent with most JavaScript based encoders and decoders.
|
||||
Otherwise, it will be a ValueError to encode such floats.
|
||||
|
||||
If sort_keys is true, then the output of dictionaries will be
|
||||
sorted by key; this is useful for regression tests to ensure
|
||||
that JSON serializations can be compared on a day-to-day basis.
|
||||
|
||||
If indent is a string, then JSON array elements and object members
|
||||
will be pretty-printed with a newline followed by that string repeated
|
||||
for each level of nesting. ``None`` (the default) selects the most compact
|
||||
representation without any newlines. For backwards compatibility with
|
||||
versions of simplejson earlier than 2.1.0, an integer is also accepted
|
||||
and is converted to a string with that many spaces.
|
||||
|
||||
If specified, separators should be an (item_separator, key_separator)
|
||||
tuple. The default is (', ', ': ') if *indent* is ``None`` and
|
||||
(',', ': ') otherwise. To get the most compact JSON representation,
|
||||
you should specify (',', ':') to eliminate whitespace.
|
||||
|
||||
If specified, default is a function that gets called for objects
|
||||
that can't otherwise be serialized. It should return a JSON encodable
|
||||
version of the object or raise a ``TypeError``.
|
||||
|
||||
If encoding is not None, then all input strings will be
|
||||
transformed into unicode using that encoding prior to JSON-encoding.
|
||||
The default is UTF-8.
|
||||
|
||||
If use_decimal is true (not the default), ``decimal.Decimal`` will
|
||||
be supported directly by the encoder. For the inverse, decode JSON
|
||||
with ``parse_float=decimal.Decimal``.
|
||||
|
||||
If namedtuple_as_object is true (the default), objects with
|
||||
``_asdict()`` methods will be encoded as JSON objects.
|
||||
|
||||
If tuple_as_array is true (the default), tuple (and subclasses) will
|
||||
be encoded as JSON arrays.
|
||||
|
||||
If bigint_as_string is true (not the default), ints 2**53 and higher
|
||||
or lower than -2**53 will be encoded as strings. This is to avoid the
|
||||
rounding that happens in Javascript otherwise.
|
||||
|
||||
If int_as_string_bitcount is a positive number (n), then ints of size
|
||||
greater than or equal to 2**n or lower than or equal to -2**n will be
|
||||
encoded as strings.
|
||||
|
||||
If specified, item_sort_key is a callable used to sort the items in
|
||||
each dictionary. This is useful if you want to sort items other than
|
||||
in alphabetical order by key.
|
||||
|
||||
If for_json is true (not the default), objects with a ``for_json()``
|
||||
method will use the return value of that method for encoding as JSON
|
||||
instead of the object.
|
||||
|
||||
If *ignore_nan* is true (default: ``False``), then out of range
|
||||
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized
|
||||
as ``null`` in compliance with the ECMA-262 specification. If true,
|
||||
this will override *allow_nan*.
|
||||
|
||||
"""
|
||||
|
||||
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.use_decimal = use_decimal
        self.namedtuple_as_object = namedtuple_as_object
        self.tuple_as_array = tuple_as_array
        self.bigint_as_string = bigint_as_string
        self.item_sort_key = item_sort_key
        self.for_json = for_json
        self.ignore_nan = ignore_nan
        self.int_as_string_bitcount = int_as_string_bitcount
        if indent is not None and not isinstance(indent, string_types):
            indent = indent * ' '
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        elif indent is not None:
            self.item_separator = ','
        if default is not None:
            self.default = default
        self.encoding = encoding

    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)

        """
        raise TypeError(repr(o) + " is not JSON serializable")

    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> from simplejson import JSONEncoder
        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'

        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, binary_type):
            _encoding = self.encoding
            if (_encoding is not None and not (_encoding == 'utf-8')):
                o = o.decode(_encoding)
        if isinstance(o, string_types):
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed. The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)

def iterencode(self, o, _one_shot=False):
|
||||
"""Encode the given object and yield each string
|
||||
representation as available.
|
||||
|
||||
For example::
|
||||
|
||||
for chunk in JSONEncoder().iterencode(bigobject):
|
||||
mysocket.write(chunk)
|
||||
|
||||
"""
|
||||
if self.check_circular:
|
||||
markers = {}
|
||||
else:
|
||||
markers = None
|
||||
if self.ensure_ascii:
|
||||
_encoder = encode_basestring_ascii
|
||||
else:
|
||||
_encoder = encode_basestring
|
||||
if self.encoding != 'utf-8':
|
||||
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
|
||||
if isinstance(o, binary_type):
|
||||
o = o.decode(_encoding)
|
||||
return _orig_encoder(o)
|
||||
|
||||
def floatstr(o, allow_nan=self.allow_nan, ignore_nan=self.ignore_nan,
|
||||
_repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf):
|
||||
# Check for specials. Note that this type of test is processor
|
||||
# and/or platform-specific, so do tests which don't depend on
|
||||
# the internals.
|
||||
|
||||
if o != o:
|
||||
text = 'NaN'
|
||||
elif o == _inf:
|
||||
text = 'Infinity'
|
||||
elif o == _neginf:
|
||||
text = '-Infinity'
|
||||
else:
|
||||
return _repr(o)
|
||||
|
||||
if ignore_nan:
|
||||
text = 'null'
|
||||
elif not allow_nan:
|
||||
raise ValueError(
|
||||
"Out of range float values are not JSON compliant: " +
|
||||
repr(o))
|
||||
|
||||
return text
|
||||
|
||||
key_memo = {}
|
||||
int_as_string_bitcount = (
|
||||
53 if self.bigint_as_string else self.int_as_string_bitcount)
|
||||
if (_one_shot and c_make_encoder is not None
|
||||
and self.indent is None):
|
||||
_iterencode = c_make_encoder(
|
||||
markers, self.default, _encoder, self.indent,
|
||||
self.key_separator, self.item_separator, self.sort_keys,
|
||||
self.skipkeys, self.allow_nan, key_memo, self.use_decimal,
|
||||
self.namedtuple_as_object, self.tuple_as_array,
|
||||
int_as_string_bitcount,
|
||||
self.item_sort_key, self.encoding, self.for_json,
|
||||
self.ignore_nan, Decimal)
|
||||
else:
|
||||
_iterencode = _make_iterencode(
|
||||
markers, self.default, _encoder, self.indent, floatstr,
|
||||
self.key_separator, self.item_separator, self.sort_keys,
|
||||
self.skipkeys, _one_shot, self.use_decimal,
|
||||
self.namedtuple_as_object, self.tuple_as_array,
|
||||
int_as_string_bitcount,
|
||||
self.item_sort_key, self.encoding, self.for_json,
|
||||
Decimal=Decimal)
|
||||
try:
|
||||
return _iterencode(o, 0)
|
||||
finally:
|
||||
key_memo.clear()
|
||||
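# Illustrative usage sketch, not part of the original module: it exercises a
# few of the constructor options documented above (compact separators,
# pretty-printing via a string ``indent``, the ``default`` fallback) and
# streams output with ``iterencode()``. The helper name is hypothetical.
def _encoder_usage_demo():
    import datetime

    compact = JSONEncoder(separators=(',', ':'))
    assert compact.encode({'a': [1, 2]}) == '{"a":[1,2]}'

    pretty = JSONEncoder(indent='  ', sort_keys=True)
    print(pretty.encode({'b': 1, 'a': 2}))

    # ``default`` is called for otherwise unserializable objects.
    dates = JSONEncoder(default=lambda o: o.isoformat())
    print(dates.encode({'when': datetime.date(2014, 1, 1)}))

    # iterencode() yields the JSON document in chunks, suitable for streaming.
    for chunk in JSONEncoder().iterencode({'big': list(range(5))}):
        print(chunk)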
|
||||
|
||||
class JSONEncoderForHTML(JSONEncoder):
    """An encoder that produces JSON safe to embed in HTML.

    To embed JSON content in, say, a script tag on a web page, the
    characters &, < and > should be escaped. They cannot be escaped
    with the usual entities (e.g. &amp;) because they are not expanded
    within <script> tags.
    """

    def encode(self, o):
        # Override JSONEncoder.encode because it has hacks for
        # performance that make things more complicated.
        chunks = self.iterencode(o, True)
        if self.ensure_ascii:
            return ''.join(chunks)
        else:
            return u''.join(chunks)

    def iterencode(self, o, _one_shot=False):
        chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
        for chunk in chunks:
            chunk = chunk.replace('&', '\\u0026')
            chunk = chunk.replace('<', '\\u003c')
            chunk = chunk.replace('>', '\\u003e')
            yield chunk
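# Illustrative sketch, not part of the original module, of why the escaping
# above matters: the encoded text can be dropped inside a <script> block
# without terminating it early, and it still decodes to the original string.
# The helper name is hypothetical.
def _html_encoder_demo():
    enc = JSONEncoderForHTML()
    payload = enc.encode('</script><script>alert("gotcha")</script>')
    assert '</script>' not in payload      # '<' and '>' are now \u003c / \u003e
    assert '&' not in enc.encode('a & b')  # '&' becomes \u0026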
|
||||
|
||||
|
||||
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
|
||||
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
|
||||
_use_decimal, _namedtuple_as_object, _tuple_as_array,
|
||||
_int_as_string_bitcount, _item_sort_key,
|
||||
_encoding,_for_json,
|
||||
## HACK: hand-optimized bytecode; turn globals into locals
|
||||
_PY3=PY3,
|
||||
ValueError=ValueError,
|
||||
string_types=string_types,
|
||||
Decimal=Decimal,
|
||||
dict=dict,
|
||||
float=float,
|
||||
id=id,
|
||||
integer_types=integer_types,
|
||||
isinstance=isinstance,
|
||||
list=list,
|
||||
str=str,
|
||||
tuple=tuple,
|
||||
):
|
||||
if _item_sort_key and not callable(_item_sort_key):
|
||||
raise TypeError("item_sort_key must be None or callable")
|
||||
elif _sort_keys and not _item_sort_key:
|
||||
_item_sort_key = itemgetter(0)
|
||||
|
||||
if (_int_as_string_bitcount is not None and
|
||||
(_int_as_string_bitcount <= 0 or
|
||||
not isinstance(_int_as_string_bitcount, integer_types))):
|
||||
raise TypeError("int_as_string_bitcount must be a positive integer")
|
||||
|
||||
def _encode_int(value):
|
||||
skip_quoting = (
|
||||
_int_as_string_bitcount is None
|
||||
or
|
||||
_int_as_string_bitcount < 1
|
||||
)
|
||||
if (
|
||||
skip_quoting or
|
||||
(-1 << _int_as_string_bitcount)
|
||||
< value <
|
||||
(1 << _int_as_string_bitcount)
|
||||
):
|
||||
return str(value)
|
||||
return '"' + str(value) + '"'
|
||||
|
||||
def _iterencode_list(lst, _current_indent_level):
|
||||
if not lst:
|
||||
yield '[]'
|
||||
return
|
||||
if markers is not None:
|
||||
markerid = id(lst)
|
||||
if markerid in markers:
|
||||
raise ValueError("Circular reference detected")
|
||||
markers[markerid] = lst
|
||||
buf = '['
|
||||
if _indent is not None:
|
||||
_current_indent_level += 1
|
||||
newline_indent = '\n' + (_indent * _current_indent_level)
|
||||
separator = _item_separator + newline_indent
|
||||
buf += newline_indent
|
||||
else:
|
||||
newline_indent = None
|
||||
separator = _item_separator
|
||||
first = True
|
||||
for value in lst:
|
||||
if first:
|
||||
first = False
|
||||
else:
|
||||
buf = separator
|
||||
if (isinstance(value, string_types) or
|
||||
(_PY3 and isinstance(value, binary_type))):
|
||||
yield buf + _encoder(value)
|
||||
elif value is None:
|
||||
yield buf + 'null'
|
||||
elif value is True:
|
||||
yield buf + 'true'
|
||||
elif value is False:
|
||||
yield buf + 'false'
|
||||
elif isinstance(value, integer_types):
|
||||
yield buf + _encode_int(value)
|
||||
elif isinstance(value, float):
|
||||
yield buf + _floatstr(value)
|
||||
elif _use_decimal and isinstance(value, Decimal):
|
||||
yield buf + str(value)
|
||||
else:
|
||||
yield buf
|
||||
for_json = _for_json and getattr(value, 'for_json', None)
|
||||
if for_json and callable(for_json):
|
||||
chunks = _iterencode(for_json(), _current_indent_level)
|
||||
elif isinstance(value, list):
|
||||
chunks = _iterencode_list(value, _current_indent_level)
|
||||
else:
|
||||
_asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
|
||||
if _asdict and callable(_asdict):
|
||||
chunks = _iterencode_dict(_asdict(),
|
||||
_current_indent_level)
|
||||
elif _tuple_as_array and isinstance(value, tuple):
|
||||
chunks = _iterencode_list(value, _current_indent_level)
|
||||
elif isinstance(value, dict):
|
||||
chunks = _iterencode_dict(value, _current_indent_level)
|
||||
else:
|
||||
chunks = _iterencode(value, _current_indent_level)
|
||||
for chunk in chunks:
|
||||
yield chunk
|
||||
if newline_indent is not None:
|
||||
_current_indent_level -= 1
|
||||
yield '\n' + (_indent * _current_indent_level)
|
||||
yield ']'
|
||||
if markers is not None:
|
||||
del markers[markerid]
|
||||
|
||||
def _stringify_key(key):
|
||||
if isinstance(key, string_types): # pragma: no cover
|
||||
pass
|
||||
elif isinstance(key, binary_type):
|
||||
key = key.decode(_encoding)
|
||||
elif isinstance(key, float):
|
||||
key = _floatstr(key)
|
||||
elif key is True:
|
||||
key = 'true'
|
||||
elif key is False:
|
||||
key = 'false'
|
||||
elif key is None:
|
||||
key = 'null'
|
||||
elif isinstance(key, integer_types):
|
||||
key = str(key)
|
||||
elif _use_decimal and isinstance(key, Decimal):
|
||||
key = str(key)
|
||||
elif _skipkeys:
|
||||
key = None
|
||||
else:
|
||||
raise TypeError("key " + repr(key) + " is not a string")
|
||||
return key
|
||||
|
||||
def _iterencode_dict(dct, _current_indent_level):
|
||||
if not dct:
|
||||
yield '{}'
|
||||
return
|
||||
if markers is not None:
|
||||
markerid = id(dct)
|
||||
if markerid in markers:
|
||||
raise ValueError("Circular reference detected")
|
||||
markers[markerid] = dct
|
||||
yield '{'
|
||||
if _indent is not None:
|
||||
_current_indent_level += 1
|
||||
newline_indent = '\n' + (_indent * _current_indent_level)
|
||||
item_separator = _item_separator + newline_indent
|
||||
yield newline_indent
|
||||
else:
|
||||
newline_indent = None
|
||||
item_separator = _item_separator
|
||||
first = True
|
||||
if _PY3:
|
||||
iteritems = dct.items()
|
||||
else:
|
||||
iteritems = dct.iteritems()
|
||||
if _item_sort_key:
|
||||
items = []
|
||||
for k, v in dct.items():
|
||||
if not isinstance(k, string_types):
|
||||
k = _stringify_key(k)
|
||||
if k is None:
|
||||
continue
|
||||
items.append((k, v))
|
||||
items.sort(key=_item_sort_key)
|
||||
else:
|
||||
items = iteritems
|
||||
for key, value in items:
|
||||
if not (_item_sort_key or isinstance(key, string_types)):
|
||||
key = _stringify_key(key)
|
||||
if key is None:
|
||||
# _skipkeys must be True
|
||||
continue
|
||||
if first:
|
||||
first = False
|
||||
else:
|
||||
yield item_separator
|
||||
yield _encoder(key)
|
||||
yield _key_separator
|
||||
if (isinstance(value, string_types) or
|
||||
(_PY3 and isinstance(value, binary_type))):
|
||||
yield _encoder(value)
|
||||
elif value is None:
|
||||
yield 'null'
|
||||
elif value is True:
|
||||
yield 'true'
|
||||
elif value is False:
|
||||
yield 'false'
|
||||
elif isinstance(value, integer_types):
|
||||
yield _encode_int(value)
|
||||
elif isinstance(value, float):
|
||||
yield _floatstr(value)
|
||||
elif _use_decimal and isinstance(value, Decimal):
|
||||
yield str(value)
|
||||
else:
|
||||
for_json = _for_json and getattr(value, 'for_json', None)
|
||||
if for_json and callable(for_json):
|
||||
chunks = _iterencode(for_json(), _current_indent_level)
|
||||
elif isinstance(value, list):
|
||||
chunks = _iterencode_list(value, _current_indent_level)
|
||||
else:
|
||||
_asdict = _namedtuple_as_object and getattr(value, '_asdict', None)
|
||||
if _asdict and callable(_asdict):
|
||||
chunks = _iterencode_dict(_asdict(),
|
||||
_current_indent_level)
|
||||
elif _tuple_as_array and isinstance(value, tuple):
|
||||
chunks = _iterencode_list(value, _current_indent_level)
|
||||
elif isinstance(value, dict):
|
||||
chunks = _iterencode_dict(value, _current_indent_level)
|
||||
else:
|
||||
chunks = _iterencode(value, _current_indent_level)
|
||||
for chunk in chunks:
|
||||
yield chunk
|
||||
if newline_indent is not None:
|
||||
_current_indent_level -= 1
|
||||
yield '\n' + (_indent * _current_indent_level)
|
||||
yield '}'
|
||||
if markers is not None:
|
||||
del markers[markerid]
|
||||
|
||||
def _iterencode(o, _current_indent_level):
|
||||
if (isinstance(o, string_types) or
|
||||
(_PY3 and isinstance(o, binary_type))):
|
||||
yield _encoder(o)
|
||||
elif o is None:
|
||||
yield 'null'
|
||||
elif o is True:
|
||||
yield 'true'
|
||||
elif o is False:
|
||||
yield 'false'
|
||||
elif isinstance(o, integer_types):
|
||||
yield _encode_int(o)
|
||||
elif isinstance(o, float):
|
||||
yield _floatstr(o)
|
||||
else:
|
||||
for_json = _for_json and getattr(o, 'for_json', None)
|
||||
if for_json and callable(for_json):
|
||||
for chunk in _iterencode(for_json(), _current_indent_level):
|
||||
yield chunk
|
||||
elif isinstance(o, list):
|
||||
for chunk in _iterencode_list(o, _current_indent_level):
|
||||
yield chunk
|
||||
else:
|
||||
_asdict = _namedtuple_as_object and getattr(o, '_asdict', None)
|
||||
if _asdict and callable(_asdict):
|
||||
for chunk in _iterencode_dict(_asdict(),
|
||||
_current_indent_level):
|
||||
yield chunk
|
||||
elif (_tuple_as_array and isinstance(o, tuple)):
|
||||
for chunk in _iterencode_list(o, _current_indent_level):
|
||||
yield chunk
|
||||
elif isinstance(o, dict):
|
||||
for chunk in _iterencode_dict(o, _current_indent_level):
|
||||
yield chunk
|
||||
elif _use_decimal and isinstance(o, Decimal):
|
||||
yield str(o)
|
||||
else:
|
||||
if markers is not None:
|
||||
markerid = id(o)
|
||||
if markerid in markers:
|
||||
raise ValueError("Circular reference detected")
|
||||
markers[markerid] = o
|
||||
o = _default(o)
|
||||
for chunk in _iterencode(o, _current_indent_level):
|
||||
yield chunk
|
||||
if markers is not None:
|
||||
del markers[markerid]
|
||||
|
||||
return _iterencode
|
|
@@ -1,119 +0,0 @@
|
|||
"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger
|
||||
|
||||
http://code.activestate.com/recipes/576693/
|
||||
|
||||
"""
|
||||
from UserDict import DictMixin
|
||||
|
||||
# Modified from original to support Python 2.4, see
|
||||
# http://code.google.com/p/simplejson/issues/detail?id=53
|
||||
try:
|
||||
all
|
||||
except NameError:
|
||||
def all(seq):
|
||||
for elem in seq:
|
||||
if not elem:
|
||||
return False
|
||||
return True
|
||||
|
||||
class OrderedDict(dict, DictMixin):
|
||||
|
||||
def __init__(self, *args, **kwds):
|
||||
if len(args) > 1:
|
||||
raise TypeError('expected at most 1 arguments, got %d' % len(args))
|
||||
try:
|
||||
self.__end
|
||||
except AttributeError:
|
||||
self.clear()
|
||||
self.update(*args, **kwds)
|
||||
|
||||
def clear(self):
|
||||
self.__end = end = []
|
||||
end += [None, end, end] # sentinel node for doubly linked list
|
||||
self.__map = {} # key --> [key, prev, next]
|
||||
dict.clear(self)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if key not in self:
|
||||
end = self.__end
|
||||
curr = end[1]
|
||||
curr[2] = end[1] = self.__map[key] = [key, curr, end]
|
||||
dict.__setitem__(self, key, value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
dict.__delitem__(self, key)
|
||||
key, prev, next = self.__map.pop(key)
|
||||
prev[2] = next
|
||||
next[1] = prev
|
||||
|
||||
def __iter__(self):
|
||||
end = self.__end
|
||||
curr = end[2]
|
||||
while curr is not end:
|
||||
yield curr[0]
|
||||
curr = curr[2]
|
||||
|
||||
def __reversed__(self):
|
||||
end = self.__end
|
||||
curr = end[1]
|
||||
while curr is not end:
|
||||
yield curr[0]
|
||||
curr = curr[1]
|
||||
|
||||
def popitem(self, last=True):
|
||||
if not self:
|
||||
raise KeyError('dictionary is empty')
|
||||
# Modified from original to support Python 2.4, see
|
||||
# http://code.google.com/p/simplejson/issues/detail?id=53
|
||||
if last:
|
||||
key = reversed(self).next()
|
||||
else:
|
||||
key = iter(self).next()
|
||||
value = self.pop(key)
|
||||
return key, value
|
||||
|
||||
def __reduce__(self):
|
||||
items = [[k, self[k]] for k in self]
|
||||
tmp = self.__map, self.__end
|
||||
del self.__map, self.__end
|
||||
inst_dict = vars(self).copy()
|
||||
self.__map, self.__end = tmp
|
||||
if inst_dict:
|
||||
return (self.__class__, (items,), inst_dict)
|
||||
return self.__class__, (items,)
|
||||
|
||||
def keys(self):
|
||||
return list(self)
|
||||
|
||||
setdefault = DictMixin.setdefault
|
||||
update = DictMixin.update
|
||||
pop = DictMixin.pop
|
||||
values = DictMixin.values
|
||||
items = DictMixin.items
|
||||
iterkeys = DictMixin.iterkeys
|
||||
itervalues = DictMixin.itervalues
|
||||
iteritems = DictMixin.iteritems
|
||||
|
||||
def __repr__(self):
|
||||
if not self:
|
||||
return '%s()' % (self.__class__.__name__,)
|
||||
return '%s(%r)' % (self.__class__.__name__, self.items())
|
||||
|
||||
def copy(self):
|
||||
return self.__class__(self)
|
||||
|
||||
@classmethod
|
||||
def fromkeys(cls, iterable, value=None):
|
||||
d = cls()
|
||||
for key in iterable:
|
||||
d[key] = value
|
||||
return d
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, OrderedDict):
|
||||
return len(self)==len(other) and \
|
||||
all(p==q for p, q in zip(self.items(), other.items()))
|
||||
return dict.__eq__(self, other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
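# Illustrative sketch, not part of the original module: insertion order is
# preserved, which is what makes this class usable as an ``object_pairs_hook``
# target on Python versions that lack collections.OrderedDict. The helper
# name is hypothetical.
def _ordered_dict_demo():
    d = OrderedDict()
    d['b'] = 1
    d['a'] = 2
    assert d.keys() == ['b', 'a']
    assert d.popitem() == ('a', 2)  # pops the most recently added item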
|
|
@@ -1,133 +0,0 @@
|
|||
"""JSON token scanner
|
||||
"""
|
||||
import re
|
||||
def _import_c_make_scanner():
|
||||
try:
|
||||
from simplejson._speedups import make_scanner
|
||||
return make_scanner
|
||||
except ImportError:
|
||||
return None
|
||||
c_make_scanner = _import_c_make_scanner()
|
||||
|
||||
__all__ = ['make_scanner', 'JSONDecodeError']
|
||||
|
||||
NUMBER_RE = re.compile(
|
||||
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
|
||||
(re.VERBOSE | re.MULTILINE | re.DOTALL))
|
||||
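# Illustrative sketch, not part of the original module: NUMBER_RE captures the
# integer, fraction and exponent parts separately, which is what lets
# _scan_once below hand integers and floats to different parse_* hooks.
# The helper name is hypothetical.
def _number_re_demo():
    # ('-12', '.5', 'e3'): integer part, fraction, exponent
    assert NUMBER_RE.match('-12.5e3').groups() == ('-12', '.5', 'e3')
    # a bare integer leaves the optional groups as None
    assert NUMBER_RE.match('42').groups() == ('42', None, None)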
|
||||
class JSONDecodeError(ValueError):
|
||||
"""Subclass of ValueError with the following additional properties:
|
||||
|
||||
msg: The unformatted error message
|
||||
doc: The JSON document being parsed
|
||||
pos: The start index of doc where parsing failed
|
||||
end: The end index of doc where parsing failed (may be None)
|
||||
lineno: The line corresponding to pos
|
||||
colno: The column corresponding to pos
|
||||
endlineno: The line corresponding to end (may be None)
|
||||
endcolno: The column corresponding to end (may be None)
|
||||
|
||||
"""
|
||||
# Note that this exception is used from _speedups
|
||||
def __init__(self, msg, doc, pos, end=None):
|
||||
ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
|
||||
self.msg = msg
|
||||
self.doc = doc
|
||||
self.pos = pos
|
||||
self.end = end
|
||||
self.lineno, self.colno = linecol(doc, pos)
|
||||
if end is not None:
|
||||
self.endlineno, self.endcolno = linecol(doc, end)
|
||||
else:
|
||||
self.endlineno, self.endcolno = None, None
|
||||
|
||||
def __reduce__(self):
|
||||
return self.__class__, (self.msg, self.doc, self.pos, self.end)
|
||||
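# Illustrative sketch, not part of the original module: the attributes listed
# in the docstring above make it easy to point at the offending spot in the
# input. The helper name is hypothetical.
def _decode_error_demo():
    import sys
    import simplejson
    try:
        simplejson.loads('{"a": 1,\n "b": }')
    except JSONDecodeError:
        e = sys.exc_info()[1]
        # -> Expecting value: line 2 column 7 (char 15)
        print(e.msg, e.lineno, e.colno, e.pos)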
|
||||
|
||||
def linecol(doc, pos):
|
||||
lineno = doc.count('\n', 0, pos) + 1
|
||||
if lineno == 1:
|
||||
colno = pos + 1
|
||||
else:
|
||||
colno = pos - doc.rindex('\n', 0, pos)
|
||||
return lineno, colno
|
||||
|
||||
|
||||
def errmsg(msg, doc, pos, end=None):
|
||||
lineno, colno = linecol(doc, pos)
|
||||
msg = msg.replace('%r', repr(doc[pos:pos + 1]))
|
||||
if end is None:
|
||||
fmt = '%s: line %d column %d (char %d)'
|
||||
return fmt % (msg, lineno, colno, pos)
|
||||
endlineno, endcolno = linecol(doc, end)
|
||||
fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
|
||||
return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
|
||||
|
||||
|
||||
def py_make_scanner(context):
|
||||
parse_object = context.parse_object
|
||||
parse_array = context.parse_array
|
||||
parse_string = context.parse_string
|
||||
match_number = NUMBER_RE.match
|
||||
encoding = context.encoding
|
||||
strict = context.strict
|
||||
parse_float = context.parse_float
|
||||
parse_int = context.parse_int
|
||||
parse_constant = context.parse_constant
|
||||
object_hook = context.object_hook
|
||||
object_pairs_hook = context.object_pairs_hook
|
||||
memo = context.memo
|
||||
|
||||
def _scan_once(string, idx):
|
||||
errmsg = 'Expecting value'
|
||||
try:
|
||||
nextchar = string[idx]
|
||||
except IndexError:
|
||||
raise JSONDecodeError(errmsg, string, idx)
|
||||
|
||||
if nextchar == '"':
|
||||
return parse_string(string, idx + 1, encoding, strict)
|
||||
elif nextchar == '{':
|
||||
return parse_object((string, idx + 1), encoding, strict,
|
||||
_scan_once, object_hook, object_pairs_hook, memo)
|
||||
elif nextchar == '[':
|
||||
return parse_array((string, idx + 1), _scan_once)
|
||||
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
|
||||
return None, idx + 4
|
||||
elif nextchar == 't' and string[idx:idx + 4] == 'true':
|
||||
return True, idx + 4
|
||||
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
|
||||
return False, idx + 5
|
||||
|
||||
m = match_number(string, idx)
|
||||
if m is not None:
|
||||
integer, frac, exp = m.groups()
|
||||
if frac or exp:
|
||||
res = parse_float(integer + (frac or '') + (exp or ''))
|
||||
else:
|
||||
res = parse_int(integer)
|
||||
return res, m.end()
|
||||
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
|
||||
return parse_constant('NaN'), idx + 3
|
||||
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
|
||||
return parse_constant('Infinity'), idx + 8
|
||||
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
|
||||
return parse_constant('-Infinity'), idx + 9
|
||||
else:
|
||||
raise JSONDecodeError(errmsg, string, idx)
|
||||
|
||||
def scan_once(string, idx):
|
||||
if idx < 0:
|
||||
# Ensure the same behavior as the C speedup, otherwise
|
||||
# this would work for *some* negative string indices due
|
||||
# to the behavior of __getitem__ for strings. #98
|
||||
raise JSONDecodeError('Expecting value', string, idx)
|
||||
try:
|
||||
return _scan_once(string, idx)
|
||||
finally:
|
||||
memo.clear()
|
||||
|
||||
return scan_once
|
||||
|
||||
make_scanner = c_make_scanner or py_make_scanner
|
|
@@ -1,88 +0,0 @@
|
|||
from __future__ import absolute_import
|
||||
import unittest
|
||||
import doctest
|
||||
import sys
|
||||
|
||||
|
||||
class NoExtensionTestSuite(unittest.TestSuite):
|
||||
def run(self, result):
|
||||
import simplejson
|
||||
simplejson._toggle_speedups(False)
|
||||
result = unittest.TestSuite.run(self, result)
|
||||
simplejson._toggle_speedups(True)
|
||||
return result
|
||||
|
||||
|
||||
class TestMissingSpeedups(unittest.TestCase):
|
||||
def runTest(self):
|
||||
if hasattr(sys, 'pypy_translation_info'):
|
||||
"PyPy doesn't need speedups! :)"
|
||||
elif hasattr(self, 'skipTest'):
|
||||
self.skipTest('_speedups.so is missing!')
|
||||
|
||||
|
||||
def additional_tests(suite=None):
|
||||
import simplejson
|
||||
import simplejson.encoder
|
||||
import simplejson.decoder
|
||||
if suite is None:
|
||||
suite = unittest.TestSuite()
|
||||
for mod in (simplejson, simplejson.encoder, simplejson.decoder):
|
||||
suite.addTest(doctest.DocTestSuite(mod))
|
||||
suite.addTest(doctest.DocFileSuite('../../index.rst'))
|
||||
return suite
|
||||
|
||||
|
||||
def all_tests_suite():
|
||||
def get_suite():
|
||||
return additional_tests(
|
||||
unittest.TestLoader().loadTestsFromNames([
|
||||
'simplejson.tests.test_bitsize_int_as_string',
|
||||
'simplejson.tests.test_bigint_as_string',
|
||||
'simplejson.tests.test_check_circular',
|
||||
'simplejson.tests.test_decode',
|
||||
'simplejson.tests.test_default',
|
||||
'simplejson.tests.test_dump',
|
||||
'simplejson.tests.test_encode_basestring_ascii',
|
||||
'simplejson.tests.test_encode_for_html',
|
||||
'simplejson.tests.test_errors',
|
||||
'simplejson.tests.test_fail',
|
||||
'simplejson.tests.test_float',
|
||||
'simplejson.tests.test_indent',
|
||||
'simplejson.tests.test_pass1',
|
||||
'simplejson.tests.test_pass2',
|
||||
'simplejson.tests.test_pass3',
|
||||
'simplejson.tests.test_recursion',
|
||||
'simplejson.tests.test_scanstring',
|
||||
'simplejson.tests.test_separators',
|
||||
'simplejson.tests.test_speedups',
|
||||
'simplejson.tests.test_unicode',
|
||||
'simplejson.tests.test_decimal',
|
||||
'simplejson.tests.test_tuple',
|
||||
'simplejson.tests.test_namedtuple',
|
||||
'simplejson.tests.test_tool',
|
||||
'simplejson.tests.test_for_json',
|
||||
]))
|
||||
suite = get_suite()
|
||||
import simplejson
|
||||
if simplejson._import_c_make_encoder() is None:
|
||||
suite.addTest(TestMissingSpeedups())
|
||||
else:
|
||||
suite = unittest.TestSuite([
|
||||
suite,
|
||||
NoExtensionTestSuite([get_suite()]),
|
||||
])
|
||||
return suite
|
||||
|
||||
|
||||
def main():
|
||||
runner = unittest.TextTestRunner(verbosity=1 + sys.argv.count('-v'))
|
||||
suite = all_tests_suite()
|
||||
raise SystemExit(not runner.run(suite).wasSuccessful())
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import os
|
||||
import sys
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
||||
main()
|
|
@@ -1,67 +0,0 @@
|
|||
from unittest import TestCase
|
||||
|
||||
import simplejson as json
|
||||
|
||||
|
||||
class TestBigintAsString(TestCase):
|
||||
# Python 2.5, at least the one that ships on Mac OS X, calculates
|
||||
# 2 ** 53 as 0! It manages to calculate 1 << 53 correctly.
|
||||
values = [(200, 200),
|
||||
((1 << 53) - 1, 9007199254740991),
|
||||
((1 << 53), '9007199254740992'),
|
||||
((1 << 53) + 1, '9007199254740993'),
|
||||
(-100, -100),
|
||||
((-1 << 53), '-9007199254740992'),
|
||||
((-1 << 53) - 1, '-9007199254740993'),
|
||||
((-1 << 53) + 1, -9007199254740991)]
|
||||
|
||||
options = (
|
||||
{"bigint_as_string": True},
|
||||
{"int_as_string_bitcount": 53}
|
||||
)
|
||||
|
||||
def test_ints(self):
|
||||
for opts in self.options:
|
||||
for val, expect in self.values:
|
||||
self.assertEqual(
|
||||
val,
|
||||
json.loads(json.dumps(val)))
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val, **opts)))
|
||||
|
||||
def test_lists(self):
|
||||
for opts in self.options:
|
||||
for val, expect in self.values:
|
||||
val = [val, val]
|
||||
expect = [expect, expect]
|
||||
self.assertEqual(
|
||||
val,
|
||||
json.loads(json.dumps(val)))
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val, **opts)))
|
||||
|
||||
def test_dicts(self):
|
||||
for opts in self.options:
|
||||
for val, expect in self.values:
|
||||
val = {'k': val}
|
||||
expect = {'k': expect}
|
||||
self.assertEqual(
|
||||
val,
|
||||
json.loads(json.dumps(val)))
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val, **opts)))
|
||||
|
||||
def test_dict_keys(self):
|
||||
for opts in self.options:
|
||||
for val, _ in self.values:
|
||||
expect = {str(val): 'value'}
|
||||
val = {val: 'value'}
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val)))
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val, **opts)))
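# Illustrative sketch, not part of the original test module: the two options
# exercised above quote ints that JavaScript's 53-bit floats cannot represent
# exactly. The helper name is hypothetical.
def _bigint_demo():
    import simplejson as json
    assert json.dumps(2 ** 53) == '9007199254740992'
    assert json.dumps(2 ** 53, bigint_as_string=True) == '"9007199254740992"'
    # int_as_string_bitcount generalizes the same idea to any bit width.
    assert json.dumps(2 ** 31, int_as_string_bitcount=31) == '"2147483648"'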
|
|
@@ -1,73 +0,0 @@
|
|||
from unittest import TestCase
|
||||
|
||||
import simplejson as json
|
||||
|
||||
|
||||
class TestBitSizeIntAsString(TestCase):
|
||||
# Python 2.5, at least the one that ships on Mac OS X, calculates
|
||||
# 2 ** 31 as 0! It manages to calculate 1 << 31 correctly.
|
||||
values = [
|
||||
(200, 200),
|
||||
((1 << 31) - 1, (1 << 31) - 1),
|
||||
((1 << 31), str(1 << 31)),
|
||||
((1 << 31) + 1, str((1 << 31) + 1)),
|
||||
(-100, -100),
|
||||
((-1 << 31), str(-1 << 31)),
|
||||
((-1 << 31) - 1, str((-1 << 31) - 1)),
|
||||
((-1 << 31) + 1, (-1 << 31) + 1),
|
||||
]
|
||||
|
||||
def test_invalid_counts(self):
|
||||
for n in ['foo', -1, 0, 1.0]:
|
||||
self.assertRaises(
|
||||
TypeError,
|
||||
json.dumps, 0, int_as_string_bitcount=n)
|
||||
|
||||
def test_ints_outside_range_fails(self):
|
||||
self.assertNotEqual(
|
||||
str(1 << 15),
|
||||
json.loads(json.dumps(1 << 15, int_as_string_bitcount=16)),
|
||||
)
|
||||
|
||||
def test_ints(self):
|
||||
for val, expect in self.values:
|
||||
self.assertEqual(
|
||||
val,
|
||||
json.loads(json.dumps(val)))
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val, int_as_string_bitcount=31)),
|
||||
)
|
||||
|
||||
def test_lists(self):
|
||||
for val, expect in self.values:
|
||||
val = [val, val]
|
||||
expect = [expect, expect]
|
||||
self.assertEqual(
|
||||
val,
|
||||
json.loads(json.dumps(val)))
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val, int_as_string_bitcount=31)))
|
||||
|
||||
def test_dicts(self):
|
||||
for val, expect in self.values:
|
||||
val = {'k': val}
|
||||
expect = {'k': expect}
|
||||
self.assertEqual(
|
||||
val,
|
||||
json.loads(json.dumps(val)))
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val, int_as_string_bitcount=31)))
|
||||
|
||||
def test_dict_keys(self):
|
||||
for val, _ in self.values:
|
||||
expect = {str(val): 'value'}
|
||||
val = {val: 'value'}
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val)))
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.loads(json.dumps(val, int_as_string_bitcount=31)))
|
|
@@ -1,30 +0,0 @@
|
|||
from unittest import TestCase
|
||||
import simplejson as json
|
||||
|
||||
def default_iterable(obj):
|
||||
return list(obj)
|
||||
|
||||
class TestCheckCircular(TestCase):
|
||||
def test_circular_dict(self):
|
||||
dct = {}
|
||||
dct['a'] = dct
|
||||
self.assertRaises(ValueError, json.dumps, dct)
|
||||
|
||||
def test_circular_list(self):
|
||||
lst = []
|
||||
lst.append(lst)
|
||||
self.assertRaises(ValueError, json.dumps, lst)
|
||||
|
||||
def test_circular_composite(self):
|
||||
dct2 = {}
|
||||
dct2['a'] = []
|
||||
dct2['a'].append(dct2)
|
||||
self.assertRaises(ValueError, json.dumps, dct2)
|
||||
|
||||
def test_circular_default(self):
|
||||
json.dumps([set()], default=default_iterable)
|
||||
self.assertRaises(TypeError, json.dumps, [set()])
|
||||
|
||||
def test_circular_off_default(self):
|
||||
json.dumps([set()], default=default_iterable, check_circular=False)
|
||||
self.assertRaises(TypeError, json.dumps, [set()], check_circular=False)
|
|
@@ -1,71 +0,0 @@
|
|||
import decimal
|
||||
from decimal import Decimal
|
||||
from unittest import TestCase
|
||||
from simplejson.compat import StringIO, reload_module
|
||||
|
||||
import simplejson as json
|
||||
|
||||
class TestDecimal(TestCase):
|
||||
NUMS = "1.0", "10.00", "1.1", "1234567890.1234567890", "500"
|
||||
def dumps(self, obj, **kw):
|
||||
sio = StringIO()
|
||||
json.dump(obj, sio, **kw)
|
||||
res = json.dumps(obj, **kw)
|
||||
self.assertEqual(res, sio.getvalue())
|
||||
return res
|
||||
|
||||
def loads(self, s, **kw):
|
||||
sio = StringIO(s)
|
||||
res = json.loads(s, **kw)
|
||||
self.assertEqual(res, json.load(sio, **kw))
|
||||
return res
|
||||
|
||||
def test_decimal_encode(self):
|
||||
for d in map(Decimal, self.NUMS):
|
||||
self.assertEqual(self.dumps(d, use_decimal=True), str(d))
|
||||
|
||||
def test_decimal_decode(self):
|
||||
for s in self.NUMS:
|
||||
self.assertEqual(self.loads(s, parse_float=Decimal), Decimal(s))
|
||||
|
||||
def test_stringify_key(self):
|
||||
for d in map(Decimal, self.NUMS):
|
||||
v = {d: d}
|
||||
self.assertEqual(
|
||||
self.loads(
|
||||
self.dumps(v, use_decimal=True), parse_float=Decimal),
|
||||
{str(d): d})
|
||||
|
||||
def test_decimal_roundtrip(self):
|
||||
for d in map(Decimal, self.NUMS):
|
||||
# The type might not be the same (int and Decimal) but they
|
||||
# should still compare equal.
|
||||
for v in [d, [d], {'': d}]:
|
||||
self.assertEqual(
|
||||
self.loads(
|
||||
self.dumps(v, use_decimal=True), parse_float=Decimal),
|
||||
v)
|
||||
|
||||
def test_decimal_defaults(self):
|
||||
d = Decimal('1.1')
|
||||
# use_decimal=True is the default
|
||||
self.assertRaises(TypeError, json.dumps, d, use_decimal=False)
|
||||
self.assertEqual('1.1', json.dumps(d))
|
||||
self.assertEqual('1.1', json.dumps(d, use_decimal=True))
|
||||
self.assertRaises(TypeError, json.dump, d, StringIO(),
|
||||
use_decimal=False)
|
||||
sio = StringIO()
|
||||
json.dump(d, sio)
|
||||
self.assertEqual('1.1', sio.getvalue())
|
||||
sio = StringIO()
|
||||
json.dump(d, sio, use_decimal=True)
|
||||
self.assertEqual('1.1', sio.getvalue())
|
||||
|
||||
def test_decimal_reload(self):
|
||||
# Simulate a subinterpreter that reloads the Python modules but not
|
||||
# the C code https://github.com/simplejson/simplejson/issues/34
|
||||
global Decimal
|
||||
Decimal = reload_module(decimal).Decimal
|
||||
import simplejson.encoder
|
||||
simplejson.encoder.Decimal = Decimal
|
||||
self.test_decimal_roundtrip()
|
|
@@ -1,99 +0,0 @@
|
|||
from __future__ import absolute_import
|
||||
import decimal
|
||||
from unittest import TestCase
|
||||
|
||||
import simplejson as json
|
||||
from simplejson.compat import StringIO
|
||||
from simplejson import OrderedDict
|
||||
|
||||
class TestDecode(TestCase):
|
||||
if not hasattr(TestCase, 'assertIs'):
|
||||
def assertIs(self, a, b):
|
||||
self.assertTrue(a is b, '%r is %r' % (a, b))
|
||||
|
||||
def test_decimal(self):
|
||||
rval = json.loads('1.1', parse_float=decimal.Decimal)
|
||||
self.assertTrue(isinstance(rval, decimal.Decimal))
|
||||
self.assertEqual(rval, decimal.Decimal('1.1'))
|
||||
|
||||
def test_float(self):
|
||||
rval = json.loads('1', parse_int=float)
|
||||
self.assertTrue(isinstance(rval, float))
|
||||
self.assertEqual(rval, 1.0)
|
||||
|
||||
def test_decoder_optimizations(self):
|
||||
# Several optimizations were made that skip over calls to
|
||||
# the whitespace regex, so this test is designed to try and
|
||||
# exercise the uncommon cases. The array cases are already covered.
|
||||
rval = json.loads('{ "key" : "value" , "k":"v" }')
|
||||
self.assertEqual(rval, {"key":"value", "k":"v"})
|
||||
|
||||
def test_empty_objects(self):
|
||||
s = '{}'
|
||||
self.assertEqual(json.loads(s), eval(s))
|
||||
s = '[]'
|
||||
self.assertEqual(json.loads(s), eval(s))
|
||||
s = '""'
|
||||
self.assertEqual(json.loads(s), eval(s))
|
||||
|
||||
def test_object_pairs_hook(self):
|
||||
s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
|
||||
p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4),
|
||||
("qrt", 5), ("pad", 6), ("hoy", 7)]
|
||||
self.assertEqual(json.loads(s), eval(s))
|
||||
self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
|
||||
self.assertEqual(json.load(StringIO(s),
|
||||
object_pairs_hook=lambda x: x), p)
|
||||
od = json.loads(s, object_pairs_hook=OrderedDict)
|
||||
self.assertEqual(od, OrderedDict(p))
|
||||
self.assertEqual(type(od), OrderedDict)
|
||||
# the object_pairs_hook takes priority over the object_hook
|
||||
self.assertEqual(json.loads(s,
|
||||
object_pairs_hook=OrderedDict,
|
||||
object_hook=lambda x: None),
|
||||
OrderedDict(p))
|
||||
|
||||
def check_keys_reuse(self, source, loads):
|
||||
rval = loads(source)
|
||||
(a, b), (c, d) = sorted(rval[0]), sorted(rval[1])
|
||||
self.assertIs(a, c)
|
||||
self.assertIs(b, d)
|
||||
|
||||
def test_keys_reuse_str(self):
|
||||
s = u'[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]'.encode('utf8')
|
||||
self.check_keys_reuse(s, json.loads)
|
||||
|
||||
def test_keys_reuse_unicode(self):
|
||||
s = u'[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]'
|
||||
self.check_keys_reuse(s, json.loads)
|
||||
|
||||
def test_empty_strings(self):
|
||||
self.assertEqual(json.loads('""'), "")
|
||||
self.assertEqual(json.loads(u'""'), u"")
|
||||
self.assertEqual(json.loads('[""]'), [""])
|
||||
self.assertEqual(json.loads(u'[""]'), [u""])
|
||||
|
||||
def test_raw_decode(self):
|
||||
cls = json.decoder.JSONDecoder
|
||||
self.assertEqual(
|
||||
({'a': {}}, 9),
|
||||
cls().raw_decode("{\"a\": {}}"))
|
||||
# http://code.google.com/p/simplejson/issues/detail?id=85
|
||||
self.assertEqual(
|
||||
({'a': {}}, 9),
|
||||
cls(object_pairs_hook=dict).raw_decode("{\"a\": {}}"))
|
||||
# https://github.com/simplejson/simplejson/pull/38
|
||||
self.assertEqual(
|
||||
({'a': {}}, 11),
|
||||
cls().raw_decode(" \n{\"a\": {}}"))
|
||||
|
||||
def test_bounds_checking(self):
|
||||
# https://github.com/simplejson/simplejson/issues/98
|
||||
j = json.decoder.JSONDecoder()
|
||||
for i in [4, 5, 6, -1, -2, -3, -4, -5, -6]:
|
||||
self.assertRaises(ValueError, j.scan_once, '1234', i)
|
||||
self.assertRaises(ValueError, j.raw_decode, '1234', i)
|
||||
x, y = sorted(['128931233', '472389423'], key=id)
|
||||
diff = id(x) - id(y)
|
||||
self.assertRaises(ValueError, j.scan_once, y, diff)
|
||||
self.assertRaises(ValueError, j.raw_decode, y, i)
|
|
@@ -1,9 +0,0 @@
|
|||
from unittest import TestCase

import simplejson as json

class TestDefault(TestCase):
    def test_default(self):
        self.assertEqual(
            json.dumps(type, default=repr),
            json.dumps(repr(type)))
|
@@ -1,130 +0,0 @@
|
|||
from unittest import TestCase
|
||||
from simplejson.compat import StringIO, long_type, b, binary_type, PY3
|
||||
import simplejson as json
|
||||
|
||||
def as_text_type(s):
|
||||
if PY3 and isinstance(s, binary_type):
|
||||
return s.decode('ascii')
|
||||
return s
|
||||
|
||||
class TestDump(TestCase):
|
||||
def test_dump(self):
|
||||
sio = StringIO()
|
||||
json.dump({}, sio)
|
||||
self.assertEqual(sio.getvalue(), '{}')
|
||||
|
||||
def test_constants(self):
|
||||
for c in [None, True, False]:
|
||||
self.assertTrue(json.loads(json.dumps(c)) is c)
|
||||
self.assertTrue(json.loads(json.dumps([c]))[0] is c)
|
||||
self.assertTrue(json.loads(json.dumps({'a': c}))['a'] is c)
|
||||
|
||||
def test_stringify_key(self):
|
||||
items = [(b('bytes'), 'bytes'),
|
||||
(1.0, '1.0'),
|
||||
(10, '10'),
|
||||
(True, 'true'),
|
||||
(False, 'false'),
|
||||
(None, 'null'),
|
||||
(long_type(100), '100')]
|
||||
for k, expect in items:
|
||||
self.assertEqual(
|
||||
json.loads(json.dumps({k: expect})),
|
||||
{expect: expect})
|
||||
self.assertEqual(
|
||||
json.loads(json.dumps({k: expect}, sort_keys=True)),
|
||||
{expect: expect})
|
||||
self.assertRaises(TypeError, json.dumps, {json: 1})
|
||||
for v in [{}, {'other': 1}, {b('derp'): 1, 'herp': 2}]:
|
||||
for sort_keys in [False, True]:
|
||||
v0 = dict(v)
|
||||
v0[json] = 1
|
||||
v1 = dict((as_text_type(key), val) for (key, val) in v.items())
|
||||
self.assertEqual(
|
||||
json.loads(json.dumps(v0, skipkeys=True, sort_keys=sort_keys)),
|
||||
v1)
|
||||
self.assertEqual(
|
||||
json.loads(json.dumps({'': v0}, skipkeys=True, sort_keys=sort_keys)),
|
||||
{'': v1})
|
||||
self.assertEqual(
|
||||
json.loads(json.dumps([v0], skipkeys=True, sort_keys=sort_keys)),
|
||||
[v1])
|
||||
|
||||
def test_dumps(self):
|
||||
self.assertEqual(json.dumps({}), '{}')
|
||||
|
||||
def test_encode_truefalse(self):
|
||||
self.assertEqual(json.dumps(
|
||||
{True: False, False: True}, sort_keys=True),
|
||||
'{"false": true, "true": false}')
|
||||
self.assertEqual(
|
||||
json.dumps(
|
||||
{2: 3.0,
|
||||
4.0: long_type(5),
|
||||
False: 1,
|
||||
long_type(6): True,
|
||||
"7": 0},
|
||||
sort_keys=True),
|
||||
'{"2": 3.0, "4.0": 5, "6": true, "7": 0, "false": 1}')
|
||||
|
||||
def test_ordered_dict(self):
|
||||
# http://bugs.python.org/issue6105
|
||||
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
|
||||
s = json.dumps(json.OrderedDict(items))
|
||||
self.assertEqual(
|
||||
s,
|
||||
'{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')
|
||||
|
||||
def test_indent_unknown_type_acceptance(self):
|
||||
"""
|
||||
A test against the regression mentioned at `github issue 29`_.
|
||||
|
||||
The indent parameter should accept any type which pretends to be
|
||||
an instance of int or long when it comes to being multiplied by
|
||||
strings, even if it is not actually an int or long, for
|
||||
backwards compatibility.
|
||||
|
||||
.. _github issue 29:
|
||||
http://github.com/simplejson/simplejson/issue/29
|
||||
"""
|
||||
|
||||
class AwesomeInt(object):
|
||||
"""An awesome reimplementation of integers"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
if len(args) > 0:
|
||||
# [construct from literals, objects, etc.]
|
||||
# ...
|
||||
|
||||
# Finally, if args[0] is an integer, store it
|
||||
if isinstance(args[0], int):
|
||||
self._int = args[0]
|
||||
|
||||
# [various methods]
|
||||
|
||||
def __mul__(self, other):
|
||||
# [various ways to multiply AwesomeInt objects]
|
||||
# ... finally, if the right-hand operand is not awesome enough,
|
||||
# try to do a normal integer multiplication
|
||||
if hasattr(self, '_int'):
|
||||
return self._int * other
|
||||
else:
|
||||
raise NotImplementedError("To do non-awesome things with"
|
||||
" this object, please construct it from an integer!")
|
||||
|
||||
s = json.dumps([0, 1, 2], indent=AwesomeInt(3))
|
||||
self.assertEqual(s, '[\n 0,\n 1,\n 2\n]')
|
||||
|
||||
def test_accumulator(self):
|
||||
# the C API uses an accumulator that collects after 100,000 appends
|
||||
lst = [0] * 100000
|
||||
self.assertEqual(json.loads(json.dumps(lst)), lst)
|
||||
|
||||
def test_sort_keys(self):
|
||||
# https://github.com/simplejson/simplejson/issues/106
|
||||
for num_keys in range(2, 32):
|
||||
p = dict((str(x), x) for x in range(num_keys))
|
||||
sio = StringIO()
|
||||
json.dump(p, sio, sort_keys=True)
|
||||
self.assertEqual(sio.getvalue(), json.dumps(p, sort_keys=True))
|
||||
self.assertEqual(json.loads(sio.getvalue()), p)
|
|
@@ -1,47 +0,0 @@
|
|||
from unittest import TestCase
|
||||
|
||||
import simplejson.encoder
|
||||
from simplejson.compat import b
|
||||
|
||||
CASES = [
|
||||
(u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
|
||||
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
|
||||
(u'controls', '"controls"'),
|
||||
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
|
||||
(u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
|
||||
(u' s p a c e d ', '" s p a c e d "'),
|
||||
(u'\U0001d120', '"\\ud834\\udd20"'),
|
||||
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
|
||||
(b('\xce\xb1\xce\xa9'), '"\\u03b1\\u03a9"'),
|
||||
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
|
||||
(b('\xce\xb1\xce\xa9'), '"\\u03b1\\u03a9"'),
|
||||
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
|
||||
(u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
|
||||
(u"`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
|
||||
(u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
|
||||
(u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
|
||||
]
|
||||
|
||||
class TestEncodeBaseStringAscii(TestCase):
|
||||
def test_py_encode_basestring_ascii(self):
|
||||
self._test_encode_basestring_ascii(simplejson.encoder.py_encode_basestring_ascii)
|
||||
|
||||
def test_c_encode_basestring_ascii(self):
|
||||
if not simplejson.encoder.c_encode_basestring_ascii:
|
||||
return
|
||||
self._test_encode_basestring_ascii(simplejson.encoder.c_encode_basestring_ascii)
|
||||
|
||||
def _test_encode_basestring_ascii(self, encode_basestring_ascii):
|
||||
fname = encode_basestring_ascii.__name__
|
||||
for input_string, expect in CASES:
|
||||
result = encode_basestring_ascii(input_string)
|
||||
#self.assertEqual(result, expect,
|
||||
# '{0!r} != {1!r} for {2}({3!r})'.format(
|
||||
# result, expect, fname, input_string))
|
||||
self.assertEqual(result, expect,
|
||||
'%r != %r for %s(%r)' % (result, expect, fname, input_string))
|
||||
|
||||
def test_sorted_dict(self):
|
||||
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
|
||||
s = simplejson.dumps(dict(items), sort_keys=True)
|
||||
self.assertEqual(s, '{"five": 5, "four": 4, "one": 1, "three": 3, "two": 2}')
|
|
@@ -1,30 +0,0 @@
|
|||
import unittest
|
||||
|
||||
import simplejson as json
|
||||
|
||||
class TestEncodeForHTML(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.decoder = json.JSONDecoder()
|
||||
self.encoder = json.JSONEncoderForHTML()
|
||||
|
||||
def test_basic_encode(self):
|
||||
self.assertEqual(r'"\u0026"', self.encoder.encode('&'))
|
||||
self.assertEqual(r'"\u003c"', self.encoder.encode('<'))
|
||||
self.assertEqual(r'"\u003e"', self.encoder.encode('>'))
|
||||
|
||||
def test_basic_roundtrip(self):
|
||||
for char in '&<>':
|
||||
self.assertEqual(
|
||||
char, self.decoder.decode(
|
||||
self.encoder.encode(char)))
|
||||
|
||||
def test_prevent_script_breakout(self):
|
||||
bad_string = '</script><script>alert("gotcha")</script>'
|
||||
self.assertEqual(
|
||||
r'"\u003c/script\u003e\u003cscript\u003e'
|
||||
r'alert(\"gotcha\")\u003c/script\u003e"',
|
||||
self.encoder.encode(bad_string))
|
||||
self.assertEqual(
|
||||
bad_string, self.decoder.decode(
|
||||
self.encoder.encode(bad_string)))
|
|
@@ -1,51 +0,0 @@
|
|||
import sys, pickle
|
||||
from unittest import TestCase
|
||||
|
||||
import simplejson as json
|
||||
from simplejson.compat import u, b
|
||||
|
||||
class TestErrors(TestCase):
|
||||
def test_string_keys_error(self):
|
||||
data = [{'a': 'A', 'b': (2, 4), 'c': 3.0, ('d',): 'D tuple'}]
|
||||
self.assertRaises(TypeError, json.dumps, data)
|
||||
|
||||
def test_decode_error(self):
|
||||
err = None
|
||||
try:
|
||||
json.loads('{}\na\nb')
|
||||
except json.JSONDecodeError:
|
||||
err = sys.exc_info()[1]
|
||||
else:
|
||||
self.fail('Expected JSONDecodeError')
|
||||
self.assertEqual(err.lineno, 2)
|
||||
self.assertEqual(err.colno, 1)
|
||||
self.assertEqual(err.endlineno, 3)
|
||||
self.assertEqual(err.endcolno, 2)
|
||||
|
||||
def test_scan_error(self):
|
||||
err = None
|
||||
for t in (u, b):
|
||||
try:
|
||||
json.loads(t('{"asdf": "'))
|
||||
except json.JSONDecodeError:
|
||||
err = sys.exc_info()[1]
|
||||
else:
|
||||
self.fail('Expected JSONDecodeError')
|
||||
self.assertEqual(err.lineno, 1)
|
||||
self.assertEqual(err.colno, 10)
|
||||
|
||||
def test_error_is_pickable(self):
|
||||
err = None
|
||||
try:
|
||||
json.loads('{}\na\nb')
|
||||
except json.JSONDecodeError:
|
||||
err = sys.exc_info()[1]
|
||||
else:
|
||||
self.fail('Expected JSONDecodeError')
|
||||
s = pickle.dumps(err)
|
||||
e = pickle.loads(s)
|
||||
|
||||
self.assertEqual(err.msg, e.msg)
|
||||
self.assertEqual(err.doc, e.doc)
|
||||
self.assertEqual(err.pos, e.pos)
|
||||
self.assertEqual(err.end, e.end)
|
|
@@ -1,176 +0,0 @@
|
|||
import sys
|
||||
from unittest import TestCase
|
||||
|
||||
import simplejson as json
|
||||
|
||||
# 2007-10-05
|
||||
JSONDOCS = [
|
||||
# http://json.org/JSON_checker/test/fail1.json
|
||||
'"A JSON payload should be an object or array, not a string."',
|
||||
# http://json.org/JSON_checker/test/fail2.json
|
||||
'["Unclosed array"',
|
||||
# http://json.org/JSON_checker/test/fail3.json
|
||||
'{unquoted_key: "keys must be quoted"}',
|
||||
# http://json.org/JSON_checker/test/fail4.json
|
||||
'["extra comma",]',
|
||||
# http://json.org/JSON_checker/test/fail5.json
|
||||
'["double extra comma",,]',
|
||||
# http://json.org/JSON_checker/test/fail6.json
|
||||
'[ , "<-- missing value"]',
|
||||
# http://json.org/JSON_checker/test/fail7.json
|
||||
'["Comma after the close"],',
|
||||
# http://json.org/JSON_checker/test/fail8.json
|
||||
'["Extra close"]]',
|
||||
# http://json.org/JSON_checker/test/fail9.json
|
||||
'{"Extra comma": true,}',
|
||||
# http://json.org/JSON_checker/test/fail10.json
|
||||
'{"Extra value after close": true} "misplaced quoted value"',
|
||||
# http://json.org/JSON_checker/test/fail11.json
|
||||
'{"Illegal expression": 1 + 2}',
|
||||
# http://json.org/JSON_checker/test/fail12.json
|
||||
'{"Illegal invocation": alert()}',
|
||||
# http://json.org/JSON_checker/test/fail13.json
|
||||
'{"Numbers cannot have leading zeroes": 013}',
|
||||
# http://json.org/JSON_checker/test/fail14.json
|
||||
'{"Numbers cannot be hex": 0x14}',
|
||||
# http://json.org/JSON_checker/test/fail15.json
|
||||
'["Illegal backslash escape: \\x15"]',
|
||||
# http://json.org/JSON_checker/test/fail16.json
|
||||
'[\\naked]',
|
||||
# http://json.org/JSON_checker/test/fail17.json
|
||||
'["Illegal backslash escape: \\017"]',
|
||||
# http://json.org/JSON_checker/test/fail18.json
|
||||
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
|
||||
# http://json.org/JSON_checker/test/fail19.json
|
||||
'{"Missing colon" null}',
|
||||
# http://json.org/JSON_checker/test/fail20.json
|
||||
'{"Double colon":: null}',
|
||||
# http://json.org/JSON_checker/test/fail21.json
|
||||
'{"Comma instead of colon", null}',
|
||||
# http://json.org/JSON_checker/test/fail22.json
|
||||
'["Colon instead of comma": false]',
|
||||
# http://json.org/JSON_checker/test/fail23.json
|
||||
'["Bad value", truth]',
|
||||
# http://json.org/JSON_checker/test/fail24.json
|
||||
"['single quote']",
|
||||
# http://json.org/JSON_checker/test/fail25.json
|
||||
'["\ttab\tcharacter\tin\tstring\t"]',
|
||||
# http://json.org/JSON_checker/test/fail26.json
|
||||
'["tab\\ character\\ in\\ string\\ "]',
|
||||
# http://json.org/JSON_checker/test/fail27.json
|
||||
'["line\nbreak"]',
|
||||
# http://json.org/JSON_checker/test/fail28.json
|
||||
'["line\\\nbreak"]',
|
||||
# http://json.org/JSON_checker/test/fail29.json
|
||||
'[0e]',
|
||||
# http://json.org/JSON_checker/test/fail30.json
|
||||
'[0e+]',
|
||||
# http://json.org/JSON_checker/test/fail31.json
|
||||
'[0e+-1]',
|
||||
# http://json.org/JSON_checker/test/fail32.json
|
||||
'{"Comma instead if closing brace": true,',
|
||||
# http://json.org/JSON_checker/test/fail33.json
|
||||
'["mismatch"}',
|
||||
# http://code.google.com/p/simplejson/issues/detail?id=3
|
||||
u'["A\u001FZ control characters in string"]',
|
||||
# misc based on coverage
|
||||
'{',
|
||||
'{]',
|
||||
'{"foo": "bar"]',
|
||||
'{"foo": "bar"',
|
||||
'nul',
|
||||
'nulx',
|
||||
'-',
|
||||
'-x',
|
||||
'-e',
|
||||
'-e0',
|
||||
'-Infinite',
|
||||
'-Inf',
|
||||
'Infinit',
|
||||
'Infinite',
|
||||
'NaM',
|
||||
'NuN',
|
||||
'falsy',
|
||||
'fal',
|
||||
'trug',
|
||||
'tru',
|
||||
'1e',
|
||||
'1ex',
|
||||
'1e-',
|
||||
'1e-x',
|
||||
]
|
||||
|
||||
SKIPS = {
|
||||
1: "why not have a string payload?",
|
||||
18: "spec doesn't specify any nesting limitations",
|
||||
}
|
||||
|
||||
class TestFail(TestCase):
|
||||
def test_failures(self):
|
||||
for idx, doc in enumerate(JSONDOCS):
|
||||
idx = idx + 1
|
||||
if idx in SKIPS:
|
||||
json.loads(doc)
|
||||
continue
|
||||
try:
|
||||
json.loads(doc)
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
else:
|
||||
self.fail("Expected failure for fail%d.json: %r" % (idx, doc))
|
||||
|
||||
def test_array_decoder_issue46(self):
|
||||
# http://code.google.com/p/simplejson/issues/detail?id=46
|
||||
for doc in [u'[,]', '[,]']:
|
||||
try:
|
||||
json.loads(doc)
|
||||
except json.JSONDecodeError:
|
||||
e = sys.exc_info()[1]
|
||||
self.assertEqual(e.pos, 1)
|
||||
self.assertEqual(e.lineno, 1)
|
||||
self.assertEqual(e.colno, 2)
|
||||
except Exception:
|
||||
e = sys.exc_info()[1]
|
||||
self.fail("Unexpected exception raised %r %s" % (e, e))
|
||||
else:
|
||||
self.fail("Unexpected success parsing '[,]'")
|
||||
|
||||
def test_truncated_input(self):
|
||||
test_cases = [
|
||||
('', 'Expecting value', 0),
|
||||
('[', "Expecting value or ']'", 1),
|
||||
('[42', "Expecting ',' delimiter", 3),
|
||||
('[42,', 'Expecting value', 4),
|
||||
('["', 'Unterminated string starting at', 1),
|
||||
('["spam', 'Unterminated string starting at', 1),
|
||||
('["spam"', "Expecting ',' delimiter", 7),
|
||||
('["spam",', 'Expecting value', 8),
|
||||
('{', 'Expecting property name enclosed in double quotes', 1),
|
||||
('{"', 'Unterminated string starting at', 1),
|
||||
('{"spam', 'Unterminated string starting at', 1),
|
||||
('{"spam"', "Expecting ':' delimiter", 7),
|
||||
('{"spam":', 'Expecting value', 8),
|
||||
('{"spam":42', "Expecting ',' delimiter", 10),
|
||||
('{"spam":42,', 'Expecting property name enclosed in double quotes',
|
||||
11),
|
||||
('"', 'Unterminated string starting at', 0),
|
||||
('"spam', 'Unterminated string starting at', 0),
|
||||
('[,', "Expecting value", 1),
|
||||
]
|
||||
for data, msg, idx in test_cases:
|
||||
try:
|
||||
json.loads(data)
|
||||
except json.JSONDecodeError:
|
||||
e = sys.exc_info()[1]
|
||||
self.assertEqual(
|
||||
e.msg[:len(msg)],
|
||||
msg,
|
||||
"%r doesn't start with %r for %r" % (e.msg, msg, data))
|
||||
self.assertEqual(
|
||||
e.pos, idx,
|
||||
"pos %r != %r for %r" % (e.pos, idx, data))
|
||||
except Exception:
|
||||
e = sys.exc_info()[1]
|
||||
self.fail("Unexpected exception raised %r %s" % (e, e))
|
||||
else:
|
||||
self.fail("Unexpected success parsing '%r'" % (data,))
|
|
@@ -1,35 +0,0 @@
|
|||
import math
|
||||
from unittest import TestCase
|
||||
from simplejson.compat import long_type, text_type
|
||||
import simplejson as json
|
||||
from simplejson.decoder import NaN, PosInf, NegInf
|
||||
|
||||
class TestFloat(TestCase):
|
||||
def test_degenerates_allow(self):
|
||||
for inf in (PosInf, NegInf):
|
||||
self.assertEqual(json.loads(json.dumps(inf)), inf)
|
||||
# Python 2.5 doesn't have math.isnan
|
||||
nan = json.loads(json.dumps(NaN))
|
||||
self.assertTrue((0 + nan) != nan)
|
||||
|
||||
def test_degenerates_ignore(self):
|
||||
for f in (PosInf, NegInf, NaN):
|
||||
self.assertEqual(json.loads(json.dumps(f, ignore_nan=True)), None)
|
||||
|
||||
def test_degenerates_deny(self):
|
||||
for f in (PosInf, NegInf, NaN):
|
||||
self.assertRaises(ValueError, json.dumps, f, allow_nan=False)
|
||||
|
||||
def test_floats(self):
|
||||
for num in [1617161771.7650001, math.pi, math.pi**100,
|
||||
math.pi**-100, 3.1]:
|
||||
self.assertEqual(float(json.dumps(num)), num)
|
||||
self.assertEqual(json.loads(json.dumps(num)), num)
|
||||
self.assertEqual(json.loads(text_type(json.dumps(num))), num)
|
||||
|
||||
def test_ints(self):
|
||||
for num in [1, long_type(1), 1<<32, 1<<64]:
|
||||
self.assertEqual(json.dumps(num), str(num))
|
||||
self.assertEqual(int(json.dumps(num)), num)
|
||||
self.assertEqual(json.loads(json.dumps(num)), num)
|
||||
self.assertEqual(json.loads(text_type(json.dumps(num))), num)
|
|
@@ -1,97 +0,0 @@
|
|||
import unittest
|
||||
import simplejson as json
|
||||
|
||||
|
||||
class ForJson(object):
|
||||
def for_json(self):
|
||||
return {'for_json': 1}
|
||||
|
||||
|
||||
class NestedForJson(object):
|
||||
def for_json(self):
|
||||
return {'nested': ForJson()}
|
||||
|
||||
|
||||
class ForJsonList(object):
|
||||
def for_json(self):
|
||||
return ['list']
|
||||
|
||||
|
||||
class DictForJson(dict):
|
||||
def for_json(self):
|
||||
return {'alpha': 1}
|
||||
|
||||
|
||||
class ListForJson(list):
|
||||
def for_json(self):
|
||||
return ['list']
|
||||
|
||||
|
||||
class TestForJson(unittest.TestCase):
|
||||
def assertRoundTrip(self, obj, other, for_json=True):
|
||||
if for_json is None:
|
||||
# None will use the default
|
||||
s = json.dumps(obj)
|
||||
else:
|
||||
s = json.dumps(obj, for_json=for_json)
|
||||
self.assertEqual(
|
||||
json.loads(s),
|
||||
other)
|
||||
|
||||
def test_for_json_encodes_stand_alone_object(self):
|
||||
self.assertRoundTrip(
|
||||
ForJson(),
|
||||
ForJson().for_json())
|
||||
|
||||
def test_for_json_encodes_object_nested_in_dict(self):
|
||||
self.assertRoundTrip(
|
||||
{'hooray': ForJson()},
|
||||
{'hooray': ForJson().for_json()})
|
||||
|
||||
def test_for_json_encodes_object_nested_in_list_within_dict(self):
|
||||
self.assertRoundTrip(
|
||||
{'list': [0, ForJson(), 2, 3]},
|
||||
{'list': [0, ForJson().for_json(), 2, 3]})
|
||||
|
||||
def test_for_json_encodes_object_nested_within_object(self):
|
||||
self.assertRoundTrip(
|
||||
NestedForJson(),
|
||||
{'nested': {'for_json': 1}})
|
||||
|
||||
def test_for_json_encodes_list(self):
|
||||
self.assertRoundTrip(
|
||||
ForJsonList(),
|
||||
ForJsonList().for_json())
|
||||
|
||||
def test_for_json_encodes_list_within_object(self):
|
||||
self.assertRoundTrip(
|
||||
{'nested': ForJsonList()},
|
||||
{'nested': ForJsonList().for_json()})
|
||||
|
||||
def test_for_json_encodes_dict_subclass(self):
|
||||
self.assertRoundTrip(
|
||||
DictForJson(a=1),
|
||||
DictForJson(a=1).for_json())
|
||||
|
||||
def test_for_json_encodes_list_subclass(self):
|
||||
self.assertRoundTrip(
|
||||
ListForJson(['l']),
|
||||
ListForJson(['l']).for_json())
|
||||
|
||||
def test_for_json_ignored_if_not_true_with_dict_subclass(self):
|
||||
for for_json in (None, False):
|
||||
self.assertRoundTrip(
|
||||
DictForJson(a=1),
|
||||
{'a': 1},
|
||||
for_json=for_json)
|
||||
|
||||
def test_for_json_ignored_if_not_true_with_list_subclass(self):
|
||||
for for_json in (None, False):
|
||||
self.assertRoundTrip(
|
||||
ListForJson(['l']),
|
||||
['l'],
|
||||
for_json=for_json)
|
||||
|
||||
def test_raises_typeerror_if_for_json_not_true_with_object(self):
|
||||
self.assertRaises(TypeError, json.dumps, ForJson())
|
||||
self.assertRaises(TypeError, json.dumps, ForJson(), for_json=False)
|
|
@ -1,86 +0,0 @@
|
|||
from unittest import TestCase
|
||||
import textwrap
|
||||
|
||||
import simplejson as json
|
||||
from simplejson.compat import StringIO
|
||||
|
||||
class TestIndent(TestCase):
|
||||
def test_indent(self):
|
||||
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh',
|
||||
'i-vhbjkhnth',
|
||||
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
|
||||
|
||||
expect = textwrap.dedent("""\
|
||||
[
|
||||
\t[
|
||||
\t\t"blorpie"
|
||||
\t],
|
||||
\t[
|
||||
\t\t"whoops"
|
||||
\t],
|
||||
\t[],
|
||||
\t"d-shtaeou",
|
||||
\t"d-nthiouh",
|
||||
\t"i-vhbjkhnth",
|
||||
\t{
|
||||
\t\t"nifty": 87
|
||||
\t},
|
||||
\t{
|
||||
\t\t"field": "yes",
|
||||
\t\t"morefield": false
|
||||
\t}
|
||||
]""")
|
||||
|
||||
|
||||
d1 = json.dumps(h)
|
||||
d2 = json.dumps(h, indent='\t', sort_keys=True, separators=(',', ': '))
|
||||
d3 = json.dumps(h, indent=' ', sort_keys=True, separators=(',', ': '))
|
||||
d4 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
|
||||
|
||||
h1 = json.loads(d1)
|
||||
h2 = json.loads(d2)
|
||||
h3 = json.loads(d3)
|
||||
h4 = json.loads(d4)
|
||||
|
||||
self.assertEqual(h1, h)
|
||||
self.assertEqual(h2, h)
|
||||
self.assertEqual(h3, h)
|
||||
self.assertEqual(h4, h)
|
||||
self.assertEqual(d3, expect.replace('\t', ' '))
|
||||
self.assertEqual(d4, expect.replace('\t', ' '))
|
||||
# NOTE: Python 2.4 textwrap.dedent converts tabs to spaces,
|
||||
# so the following is expected to fail. Python 2.4 is not a
|
||||
# supported platform in simplejson 2.1.0+.
|
||||
self.assertEqual(d2, expect)
|
||||
|
||||
def test_indent0(self):
|
||||
h = {3: 1}
|
||||
def check(indent, expected):
|
||||
d1 = json.dumps(h, indent=indent)
|
||||
self.assertEqual(d1, expected)
|
||||
|
||||
sio = StringIO()
|
||||
json.dump(h, sio, indent=indent)
|
||||
self.assertEqual(sio.getvalue(), expected)
|
||||
|
||||
# indent=0 should emit newlines
|
||||
check(0, '{\n"3": 1\n}')
|
||||
# indent=None is more compact
|
||||
check(None, '{"3": 1}')
|
||||
|
||||
def test_separators(self):
|
||||
lst = [1,2,3,4]
|
||||
expect = '[\n1,\n2,\n3,\n4\n]'
|
||||
expect_spaces = '[\n1, \n2, \n3, \n4\n]'
|
||||
# Ensure that separators still works
|
||||
self.assertEqual(
|
||||
expect_spaces,
|
||||
json.dumps(lst, indent=0, separators=(', ', ': ')))
|
||||
# Force the new defaults
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.dumps(lst, indent=0, separators=(',', ': ')))
|
||||
# Added in 2.1.4
|
||||
self.assertEqual(
|
||||
expect,
|
||||
json.dumps(lst, indent=0))
|
|
@ -1,20 +0,0 @@
from unittest import TestCase

import simplejson as json
from operator import itemgetter

class TestItemSortKey(TestCase):
    def test_simple_first(self):
        a = {'a': 1, 'c': 5, 'jack': 'jill', 'pick': 'axe', 'array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
        self.assertEqual(
            '{"a": 1, "c": 5, "crate": "dog", "jack": "jill", "pick": "axe", "zeak": "oh", "array": [1, 5, 6, 9], "tuple": [83, 12, 3]}',
            json.dumps(a, item_sort_key=json.simple_first))

    def test_case(self):
        a = {'a': 1, 'c': 5, 'Jack': 'jill', 'pick': 'axe', 'Array': [1, 5, 6, 9], 'tuple': (83, 12, 3), 'crate': 'dog', 'zeak': 'oh'}
        self.assertEqual(
            '{"Array": [1, 5, 6, 9], "Jack": "jill", "a": 1, "c": 5, "crate": "dog", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
            json.dumps(a, item_sort_key=itemgetter(0)))
        self.assertEqual(
            '{"a": 1, "Array": [1, 5, 6, 9], "c": 5, "crate": "dog", "Jack": "jill", "pick": "axe", "tuple": [83, 12, 3], "zeak": "oh"}',
            json.dumps(a, item_sort_key=lambda kv: kv[0].lower()))

@ -1,122 +0,0 @@
|
|||
from __future__ import absolute_import
|
||||
import unittest
|
||||
import simplejson as json
|
||||
from simplejson.compat import StringIO
|
||||
|
||||
try:
|
||||
from collections import namedtuple
|
||||
except ImportError:
|
||||
class Value(tuple):
|
||||
def __new__(cls, *args):
|
||||
return tuple.__new__(cls, args)
|
||||
|
||||
def _asdict(self):
|
||||
return {'value': self[0]}
|
||||
class Point(tuple):
|
||||
def __new__(cls, *args):
|
||||
return tuple.__new__(cls, args)
|
||||
|
||||
def _asdict(self):
|
||||
return {'x': self[0], 'y': self[1]}
|
||||
else:
|
||||
Value = namedtuple('Value', ['value'])
|
||||
Point = namedtuple('Point', ['x', 'y'])
|
||||
|
||||
class DuckValue(object):
|
||||
def __init__(self, *args):
|
||||
self.value = Value(*args)
|
||||
|
||||
def _asdict(self):
|
||||
return self.value._asdict()
|
||||
|
||||
class DuckPoint(object):
|
||||
def __init__(self, *args):
|
||||
self.point = Point(*args)
|
||||
|
||||
def _asdict(self):
|
||||
return self.point._asdict()
|
||||
|
||||
class DeadDuck(object):
|
||||
_asdict = None
|
||||
|
||||
class DeadDict(dict):
|
||||
_asdict = None
|
||||
|
||||
CONSTRUCTORS = [
|
||||
lambda v: v,
|
||||
lambda v: [v],
|
||||
lambda v: [{'key': v}],
|
||||
]
|
||||
|
||||
class TestNamedTuple(unittest.TestCase):
|
||||
def test_namedtuple_dumps(self):
|
||||
for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]:
|
||||
d = v._asdict()
|
||||
self.assertEqual(d, json.loads(json.dumps(v)))
|
||||
self.assertEqual(
|
||||
d,
|
||||
json.loads(json.dumps(v, namedtuple_as_object=True)))
|
||||
self.assertEqual(d, json.loads(json.dumps(v, tuple_as_array=False)))
|
||||
self.assertEqual(
|
||||
d,
|
||||
json.loads(json.dumps(v, namedtuple_as_object=True,
|
||||
tuple_as_array=False)))
|
||||
|
||||
def test_namedtuple_dumps_false(self):
|
||||
for v in [Value(1), Point(1, 2)]:
|
||||
l = list(v)
|
||||
self.assertEqual(
|
||||
l,
|
||||
json.loads(json.dumps(v, namedtuple_as_object=False)))
|
||||
self.assertRaises(TypeError, json.dumps, v,
|
||||
tuple_as_array=False, namedtuple_as_object=False)
|
||||
|
||||
def test_namedtuple_dump(self):
|
||||
for v in [Value(1), Point(1, 2), DuckValue(1), DuckPoint(1, 2)]:
|
||||
d = v._asdict()
|
||||
sio = StringIO()
|
||||
json.dump(v, sio)
|
||||
self.assertEqual(d, json.loads(sio.getvalue()))
|
||||
sio = StringIO()
|
||||
json.dump(v, sio, namedtuple_as_object=True)
|
||||
self.assertEqual(
|
||||
d,
|
||||
json.loads(sio.getvalue()))
|
||||
sio = StringIO()
|
||||
json.dump(v, sio, tuple_as_array=False)
|
||||
self.assertEqual(d, json.loads(sio.getvalue()))
|
||||
sio = StringIO()
|
||||
json.dump(v, sio, namedtuple_as_object=True,
|
||||
tuple_as_array=False)
|
||||
self.assertEqual(
|
||||
d,
|
||||
json.loads(sio.getvalue()))
|
||||
|
||||
def test_namedtuple_dump_false(self):
|
||||
for v in [Value(1), Point(1, 2)]:
|
||||
l = list(v)
|
||||
sio = StringIO()
|
||||
json.dump(v, sio, namedtuple_as_object=False)
|
||||
self.assertEqual(
|
||||
l,
|
||||
json.loads(sio.getvalue()))
|
||||
self.assertRaises(TypeError, json.dump, v, StringIO(),
|
||||
tuple_as_array=False, namedtuple_as_object=False)
|
||||
|
||||
def test_asdict_not_callable_dump(self):
|
||||
for f in CONSTRUCTORS:
|
||||
self.assertRaises(TypeError,
|
||||
json.dump, f(DeadDuck()), StringIO(), namedtuple_as_object=True)
|
||||
sio = StringIO()
|
||||
json.dump(f(DeadDict()), sio, namedtuple_as_object=True)
|
||||
self.assertEqual(
|
||||
json.dumps(f({})),
|
||||
sio.getvalue())
|
||||
|
||||
def test_asdict_not_callable_dumps(self):
|
||||
for f in CONSTRUCTORS:
|
||||
self.assertRaises(TypeError,
|
||||
json.dumps, f(DeadDuck()), namedtuple_as_object=True)
|
||||
self.assertEqual(
|
||||
json.dumps(f({})),
|
||||
json.dumps(f(DeadDict()), namedtuple_as_object=True))
|
|
@ -1,71 +0,0 @@
|
|||
from unittest import TestCase
|
||||
|
||||
import simplejson as json
|
||||
|
||||
# from http://json.org/JSON_checker/test/pass1.json
|
||||
JSON = r'''
|
||||
[
|
||||
"JSON Test Pattern pass1",
|
||||
{"object with 1 member":["array with 1 element"]},
|
||||
{},
|
||||
[],
|
||||
-42,
|
||||
true,
|
||||
false,
|
||||
null,
|
||||
{
|
||||
"integer": 1234567890,
|
||||
"real": -9876.543210,
|
||||
"e": 0.123456789e-12,
|
||||
"E": 1.234567890E+34,
|
||||
"": 23456789012E66,
|
||||
"zero": 0,
|
||||
"one": 1,
|
||||
"space": " ",
|
||||
"quote": "\"",
|
||||
"backslash": "\\",
|
||||
"controls": "\b\f\n\r\t",
|
||||
"slash": "/ & \/",
|
||||
"alpha": "abcdefghijklmnopqrstuvwyz",
|
||||
"ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ",
|
||||
"digit": "0123456789",
|
||||
"special": "`1~!@#$%^&*()_+-={':[,]}|;.</>?",
|
||||
"hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A",
|
||||
"true": true,
|
||||
"false": false,
|
||||
"null": null,
|
||||
"array":[ ],
|
||||
"object":{ },
|
||||
"address": "50 St. James Street",
|
||||
"url": "http://www.JSON.org/",
|
||||
"comment": "// /* <!-- --",
|
||||
"# -- --> */": " ",
|
||||
" s p a c e d " :[1,2 , 3
|
||||
|
||||
,
|
||||
|
||||
4 , 5 , 6 ,7 ],"compact": [1,2,3,4,5,6,7],
|
||||
"jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}",
|
||||
"quotes": "" \u0022 %22 0x22 034 "",
|
||||
"\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?"
|
||||
: "A key can be any string"
|
||||
},
|
||||
0.5 ,98.6
|
||||
,
|
||||
99.44
|
||||
,
|
||||
|
||||
1066,
|
||||
1e1,
|
||||
0.1e1,
|
||||
1e-1,
|
||||
1e00,2e+00,2e-00
|
||||
,"rosebud"]
|
||||
'''
|
||||
|
||||
class TestPass1(TestCase):
|
||||
def test_parse(self):
|
||||
# test in/out equivalence and parsing
|
||||
res = json.loads(JSON)
|
||||
out = json.dumps(res)
|
||||
self.assertEqual(res, json.loads(out))
|
|
@ -1,14 +0,0 @@
from unittest import TestCase
import simplejson as json

# from http://json.org/JSON_checker/test/pass2.json
JSON = r'''
[[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]]
'''

class TestPass2(TestCase):
    def test_parse(self):
        # test in/out equivalence and parsing
        res = json.loads(JSON)
        out = json.dumps(res)
        self.assertEqual(res, json.loads(out))

@ -1,20 +0,0 @@
from unittest import TestCase

import simplejson as json

# from http://json.org/JSON_checker/test/pass3.json
JSON = r'''
{
    "JSON Test Pattern pass3": {
        "The outermost value": "must be an object or array.",
        "In this test": "It is an object."
    }
}
'''

class TestPass3(TestCase):
    def test_parse(self):
        # test in/out equivalence and parsing
        res = json.loads(JSON)
        out = json.dumps(res)
        self.assertEqual(res, json.loads(out))

@ -1,67 +0,0 @@
from unittest import TestCase

import simplejson as json

class JSONTestObject:
    pass


class RecursiveJSONEncoder(json.JSONEncoder):
    recurse = False
    def default(self, o):
        if o is JSONTestObject:
            if self.recurse:
                return [JSONTestObject]
            else:
                return 'JSONTestObject'
        # call the base implementation with self so the fallback raises the
        # usual "is not JSON serializable" TypeError
        return json.JSONEncoder.default(self, o)


class TestRecursion(TestCase):
    def test_listrecursion(self):
        x = []
        x.append(x)
        try:
            json.dumps(x)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on list recursion")
        x = []
        y = [x]
        x.append(y)
        try:
            json.dumps(x)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on alternating list recursion")
        y = []
        x = [y, y]
        # ensure that the marker is cleared
        json.dumps(x)

    def test_dictrecursion(self):
        x = {}
        x["test"] = x
        try:
            json.dumps(x)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on dict recursion")
        x = {}
        y = {"a": x, "b": x}
        # ensure that the marker is cleared
        json.dumps(y)

    def test_defaultrecursion(self):
        enc = RecursiveJSONEncoder()
        self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"')
        enc.recurse = True
        try:
            enc.encode(JSONTestObject)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on default recursion")

@ -1,194 +0,0 @@
|
|||
import sys
|
||||
from unittest import TestCase
|
||||
|
||||
import simplejson as json
|
||||
import simplejson.decoder
|
||||
from simplejson.compat import b, PY3
|
||||
|
||||
class TestScanString(TestCase):
|
||||
# The bytes type is intentionally not used in most of these tests
|
||||
# under Python 3 because the decoder immediately coerces to str before
|
||||
# calling scanstring. In Python 2 we are testing the code paths
|
||||
# for both unicode and str.
|
||||
#
|
||||
# The reason this is done is because Python 3 would require
|
||||
# entirely different code paths for parsing bytes and str.
|
||||
#
|
||||
def test_py_scanstring(self):
|
||||
self._test_scanstring(simplejson.decoder.py_scanstring)
|
||||
|
||||
def test_c_scanstring(self):
|
||||
if not simplejson.decoder.c_scanstring:
|
||||
return
|
||||
self._test_scanstring(simplejson.decoder.c_scanstring)
|
||||
|
||||
def _test_scanstring(self, scanstring):
|
||||
if sys.maxunicode == 65535:
|
||||
self.assertEqual(
|
||||
scanstring(u'"z\U0001d120x"', 1, None, True),
|
||||
(u'z\U0001d120x', 6))
|
||||
else:
|
||||
self.assertEqual(
|
||||
scanstring(u'"z\U0001d120x"', 1, None, True),
|
||||
(u'z\U0001d120x', 5))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('"\\u007b"', 1, None, True),
|
||||
(u'{', 8))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('"A JSON payload should be an object or array, not a string."', 1, None, True),
|
||||
(u'A JSON payload should be an object or array, not a string.', 60))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('["Unclosed array"', 2, None, True),
|
||||
(u'Unclosed array', 17))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('["extra comma",]', 2, None, True),
|
||||
(u'extra comma', 14))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('["double extra comma",,]', 2, None, True),
|
||||
(u'double extra comma', 21))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('["Comma after the close"],', 2, None, True),
|
||||
(u'Comma after the close', 24))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('["Extra close"]]', 2, None, True),
|
||||
(u'Extra close', 14))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('{"Extra comma": true,}', 2, None, True),
|
||||
(u'Extra comma', 14))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('{"Extra value after close": true} "misplaced quoted value"', 2, None, True),
|
||||
(u'Extra value after close', 26))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('{"Illegal expression": 1 + 2}', 2, None, True),
|
||||
(u'Illegal expression', 21))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('{"Illegal invocation": alert()}', 2, None, True),
|
||||
(u'Illegal invocation', 21))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('{"Numbers cannot have leading zeroes": 013}', 2, None, True),
|
||||
(u'Numbers cannot have leading zeroes', 37))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('{"Numbers cannot be hex": 0x14}', 2, None, True),
|
||||
(u'Numbers cannot be hex', 24))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]', 21, None, True),
|
||||
(u'Too deep', 30))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('{"Missing colon" null}', 2, None, True),
|
||||
(u'Missing colon', 16))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('{"Double colon":: null}', 2, None, True),
|
||||
(u'Double colon', 15))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('{"Comma instead of colon", null}', 2, None, True),
|
||||
(u'Comma instead of colon', 25))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('["Colon instead of comma": false]', 2, None, True),
|
||||
(u'Colon instead of comma', 25))
|
||||
|
||||
self.assertEqual(
|
||||
scanstring('["Bad value", truth]', 2, None, True),
|
||||
(u'Bad value', 12))
|
||||
|
||||
for c in map(chr, range(0x00, 0x1f)):
|
||||
self.assertEqual(
|
||||
scanstring(c + '"', 0, None, False),
|
||||
(c, 2))
|
||||
self.assertRaises(
|
||||
ValueError,
|
||||
scanstring, c + '"', 0, None, True)
|
||||
|
||||
self.assertRaises(ValueError, scanstring, '', 0, None, True)
|
||||
self.assertRaises(ValueError, scanstring, 'a', 0, None, True)
|
||||
self.assertRaises(ValueError, scanstring, '\\', 0, None, True)
|
||||
self.assertRaises(ValueError, scanstring, '\\u', 0, None, True)
|
||||
self.assertRaises(ValueError, scanstring, '\\u0', 0, None, True)
|
||||
self.assertRaises(ValueError, scanstring, '\\u01', 0, None, True)
|
||||
self.assertRaises(ValueError, scanstring, '\\u012', 0, None, True)
|
||||
self.assertRaises(ValueError, scanstring, '\\u0123', 0, None, True)
|
||||
if sys.maxunicode > 65535:
|
||||
self.assertRaises(ValueError,
|
||||
scanstring, '\\ud834\\u"', 0, None, True)
|
||||
self.assertRaises(ValueError,
|
||||
scanstring, '\\ud834\\x0123"', 0, None, True)
|
||||
|
||||
def test_issue3623(self):
|
||||
self.assertRaises(ValueError, json.decoder.scanstring, "xxx", 1,
|
||||
"xxx")
|
||||
self.assertRaises(UnicodeDecodeError,
|
||||
json.encoder.encode_basestring_ascii, b("xx\xff"))
|
||||
|
||||
def test_overflow(self):
|
||||
# Python 2.5 does not have maxsize, Python 3 does not have maxint
|
||||
maxsize = getattr(sys, 'maxsize', getattr(sys, 'maxint', None))
|
||||
assert maxsize is not None
|
||||
self.assertRaises(OverflowError, json.decoder.scanstring, "xxx",
|
||||
maxsize + 1)
|
||||
|
||||
def test_surrogates(self):
|
||||
scanstring = json.decoder.scanstring
|
||||
|
||||
def assertScan(given, expect, test_utf8=True):
|
||||
givens = [given]
|
||||
if not PY3 and test_utf8:
|
||||
givens.append(given.encode('utf8'))
|
||||
for given in givens:
|
||||
(res, count) = scanstring(given, 1, None, True)
|
||||
self.assertEqual(len(given), count)
|
||||
self.assertEqual(res, expect)
|
||||
|
||||
assertScan(
|
||||
u'"z\\ud834\\u0079x"',
|
||||
u'z\ud834yx')
|
||||
assertScan(
|
||||
u'"z\\ud834\\udd20x"',
|
||||
u'z\U0001d120x')
|
||||
assertScan(
|
||||
u'"z\\ud834\\ud834\\udd20x"',
|
||||
u'z\ud834\U0001d120x')
|
||||
assertScan(
|
||||
u'"z\\ud834x"',
|
||||
u'z\ud834x')
|
||||
assertScan(
|
||||
u'"z\\udd20x"',
|
||||
u'z\udd20x')
|
||||
assertScan(
|
||||
u'"z\ud834x"',
|
||||
u'z\ud834x')
|
||||
# It may look strange to join strings together, but Python is drunk.
|
||||
# https://gist.github.com/etrepum/5538443
|
||||
assertScan(
|
||||
u'"z\\ud834\udd20x12345"',
|
||||
u''.join([u'z\ud834', u'\udd20x12345']))
|
||||
assertScan(
|
||||
u'"z\ud834\\udd20x"',
|
||||
u''.join([u'z\ud834', u'\udd20x']))
|
||||
# these have different behavior given UTF8 input, because the surrogate
|
||||
# pair may be joined (in maxunicode > 65535 builds)
|
||||
assertScan(
|
||||
u''.join([u'"z\ud834', u'\udd20x"']),
|
||||
u''.join([u'z\ud834', u'\udd20x']),
|
||||
test_utf8=False)
|
||||
|
||||
self.assertRaises(ValueError,
|
||||
scanstring, u'"z\\ud83x"', 1, None, True)
|
||||
self.assertRaises(ValueError,
|
||||
scanstring, u'"z\\ud834\\udd2x"', 1, None, True)
|
|
@ -1,42 +0,0 @@
|
|||
import textwrap
|
||||
from unittest import TestCase
|
||||
|
||||
import simplejson as json
|
||||
|
||||
|
||||
class TestSeparators(TestCase):
|
||||
def test_separators(self):
|
||||
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
|
||||
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
|
||||
|
||||
expect = textwrap.dedent("""\
|
||||
[
|
||||
[
|
||||
"blorpie"
|
||||
] ,
|
||||
[
|
||||
"whoops"
|
||||
] ,
|
||||
[] ,
|
||||
"d-shtaeou" ,
|
||||
"d-nthiouh" ,
|
||||
"i-vhbjkhnth" ,
|
||||
{
|
||||
"nifty" : 87
|
||||
} ,
|
||||
{
|
||||
"field" : "yes" ,
|
||||
"morefield" : false
|
||||
}
|
||||
]""")
|
||||
|
||||
|
||||
d1 = json.dumps(h)
|
||||
d2 = json.dumps(h, indent=' ', sort_keys=True, separators=(' ,', ' : '))
|
||||
|
||||
h1 = json.loads(d1)
|
||||
h2 = json.loads(d2)
|
||||
|
||||
self.assertEqual(h1, h)
|
||||
self.assertEqual(h2, h)
|
||||
self.assertEqual(d2, expect)
|
|
@ -1,39 +0,0 @@
import sys
import unittest
from unittest import TestCase

from simplejson import encoder, scanner


def has_speedups():
    return encoder.c_make_encoder is not None


def skip_if_speedups_missing(func):
    def wrapper(*args, **kwargs):
        if not has_speedups():
            if hasattr(unittest, 'SkipTest'):
                raise unittest.SkipTest("C Extension not available")
            else:
                sys.stdout.write("C Extension not available")
                return
        return func(*args, **kwargs)

    return wrapper


class TestDecode(TestCase):
    @skip_if_speedups_missing
    def test_make_scanner(self):
        self.assertRaises(AttributeError, scanner.c_make_scanner, 1)

    @skip_if_speedups_missing
    def test_make_encoder(self):
        self.assertRaises(
            TypeError,
            encoder.c_make_encoder,
            None,
            ("\xCD\x7D\x3D\x4E\x12\x4C\xF9\x79\xD7"
             "\x52\xBA\x82\xF2\x27\x4A\x7D\xA0\xCA\x75"),
            None
        )

@ -1,97 +0,0 @@
|
|||
from __future__ import with_statement
|
||||
import os
|
||||
import sys
|
||||
import textwrap
|
||||
import unittest
|
||||
import subprocess
|
||||
import tempfile
|
||||
try:
|
||||
# Python 3.x
|
||||
from test.support import strip_python_stderr
|
||||
except ImportError:
|
||||
# Python 2.6+
|
||||
try:
|
||||
from test.test_support import strip_python_stderr
|
||||
except ImportError:
|
||||
# Python 2.5
|
||||
import re
|
||||
def strip_python_stderr(stderr):
|
||||
return re.sub(
|
||||
r"\[\d+ refs\]\r?\n?$".encode(),
|
||||
"".encode(),
|
||||
stderr).strip()
|
||||
|
||||
class TestTool(unittest.TestCase):
|
||||
data = """
|
||||
|
||||
[["blorpie"],[ "whoops" ] , [
|
||||
],\t"d-shtaeou",\r"d-nthiouh",
|
||||
"i-vhbjkhnth", {"nifty":87}, {"morefield" :\tfalse,"field"
|
||||
:"yes"} ]
|
||||
"""
|
||||
|
||||
expect = textwrap.dedent("""\
|
||||
[
|
||||
[
|
||||
"blorpie"
|
||||
],
|
||||
[
|
||||
"whoops"
|
||||
],
|
||||
[],
|
||||
"d-shtaeou",
|
||||
"d-nthiouh",
|
||||
"i-vhbjkhnth",
|
||||
{
|
||||
"nifty": 87
|
||||
},
|
||||
{
|
||||
"field": "yes",
|
||||
"morefield": false
|
||||
}
|
||||
]
|
||||
""")
|
||||
|
||||
def runTool(self, args=None, data=None):
|
||||
argv = [sys.executable, '-m', 'simplejson.tool']
|
||||
if args:
|
||||
argv.extend(args)
|
||||
proc = subprocess.Popen(argv,
|
||||
stdin=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE)
|
||||
out, err = proc.communicate(data)
|
||||
self.assertEqual(strip_python_stderr(err), ''.encode())
|
||||
self.assertEqual(proc.returncode, 0)
|
||||
return out
|
||||
|
||||
def test_stdin_stdout(self):
|
||||
self.assertEqual(
|
||||
self.runTool(data=self.data.encode()),
|
||||
self.expect.encode())
|
||||
|
||||
def test_infile_stdout(self):
|
||||
with tempfile.NamedTemporaryFile() as infile:
|
||||
infile.write(self.data.encode())
|
||||
infile.flush()
|
||||
self.assertEqual(
|
||||
self.runTool(args=[infile.name]),
|
||||
self.expect.encode())
|
||||
|
||||
def test_infile_outfile(self):
|
||||
with tempfile.NamedTemporaryFile() as infile:
|
||||
infile.write(self.data.encode())
|
||||
infile.flush()
|
||||
# outfile will get overwritten by tool, so the delete
|
||||
# may not work on some platforms. Do it manually.
|
||||
outfile = tempfile.NamedTemporaryFile()
|
||||
try:
|
||||
self.assertEqual(
|
||||
self.runTool(args=[infile.name, outfile.name]),
|
||||
''.encode())
|
||||
with open(outfile.name, 'rb') as f:
|
||||
self.assertEqual(f.read(), self.expect.encode())
|
||||
finally:
|
||||
outfile.close()
|
||||
if os.path.exists(outfile.name):
|
||||
os.unlink(outfile.name)
|
|
@ -1,51 +0,0 @@
|
|||
import unittest
|
||||
|
||||
from simplejson.compat import StringIO
|
||||
import simplejson as json
|
||||
|
||||
class TestTuples(unittest.TestCase):
|
||||
def test_tuple_array_dumps(self):
|
||||
t = (1, 2, 3)
|
||||
expect = json.dumps(list(t))
|
||||
# Default is True
|
||||
self.assertEqual(expect, json.dumps(t))
|
||||
self.assertEqual(expect, json.dumps(t, tuple_as_array=True))
|
||||
self.assertRaises(TypeError, json.dumps, t, tuple_as_array=False)
|
||||
# Ensure that the "default" does not get called
|
||||
self.assertEqual(expect, json.dumps(t, default=repr))
|
||||
self.assertEqual(expect, json.dumps(t, tuple_as_array=True,
|
||||
default=repr))
|
||||
# Ensure that the "default" gets called
|
||||
self.assertEqual(
|
||||
json.dumps(repr(t)),
|
||||
json.dumps(t, tuple_as_array=False, default=repr))
|
||||
|
||||
def test_tuple_array_dump(self):
|
||||
t = (1, 2, 3)
|
||||
expect = json.dumps(list(t))
|
||||
# Default is True
|
||||
sio = StringIO()
|
||||
json.dump(t, sio)
|
||||
self.assertEqual(expect, sio.getvalue())
|
||||
sio = StringIO()
|
||||
json.dump(t, sio, tuple_as_array=True)
|
||||
self.assertEqual(expect, sio.getvalue())
|
||||
self.assertRaises(TypeError, json.dump, t, StringIO(),
|
||||
tuple_as_array=False)
|
||||
# Ensure that the "default" does not get called
|
||||
sio = StringIO()
|
||||
json.dump(t, sio, default=repr)
|
||||
self.assertEqual(expect, sio.getvalue())
|
||||
sio = StringIO()
|
||||
json.dump(t, sio, tuple_as_array=True, default=repr)
|
||||
self.assertEqual(expect, sio.getvalue())
|
||||
# Ensure that the "default" gets called
|
||||
sio = StringIO()
|
||||
json.dump(t, sio, tuple_as_array=False, default=repr)
|
||||
self.assertEqual(
|
||||
json.dumps(repr(t)),
|
||||
sio.getvalue())
|
||||
|
||||
class TestNamedTuple(unittest.TestCase):
|
||||
def test_namedtuple_dump(self):
|
||||
pass
|
|
@ -1,153 +0,0 @@
|
|||
import sys
|
||||
import codecs
|
||||
from unittest import TestCase
|
||||
|
||||
import simplejson as json
|
||||
from simplejson.compat import unichr, text_type, b, u, BytesIO
|
||||
|
||||
class TestUnicode(TestCase):
|
||||
def test_encoding1(self):
|
||||
encoder = json.JSONEncoder(encoding='utf-8')
|
||||
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
|
||||
s = u.encode('utf-8')
|
||||
ju = encoder.encode(u)
|
||||
js = encoder.encode(s)
|
||||
self.assertEqual(ju, js)
|
||||
|
||||
def test_encoding2(self):
|
||||
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
|
||||
s = u.encode('utf-8')
|
||||
ju = json.dumps(u, encoding='utf-8')
|
||||
js = json.dumps(s, encoding='utf-8')
|
||||
self.assertEqual(ju, js)
|
||||
|
||||
def test_encoding3(self):
|
||||
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
|
||||
j = json.dumps(u)
|
||||
self.assertEqual(j, '"\\u03b1\\u03a9"')
|
||||
|
||||
def test_encoding4(self):
|
||||
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
|
||||
j = json.dumps([u])
|
||||
self.assertEqual(j, '["\\u03b1\\u03a9"]')
|
||||
|
||||
def test_encoding5(self):
|
||||
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
|
||||
j = json.dumps(u, ensure_ascii=False)
|
||||
self.assertEqual(j, u'"' + u + u'"')
|
||||
|
||||
def test_encoding6(self):
|
||||
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
|
||||
j = json.dumps([u], ensure_ascii=False)
|
||||
self.assertEqual(j, u'["' + u + u'"]')
|
||||
|
||||
def test_big_unicode_encode(self):
|
||||
u = u'\U0001d120'
|
||||
self.assertEqual(json.dumps(u), '"\\ud834\\udd20"')
|
||||
self.assertEqual(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
|
||||
|
||||
def test_big_unicode_decode(self):
|
||||
u = u'z\U0001d120x'
|
||||
self.assertEqual(json.loads('"' + u + '"'), u)
|
||||
self.assertEqual(json.loads('"z\\ud834\\udd20x"'), u)
|
||||
|
||||
def test_unicode_decode(self):
|
||||
for i in range(0, 0xd7ff):
|
||||
u = unichr(i)
|
||||
#s = '"\\u{0:04x}"'.format(i)
|
||||
s = '"\\u%04x"' % (i,)
|
||||
self.assertEqual(json.loads(s), u)
|
||||
|
||||
def test_object_pairs_hook_with_unicode(self):
|
||||
s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
|
||||
p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
|
||||
(u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
|
||||
self.assertEqual(json.loads(s), eval(s))
|
||||
self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
|
||||
od = json.loads(s, object_pairs_hook=json.OrderedDict)
|
||||
self.assertEqual(od, json.OrderedDict(p))
|
||||
self.assertEqual(type(od), json.OrderedDict)
|
||||
# the object_pairs_hook takes priority over the object_hook
|
||||
self.assertEqual(json.loads(s,
|
||||
object_pairs_hook=json.OrderedDict,
|
||||
object_hook=lambda x: None),
|
||||
json.OrderedDict(p))
|
||||
|
||||
|
||||
def test_default_encoding(self):
|
||||
self.assertEqual(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
|
||||
{'a': u'\xe9'})
|
||||
|
||||
def test_unicode_preservation(self):
|
||||
self.assertEqual(type(json.loads(u'""')), text_type)
|
||||
self.assertEqual(type(json.loads(u'"a"')), text_type)
|
||||
self.assertEqual(type(json.loads(u'["a"]')[0]), text_type)
|
||||
|
||||
def test_ensure_ascii_false_returns_unicode(self):
|
||||
# http://code.google.com/p/simplejson/issues/detail?id=48
|
||||
self.assertEqual(type(json.dumps([], ensure_ascii=False)), text_type)
|
||||
self.assertEqual(type(json.dumps(0, ensure_ascii=False)), text_type)
|
||||
self.assertEqual(type(json.dumps({}, ensure_ascii=False)), text_type)
|
||||
self.assertEqual(type(json.dumps("", ensure_ascii=False)), text_type)
|
||||
|
||||
def test_ensure_ascii_false_bytestring_encoding(self):
|
||||
# http://code.google.com/p/simplejson/issues/detail?id=48
|
||||
doc1 = {u'quux': b('Arr\xc3\xaat sur images')}
|
||||
doc2 = {u'quux': u('Arr\xeat sur images')}
|
||||
doc_ascii = '{"quux": "Arr\\u00eat sur images"}'
|
||||
doc_unicode = u'{"quux": "Arr\xeat sur images"}'
|
||||
self.assertEqual(json.dumps(doc1), doc_ascii)
|
||||
self.assertEqual(json.dumps(doc2), doc_ascii)
|
||||
self.assertEqual(json.dumps(doc1, ensure_ascii=False), doc_unicode)
|
||||
self.assertEqual(json.dumps(doc2, ensure_ascii=False), doc_unicode)
|
||||
|
||||
def test_ensure_ascii_linebreak_encoding(self):
|
||||
# http://timelessrepo.com/json-isnt-a-javascript-subset
|
||||
s1 = u'\u2029\u2028'
|
||||
s2 = s1.encode('utf8')
|
||||
expect = '"\\u2029\\u2028"'
|
||||
self.assertEqual(json.dumps(s1), expect)
|
||||
self.assertEqual(json.dumps(s2), expect)
|
||||
self.assertEqual(json.dumps(s1, ensure_ascii=False), expect)
|
||||
self.assertEqual(json.dumps(s2, ensure_ascii=False), expect)
|
||||
|
||||
def test_invalid_escape_sequences(self):
|
||||
# incomplete escape sequence
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1234')
|
||||
# invalid escape sequence
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u123x"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u12x4"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\u1x34"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ux234"')
|
||||
if sys.maxunicode > 65535:
|
||||
# invalid escape sequence for low surrogate
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u000x"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u00x0"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\u0x00"')
|
||||
self.assertRaises(json.JSONDecodeError, json.loads, '"\\ud800\\ux000"')
|
||||
|
||||
def test_ensure_ascii_still_works(self):
|
||||
# in the ascii range, ensure that everything is the same
|
||||
for c in map(unichr, range(0, 127)):
|
||||
self.assertEqual(
|
||||
json.dumps(c, ensure_ascii=False),
|
||||
json.dumps(c))
|
||||
        snowman = u'\N{SNOWMAN}'
        self.assertEqual(
            json.dumps(snowman, ensure_ascii=False),
            u'"' + snowman + u'"')
|
||||
|
||||
def test_strip_bom(self):
|
||||
content = u"\u3053\u3093\u306b\u3061\u308f"
|
||||
json_doc = codecs.BOM_UTF8 + b(json.dumps(content))
|
||||
self.assertEqual(json.load(BytesIO(json_doc)), content)
|
||||
for doc in json_doc, json_doc.decode('utf8'):
|
||||
self.assertEqual(json.loads(doc), content)
|
|
@ -1,42 +0,0 @@
r"""Command-line tool to validate and pretty-print JSON

Usage::

    $ echo '{"json":"obj"}' | python -m simplejson.tool
    {
        "json": "obj"
    }
    $ echo '{ 1.2:3.4}' | python -m simplejson.tool
    Expecting property name: line 1 column 2 (char 2)

"""
from __future__ import with_statement
import sys
import simplejson as json

def main():
    if len(sys.argv) == 1:
        infile = sys.stdin
        outfile = sys.stdout
    elif len(sys.argv) == 2:
        infile = open(sys.argv[1], 'r')
        outfile = sys.stdout
    elif len(sys.argv) == 3:
        infile = open(sys.argv[1], 'r')
        outfile = open(sys.argv[2], 'w')
    else:
        raise SystemExit(sys.argv[0] + " [infile [outfile]]")
    with infile:
        try:
            obj = json.load(infile,
                            object_pairs_hook=json.OrderedDict,
                            use_decimal=True)
        except ValueError:
            raise SystemExit(sys.exc_info()[1])
    with outfile:
        json.dump(obj, outfile, sort_keys=True, indent='    ', use_decimal=True)
        outfile.write('\n')


if __name__ == '__main__':
    main()

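A minimal programmatic equivalent of the tool above, shown only as a sketch
(it is not part of the deleted module); it uses the same options that main()
passes to load() and dump()::

    import simplejson as json

    raw = '{"json": "obj", "n": 1.50}'
    # keep key order and decode numbers as Decimal, as the tool does
    obj = json.loads(raw, object_pairs_hook=json.OrderedDict, use_decimal=True)
    # pretty-print with sorted keys and 4-space indentation
    print(json.dumps(obj, sort_keys=True, indent='    ', use_decimal=True))
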
@ -1,132 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# bitmap distribution font (bdf) file parser
|
||||
#
|
||||
# history:
|
||||
# 1996-05-16 fl created (as bdf2pil)
|
||||
# 1997-08-25 fl converted to FontFile driver
|
||||
# 2001-05-25 fl removed bogus __init__ call
|
||||
# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
|
||||
# 2003-04-22 fl more robustification (from Graham Dumpleton)
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1997-2003 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
from PIL import FontFile
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# parse X Bitmap Distribution Format (BDF)
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
bdf_slant = {
|
||||
"R": "Roman",
|
||||
"I": "Italic",
|
||||
"O": "Oblique",
|
||||
"RI": "Reverse Italic",
|
||||
"RO": "Reverse Oblique",
|
||||
"OT": "Other"
|
||||
}
|
||||
|
||||
bdf_spacing = {
|
||||
"P": "Proportional",
|
||||
"M": "Monospaced",
|
||||
"C": "Cell"
|
||||
}
|
||||
|
||||
def bdf_char(f):
|
||||
|
||||
# skip to STARTCHAR
|
||||
while True:
|
||||
s = f.readline()
|
||||
if not s:
|
||||
return None
|
||||
if s[:9] == b"STARTCHAR":
|
||||
break
|
||||
id = s[9:].strip().decode('ascii')
|
||||
|
||||
# load symbol properties
|
||||
props = {}
|
||||
while True:
|
||||
s = f.readline()
|
||||
if not s or s[:6] == b"BITMAP":
|
||||
break
|
||||
i = s.find(b" ")
|
||||
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
|
||||
|
||||
# load bitmap
|
||||
bitmap = []
|
||||
while True:
|
||||
s = f.readline()
|
||||
if not s or s[:7] == b"ENDCHAR":
|
||||
break
|
||||
bitmap.append(s[:-1])
|
||||
bitmap = b"".join(bitmap)
|
||||
|
||||
[x, y, l, d] = [int(s) for s in props["BBX"].split()]
|
||||
[dx, dy] = [int(s) for s in props["DWIDTH"].split()]
|
||||
|
||||
bbox = (dx, dy), (l, -d-y, x+l, -d), (0, 0, x, y)
|
||||
|
||||
try:
|
||||
im = Image.frombytes("1", (x, y), bitmap, "hex", "1")
|
||||
except ValueError:
|
||||
# deal with zero-width characters
|
||||
im = Image.new("1", (x, y))
|
||||
|
||||
return id, int(props["ENCODING"]), bbox, im
|
||||
|
||||
##
|
||||
# Font file plugin for the X11 BDF format.
|
||||
|
||||
class BdfFontFile(FontFile.FontFile):
|
||||
|
||||
def __init__(self, fp):
|
||||
|
||||
FontFile.FontFile.__init__(self)
|
||||
|
||||
s = fp.readline()
|
||||
if s[:13] != b"STARTFONT 2.1":
|
||||
raise SyntaxError("not a valid BDF file")
|
||||
|
||||
props = {}
|
||||
comments = []
|
||||
|
||||
while True:
|
||||
s = fp.readline()
|
||||
if not s or s[:13] == b"ENDPROPERTIES":
|
||||
break
|
||||
i = s.find(b" ")
|
||||
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
|
||||
if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
|
||||
if s.find(b"LogicalFontDescription") < 0:
|
||||
comments.append(s[i+1:-1].decode('ascii'))
|
||||
|
||||
font = props["FONT"].split("-")
|
||||
|
||||
font[4] = bdf_slant[font[4].upper()]
|
||||
font[11] = bdf_spacing[font[11].upper()]
|
||||
|
||||
ascent = int(props["FONT_ASCENT"])
|
||||
descent = int(props["FONT_DESCENT"])
|
||||
|
||||
fontname = ";".join(font[1:])
|
||||
|
||||
# print "#", fontname
|
||||
# for i in comments:
|
||||
# print "#", i
|
||||
|
||||
font = []
|
||||
while True:
|
||||
c = bdf_char(fp)
|
||||
if not c:
|
||||
break
|
||||
id, ch, (xy, dst, src), im = c
|
||||
if 0 <= ch < len(self.glyph):
|
||||
self.glyph[ch] = xy, dst, src, im
|
|
@ -1,260 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# BMP file handler
|
||||
#
|
||||
# Windows (and OS/2) native bitmap storage format.
|
||||
#
|
||||
# history:
|
||||
# 1995-09-01 fl Created
|
||||
# 1996-04-30 fl Added save
|
||||
# 1997-08-27 fl Fixed save of 1-bit images
|
||||
# 1998-03-06 fl Load P images as L where possible
|
||||
# 1998-07-03 fl Load P images as 1 where possible
|
||||
# 1998-12-29 fl Handle small palettes
|
||||
# 2002-12-30 fl Fixed load of 1-bit palette images
|
||||
# 2003-04-21 fl Fixed load of 1-bit monochrome images
|
||||
# 2003-04-23 fl Added limited support for BI_BITFIELDS compression
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB
|
||||
# Copyright (c) 1995-2003 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
__version__ = "0.7"
|
||||
|
||||
|
||||
from PIL import Image, ImageFile, ImagePalette, _binary
|
||||
import math
|
||||
|
||||
i8 = _binary.i8
|
||||
i16 = _binary.i16le
|
||||
i32 = _binary.i32le
|
||||
o8 = _binary.o8
|
||||
o16 = _binary.o16le
|
||||
o32 = _binary.o32le
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Read BMP file
|
||||
|
||||
BIT2MODE = {
|
||||
# bits => mode, rawmode
|
||||
1: ("P", "P;1"),
|
||||
4: ("P", "P;4"),
|
||||
8: ("P", "P"),
|
||||
16: ("RGB", "BGR;15"),
|
||||
24: ("RGB", "BGR"),
|
||||
32: ("RGB", "BGRX")
|
||||
}
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:2] == b"BM"
|
||||
|
||||
##
|
||||
# Image plugin for the Windows BMP format.
|
||||
|
||||
class BmpImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "BMP"
|
||||
format_description = "Windows Bitmap"
|
||||
|
||||
def _bitmap(self, header = 0, offset = 0):
|
||||
|
||||
if header:
|
||||
self.fp.seek(header)
|
||||
|
||||
read = self.fp.read
|
||||
|
||||
# CORE/INFO
|
||||
s = read(4)
|
||||
s = s + ImageFile._safe_read(self.fp, i32(s)-4)
|
||||
|
||||
if len(s) == 12:
|
||||
|
||||
# OS/2 1.0 CORE
|
||||
bits = i16(s[10:])
|
||||
self.size = i16(s[4:]), i16(s[6:])
|
||||
compression = 0
|
||||
lutsize = 3
|
||||
colors = 0
|
||||
direction = -1
|
||||
|
||||
elif len(s) in [40, 64, 108, 124]:
|
||||
|
||||
# WIN 3.1 or OS/2 2.0 INFO
|
||||
bits = i16(s[14:])
|
||||
self.size = i32(s[4:]), i32(s[8:])
|
||||
compression = i32(s[16:])
|
||||
pxperm = (i32(s[24:]), i32(s[28:])) # Pixels per meter
|
||||
lutsize = 4
|
||||
colors = i32(s[32:])
|
||||
direction = -1
|
||||
if i8(s[11]) == 0xff:
|
||||
# upside-down storage
|
||||
self.size = self.size[0], 2**32 - self.size[1]
|
||||
direction = 0
|
||||
|
||||
self.info["dpi"] = tuple(map(lambda x: math.ceil(x / 39.3701), pxperm))
|
||||
|
||||
else:
|
||||
raise IOError("Unsupported BMP header type (%d)" % len(s))
|
||||
|
||||
if (self.size[0]*self.size[1]) > 2**31:
|
||||
# Prevent DOS for > 2gb images
|
||||
raise IOError("Unsupported BMP Size: (%dx%d)" % self.size)
|
||||
|
||||
if not colors:
|
||||
colors = 1 << bits
|
||||
|
||||
# MODE
|
||||
try:
|
||||
self.mode, rawmode = BIT2MODE[bits]
|
||||
except KeyError:
|
||||
raise IOError("Unsupported BMP pixel depth (%d)" % bits)
|
||||
|
||||
if compression == 3:
|
||||
# BI_BITFIELDS compression
|
||||
mask = i32(read(4)), i32(read(4)), i32(read(4))
|
||||
if bits == 32 and mask == (0xff0000, 0x00ff00, 0x0000ff):
|
||||
rawmode = "BGRX"
|
||||
elif bits == 16 and mask == (0x00f800, 0x0007e0, 0x00001f):
|
||||
rawmode = "BGR;16"
|
||||
elif bits == 16 and mask == (0x007c00, 0x0003e0, 0x00001f):
|
||||
rawmode = "BGR;15"
|
||||
else:
|
||||
# print bits, map(hex, mask)
|
||||
raise IOError("Unsupported BMP bitfields layout")
|
||||
elif compression != 0:
|
||||
raise IOError("Unsupported BMP compression (%d)" % compression)
|
||||
|
||||
# LUT
|
||||
if self.mode == "P":
|
||||
palette = []
|
||||
greyscale = 1
|
||||
if colors == 2:
|
||||
indices = (0, 255)
|
||||
elif colors > 2**16 or colors <=0: #We're reading a i32.
|
||||
raise IOError("Unsupported BMP Palette size (%d)" % colors)
|
||||
else:
|
||||
indices = list(range(colors))
|
||||
for i in indices:
|
||||
rgb = read(lutsize)[:3]
|
||||
if rgb != o8(i)*3:
|
||||
greyscale = 0
|
||||
palette.append(rgb)
|
||||
if greyscale:
|
||||
if colors == 2:
|
||||
self.mode = rawmode = "1"
|
||||
else:
|
||||
self.mode = rawmode = "L"
|
||||
else:
|
||||
self.mode = "P"
|
||||
self.palette = ImagePalette.raw(
|
||||
"BGR", b"".join(palette)
|
||||
)
|
||||
|
||||
if not offset:
|
||||
offset = self.fp.tell()
|
||||
|
||||
self.tile = [("raw",
|
||||
(0, 0) + self.size,
|
||||
offset,
|
||||
(rawmode, ((self.size[0]*bits+31)>>3)&(~3), direction))]
|
||||
|
||||
self.info["compression"] = compression
|
||||
|
||||
def _open(self):
|
||||
|
||||
# HEAD
|
||||
s = self.fp.read(14)
|
||||
if s[:2] != b"BM":
|
||||
raise SyntaxError("Not a BMP file")
|
||||
offset = i32(s[10:])
|
||||
|
||||
self._bitmap(offset=offset)
|
||||
|
||||
|
||||
class DibImageFile(BmpImageFile):
|
||||
|
||||
format = "DIB"
|
||||
format_description = "Windows Bitmap"
|
||||
|
||||
def _open(self):
|
||||
self._bitmap()
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Write BMP file
|
||||
|
||||
SAVE = {
|
||||
"1": ("1", 1, 2),
|
||||
"L": ("L", 8, 256),
|
||||
"P": ("P", 8, 256),
|
||||
"RGB": ("BGR", 24, 0),
|
||||
}
|
||||
|
||||
def _save(im, fp, filename, check=0):
|
||||
|
||||
try:
|
||||
rawmode, bits, colors = SAVE[im.mode]
|
||||
except KeyError:
|
||||
raise IOError("cannot write mode %s as BMP" % im.mode)
|
||||
|
||||
if check:
|
||||
return check
|
||||
|
||||
info = im.encoderinfo
|
||||
|
||||
dpi = info.get("dpi", (96, 96))
|
||||
|
||||
# 1 meter == 39.3701 inches
|
||||
ppm = tuple(map(lambda x: int(x * 39.3701), dpi))
|
||||
|
||||
stride = ((im.size[0]*bits+7)//8+3)&(~3)
|
||||
header = 40 # or 64 for OS/2 version 2
|
||||
offset = 14 + header + colors * 4
|
||||
image = stride * im.size[1]
|
||||
|
||||
# bitmap header
|
||||
fp.write(b"BM" + # file type (magic)
|
||||
o32(offset+image) + # file size
|
||||
o32(0) + # reserved
|
||||
o32(offset)) # image data offset
|
||||
|
||||
# bitmap info header
|
||||
fp.write(o32(header) + # info header size
|
||||
o32(im.size[0]) + # width
|
||||
o32(im.size[1]) + # height
|
||||
o16(1) + # planes
|
||||
o16(bits) + # depth
|
||||
o32(0) + # compression (0=uncompressed)
|
||||
o32(image) + # size of bitmap
|
||||
o32(ppm[0]) + o32(ppm[1]) + # resolution
|
||||
o32(colors) + # colors used
|
||||
o32(colors)) # colors important
|
||||
|
||||
fp.write(b"\0" * (header - 40)) # padding (for OS/2 format)
|
||||
|
||||
if im.mode == "1":
|
||||
for i in (0, 255):
|
||||
fp.write(o8(i) * 4)
|
||||
elif im.mode == "L":
|
||||
for i in range(256):
|
||||
fp.write(o8(i) * 4)
|
||||
elif im.mode == "P":
|
||||
fp.write(im.im.getpalette("RGB", "BGRX"))
|
||||
|
||||
ImageFile._save(im, fp, [("raw", (0,0)+im.size, 0, (rawmode, stride, -1))])
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Registry
|
||||
|
||||
Image.register_open(BmpImageFile.format, BmpImageFile, _accept)
|
||||
Image.register_save(BmpImageFile.format, _save)
|
||||
|
||||
Image.register_extension(BmpImageFile.format, ".bmp")
|
|
@ -1,68 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# BUFR stub adapter
#
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#

from PIL import Image, ImageFile

_handler = None

##
# Install application-specific BUFR image handler.
#
# @param handler Handler object.

def register_handler(handler):
    global _handler
    _handler = handler

# --------------------------------------------------------------------
# Image adapter

def _accept(prefix):
    return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC"

class BufrStubImageFile(ImageFile.StubImageFile):

    format = "BUFR"
    format_description = "BUFR"

    def _open(self):

        offset = self.fp.tell()

        if not _accept(self.fp.read(8)):
            raise SyntaxError("Not a BUFR file")

        self.fp.seek(offset)

        # make something up
        self.mode = "F"
        self.size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        return _handler

def _save(im, fp, filename):
    # check the handler object itself, not the string "_handler"
    if _handler is None or not hasattr(_handler, "save"):
        raise IOError("BUFR save handler not installed")
    _handler.save(im, fp, filename)


# --------------------------------------------------------------------
# Registry

Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept)
Image.register_save(BufrStubImageFile.format, _save)

Image.register_extension(BufrStubImageFile.format, ".bufr")

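The comments above describe installing an application-specific handler. As an
illustration only (the handler class below is hypothetical, not part of PIL),
a handler merely needs the open() and save() methods that BufrStubImageFile
and _save() call::

    from PIL import BufrStubImagePlugin

    class DummyBufrHandler(object):
        def open(self, im):
            # a real handler would decode the data and set im.mode,
            # im.size and im.tile here
            pass

        def save(self, im, fp, filename):
            raise NotImplementedError("BUFR writing is application-specific")

    BufrStubImagePlugin.register_handler(DummyBufrHandler())
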
@ -1,116 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# a class to read from a container file
|
||||
#
|
||||
# History:
|
||||
# 1995-06-18 fl Created
|
||||
# 1995-09-07 fl Added readline(), readlines()
|
||||
#
|
||||
# Copyright (c) 1997-2001 by Secret Labs AB
|
||||
# Copyright (c) 1995 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
##
|
||||
# A file object that provides read access to a part of an existing
|
||||
# file (for example a TAR file).
|
||||
|
||||
class ContainerIO:
|
||||
|
||||
##
|
||||
# Create file object.
|
||||
#
|
||||
# @param file Existing file.
|
||||
# @param offset Start of region, in bytes.
|
||||
# @param length Size of region, in bytes.
|
||||
|
||||
def __init__(self, file, offset, length):
|
||||
self.fh = file
|
||||
self.pos = 0
|
||||
self.offset = offset
|
||||
self.length = length
|
||||
self.fh.seek(offset)
|
||||
|
||||
##
|
||||
# Always false.
|
||||
|
||||
def isatty(self):
|
||||
return 0
|
||||
|
||||
##
|
||||
# Move file pointer.
|
||||
#
|
||||
# @param offset Offset in bytes.
|
||||
# @param mode Starting position. Use 0 for beginning of region, 1
|
||||
# for current offset, and 2 for end of region. You cannot move
|
||||
# the pointer outside the defined region.
|
||||
|
||||
def seek(self, offset, mode = 0):
|
||||
if mode == 1:
|
||||
self.pos = self.pos + offset
|
||||
elif mode == 2:
|
||||
self.pos = self.length + offset
|
||||
else:
|
||||
self.pos = offset
|
||||
# clamp
|
||||
self.pos = max(0, min(self.pos, self.length))
|
||||
self.fh.seek(self.offset + self.pos)
|
||||
|
||||
##
|
||||
# Get current file pointer.
|
||||
#
|
||||
# @return Offset from start of region, in bytes.
|
||||
|
||||
def tell(self):
|
||||
return self.pos
|
||||
|
||||
##
|
||||
# Read data.
|
||||
#
|
||||
# @def read(bytes=0)
|
||||
# @param bytes Number of bytes to read. If omitted or zero,
|
||||
# read until end of region.
|
||||
# @return An 8-bit string.
|
||||
|
||||
def read(self, n = 0):
|
||||
if n:
|
||||
n = min(n, self.length - self.pos)
|
||||
else:
|
||||
n = self.length - self.pos
|
||||
if not n: # EOF
|
||||
return ""
|
||||
self.pos = self.pos + n
|
||||
return self.fh.read(n)
|
||||
|
||||
##
|
||||
# Read a line of text.
|
||||
#
|
||||
# @return An 8-bit string.
|
||||
|
||||
def readline(self):
|
||||
s = ""
|
||||
while True:
|
||||
c = self.read(1)
|
||||
if not c:
|
||||
break
|
||||
s = s + c
|
||||
if c == "\n":
|
||||
break
|
||||
return s
|
||||
|
||||
##
|
||||
# Read multiple lines of text.
|
||||
#
|
||||
# @return A list of 8-bit strings.
|
||||
|
||||
def readlines(self):
|
||||
l = []
|
||||
while True:
|
||||
s = self.readline()
|
||||
if not s:
|
||||
break
|
||||
l.append(s)
|
||||
return l
|
|
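A short usage sketch for the class above (illustrative only; the file name is
made up): expose a 100-byte region of an existing file, with all offsets
relative to the start of that region::

    from PIL import ContainerIO

    fh = open("archive.bin", "rb")
    region = ContainerIO.ContainerIO(fh, 10, 100)  # offset=10, length=100

    region.seek(0)            # seek within the region (mode 0 = absolute)
    header = region.read(16)  # reads are clamped to the region
    rest = region.read()      # read the remainder of the region
    fh.close()
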
@ -1,86 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# Windows Cursor support for PIL
|
||||
#
|
||||
# notes:
|
||||
# uses BmpImagePlugin.py to read the bitmap data.
|
||||
#
|
||||
# history:
|
||||
# 96-05-27 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1996.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
__version__ = "0.1"
|
||||
|
||||
from PIL import Image, BmpImagePlugin, _binary
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
i8 = _binary.i8
|
||||
i16 = _binary.i16le
|
||||
i32 = _binary.i32le
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:4] == b"\0\0\2\0"
|
||||
|
||||
##
|
||||
# Image plugin for Windows Cursor files.
|
||||
|
||||
class CurImageFile(BmpImagePlugin.BmpImageFile):
|
||||
|
||||
format = "CUR"
|
||||
format_description = "Windows Cursor"
|
||||
|
||||
def _open(self):
|
||||
|
||||
offset = self.fp.tell()
|
||||
|
||||
# check magic
|
||||
s = self.fp.read(6)
|
||||
if not _accept(s):
|
||||
raise SyntaxError("not an CUR file")
|
||||
|
||||
# pick the largest cursor in the file
|
||||
m = b""
|
||||
for i in range(i16(s[4:])):
|
||||
s = self.fp.read(16)
|
||||
if not m:
|
||||
m = s
|
||||
elif i8(s[0]) > i8(m[0]) and i8(s[1]) > i8(m[1]):
|
||||
m = s
|
||||
#print "width", i8(s[0])
|
||||
#print "height", i8(s[1])
|
||||
#print "colors", i8(s[2])
|
||||
#print "reserved", i8(s[3])
|
||||
#print "hotspot x", i16(s[4:])
|
||||
#print "hotspot y", i16(s[6:])
|
||||
#print "bytes", i32(s[8:])
|
||||
#print "offset", i32(s[12:])
|
||||
|
||||
# load as bitmap
|
||||
self._bitmap(i32(m[12:]) + offset)
|
||||
|
||||
# patch up the bitmap height
|
||||
self.size = self.size[0], self.size[1]//2
|
||||
d, e, o, a = self.tile[0]
|
||||
self.tile[0] = d, (0,0)+self.size, o, a
|
||||
|
||||
return
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
Image.register_open("CUR", CurImageFile, _accept)
|
||||
|
||||
Image.register_extension("CUR", ".cur")
|
|
@ -1,77 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# DCX file handling
#
# DCX is a container file format defined by Intel, commonly used
# for fax applications. Each DCX file consists of a directory
# (a list of file offsets) followed by a set of (usually 1-bit)
# PCX files.
#
# History:
# 1995-09-09 fl Created
# 1996-03-20 fl Properly derived from PcxImageFile.
# 1998-07-15 fl Renamed offset attribute to avoid name clash
# 2002-07-30 fl Fixed file handling
#
# Copyright (c) 1997-98 by Secret Labs AB.
# Copyright (c) 1995-96 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#

__version__ = "0.2"

from PIL import Image, _binary

from PIL.PcxImagePlugin import PcxImageFile

MAGIC = 0x3ADE68B1 # QUIZ: what's this value, then?

i32 = _binary.i32le

def _accept(prefix):
    return i32(prefix) == MAGIC

##
# Image plugin for the Intel DCX format.

class DcxImageFile(PcxImageFile):

    format = "DCX"
    format_description = "Intel DCX"

    def _open(self):

        # Header
        s = self.fp.read(4)
        if i32(s) != MAGIC:
            raise SyntaxError("not a DCX file")

        # Component directory
        self._offset = []
        for i in range(1024):
            offset = i32(self.fp.read(4))
            if not offset:
                break
            self._offset.append(offset)

        self.__fp = self.fp
        self.seek(0)

    def seek(self, frame):
        if frame >= len(self._offset):
            raise EOFError("attempt to seek outside DCX directory")
        self.frame = frame
        self.fp = self.__fp
        self.fp.seek(self._offset[frame])
        PcxImageFile._open(self)

    def tell(self):
        return self.frame


Image.register_open("DCX", DcxImageFile, _accept)

Image.register_extension("DCX", ".dcx")

@ -1,446 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# EPS file handling
|
||||
#
|
||||
# History:
|
||||
# 1995-09-01 fl Created (0.1)
|
||||
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
|
||||
# 1996-08-22 fl Don't choke on floating point BoundingBox values
|
||||
# 1996-08-23 fl Handle files from Macintosh (0.3)
|
||||
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
|
||||
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
|
||||
# 2014-05-07 e Handling of EPS with binary preview and fixed resolution resizing
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1995-2003 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
__version__ = "0.5"
|
||||
|
||||
import re
|
||||
import io
|
||||
from PIL import Image, ImageFile, _binary
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
i32 = _binary.i32le
|
||||
o32 = _binary.o32le
|
||||
|
||||
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
|
||||
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
|
||||
|
||||
gs_windows_binary = None
|
||||
import sys
|
||||
if sys.platform.startswith('win'):
|
||||
import shutil
|
||||
if hasattr(shutil, 'which'):
|
||||
which = shutil.which
|
||||
else:
|
||||
# Python < 3.3
|
||||
import distutils.spawn
|
||||
which = distutils.spawn.find_executable
|
||||
for binary in ('gswin32c', 'gswin64c', 'gs'):
|
||||
if which(binary) is not None:
|
||||
gs_windows_binary = binary
|
||||
break
|
||||
else:
|
||||
gs_windows_binary = False
|
||||
|
||||
def has_ghostscript():
|
||||
if gs_windows_binary:
|
||||
return True
|
||||
if not sys.platform.startswith('win'):
|
||||
import subprocess
|
||||
try:
|
||||
gs = subprocess.Popen(['gs','--version'], stdout=subprocess.PIPE)
|
||||
gs.stdout.read()
|
||||
return True
|
||||
except OSError:
|
||||
# no ghostscript
|
||||
pass
|
||||
return False
|
||||
|
||||
|
||||
def Ghostscript(tile, size, fp, scale=1):
|
||||
"""Render an image using Ghostscript"""
|
||||
|
||||
# Unpack decoder tile
|
||||
decoder, tile, offset, data = tile[0]
|
||||
length, bbox = data
|
||||
|
||||
#Hack to support hi-res rendering
|
||||
scale = int(scale) or 1
|
||||
orig_size = size
|
||||
orig_bbox = bbox
|
||||
size = (size[0] * scale, size[1] * scale)
|
||||
# resolution is dependent on bbox and size
|
||||
res = ( float((72.0 * size[0]) / (bbox[2]-bbox[0])), float((72.0 * size[1]) / (bbox[3]-bbox[1])) )
|
||||
#print("Ghostscript", scale, size, orig_size, bbox, orig_bbox, res)
|
||||
|
||||
import tempfile, os, subprocess
|
||||
|
||||
out_fd, outfile = tempfile.mkstemp()
|
||||
os.close(out_fd)
|
||||
in_fd, infile = tempfile.mkstemp()
|
||||
os.close(in_fd)
|
||||
|
||||
# ignore length and offset!
|
||||
# ghostscript can read it
|
||||
# copy whole file to read in ghostscript
|
||||
with open(infile, 'wb') as f:
|
||||
# fetch length of fp
|
||||
fp.seek(0, 2)
|
||||
fsize = fp.tell()
|
||||
# ensure start position
|
||||
# go back
|
||||
fp.seek(0)
|
||||
lengthfile = fsize
|
||||
while lengthfile > 0:
|
||||
s = fp.read(min(lengthfile, 100*1024))
|
||||
if not s:
|
||||
break
|
||||
lengthfile -= len(s)
|
||||
f.write(s)
|
||||
|
||||
# Build ghostscript command
|
||||
command = ["gs",
|
||||
"-q", # quiet mode
|
||||
"-g%dx%d" % size, # set output geometry (pixels)
|
||||
"-r%fx%f" % res, # set input DPI (dots per inch)
|
||||
"-dNOPAUSE -dSAFER", # don't pause between pages, safe mode
|
||||
"-sDEVICE=ppmraw", # ppm driver
|
||||
"-sOutputFile=%s" % outfile, # output file
|
||||
"-c", "%d %d translate" % (-bbox[0], -bbox[1]),
|
||||
# adjust for image origin
|
||||
"-f", infile, # input file
|
||||
]
|
||||
|
||||
if gs_windows_binary is not None:
|
||||
if not gs_windows_binary:
|
||||
raise WindowsError('Unable to locate Ghostscript on paths')
|
||||
command[0] = gs_windows_binary
|
||||
|
||||
# push data through ghostscript
|
||||
try:
|
||||
gs = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
|
||||
gs.stdin.close()
|
||||
status = gs.wait()
|
||||
if status:
|
||||
raise IOError("gs failed (status %d)" % status)
|
||||
im = Image.core.open_ppm(outfile)
|
||||
finally:
|
||||
try:
|
||||
os.unlink(outfile)
|
||||
os.unlink(infile)
|
||||
except: pass
|
||||
|
||||
return im
|
||||
|
||||
|
||||
class PSFile:
|
||||
"""Wrapper that treats either CR or LF as end of line."""
|
||||
def __init__(self, fp):
|
||||
self.fp = fp
|
||||
self.char = None
|
||||
def __getattr__(self, id):
|
||||
v = getattr(self.fp, id)
|
||||
setattr(self, id, v)
|
||||
return v
|
||||
def seek(self, offset, whence=0):
|
||||
self.char = None
|
||||
self.fp.seek(offset, whence)
|
||||
def read(self, count):
|
||||
return self.fp.read(count).decode('latin-1')
|
||||
def readbinary(self, count):
|
||||
return self.fp.read(count)
|
||||
def tell(self):
|
||||
pos = self.fp.tell()
|
||||
if self.char:
|
||||
pos -= 1
|
||||
return pos
|
||||
def readline(self):
|
||||
s = b""
|
||||
if self.char:
|
||||
c = self.char
|
||||
self.char = None
|
||||
else:
|
||||
c = self.fp.read(1)
|
||||
while c not in b"\r\n":
|
||||
s = s + c
|
||||
c = self.fp.read(1)
|
||||
if c == b"\r":
|
||||
self.char = self.fp.read(1)
|
||||
if self.char == b"\n":
|
||||
self.char = None
|
||||
return s.decode('latin-1') + "\n"
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:4] == b"%!PS" or i32(prefix) == 0xC6D3D0C5
|
||||
|
||||
##
|
||||
# Image plugin for Encapsulated Postscript. This plugin supports only
|
||||
# a few variants of this format.
|
||||
|
||||
class EpsImageFile(ImageFile.ImageFile):
|
||||
"""EPS File Parser for the Python Imaging Library"""
|
||||
|
||||
format = "EPS"
|
||||
format_description = "Encapsulated Postscript"
|
||||
|
||||
def _open(self):
|
||||
|
||||
fp = PSFile(self.fp)
|
||||
|
||||
# FIX for: Some EPS file not handled correctly / issue #302
|
||||
# EPS can contain binary data
|
||||
# or start directly with latin coding
|
||||
# read header in both ways to handle both
|
||||
# file types
|
||||
# more info see http://partners.adobe.com/public/developer/en/ps/5002.EPSF_Spec.pdf
|
||||
|
||||
# for HEAD without binary preview
|
||||
s = fp.read(4)
|
||||
# for HEAD with binary preview
|
||||
fp.seek(0)
|
||||
sb = fp.readbinary(160)
|
||||
|
||||
if s[:4] == "%!PS":
|
||||
fp.seek(0, 2)
|
||||
length = fp.tell()
|
||||
offset = 0
|
||||
elif i32(sb[0:4]) == 0xC6D3D0C5:
|
||||
offset = i32(sb[4:8])
|
||||
length = i32(sb[8:12])
|
||||
else:
|
||||
raise SyntaxError("not an EPS file")
|
||||
|
||||
# go to offset - start of "%!PS"
|
||||
fp.seek(offset)
|
||||
|
||||
box = None
|
||||
|
||||
self.mode = "RGB"
|
||||
self.size = 1, 1 # FIXME: huh?
|
||||
|
||||
#
|
||||
# Load EPS header
|
||||
|
||||
s = fp.readline()
|
||||
|
||||
while s:
|
||||
|
||||
if len(s) > 255:
|
||||
raise SyntaxError("not an EPS file")
|
||||
|
||||
if s[-2:] == '\r\n':
|
||||
s = s[:-2]
|
||||
elif s[-1:] == '\n':
|
||||
s = s[:-1]
|
||||
|
||||
try:
|
||||
m = split.match(s)
|
||||
except re.error as v:
|
||||
raise SyntaxError("not an EPS file")
|
||||
|
||||
if m:
|
||||
k, v = m.group(1, 2)
|
||||
self.info[k] = v
|
||||
if k == "BoundingBox":
|
||||
try:
|
||||
# Note: The DSC spec says that BoundingBox
|
||||
# fields should be integers, but some drivers
|
||||
# put floating point values there anyway.
|
||||
box = [int(float(s)) for s in v.split()]
|
||||
self.size = box[2] - box[0], box[3] - box[1]
|
||||
self.tile = [("eps", (0,0) + self.size, offset,
|
||||
(length, box))]
|
||||
except:
|
||||
pass
|
||||
|
||||
else:
|
||||
|
||||
m = field.match(s)
|
||||
|
||||
if m:
|
||||
k = m.group(1)
|
||||
|
||||
if k == "EndComments":
|
||||
break
|
||||
if k[:8] == "PS-Adobe":
|
||||
self.info[k[:8]] = k[9:]
|
||||
else:
|
||||
self.info[k] = ""
|
||||
elif s[0:1] == '%':
|
||||
# handle non-DSC Postscript comments that some
|
||||
# tools mistakenly put in the Comments section
|
||||
pass
|
||||
else:
|
||||
raise IOError("bad EPS header")
|
||||
|
||||
s = fp.readline()
|
||||
|
||||
if s[:1] != "%":
|
||||
break
|
||||
|
||||
|
||||
#
|
||||
# Scan for an "ImageData" descriptor
|
||||
|
||||
while s[0] == "%":
|
||||
|
||||
if len(s) > 255:
|
||||
raise SyntaxError("not an EPS file")
|
||||
|
||||
if s[-2:] == '\r\n':
|
||||
s = s[:-2]
|
||||
elif s[-1:] == '\n':
|
||||
s = s[:-1]
|
||||
|
||||
if s[:11] == "%ImageData:":
|
||||
|
||||
[x, y, bi, mo, z3, z4, en, id] =\
|
||||
s[11:].split(None, 7)
|
||||
|
||||
x = int(x); y = int(y)
|
||||
|
||||
bi = int(bi)
|
||||
mo = int(mo)
|
||||
|
||||
en = int(en)
|
||||
|
||||
if en == 1:
|
||||
decoder = "eps_binary"
|
||||
elif en == 2:
|
||||
decoder = "eps_hex"
|
||||
else:
|
||||
break
|
||||
if bi != 8:
|
||||
break
|
||||
if mo == 1:
|
||||
self.mode = "L"
|
||||
elif mo == 2:
|
||||
self.mode = "LAB"
|
||||
elif mo == 3:
|
||||
self.mode = "RGB"
|
||||
else:
|
||||
break
|
||||
|
||||
if id[:1] == id[-1:] == '"':
|
||||
id = id[1:-1]
|
||||
|
||||
# Scan forward to the actual image data
|
||||
while True:
|
||||
s = fp.readline()
|
||||
if not s:
|
||||
break
|
||||
if s[:len(id)] == id:
|
||||
self.size = x, y
|
||||
self.tile2 = [(decoder,
|
||||
(0, 0, x, y),
|
||||
fp.tell(),
|
||||
0)]
|
||||
return
|
||||
|
||||
s = fp.readline()
|
||||
if not s:
|
||||
break
|
||||
|
||||
if not box:
|
||||
raise IOError("cannot determine EPS bounding box")
|
||||
|
||||
def load(self, scale=1):
|
||||
# Load EPS via Ghostscript
|
||||
if not self.tile:
|
||||
return
|
||||
self.im = Ghostscript(self.tile, self.size, self.fp, scale)
|
||||
self.mode = self.im.mode
|
||||
self.size = self.im.size
|
||||
self.tile = []
|
||||
|
||||
def load_seek(self,*args,**kwargs):
|
||||
# we can't incrementally load, so force ImageFile.parser to
|
||||
# use our custom load method by defining this method.
|
||||
pass
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
def _save(im, fp, filename, eps=1):
|
||||
"""EPS Writer for the Python Imaging Library."""
|
||||
|
||||
#
|
||||
# make sure image data is available
|
||||
im.load()
|
||||
|
||||
#
|
||||
# determine postscript image mode
|
||||
if im.mode == "L":
|
||||
operator = (8, 1, "image")
|
||||
elif im.mode == "RGB":
|
||||
operator = (8, 3, "false 3 colorimage")
|
||||
elif im.mode == "CMYK":
|
||||
operator = (8, 4, "false 4 colorimage")
|
||||
else:
|
||||
raise ValueError("image mode is not supported")
|
||||
|
||||
class NoCloseStream:
|
||||
def __init__(self, fp):
|
||||
self.fp = fp
|
||||
def __getattr__(self, name):
|
||||
return getattr(self.fp, name)
|
||||
def close(self):
|
||||
pass
|
||||
|
||||
base_fp = fp
|
||||
fp = NoCloseStream(fp)
|
||||
if sys.version_info[0] > 2:
|
||||
fp = io.TextIOWrapper(fp, encoding='latin-1')
|
||||
|
||||
if eps:
|
||||
#
|
||||
# write EPS header
|
||||
fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
|
||||
fp.write("%%Creator: PIL 0.1 EpsEncode\n")
|
||||
#fp.write("%%CreationDate: %s"...)
|
||||
fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
|
||||
fp.write("%%Pages: 1\n")
|
||||
fp.write("%%EndComments\n")
|
||||
fp.write("%%Page: 1 1\n")
|
||||
fp.write("%%ImageData: %d %d " % im.size)
|
||||
fp.write("%d %d 0 1 1 \"%s\"\n" % operator)
|
||||
|
||||
#
|
||||
# image header
|
||||
fp.write("gsave\n")
|
||||
fp.write("10 dict begin\n")
|
||||
fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
|
||||
fp.write("%d %d scale\n" % im.size)
|
||||
fp.write("%d %d 8\n" % im.size) # <= bits
|
||||
fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
|
||||
fp.write("{ currentfile buf readhexstring pop } bind\n")
|
||||
fp.write(operator[2] + "\n")
|
||||
fp.flush()
|
||||
|
||||
ImageFile._save(im, base_fp, [("eps", (0,0)+im.size, 0, None)])
|
||||
|
||||
fp.write("\n%%%%EndBinary\n")
|
||||
fp.write("grestore end\n")
|
||||
fp.flush()
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
|
||||
|
||||
Image.register_save(EpsImageFile.format, _save)
|
||||
|
||||
Image.register_extension(EpsImageFile.format, ".ps")
|
||||
Image.register_extension(EpsImageFile.format, ".eps")
|
||||
|
||||
Image.register_mime(EpsImageFile.format, "application/postscript")
|
|
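# Usage sketch (editor's illustration, not part of the original plugin).
# "figure.eps" is hypothetical; rendering goes through the Ghostscript()
# helper above, so a gs binary must be available on the PATH. The scale
# argument to load() multiplies the BoundingBox-derived size for hi-res
# rasterisation.
from PIL import Image

eps = Image.open("figure.eps")
eps.load(scale=2)          # render at twice the nominal resolution
eps.save("figure.png")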
@ -1,193 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# EXIF tags
|
||||
#
|
||||
# Copyright (c) 2003 by Secret Labs AB
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
##
|
||||
# This module provides constants and clear-text names for various
|
||||
# well-known EXIF tags.
|
||||
##
|
||||
|
||||
##
|
||||
# Maps EXIF tags to tag names.
|
||||
|
||||
TAGS = {
|
||||
|
||||
# possibly incomplete
|
||||
0x00fe: "NewSubfileType",
|
||||
0x00ff: "SubfileType",
|
||||
0x0100: "ImageWidth",
|
||||
0x0101: "ImageLength",
|
||||
0x0102: "BitsPerSample",
|
||||
0x0103: "Compression",
|
||||
0x0106: "PhotometricInterpretation",
|
||||
0x0107: "Threshholding",
|
||||
0x0108: "CellWidth",
|
||||
0x0109: "CellLenght",
|
||||
0x010a: "FillOrder",
|
||||
0x010d: "DocumentName",
|
||||
0x011d: "PageName",
|
||||
0x010e: "ImageDescription",
|
||||
0x010f: "Make",
|
||||
0x0110: "Model",
|
||||
0x0111: "StripOffsets",
|
||||
0x0112: "Orientation",
|
||||
0x0115: "SamplesPerPixel",
|
||||
0x0116: "RowsPerStrip",
|
||||
0x0117: "StripByteConunts",
|
||||
0x0118: "MinSampleValue",
|
||||
0x0119: "MaxSampleValue",
|
||||
0x011a: "XResolution",
|
||||
0x011b: "YResolution",
|
||||
0x011c: "PlanarConfiguration",
|
||||
0x0120: "FreeOffsets",
|
||||
0x0121: "FreeByteCounts",
|
||||
0x0122: "GrayResponseUnit",
|
||||
0x0123: "GrayResponseCurve",
|
||||
0x0128: "ResolutionUnit",
|
||||
0x012d: "TransferFunction",
|
||||
0x0131: "Software",
|
||||
0x0132: "DateTime",
|
||||
0x013b: "Artist",
|
||||
0x013c: "HostComputer",
|
||||
0x013e: "WhitePoint",
|
||||
0x013f: "PrimaryChromaticities",
|
||||
0x0140: "ColorMap",
|
||||
0x0152: "ExtraSamples",
|
||||
0x0201: "JpegIFOffset",
|
||||
0x0202: "JpegIFByteCount",
|
||||
0x0211: "YCbCrCoefficients",
|
||||
0x0212: "YCbCrSubSampling",
|
||||
0x0213: "YCbCrPositioning",
|
||||
0x0214: "ReferenceBlackWhite",
|
||||
0x1000: "RelatedImageFileFormat",
|
||||
0x1001: "RelatedImageLength", # FIXME / Dictionary contains duplicate keys
|
||||
0x1001: "RelatedImageWidth", # FIXME \ Dictionary contains duplicate keys
|
||||
0x828d: "CFARepeatPatternDim",
|
||||
0x828e: "CFAPattern",
|
||||
0x828f: "BatteryLevel",
|
||||
0x8298: "Copyright",
|
||||
0x829a: "ExposureTime",
|
||||
0x829d: "FNumber",
|
||||
0x8769: "ExifOffset",
|
||||
0x8773: "InterColorProfile",
|
||||
0x8822: "ExposureProgram",
|
||||
0x8824: "SpectralSensitivity",
|
||||
0x8825: "GPSInfo",
|
||||
0x8827: "ISOSpeedRatings",
|
||||
0x8828: "OECF",
|
||||
0x8829: "Interlace",
|
||||
0x882a: "TimeZoneOffset",
|
||||
0x882b: "SelfTimerMode",
|
||||
0x9000: "ExifVersion",
|
||||
0x9003: "DateTimeOriginal",
|
||||
0x9004: "DateTimeDigitized",
|
||||
0x9101: "ComponentsConfiguration",
|
||||
0x9102: "CompressedBitsPerPixel",
|
||||
0x9201: "ShutterSpeedValue",
|
||||
0x9202: "ApertureValue",
|
||||
0x9203: "BrightnessValue",
|
||||
0x9204: "ExposureBiasValue",
|
||||
0x9205: "MaxApertureValue",
|
||||
0x9206: "SubjectDistance",
|
||||
0x9207: "MeteringMode",
|
||||
0x9208: "LightSource",
|
||||
0x9209: "Flash",
|
||||
0x920a: "FocalLength",
|
||||
0x920b: "FlashEnergy",
|
||||
0x920c: "SpatialFrequencyResponse",
|
||||
0x920d: "Noise",
|
||||
0x9211: "ImageNumber",
|
||||
0x9212: "SecurityClassification",
|
||||
0x9213: "ImageHistory",
|
||||
0x9214: "SubjectLocation",
|
||||
0x9215: "ExposureIndex",
|
||||
0x9216: "TIFF/EPStandardID",
|
||||
0x927c: "MakerNote",
|
||||
0x9286: "UserComment",
|
||||
0x9290: "SubsecTime",
|
||||
0x9291: "SubsecTimeOriginal",
|
||||
0x9292: "SubsecTimeDigitized",
|
||||
0xa000: "FlashPixVersion",
|
||||
0xa001: "ColorSpace",
|
||||
0xa002: "ExifImageWidth",
|
||||
0xa003: "ExifImageHeight",
|
||||
0xa004: "RelatedSoundFile",
|
||||
0xa005: "ExifInteroperabilityOffset",
|
||||
0xa20b: "FlashEnergy",
|
||||
0xa20c: "SpatialFrequencyResponse",
|
||||
0xa20e: "FocalPlaneXResolution",
|
||||
0xa20f: "FocalPlaneYResolution",
|
||||
0xa210: "FocalPlaneResolutionUnit",
|
||||
0xa214: "SubjectLocation",
|
||||
0xa215: "ExposureIndex",
|
||||
0xa217: "SensingMethod",
|
||||
0xa300: "FileSource",
|
||||
0xa301: "SceneType",
|
||||
0xa302: "CFAPattern",
|
||||
0xa401: "CustomRendered",
|
||||
0xa402: "ExposureMode",
|
||||
0xa403: "WhiteBalance",
|
||||
0xa404: "DigitalZoomRatio",
|
||||
0xa405: "FocalLengthIn35mmFilm",
|
||||
0xa406: "SceneCaptureType",
|
||||
0xa407: "GainControl",
|
||||
0xa408: "Contrast",
|
||||
0xa409: "Saturation",
|
||||
0xa40a: "Sharpness",
|
||||
0xa40b: "DeviceSettingDescription",
|
||||
0xa40c: "SubjectDistanceRange",
|
||||
0xa420: "ImageUniqueID",
|
||||
0xa430: "CameraOwnerName",
|
||||
0xa431: "BodySerialNumber",
|
||||
0xa432: "LensSpecification",
|
||||
0xa433: "LensMake",
|
||||
0xa434: "LensModel",
|
||||
0xa435: "LensSerialNumber",
|
||||
0xa500: "Gamma",
|
||||
|
||||
}
|
||||
|
||||
##
|
||||
# Maps EXIF GPS tags to tag names.
|
||||
|
||||
GPSTAGS = {
|
||||
0: "GPSVersionID",
|
||||
1: "GPSLatitudeRef",
|
||||
2: "GPSLatitude",
|
||||
3: "GPSLongitudeRef",
|
||||
4: "GPSLongitude",
|
||||
5: "GPSAltitudeRef",
|
||||
6: "GPSAltitude",
|
||||
7: "GPSTimeStamp",
|
||||
8: "GPSSatellites",
|
||||
9: "GPSStatus",
|
||||
10: "GPSMeasureMode",
|
||||
11: "GPSDOP",
|
||||
12: "GPSSpeedRef",
|
||||
13: "GPSSpeed",
|
||||
14: "GPSTrackRef",
|
||||
15: "GPSTrack",
|
||||
16: "GPSImgDirectionRef",
|
||||
17: "GPSImgDirection",
|
||||
18: "GPSMapDatum",
|
||||
19: "GPSDestLatitudeRef",
|
||||
20: "GPSDestLatitude",
|
||||
21: "GPSDestLongitudeRef",
|
||||
22: "GPSDestLongitude",
|
||||
23: "GPSDestBearingRef",
|
||||
24: "GPSDestBearing",
|
||||
25: "GPSDestDistanceRef",
|
||||
26: "GPSDestDistance",
|
||||
27: "GPSProcessingMethod",
|
||||
28: "GPSAreaInformation",
|
||||
29: "GPSDateStamp",
|
||||
30: "GPSDifferential",
|
||||
31: "GPSHPositioningError",
|
||||
}
|
|
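# Usage sketch (editor's illustration, not part of the original module).
# "photo.jpg" is hypothetical, and _getexif() is assumed to be available on
# the JPEG plugin; TAGS/GPSTAGS only translate numeric tag ids to names.
from PIL import Image
from PIL.ExifTags import TAGS, GPSTAGS

exif_raw = Image.open("photo.jpg")._getexif() or {}
exif = dict((TAGS.get(tag, tag), value) for tag, value in exif_raw.items())
gps = dict((GPSTAGS.get(t, t), v) for t, v in exif.get("GPSInfo", {}).items())
print(exif.get("DateTimeOriginal"), gps.get("GPSLatitude"))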
@ -1,73 +0,0 @@
|
|||
#
# The Python Imaging Library
# $Id$
#
# FITS stub adapter
#
# Copyright (c) 1998-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#

from PIL import Image, ImageFile

_handler = None

##
# Install application-specific FITS image handler.
#
# @param handler Handler object.

def register_handler(handler):
    global _handler
    _handler = handler

# --------------------------------------------------------------------
# Image adapter

def _accept(prefix):
    return prefix[:6] == b"SIMPLE"

class FITSStubImageFile(ImageFile.StubImageFile):

    format = "FITS"
    format_description = "FITS"

    def _open(self):

        offset = self.fp.tell()

        if not _accept(self.fp.read(6)):
            raise SyntaxError("Not a FITS file")

        # FIXME: add more sanity checks here; mandatory header items
        # include SIMPLE, BITPIX, NAXIS, etc.

        self.fp.seek(offset)

        # make something up
        self.mode = "F"
        self.size = 1, 1

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        return _handler


def _save(im, fp, filename):
    if _handler is None or not hasattr(_handler, "save"):
        raise IOError("FITS save handler not installed")
    _handler.save(im, fp, filename)


# --------------------------------------------------------------------
# Registry

Image.register_open(FITSStubImageFile.format, FITSStubImageFile, _accept)
Image.register_save(FITSStubImageFile.format, _save)

Image.register_extension(FITSStubImageFile.format, ".fit")
Image.register_extension(FITSStubImageFile.format, ".fits")
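# Handler sketch (editor's illustration, not part of the original module).
# The stub above only identifies FITS files; decoding is delegated to an
# application handler. Based on the calls in _open() and ImageFile.StubImageFile,
# a handler is assumed to expose open(im) and load(im); the details below are
# purely illustrative.
from PIL import FitsStubImagePlugin

class MyFITSHandler:
    def open(self, im):
        # called when the file is identified; may refine im.size / im.mode
        pass

    def load(self, im):
        # called when pixel data is actually needed; decode the FITS payload here
        raise NotImplementedError("FITS decoding left to the application")

FitsStubImagePlugin.register_handler(MyFITSHandler())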
@ -1,141 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# FLI/FLC file handling.
|
||||
#
|
||||
# History:
|
||||
# 95-09-01 fl Created
|
||||
# 97-01-03 fl Fixed parser, setup decoder tile
|
||||
# 98-07-15 fl Renamed offset attribute to avoid name clash
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997-98.
|
||||
# Copyright (c) Fredrik Lundh 1995-97.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
__version__ = "0.2"
|
||||
|
||||
from PIL import Image, ImageFile, ImagePalette, _binary
|
||||
|
||||
i8 = _binary.i8
|
||||
i16 = _binary.i16le
|
||||
i32 = _binary.i32le
|
||||
o8 = _binary.o8
|
||||
|
||||
#
|
||||
# decoder
|
||||
|
||||
def _accept(prefix):
|
||||
return i16(prefix[4:6]) in [0xAF11, 0xAF12]
|
||||
|
||||
##
|
||||
# Image plugin for the FLI/FLC animation format. Use the <b>seek</b>
|
||||
# method to load individual frames.
|
||||
|
||||
class FliImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "FLI"
|
||||
format_description = "Autodesk FLI/FLC Animation"
|
||||
|
||||
def _open(self):
|
||||
|
||||
# HEAD
|
||||
s = self.fp.read(128)
|
||||
magic = i16(s[4:6])
|
||||
if not (magic in [0xAF11, 0xAF12] and
|
||||
i16(s[14:16]) in [0, 3] and # flags
|
||||
s[20:22] == b"\x00\x00"): # reserved
|
||||
raise SyntaxError("not an FLI/FLC file")
|
||||
|
||||
# image characteristics
|
||||
self.mode = "P"
|
||||
self.size = i16(s[8:10]), i16(s[10:12])
|
||||
|
||||
# animation speed
|
||||
duration = i32(s[16:20])
|
||||
if magic == 0xAF11:
|
||||
duration = (duration * 1000) / 70
|
||||
self.info["duration"] = duration
|
||||
|
||||
# look for palette
|
||||
palette = [(a,a,a) for a in range(256)]
|
||||
|
||||
s = self.fp.read(16)
|
||||
|
||||
self.__offset = 128
|
||||
|
||||
if i16(s[4:6]) == 0xF100:
|
||||
# prefix chunk; ignore it
|
||||
self.__offset = self.__offset + i32(s)
|
||||
s = self.fp.read(16)
|
||||
|
||||
if i16(s[4:6]) == 0xF1FA:
|
||||
# look for palette chunk
|
||||
s = self.fp.read(6)
|
||||
if i16(s[4:6]) == 11:
|
||||
self._palette(palette, 2)
|
||||
elif i16(s[4:6]) == 4:
|
||||
self._palette(palette, 0)
|
||||
|
||||
palette = [o8(r)+o8(g)+o8(b) for (r,g,b) in palette]
|
||||
self.palette = ImagePalette.raw("RGB", b"".join(palette))
|
||||
|
||||
# set things up to decode first frame
|
||||
self.frame = -1
|
||||
self.__fp = self.fp
|
||||
|
||||
self.seek(0)
|
||||
|
||||
def _palette(self, palette, shift):
|
||||
# load palette
|
||||
|
||||
i = 0
|
||||
for e in range(i16(self.fp.read(2))):
|
||||
s = self.fp.read(2)
|
||||
i = i + i8(s[0])
|
||||
n = i8(s[1])
|
||||
if n == 0:
|
||||
n = 256
|
||||
s = self.fp.read(n * 3)
|
||||
for n in range(0, len(s), 3):
|
||||
r = i8(s[n]) << shift
|
||||
g = i8(s[n+1]) << shift
|
||||
b = i8(s[n+2]) << shift
|
||||
palette[i] = (r, g, b)
|
||||
i += 1
|
||||
|
||||
def seek(self, frame):
|
||||
|
||||
if frame != self.frame + 1:
|
||||
raise ValueError("cannot seek to frame %d" % frame)
|
||||
self.frame = frame
|
||||
|
||||
# move to next frame
|
||||
self.fp = self.__fp
|
||||
self.fp.seek(self.__offset)
|
||||
|
||||
s = self.fp.read(4)
|
||||
if not s:
|
||||
raise EOFError
|
||||
|
||||
framesize = i32(s)
|
||||
|
||||
self.decodermaxblock = framesize
|
||||
self.tile = [("fli", (0,0)+self.size, self.__offset, None)]
|
||||
|
||||
self.__offset = self.__offset + framesize
|
||||
|
||||
def tell(self):
|
||||
|
||||
return self.frame
|
||||
|
||||
#
|
||||
# registry
|
||||
|
||||
Image.register_open("FLI", FliImageFile, _accept)
|
||||
|
||||
Image.register_extension("FLI", ".fli")
|
||||
Image.register_extension("FLI", ".flc")
|
|
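# Usage sketch (editor's illustration, not part of the original plugin).
# "intro.fli" is hypothetical; frames must be visited in order (seek() above
# only accepts frame + 1), and EOFError is raised once the chunk stream ends.
from PIL import Image

fli = Image.open("intro.fli")
count = 1                       # frame 0 is already loaded by _open()
try:
    while True:
        fli.seek(count)
        count += 1
except EOFError:
    pass
print(count, "frames,", fli.info["duration"], "ms per frame")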
@ -1,146 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# base class for raster font file parsers
|
||||
#
|
||||
# history:
|
||||
# 1997-06-05 fl created
|
||||
# 1997-08-19 fl restrict image width
|
||||
#
|
||||
# Copyright (c) 1997-1998 by Secret Labs AB
|
||||
# Copyright (c) 1997-1998 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
import os
|
||||
from PIL import Image, _binary
|
||||
|
||||
import marshal
|
||||
|
||||
try:
|
||||
import zlib
|
||||
except ImportError:
|
||||
zlib = None
|
||||
|
||||
WIDTH = 800
|
||||
|
||||
def puti16(fp, values):
|
||||
# write network order (big-endian) 16-bit sequence
|
||||
for v in values:
|
||||
if v < 0:
|
||||
v += 65536
|
||||
fp.write(_binary.o16be(v))
|
||||
|
||||
##
|
||||
# Base class for raster font file handlers.
|
||||
|
||||
class FontFile:
|
||||
|
||||
bitmap = None
|
||||
|
||||
def __init__(self):
|
||||
|
||||
self.info = {}
|
||||
self.glyph = [None] * 256
|
||||
|
||||
def __getitem__(self, ix):
|
||||
return self.glyph[ix]
|
||||
|
||||
def compile(self):
|
||||
"Create metrics and bitmap"
|
||||
|
||||
if self.bitmap:
|
||||
return
|
||||
|
||||
# create bitmap large enough to hold all data
|
||||
h = w = maxwidth = 0
|
||||
lines = 1
|
||||
for glyph in self:
|
||||
if glyph:
|
||||
d, dst, src, im = glyph
|
||||
h = max(h, src[3] - src[1])
|
||||
w = w + (src[2] - src[0])
|
||||
if w > WIDTH:
|
||||
lines += 1
|
||||
w = (src[2] - src[0])
|
||||
maxwidth = max(maxwidth, w)
|
||||
|
||||
xsize = maxwidth
|
||||
ysize = lines * h
|
||||
|
||||
if xsize == 0 and ysize == 0:
|
||||
return ""
|
||||
|
||||
self.ysize = h
|
||||
|
||||
# paste glyphs into bitmap
|
||||
self.bitmap = Image.new("1", (xsize, ysize))
|
||||
self.metrics = [None] * 256
|
||||
x = y = 0
|
||||
for i in range(256):
|
||||
glyph = self[i]
|
||||
if glyph:
|
||||
d, dst, src, im = glyph
|
||||
xx, yy = src[2] - src[0], src[3] - src[1]
|
||||
x0, y0 = x, y
|
||||
x = x + xx
|
||||
if x > WIDTH:
|
||||
x, y = 0, y + h
|
||||
x0, y0 = x, y
|
||||
x = xx
|
||||
s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
|
||||
self.bitmap.paste(im.crop(src), s)
|
||||
# print chr(i), dst, s
|
||||
self.metrics[i] = d, dst, s
|
||||
|
||||
|
||||
def save1(self, filename):
|
||||
"Save font in version 1 format"
|
||||
|
||||
self.compile()
|
||||
|
||||
# font data
|
||||
self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")
|
||||
|
||||
# font metrics
|
||||
fp = open(os.path.splitext(filename)[0] + ".pil", "wb")
|
||||
fp.write(b"PILfont\n")
|
||||
fp.write((";;;;;;%d;\n" % self.ysize).encode('ascii')) # HACK!!!
|
||||
fp.write(b"DATA\n")
|
||||
for id in range(256):
|
||||
m = self.metrics[id]
|
||||
if not m:
|
||||
puti16(fp, [0] * 10)
|
||||
else:
|
||||
puti16(fp, m[0] + m[1] + m[2])
|
||||
fp.close()
|
||||
|
||||
|
||||
def save2(self, filename):
|
||||
"Save font in version 2 format"
|
||||
|
||||
# THIS IS WORK IN PROGRESS
|
||||
|
||||
self.compile()
|
||||
|
||||
data = marshal.dumps((self.metrics, self.info))
|
||||
|
||||
if zlib:
|
||||
data = b"z" + zlib.compress(data, 9)
|
||||
else:
|
||||
data = b"u" + data
|
||||
|
||||
fp = open(os.path.splitext(filename)[0] + ".pil", "wb")
|
||||
|
||||
fp.write(b"PILfont2\n" + self.name + "\n" + "DATA\n")
|
||||
|
||||
fp.write(data)
|
||||
|
||||
self.bitmap.save(fp, "PNG")
|
||||
|
||||
fp.close()
|
||||
|
||||
|
||||
save = save1 # for now
|
|
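# Usage sketch (editor's illustration, not part of this module). FontFile is
# only a base class; a concrete parser such as PIL.BdfFontFile is assumed here,
# and "courB12.bdf" is a hypothetical BDF font file.
from PIL import BdfFontFile

with open("courB12.bdf", "rb") as fp:
    font = BdfFontFile.BdfFontFile(fp)
font.save("courB12")   # save == save1 above: writes courB12.pil + courB12.pbm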
@ -1,224 +0,0 @@
|
|||
#
|
||||
# THIS IS WORK IN PROGRESS
|
||||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# FlashPix support for PIL
|
||||
#
|
||||
# History:
|
||||
# 97-01-25 fl Created (reads uncompressed RGB images only)
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1997.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
__version__ = "0.1"
|
||||
|
||||
|
||||
from PIL import Image, ImageFile
|
||||
from PIL.OleFileIO import *
|
||||
|
||||
|
||||
# we map from colour field tuples to (mode, rawmode) descriptors
|
||||
MODES = {
|
||||
# opacity
|
||||
(0x00007ffe): ("A", "L"),
|
||||
# monochrome
|
||||
(0x00010000,): ("L", "L"),
|
||||
(0x00018000, 0x00017ffe): ("RGBA", "LA"),
|
||||
# photo YCC
|
||||
(0x00020000, 0x00020001, 0x00020002): ("RGB", "YCC;P"),
|
||||
(0x00028000, 0x00028001, 0x00028002, 0x00027ffe): ("RGBA", "YCCA;P"),
|
||||
# standard RGB (NIFRGB)
|
||||
(0x00030000, 0x00030001, 0x00030002): ("RGB","RGB"),
|
||||
(0x00038000, 0x00038001, 0x00038002, 0x00037ffe): ("RGBA","RGBA"),
|
||||
}
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:8] == MAGIC
|
||||
|
||||
##
|
||||
# Image plugin for the FlashPix images.
|
||||
|
||||
class FpxImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "FPX"
|
||||
format_description = "FlashPix"
|
||||
|
||||
def _open(self):
|
||||
#
|
||||
# read the OLE directory and see if this is a likely
|
||||
# to be a FlashPix file
|
||||
|
||||
try:
|
||||
self.ole = OleFileIO(self.fp)
|
||||
except IOError:
|
||||
raise SyntaxError("not an FPX file; invalid OLE file")
|
||||
|
||||
if self.ole.root.clsid != "56616700-C154-11CE-8553-00AA00A1F95B":
|
||||
raise SyntaxError("not an FPX file; bad root CLSID")
|
||||
|
||||
self._open_index(1)
|
||||
|
||||
def _open_index(self, index = 1):
|
||||
#
|
||||
# get the Image Contents Property Set
|
||||
|
||||
prop = self.ole.getproperties([
|
||||
"Data Object Store %06d" % index,
|
||||
"\005Image Contents"
|
||||
])
|
||||
|
||||
# size (highest resolution)
|
||||
|
||||
self.size = prop[0x1000002], prop[0x1000003]
|
||||
|
||||
size = max(self.size)
|
||||
i = 1
|
||||
while size > 64:
|
||||
size = size / 2
|
||||
i += 1
|
||||
self.maxid = i - 1
|
||||
|
||||
# mode. instead of using a single field for this, flashpix
|
||||
# requires you to specify the mode for each channel in each
|
||||
# resolution subimage, and leaves it to the decoder to make
|
||||
# sure that they all match. for now, we'll cheat and assume
|
||||
# that this is always the case.
|
||||
|
||||
id = self.maxid << 16
|
||||
|
||||
s = prop[0x2000002|id]
|
||||
|
||||
colors = []
|
||||
for i in range(i32(s, 4)):
|
||||
# note: for now, we ignore the "uncalibrated" flag
|
||||
colors.append(i32(s, 8+i*4) & 0x7fffffff)
|
||||
|
||||
self.mode, self.rawmode = MODES[tuple(colors)]
|
||||
|
||||
# load JPEG tables, if any
|
||||
self.jpeg = {}
|
||||
for i in range(256):
|
||||
id = 0x3000001|(i << 16)
|
||||
if id in prop:
|
||||
self.jpeg[i] = prop[id]
|
||||
|
||||
# print len(self.jpeg), "tables loaded"
|
||||
|
||||
self._open_subimage(1, self.maxid)
|
||||
|
||||
def _open_subimage(self, index = 1, subimage = 0):
|
||||
#
|
||||
# setup tile descriptors for a given subimage
|
||||
|
||||
stream = [
|
||||
"Data Object Store %06d" % index,
|
||||
"Resolution %04d" % subimage,
|
||||
"Subimage 0000 Header"
|
||||
]
|
||||
|
||||
fp = self.ole.openstream(stream)
|
||||
|
||||
# skip prefix
|
||||
p = fp.read(28)
|
||||
|
||||
# header stream
|
||||
s = fp.read(36)
|
||||
|
||||
size = i32(s, 4), i32(s, 8)
|
||||
tilecount = i32(s, 12)
|
||||
tilesize = i32(s, 16), i32(s, 20)
|
||||
channels = i32(s, 24)
|
||||
offset = i32(s, 28)
|
||||
length = i32(s, 32)
|
||||
|
||||
# print size, self.mode, self.rawmode
|
||||
|
||||
if size != self.size:
|
||||
raise IOError("subimage mismatch")
|
||||
|
||||
# get tile descriptors
|
||||
fp.seek(28 + offset)
|
||||
s = fp.read(i32(s, 12) * length)
|
||||
|
||||
x = y = 0
|
||||
xsize, ysize = size
|
||||
xtile, ytile = tilesize
|
||||
self.tile = []
|
||||
|
||||
for i in range(0, len(s), length):
|
||||
|
||||
compression = i32(s, i+8)
|
||||
|
||||
if compression == 0:
|
||||
self.tile.append(("raw", (x,y,x+xtile,y+ytile),
|
||||
i32(s, i) + 28, (self.rawmode)))
|
||||
|
||||
elif compression == 1:
|
||||
|
||||
# FIXME: the fill decoder is not implemented
|
||||
self.tile.append(("fill", (x,y,x+xtile,y+ytile),
|
||||
i32(s, i) + 28, (self.rawmode, s[12:16])))
|
||||
|
||||
elif compression == 2:
|
||||
|
||||
internal_color_conversion = i8(s[14])
|
||||
jpeg_tables = i8(s[15])
|
||||
rawmode = self.rawmode
|
||||
|
||||
if internal_color_conversion:
|
||||
# The image is stored as usual (usually YCbCr).
|
||||
if rawmode == "RGBA":
|
||||
# For "RGBA", data is stored as YCbCrA based on
|
||||
# negative RGB. The following trick works around
|
||||
# this problem :
|
||||
jpegmode, rawmode = "YCbCrK", "CMYK"
|
||||
else:
|
||||
jpegmode = None # let the decoder decide
|
||||
|
||||
else:
|
||||
# The image is stored as defined by rawmode
|
||||
jpegmode = rawmode
|
||||
|
||||
self.tile.append(("jpeg", (x,y,x+xtile,y+ytile),
|
||||
i32(s, i) + 28, (rawmode, jpegmode)))
|
||||
|
||||
# FIXME: jpeg tables are tile dependent; the prefix
|
||||
# data must be placed in the tile descriptor itself!
|
||||
|
||||
if jpeg_tables:
|
||||
self.tile_prefix = self.jpeg[jpeg_tables]
|
||||
|
||||
else:
|
||||
raise IOError("unknown/invalid compression")
|
||||
|
||||
x = x + xtile
|
||||
if x >= xsize:
|
||||
x, y = 0, y + ytile
|
||||
if y >= ysize:
|
||||
break # isn't really required
|
||||
|
||||
self.stream = stream
|
||||
self.fp = None
|
||||
|
||||
def load(self):
|
||||
|
||||
if not self.fp:
|
||||
self.fp = self.ole.openstream(self.stream[:2] + ["Subimage 0000 Data"])
|
||||
|
||||
ImageFile.ImageFile.load(self)
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
Image.register_open("FPX", FpxImageFile, _accept)
|
||||
|
||||
Image.register_extension("FPX", ".fpx")
|
|
@ -1,69 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# load a GIMP brush file
|
||||
#
|
||||
# History:
|
||||
# 96-03-14 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1996.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image, ImageFile, _binary
|
||||
|
||||
i32 = _binary.i32be
|
||||
|
||||
def _accept(prefix):
|
||||
return i32(prefix) >= 20 and i32(prefix[4:8]) == 1
|
||||
|
||||
##
|
||||
# Image plugin for the GIMP brush format.
|
||||
|
||||
class GbrImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "GBR"
|
||||
format_description = "GIMP brush file"
|
||||
|
||||
def _open(self):
|
||||
|
||||
header_size = i32(self.fp.read(4))
|
||||
version = i32(self.fp.read(4))
|
||||
if header_size < 20 or version != 1:
|
||||
raise SyntaxError("not a GIMP brush")
|
||||
|
||||
width = i32(self.fp.read(4))
|
||||
height = i32(self.fp.read(4))
|
||||
bytes = i32(self.fp.read(4))
|
||||
if width <= 0 or height <= 0 or bytes != 1:
|
||||
raise SyntaxError("not a GIMP brush")
|
||||
|
||||
comment = self.fp.read(header_size - 20)[:-1]
|
||||
|
||||
self.mode = "L"
|
||||
self.size = width, height
|
||||
|
||||
self.info["comment"] = comment
|
||||
|
||||
# Since the brush is so small, we read the data immediately
|
||||
self.data = self.fp.read(width * height)
|
||||
|
||||
def load(self):
|
||||
|
||||
if not self.data:
|
||||
return
|
||||
|
||||
# create an image out of the brush data block
|
||||
self.im = Image.core.new(self.mode, self.size)
|
||||
self.im.frombytes(self.data)
|
||||
self.data = b""
|
||||
|
||||
#
|
||||
# registry
|
||||
|
||||
Image.register_open("GBR", GbrImageFile, _accept)
|
||||
|
||||
Image.register_extension("GBR", ".gbr")
|
|
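# Usage sketch (editor's illustration, not part of the original plugin).
# "round.gbr" is a hypothetical GIMP brush; the brush mask comes back as a
# greyscale ("L") image and the embedded name lands in info["comment"].
from PIL import Image

brush = Image.open("round.gbr")
print(brush.size, brush.info.get("comment"))
brush.save("round.png")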
@ -1,90 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# GD file handling
|
||||
#
|
||||
# History:
|
||||
# 1996-04-12 fl Created
|
||||
#
|
||||
# Copyright (c) 1997 by Secret Labs AB.
|
||||
# Copyright (c) 1996 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
# NOTE: This format cannot be automatically recognized, so the
|
||||
# class is not registered for use with Image.open(). To open a
|
||||
# gd file, use the GdImageFile.open() function instead.
|
||||
|
||||
# THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This
|
||||
# implementation is provided for convenience and demonstration
|
||||
# purposes only.
|
||||
|
||||
|
||||
__version__ = "0.1"
|
||||
|
||||
from PIL import ImageFile, ImagePalette, _binary
|
||||
from PIL._util import isPath
|
||||
|
||||
try:
|
||||
import builtins
|
||||
except ImportError:
|
||||
import __builtin__
|
||||
builtins = __builtin__
|
||||
|
||||
i16 = _binary.i16be
|
||||
|
||||
##
|
||||
# Image plugin for the GD uncompressed format. Note that this format
|
||||
# is not supported by the standard <b>Image.open</b> function. To use
|
||||
# this plugin, you have to import the <b>GdImageFile</b> module and
|
||||
# use the <b>GdImageFile.open</b> function.
|
||||
|
||||
class GdImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "GD"
|
||||
format_description = "GD uncompressed images"
|
||||
|
||||
def _open(self):
|
||||
|
||||
# Header
|
||||
s = self.fp.read(775)
|
||||
|
||||
self.mode = "L" # FIXME: "P"
|
||||
self.size = i16(s[0:2]), i16(s[2:4])
|
||||
|
||||
# transparency index
|
||||
tindex = i16(s[5:7])
|
||||
if tindex < 256:
|
||||
self.info["transparent"] = tindex
|
||||
|
||||
self.palette = ImagePalette.raw("RGB", s[7:])
|
||||
|
||||
self.tile = [("raw", (0,0)+self.size, 775, ("L", 0, -1))]
|
||||
|
||||
##
|
||||
# Load texture from a GD image file.
|
||||
#
|
||||
# @param filename GD file name, or an opened file handle.
|
||||
# @param mode Optional mode. In this version, if the mode argument
|
||||
# is given, it must be "r".
|
||||
# @return An image instance.
|
||||
# @exception IOError If the image could not be read.
|
||||
|
||||
def open(fp, mode = "r"):
|
||||
|
||||
if mode != "r":
|
||||
raise ValueError("bad mode")
|
||||
|
||||
if isPath(fp):
|
||||
filename = fp
|
||||
fp = builtins.open(fp, "rb")
|
||||
else:
|
||||
filename = ""
|
||||
|
||||
try:
|
||||
return GdImageFile(fp, filename)
|
||||
except SyntaxError:
|
||||
raise IOError("cannot identify this image file")
|
|
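# Usage sketch (editor's illustration, not part of the original module).
# As noted above, GD files are not auto-detected, so the module-level open()
# must be used instead of Image.open(). "chart.gd" is hypothetical.
from PIL import GdImageFile

gd = GdImageFile.open("chart.gd")
print(gd.size, gd.info.get("transparent"))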
@ -1,506 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# GIF file handling
|
||||
#
|
||||
# History:
|
||||
# 1995-09-01 fl Created
|
||||
# 1996-12-14 fl Added interlace support
|
||||
# 1996-12-30 fl Added animation support
|
||||
# 1997-01-05 fl Added write support, fixed local colour map bug
|
||||
# 1997-02-23 fl Make sure to load raster data in getdata()
|
||||
# 1997-07-05 fl Support external decoder (0.4)
|
||||
# 1998-07-09 fl Handle all modes when saving (0.5)
|
||||
# 1998-07-15 fl Renamed offset attribute to avoid name clash
|
||||
# 2001-04-16 fl Added rewind support (seek to frame 0) (0.6)
|
||||
# 2001-04-17 fl Added palette optimization (0.7)
|
||||
# 2002-06-06 fl Added transparency support for save (0.8)
|
||||
# 2004-02-24 fl Disable interlacing for small images
|
||||
#
|
||||
# Copyright (c) 1997-2004 by Secret Labs AB
|
||||
# Copyright (c) 1995-2004 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
__version__ = "0.9"
|
||||
|
||||
|
||||
from PIL import Image, ImageFile, ImagePalette, _binary
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Helpers
|
||||
|
||||
i8 = _binary.i8
|
||||
i16 = _binary.i16le
|
||||
o8 = _binary.o8
|
||||
o16 = _binary.o16le
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Identify/read GIF files
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:6] in [b"GIF87a", b"GIF89a"]
|
||||
|
||||
##
|
||||
# Image plugin for GIF images. This plugin supports both GIF87 and
|
||||
# GIF89 images.
|
||||
|
||||
class GifImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "GIF"
|
||||
format_description = "Compuserve GIF"
|
||||
global_palette = None
|
||||
|
||||
def data(self):
|
||||
s = self.fp.read(1)
|
||||
if s and i8(s):
|
||||
return self.fp.read(i8(s))
|
||||
return None
|
||||
|
||||
def _open(self):
|
||||
|
||||
# Screen
|
||||
s = self.fp.read(13)
|
||||
if s[:6] not in [b"GIF87a", b"GIF89a"]:
|
||||
raise SyntaxError("not a GIF file")
|
||||
|
||||
self.info["version"] = s[:6]
|
||||
self.size = i16(s[6:]), i16(s[8:])
|
||||
self.tile = []
|
||||
flags = i8(s[10])
|
||||
bits = (flags & 7) + 1
|
||||
|
||||
if flags & 128:
|
||||
# get global palette
|
||||
self.info["background"] = i8(s[11])
|
||||
# check if palette contains colour indices
|
||||
p = self.fp.read(3<<bits)
|
||||
for i in range(0, len(p), 3):
|
||||
if not (i//3 == i8(p[i]) == i8(p[i+1]) == i8(p[i+2])):
|
||||
p = ImagePalette.raw("RGB", p)
|
||||
self.global_palette = self.palette = p
|
||||
break
|
||||
|
||||
self.__fp = self.fp # FIXME: hack
|
||||
self.__rewind = self.fp.tell()
|
||||
self.seek(0) # get ready to read first frame
|
||||
|
||||
def seek(self, frame):
|
||||
|
||||
if frame == 0:
|
||||
# rewind
|
||||
self.__offset = 0
|
||||
self.dispose = None
|
||||
self.__frame = -1
|
||||
self.__fp.seek(self.__rewind)
|
||||
|
||||
if frame != self.__frame + 1:
|
||||
raise ValueError("cannot seek to frame %d" % frame)
|
||||
self.__frame = frame
|
||||
|
||||
self.tile = []
|
||||
|
||||
self.fp = self.__fp
|
||||
if self.__offset:
|
||||
# backup to last frame
|
||||
self.fp.seek(self.__offset)
|
||||
while self.data():
|
||||
pass
|
||||
self.__offset = 0
|
||||
|
||||
if self.dispose:
|
||||
self.im = self.dispose
|
||||
self.dispose = None
|
||||
|
||||
from copy import copy
|
||||
self.palette = copy(self.global_palette)
|
||||
|
||||
while True:
|
||||
|
||||
s = self.fp.read(1)
|
||||
if not s or s == b";":
|
||||
break
|
||||
|
||||
elif s == b"!":
|
||||
#
|
||||
# extensions
|
||||
#
|
||||
s = self.fp.read(1)
|
||||
block = self.data()
|
||||
if i8(s) == 249:
|
||||
#
|
||||
# graphic control extension
|
||||
#
|
||||
flags = i8(block[0])
|
||||
if flags & 1:
|
||||
self.info["transparency"] = i8(block[3])
|
||||
self.info["duration"] = i16(block[1:3]) * 10
|
||||
try:
|
||||
# disposal methods
|
||||
if flags & 8:
|
||||
# replace with background colour
|
||||
self.dispose = Image.core.fill("P", self.size,
|
||||
self.info["background"])
|
||||
elif flags & 16:
|
||||
# replace with previous contents
|
||||
self.dispose = self.im.copy()
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
elif i8(s) == 255:
|
||||
#
|
||||
# application extension
|
||||
#
|
||||
self.info["extension"] = block, self.fp.tell()
|
||||
if block[:11] == b"NETSCAPE2.0":
|
||||
block = self.data()
|
||||
if len(block) >= 3 and i8(block[0]) == 1:
|
||||
self.info["loop"] = i16(block[1:3])
|
||||
while self.data():
|
||||
pass
|
||||
|
||||
elif s == b",":
|
||||
#
|
||||
# local image
|
||||
#
|
||||
s = self.fp.read(9)
|
||||
|
||||
# extent
|
||||
x0, y0 = i16(s[0:]), i16(s[2:])
|
||||
x1, y1 = x0 + i16(s[4:]), y0 + i16(s[6:])
|
||||
flags = i8(s[8])
|
||||
|
||||
interlace = (flags & 64) != 0
|
||||
|
||||
if flags & 128:
|
||||
bits = (flags & 7) + 1
|
||||
self.palette =\
|
||||
ImagePalette.raw("RGB", self.fp.read(3<<bits))
|
||||
|
||||
# image data
|
||||
bits = i8(self.fp.read(1))
|
||||
self.__offset = self.fp.tell()
|
||||
self.tile = [("gif",
|
||||
(x0, y0, x1, y1),
|
||||
self.__offset,
|
||||
(bits, interlace))]
|
||||
break
|
||||
|
||||
else:
|
||||
pass
|
||||
# raise IOError, "illegal GIF tag `%x`" % i8(s)
|
||||
|
||||
if not self.tile:
|
||||
# self.__fp = None
|
||||
raise EOFError("no more images in GIF file")
|
||||
|
||||
self.mode = "L"
|
||||
if self.palette:
|
||||
self.mode = "P"
|
||||
|
||||
def tell(self):
|
||||
return self.__frame
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Write GIF files
|
||||
|
||||
try:
|
||||
import _imaging_gif
|
||||
except ImportError:
|
||||
_imaging_gif = None
|
||||
|
||||
RAWMODE = {
|
||||
"1": "L",
|
||||
"L": "L",
|
||||
"P": "P",
|
||||
}
|
||||
|
||||
def _save(im, fp, filename):
|
||||
|
||||
if _imaging_gif:
|
||||
# call external driver
|
||||
try:
|
||||
_imaging_gif.save(im, fp, filename)
|
||||
return
|
||||
except IOError:
|
||||
pass # write uncompressed file
|
||||
|
||||
try:
|
||||
rawmode = RAWMODE[im.mode]
|
||||
imOut = im
|
||||
except KeyError:
|
||||
# convert on the fly (EXPERIMENTAL -- I'm not sure PIL
|
||||
# should automatically convert images on save...)
|
||||
if Image.getmodebase(im.mode) == "RGB":
|
||||
palette_size = 256
|
||||
if im.palette:
|
||||
palette_size = len(im.palette.getdata()[1]) // 3
|
||||
imOut = im.convert("P", palette=1, colors=palette_size)
|
||||
rawmode = "P"
|
||||
else:
|
||||
imOut = im.convert("L")
|
||||
rawmode = "L"
|
||||
|
||||
# header
|
||||
try:
|
||||
palette = im.encoderinfo["palette"]
|
||||
except KeyError:
|
||||
palette = None
|
||||
im.encoderinfo["optimize"] = im.encoderinfo.get("optimize", True)
|
||||
if im.encoderinfo["optimize"]:
|
||||
# When the mode is L, and we optimize, we end up with
|
||||
# im.mode == P and rawmode = L, which fails.
|
||||
# If we're optimizing the palette, we're going to be
|
||||
# in a rawmode of P anyway.
|
||||
rawmode = 'P'
|
||||
|
||||
header, usedPaletteColors = getheader(imOut, palette, im.encoderinfo)
|
||||
for s in header:
|
||||
fp.write(s)
|
||||
|
||||
flags = 0
|
||||
|
||||
try:
|
||||
interlace = im.encoderinfo["interlace"]
|
||||
except KeyError:
|
||||
interlace = 1
|
||||
|
||||
# workaround for @PIL153
|
||||
if min(im.size) < 16:
|
||||
interlace = 0
|
||||
|
||||
if interlace:
|
||||
flags = flags | 64
|
||||
|
||||
try:
|
||||
transparency = im.encoderinfo["transparency"]
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
transparency = int(transparency)
|
||||
# optimize the block away if transparent color is not used
|
||||
transparentColorExists = True
|
||||
# adjust the transparency index after optimize
|
||||
if usedPaletteColors is not None and len(usedPaletteColors) < 256:
|
||||
for i in range(len(usedPaletteColors)):
|
||||
if usedPaletteColors[i] == transparency:
|
||||
transparency = i
|
||||
transparentColorExists = True
|
||||
break
|
||||
else:
|
||||
transparentColorExists = False
|
||||
|
||||
# transparency extension block
|
||||
if transparentColorExists:
|
||||
fp.write(b"!" +
|
||||
o8(249) + # extension intro
|
||||
o8(4) + # length
|
||||
o8(1) + # transparency info present
|
||||
o16(0) + # duration
|
||||
o8(transparency) # transparency index
|
||||
+ o8(0))
|
||||
|
||||
# local image header
|
||||
fp.write(b"," +
|
||||
o16(0) + o16(0) + # bounding box
|
||||
o16(im.size[0]) + # size
|
||||
o16(im.size[1]) +
|
||||
o8(flags) + # flags
|
||||
o8(8)) # bits
|
||||
|
||||
imOut.encoderconfig = (8, interlace)
|
||||
ImageFile._save(imOut, fp, [("gif", (0,0)+im.size, 0, rawmode)])
|
||||
|
||||
fp.write(b"\0") # end of image data
|
||||
|
||||
fp.write(b";") # end of file
|
||||
|
||||
try:
|
||||
fp.flush()
|
||||
except: pass
|
||||
|
||||
|
||||
def _save_netpbm(im, fp, filename):
|
||||
|
||||
#
|
||||
# If you need real GIF compression and/or RGB quantization, you
|
||||
# can use the external NETPBM/PBMPLUS utilities. See comments
|
||||
# below for information on how to enable this.
|
||||
|
||||
import os
|
||||
from subprocess import Popen, check_call, PIPE, CalledProcessError
|
||||
import tempfile
|
||||
file = im._dump()
|
||||
|
||||
if im.mode != "RGB":
|
||||
with open(filename, 'wb') as f:
|
||||
stderr = tempfile.TemporaryFile()
|
||||
check_call(["ppmtogif", file], stdout=f, stderr=stderr)
|
||||
else:
|
||||
with open(filename, 'wb') as f:
|
||||
|
||||
# Pipe ppmquant output into ppmtogif
|
||||
# "ppmquant 256 %s | ppmtogif > %s" % (file, filename)
|
||||
quant_cmd = ["ppmquant", "256", file]
|
||||
togif_cmd = ["ppmtogif"]
|
||||
stderr = tempfile.TemporaryFile()
|
||||
quant_proc = Popen(quant_cmd, stdout=PIPE, stderr=stderr)
|
||||
stderr = tempfile.TemporaryFile()
|
||||
togif_proc = Popen(togif_cmd, stdin=quant_proc.stdout, stdout=f, stderr=stderr)
|
||||
|
||||
# Allow ppmquant to receive SIGPIPE if ppmtogif exits
|
||||
quant_proc.stdout.close()
|
||||
|
||||
retcode = quant_proc.wait()
|
||||
if retcode:
|
||||
raise CalledProcessError(retcode, quant_cmd)
|
||||
|
||||
retcode = togif_proc.wait()
|
||||
if retcode:
|
||||
raise CalledProcessError(retcode, togif_cmd)
|
||||
|
||||
try:
|
||||
os.unlink(file)
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# GIF utilities
|
||||
|
||||
def getheader(im, palette=None, info=None):
|
||||
"""Return a list of strings representing a GIF header"""
|
||||
|
||||
optimize = info and info.get("optimize", 0)
|
||||
|
||||
# Header Block
|
||||
# http://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp
|
||||
header = [
|
||||
b"GIF87a" + # signature + version
|
||||
o16(im.size[0]) + # canvas width
|
||||
o16(im.size[1]) # canvas height
|
||||
]
|
||||
|
||||
if im.mode == "P":
|
||||
if palette and isinstance(palette, bytes):
|
||||
sourcePalette = palette[:768]
|
||||
else:
|
||||
sourcePalette = im.im.getpalette("RGB")[:768]
|
||||
else: # L-mode
|
||||
if palette and isinstance(palette, bytes):
|
||||
sourcePalette = palette[:768]
|
||||
else:
|
||||
sourcePalette = bytearray([i//3 for i in range(768)])
|
||||
|
||||
usedPaletteColors = paletteBytes = None
|
||||
|
||||
if optimize:
|
||||
usedPaletteColors = []
|
||||
|
||||
# check which colors are used
|
||||
i = 0
|
||||
for count in im.histogram():
|
||||
if count:
|
||||
usedPaletteColors.append(i)
|
||||
i += 1
|
||||
|
||||
# create the new palette if not every color is used
|
||||
if len(usedPaletteColors) < 256:
|
||||
paletteBytes = b""
|
||||
newPositions = {}
|
||||
|
||||
i = 0
|
||||
# pick only the used colors from the palette
|
||||
for oldPosition in usedPaletteColors:
|
||||
paletteBytes += sourcePalette[oldPosition*3:oldPosition*3+3]
|
||||
newPositions[oldPosition] = i
|
||||
i += 1
|
||||
|
||||
# replace the palette color id of all pixel with the new id
|
||||
imageBytes = bytearray(im.tobytes())
|
||||
for i in range(len(imageBytes)):
|
||||
imageBytes[i] = newPositions[imageBytes[i]]
|
||||
im.frombytes(bytes(imageBytes))
|
||||
newPaletteBytes = paletteBytes + (768 - len(paletteBytes)) * b'\x00'
|
||||
im.putpalette(newPaletteBytes)
|
||||
im.palette = ImagePalette.ImagePalette("RGB", palette = paletteBytes, size = len(paletteBytes))
|
||||
|
||||
if not paletteBytes:
|
||||
paletteBytes = sourcePalette
|
||||
|
||||
# Logical Screen Descriptor
|
||||
# calculate the palette size for the header
|
||||
import math
|
||||
colorTableSize = int(math.ceil(math.log(len(paletteBytes)//3, 2)))-1
|
||||
if colorTableSize < 0: colorTableSize = 0
|
||||
# size of global color table + global color table flag
|
||||
header.append(o8(colorTableSize + 128))
|
||||
# background + reserved/aspect
|
||||
header.append(o8(0) + o8(0))
|
||||
# end of Logical Screen Descriptor
|
||||
|
||||
# add the missing amount of bytes
|
||||
# the palette has to be 2<<n in size
|
||||
actualTargetSizeDiff = (2<<colorTableSize) - len(paletteBytes)//3
|
||||
if actualTargetSizeDiff > 0:
|
||||
paletteBytes += o8(0) * 3 * actualTargetSizeDiff
|
||||
|
||||
# Header + Logical Screen Descriptor + Global Color Table
|
||||
header.append(paletteBytes)
|
||||
return header, usedPaletteColors
|
||||
|
||||
|
||||
def getdata(im, offset = (0, 0), **params):
|
||||
"""Return a list of strings representing this image.
|
||||
The first string is a local image header, the rest contains
|
||||
encoded image data."""
|
||||
|
||||
class collector:
|
||||
data = []
|
||||
def write(self, data):
|
||||
self.data.append(data)
|
||||
|
||||
im.load() # make sure raster data is available
|
||||
|
||||
fp = collector()
|
||||
|
||||
try:
|
||||
im.encoderinfo = params
|
||||
|
||||
# local image header
|
||||
fp.write(b"," +
|
||||
o16(offset[0]) + # offset
|
||||
o16(offset[1]) +
|
||||
o16(im.size[0]) + # size
|
||||
o16(im.size[1]) +
|
||||
o8(0) + # flags
|
||||
o8(8)) # bits
|
||||
|
||||
ImageFile._save(im, fp, [("gif", (0,0)+im.size, 0, RAWMODE[im.mode])])
|
||||
|
||||
fp.write(b"\0") # end of image data
|
||||
|
||||
finally:
|
||||
del im.encoderinfo
|
||||
|
||||
return fp.data
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Registry
|
||||
|
||||
Image.register_open(GifImageFile.format, GifImageFile, _accept)
|
||||
Image.register_save(GifImageFile.format, _save)
|
||||
Image.register_extension(GifImageFile.format, ".gif")
|
||||
Image.register_mime(GifImageFile.format, "image/gif")
|
||||
|
||||
#
|
||||
# Uncomment the following line if you wish to use NETPBM/PBMPLUS
|
||||
# instead of the built-in "uncompressed" GIF encoder
|
||||
|
||||
# Image.register_save(GifImageFile.format, _save_netpbm)
|
|
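# Usage sketch (editor's illustration, not part of the original plugin).
# "photo.png" is hypothetical; keyword arguments to save() land in
# im.encoderinfo and are read by _save() above (palette, optimize,
# interlace, transparency).
from PIL import Image

src = Image.open("photo.png").convert("P", palette=Image.ADAPTIVE)
src.save("photo.gif", optimize=True, interlace=0)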
@ -1,124 +0,0 @@
|
|||
#
|
||||
# Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# stuff to read (and render) GIMP gradient files
|
||||
#
|
||||
# History:
|
||||
# 97-08-23 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1997.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from math import pi, log, sin, sqrt
|
||||
from PIL._binary import o8
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Stuff to translate curve segments to palette values (derived from
|
||||
# the corresponding code in GIMP, written by Federico Mena Quintero.
|
||||
# See the GIMP distribution for more information.)
|
||||
#
|
||||
|
||||
EPSILON = 1e-10
|
||||
|
||||
def linear(middle, pos):
|
||||
if pos <= middle:
|
||||
if middle < EPSILON:
|
||||
return 0.0
|
||||
else:
|
||||
return 0.5 * pos / middle
|
||||
else:
|
||||
pos = pos - middle
|
||||
middle = 1.0 - middle
|
||||
if middle < EPSILON:
|
||||
return 1.0
|
||||
else:
|
||||
return 0.5 + 0.5 * pos / middle
|
||||
|
||||
def curved(middle, pos):
|
||||
return pos ** (log(0.5) / log(max(middle, EPSILON)))
|
||||
|
||||
def sine(middle, pos):
|
||||
return (sin((-pi / 2.0) + pi * linear(middle, pos)) + 1.0) / 2.0
|
||||
|
||||
def sphere_increasing(middle, pos):
|
||||
return sqrt(1.0 - (linear(middle, pos) - 1.0) ** 2)
|
||||
|
||||
def sphere_decreasing(middle, pos):
|
||||
return 1.0 - sqrt(1.0 - linear(middle, pos) ** 2)
|
||||
|
||||
SEGMENTS = [ linear, curved, sine, sphere_increasing, sphere_decreasing ]
|
||||
|
||||
class GradientFile:
|
||||
|
||||
gradient = None
|
||||
|
||||
def getpalette(self, entries = 256):
|
||||
|
||||
palette = []
|
||||
|
||||
ix = 0
|
||||
x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]
|
||||
|
||||
for i in range(entries):
|
||||
|
||||
x = i / float(entries-1)
|
||||
|
||||
while x1 < x:
|
||||
ix += 1
|
||||
x0, x1, xm, rgb0, rgb1, segment = self.gradient[ix]
|
||||
|
||||
w = x1 - x0
|
||||
|
||||
if w < EPSILON:
|
||||
scale = segment(0.5, 0.5)
|
||||
else:
|
||||
scale = segment((xm - x0) / w, (x - x0) / w)
|
||||
|
||||
# expand to RGBA
|
||||
r = o8(int(255 * ((rgb1[0] - rgb0[0]) * scale + rgb0[0]) + 0.5))
|
||||
g = o8(int(255 * ((rgb1[1] - rgb0[1]) * scale + rgb0[1]) + 0.5))
|
||||
b = o8(int(255 * ((rgb1[2] - rgb0[2]) * scale + rgb0[2]) + 0.5))
|
||||
a = o8(int(255 * ((rgb1[3] - rgb0[3]) * scale + rgb0[3]) + 0.5))
|
||||
|
||||
# add to palette
|
||||
palette.append(r + g + b + a)
|
||||
|
||||
return b"".join(palette), "RGBA"
|
||||
|
||||
##
|
||||
# File handler for GIMP's gradient format.
|
||||
|
||||
class GimpGradientFile(GradientFile):
|
||||
|
||||
def __init__(self, fp):
|
||||
|
||||
if fp.readline()[:13] != b"GIMP Gradient":
|
||||
raise SyntaxError("not a GIMP gradient file")
|
||||
|
||||
count = int(fp.readline())
|
||||
|
||||
gradient = []
|
||||
|
||||
for i in range(count):
|
||||
|
||||
s = fp.readline().split()
|
||||
w = [float(x) for x in s[:11]]
|
||||
|
||||
x0, x1 = w[0], w[2]
|
||||
xm = w[1]
|
||||
rgb0 = w[3:7]
|
||||
rgb1 = w[7:11]
|
||||
|
||||
segment = SEGMENTS[int(s[11])]
|
||||
cspace = int(s[12])
|
||||
|
||||
if cspace != 0:
|
||||
raise IOError("cannot handle HSV colour space")
|
||||
|
||||
gradient.append((x0, x1, xm, rgb0, rgb1, segment))
|
||||
|
||||
self.gradient = gradient
|
|
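# Usage sketch (editor's illustration, not part of the original module).
# "Sunrise.ggr" is a hypothetical GIMP gradient; getpalette() returns packed
# RGBA bytes plus the rawmode, which can be turned into a 256x1 ramp image.
from PIL import Image
from PIL.GimpGradientFile import GimpGradientFile

with open("Sunrise.ggr", "rb") as fp:
    data, rawmode = GimpGradientFile(fp).getpalette(256)
ramp = Image.frombytes(rawmode, (256, 1), data)
ramp.resize((256, 32)).save("sunrise-ramp.png")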
@ -1,62 +0,0 @@
|
|||
#
|
||||
# Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# stuff to read GIMP palette files
|
||||
#
|
||||
# History:
|
||||
# 1997-08-23 fl Created
|
||||
# 2004-09-07 fl Support GIMP 2.0 palette files.
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997-2004. All rights reserved.
|
||||
# Copyright (c) Fredrik Lundh 1997-2004.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
import re
|
||||
from PIL._binary import o8
|
||||
|
||||
##
|
||||
# File handler for GIMP's palette format.
|
||||
|
||||
class GimpPaletteFile:
|
||||
|
||||
rawmode = "RGB"
|
||||
|
||||
def __init__(self, fp):
|
||||
|
||||
self.palette = [o8(i)*3 for i in range(256)]
|
||||
|
||||
if fp.readline()[:12] != b"GIMP Palette":
|
||||
raise SyntaxError("not a GIMP palette file")
|
||||
|
||||
i = 0
|
||||
|
||||
while i <= 255:
|
||||
|
||||
s = fp.readline()
|
||||
|
||||
if not s:
|
||||
break
|
||||
# skip fields and comment lines
|
||||
if re.match(b"\w+:|#", s):
|
||||
continue
|
||||
if len(s) > 100:
|
||||
raise SyntaxError("bad palette file")
|
||||
|
||||
v = tuple(map(int, s.split()[:3]))
|
||||
if len(v) != 3:
|
||||
raise ValueError("bad palette entry")
|
||||
|
||||
if 0 <= i <= 255:
|
||||
self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2])
|
||||
|
||||
i += 1
|
||||
|
||||
self.palette = b"".join(self.palette)
|
||||
|
||||
|
||||
def getpalette(self):
|
||||
|
||||
return self.palette, self.rawmode
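# A minimal, hypothetical usage sketch (not part of the original module).
# "example.gpl" is an assumed path to a GIMP palette file.

def _example_load_palette(path="example.gpl"):
    with open(path, "rb") as fp:
        data, rawmode = GimpPaletteFile(fp).getpalette()
    # "data" is 256 packed RGB triplets (768 bytes); rawmode == "RGB".
    return data, rawmode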
|
|
@ -1,68 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# GRIB stub adapter
|
||||
#
|
||||
# Copyright (c) 1996-2003 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image, ImageFile
|
||||
|
||||
_handler = None
|
||||
|
||||
##
|
||||
# Install application-specific GRIB image handler.
|
||||
#
|
||||
# @param handler Handler object.
|
||||
|
||||
def register_handler(handler):
|
||||
global _handler
|
||||
_handler = handler
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Image adapter
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[0:4] == b"GRIB" and prefix[7:8] == b'\x01'
|
||||
|
||||
class GribStubImageFile(ImageFile.StubImageFile):
|
||||
|
||||
format = "GRIB"
|
||||
format_description = "GRIB"
|
||||
|
||||
def _open(self):
|
||||
|
||||
offset = self.fp.tell()
|
||||
|
||||
if not _accept(self.fp.read(8)):
|
||||
raise SyntaxError("Not a GRIB file")
|
||||
|
||||
self.fp.seek(offset)
|
||||
|
||||
# make something up
|
||||
self.mode = "F"
|
||||
self.size = 1, 1
|
||||
|
||||
loader = self._load()
|
||||
if loader:
|
||||
loader.open(self)
|
||||
|
||||
def _load(self):
|
||||
return _handler
|
||||
|
||||
def _save(im, fp, filename):
|
||||
if _handler is None or not hasattr(_handler, "save"):
|
||||
raise IOError("GRIB save handler not installed")
|
||||
_handler.save(im, fp, filename)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Registry
|
||||
|
||||
Image.register_open(GribStubImageFile.format, GribStubImageFile, _accept)
|
||||
Image.register_save(GribStubImageFile.format, _save)
|
||||
|
||||
Image.register_extension(GribStubImageFile.format, ".grib")
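# A hypothetical sketch of the handler protocol this stub expects (not part
# of the original module). A real handler supplies open(), called with the
# stub image so it can set mode, size and tile, and save().

class _ExampleGribHandler:
    def open(self, im):
        # A real implementation would decode the GRIB payload behind im.fp
        # and fill in im.mode, im.size and im.tile here.
        pass

    def save(self, im, fp, filename):
        raise NotImplementedError("writing GRIB data is not implemented")

# To install it (commented out so this sketch has no side effects):
# register_handler(_ExampleGribHandler())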
|
|
@ -1,70 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# HDF5 stub adapter
|
||||
#
|
||||
# Copyright (c) 2000-2003 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image, ImageFile
|
||||
|
||||
_handler = None
|
||||
|
||||
##
|
||||
# Install application-specific HDF5 image handler.
|
||||
#
|
||||
# @param handler Handler object.
|
||||
|
||||
def register_handler(handler):
|
||||
global _handler
|
||||
_handler = handler
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Image adapter
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:8] == b"\x89HDF\r\n\x1a\n"
|
||||
|
||||
class HDF5StubImageFile(ImageFile.StubImageFile):
|
||||
|
||||
format = "HDF5"
|
||||
format_description = "HDF5"
|
||||
|
||||
def _open(self):
|
||||
|
||||
offset = self.fp.tell()
|
||||
|
||||
if not _accept(self.fp.read(8)):
|
||||
raise SyntaxError("Not an HDF file")
|
||||
|
||||
self.fp.seek(offset)
|
||||
|
||||
# make something up
|
||||
self.mode = "F"
|
||||
self.size = 1, 1
|
||||
|
||||
loader = self._load()
|
||||
if loader:
|
||||
loader.open(self)
|
||||
|
||||
def _load(self):
|
||||
return _handler
|
||||
|
||||
|
||||
def _save(im, fp, filename):
|
||||
if _handler is None or not hasattr(_handler, "save"):
|
||||
raise IOError("HDF5 save handler not installed")
|
||||
_handler.save(im, fp, filename)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Registry
|
||||
|
||||
Image.register_open(HDF5StubImageFile.format, HDF5StubImageFile, _accept)
|
||||
Image.register_save(HDF5StubImageFile.format, _save)
|
||||
|
||||
Image.register_extension(HDF5StubImageFile.format, ".h5")
|
||||
Image.register_extension(HDF5StubImageFile.format, ".hdf")
|
|
@ -1,301 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# Mac OS X icns file decoder, based on icns.py by Bob Ippolito.
|
||||
#
|
||||
# history:
|
||||
# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies.
|
||||
#
|
||||
# Copyright (c) 2004 by Bob Ippolito.
|
||||
# Copyright (c) 2004 by Secret Labs.
|
||||
# Copyright (c) 2004 by Fredrik Lundh.
|
||||
# Copyright (c) 2014 by Alastair Houghton.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image, ImageFile, PngImagePlugin, _binary
|
||||
import struct, io
|
||||
|
||||
enable_jpeg2k = hasattr(Image.core, 'jp2klib_version')
|
||||
if enable_jpeg2k:
|
||||
from PIL import Jpeg2KImagePlugin
|
||||
|
||||
i8 = _binary.i8
|
||||
|
||||
HEADERSIZE = 8
|
||||
|
||||
def nextheader(fobj):
|
||||
return struct.unpack('>4sI', fobj.read(HEADERSIZE))
|
||||
|
||||
def read_32t(fobj, start_length, size):
|
||||
# The 128x128 icon seems to have an extra header for some reason.
|
||||
(start, length) = start_length
|
||||
fobj.seek(start)
|
||||
sig = fobj.read(4)
|
||||
if sig != b'\x00\x00\x00\x00':
|
||||
raise SyntaxError('Unknown signature, expecting 0x00000000')
|
||||
return read_32(fobj, (start + 4, length - 4), size)
|
||||
|
||||
def read_32(fobj, start_length, size):
|
||||
"""
|
||||
Read a 32bit RGB icon resource. Seems to be either uncompressed or
|
||||
an RLE packbits-like scheme.
|
||||
"""
|
||||
(start, length) = start_length
|
||||
fobj.seek(start)
|
||||
pixel_size = (size[0] * size[2], size[1] * size[2])
|
||||
sizesq = pixel_size[0] * pixel_size[1]
|
||||
if length == sizesq * 3:
|
||||
# uncompressed ("RGBRGBRGB...")
|
||||
indata = fobj.read(length)
|
||||
im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
|
||||
else:
|
||||
# decode image
|
||||
im = Image.new("RGB", pixel_size, None)
|
||||
for band_ix in range(3):
|
||||
data = []
|
||||
bytesleft = sizesq
|
||||
while bytesleft > 0:
|
||||
byte = fobj.read(1)
|
||||
if not byte:
|
||||
break
|
||||
byte = i8(byte)
|
||||
if byte & 0x80:
|
||||
blocksize = byte - 125
|
||||
byte = fobj.read(1)
|
||||
for i in range(blocksize):
|
||||
data.append(byte)
|
||||
else:
|
||||
blocksize = byte + 1
|
||||
data.append(fobj.read(blocksize))
|
||||
bytesleft -= blocksize
|
||||
if bytesleft <= 0:
|
||||
break
|
||||
if bytesleft != 0:
|
||||
raise SyntaxError(
|
||||
"Error reading channel [%r left]" % bytesleft
|
||||
)
|
||||
band = Image.frombuffer(
|
||||
"L", pixel_size, b"".join(data), "raw", "L", 0, 1
|
||||
)
|
||||
im.im.putband(band.im, band_ix)
|
||||
return {"RGB": im}
|
||||
|
||||
def read_mk(fobj, start_length, size):
|
||||
# Alpha masks seem to be uncompressed
|
||||
(start, length) = start_length
|
||||
fobj.seek(start)
|
||||
pixel_size = (size[0] * size[2], size[1] * size[2])
|
||||
sizesq = pixel_size[0] * pixel_size[1]
|
||||
band = Image.frombuffer(
|
||||
"L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1
|
||||
)
|
||||
return {"A": band}
|
||||
|
||||
def read_png_or_jpeg2000(fobj, start_length, size):
|
||||
(start, length) = start_length
|
||||
fobj.seek(start)
|
||||
sig = fobj.read(12)
|
||||
if sig[:8] == b'\x89PNG\x0d\x0a\x1a\x0a':
|
||||
fobj.seek(start)
|
||||
im = PngImagePlugin.PngImageFile(fobj)
|
||||
return {"RGBA": im}
|
||||
elif sig[:4] == b'\xff\x4f\xff\x51' \
|
||||
or sig[:4] == b'\x0d\x0a\x87\x0a' \
|
||||
or sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a':
|
||||
if not enable_jpeg2k:
|
||||
raise ValueError('Unsupported icon subimage format (rebuild PIL with JPEG 2000 support to fix this)')
|
||||
# j2k, jpc or j2c
|
||||
fobj.seek(start)
|
||||
jp2kstream = fobj.read(length)
|
||||
f = io.BytesIO(jp2kstream)
|
||||
im = Jpeg2KImagePlugin.Jpeg2KImageFile(f)
|
||||
if im.mode != 'RGBA':
|
||||
im = im.convert('RGBA')
|
||||
return {"RGBA": im}
|
||||
else:
|
||||
raise ValueError('Unsupported icon subimage format')
|
||||
|
||||
class IcnsFile:
|
||||
|
||||
SIZES = {
|
||||
(512, 512, 2): [
|
||||
(b'ic10', read_png_or_jpeg2000),
|
||||
],
|
||||
(512, 512, 1): [
|
||||
(b'ic09', read_png_or_jpeg2000),
|
||||
],
|
||||
(256, 256, 2): [
|
||||
(b'ic14', read_png_or_jpeg2000),
|
||||
],
|
||||
(256, 256, 1): [
|
||||
(b'ic08', read_png_or_jpeg2000),
|
||||
],
|
||||
(128, 128, 2): [
|
||||
(b'ic13', read_png_or_jpeg2000),
|
||||
],
|
||||
(128, 128, 1): [
|
||||
(b'ic07', read_png_or_jpeg2000),
|
||||
(b'it32', read_32t),
|
||||
(b't8mk', read_mk),
|
||||
],
|
||||
(64, 64, 1): [
|
||||
(b'icp6', read_png_or_jpeg2000),
|
||||
],
|
||||
(32, 32, 2): [
|
||||
(b'ic12', read_png_or_jpeg2000),
|
||||
],
|
||||
(48, 48, 1): [
|
||||
(b'ih32', read_32),
|
||||
(b'h8mk', read_mk),
|
||||
],
|
||||
(32, 32, 1): [
|
||||
(b'icp5', read_png_or_jpeg2000),
|
||||
(b'il32', read_32),
|
||||
(b'l8mk', read_mk),
|
||||
],
|
||||
(16, 16, 2): [
|
||||
(b'ic11', read_png_or_jpeg2000),
|
||||
],
|
||||
(16, 16, 1): [
|
||||
(b'icp4', read_png_or_jpeg2000),
|
||||
(b'is32', read_32),
|
||||
(b's8mk', read_mk),
|
||||
],
|
||||
}
|
||||
|
||||
def __init__(self, fobj):
|
||||
"""
|
||||
fobj is a file-like object containing an icns resource
|
||||
"""
|
||||
# signature : (start, length)
|
||||
self.dct = dct = {}
|
||||
self.fobj = fobj
|
||||
sig, filesize = nextheader(fobj)
|
||||
if sig != b'icns':
|
||||
raise SyntaxError('not an icns file')
|
||||
i = HEADERSIZE
|
||||
while i < filesize:
|
||||
sig, blocksize = nextheader(fobj)
|
||||
if blocksize <= 0:
|
||||
raise SyntaxError('invalid block header')
|
||||
i += HEADERSIZE
|
||||
blocksize -= HEADERSIZE
|
||||
dct[sig] = (i, blocksize)
|
||||
fobj.seek(blocksize, 1)
|
||||
i += blocksize
|
||||
|
||||
def itersizes(self):
|
||||
sizes = []
|
||||
for size, fmts in self.SIZES.items():
|
||||
for (fmt, reader) in fmts:
|
||||
if fmt in self.dct:
|
||||
sizes.append(size)
|
||||
break
|
||||
return sizes
|
||||
|
||||
def bestsize(self):
|
||||
sizes = self.itersizes()
|
||||
if not sizes:
|
||||
raise SyntaxError("No 32bit icon resources found")
|
||||
return max(sizes)
|
||||
|
||||
def dataforsize(self, size):
|
||||
"""
|
||||
Get an icon resource as {channel: array}. Note that
|
||||
the arrays are bottom-up like windows bitmaps and will likely
|
||||
need to be flipped or transposed in some way.
|
||||
"""
|
||||
dct = {}
|
||||
for code, reader in self.SIZES[size]:
|
||||
desc = self.dct.get(code)
|
||||
if desc is not None:
|
||||
dct.update(reader(self.fobj, desc, size))
|
||||
return dct
|
||||
|
||||
def getimage(self, size=None):
|
||||
if size is None:
|
||||
size = self.bestsize()
|
||||
if len(size) == 2:
|
||||
size = (size[0], size[1], 1)
|
||||
channels = self.dataforsize(size)
|
||||
|
||||
im = channels.get('RGBA', None)
|
||||
if im:
|
||||
return im
|
||||
|
||||
im = channels.get("RGB").copy()
|
||||
try:
|
||||
im.putalpha(channels["A"])
|
||||
except KeyError:
|
||||
pass
|
||||
return im
|
||||
|
||||
##
|
||||
# Image plugin for Mac OS icons.
|
||||
|
||||
class IcnsImageFile(ImageFile.ImageFile):
|
||||
"""
|
||||
PIL read-only image support for Mac OS .icns files.
|
||||
Chooses the best resolution, but will possibly load
|
||||
a different size image if you mutate the size attribute
|
||||
before calling 'load'.
|
||||
|
||||
The info dictionary has a key 'sizes' that is a list
|
||||
of sizes that the icns file has.
|
||||
"""
|
||||
|
||||
format = "ICNS"
|
||||
format_description = "Mac OS icns resource"
|
||||
|
||||
def _open(self):
|
||||
self.icns = IcnsFile(self.fp)
|
||||
self.mode = 'RGBA'
|
||||
self.best_size = self.icns.bestsize()
|
||||
self.size = (self.best_size[0] * self.best_size[2],
|
||||
self.best_size[1] * self.best_size[2])
|
||||
self.info['sizes'] = self.icns.itersizes()
|
||||
# Just use this to see if it's loaded or not yet.
|
||||
self.tile = ('',)
|
||||
|
||||
def load(self):
|
||||
if len(self.size) == 3:
|
||||
self.best_size = self.size
|
||||
self.size = (self.best_size[0] * self.best_size[2],
|
||||
self.best_size[1] * self.best_size[2])
|
||||
|
||||
Image.Image.load(self)
|
||||
if not self.tile:
|
||||
return
|
||||
self.load_prepare()
|
||||
# This is likely NOT the best way to do it, but whatever.
|
||||
im = self.icns.getimage(self.best_size)
|
||||
|
||||
# If this is a PNG or JPEG 2000, it won't be loaded yet
|
||||
im.load()
|
||||
|
||||
self.im = im.im
|
||||
self.mode = im.mode
|
||||
self.size = im.size
|
||||
self.fp = None
|
||||
self.icns = None
|
||||
self.tile = ()
|
||||
self.load_end()
|
||||
|
||||
Image.register_open("ICNS", IcnsImageFile, lambda x: x[:4] == b'icns')
|
||||
Image.register_extension("ICNS", '.icns')
|
||||
|
||||
if __name__ == '__main__':
|
||||
import os, sys
|
||||
imf = IcnsImageFile(open(sys.argv[1], 'rb'))
|
||||
for size in imf.info['sizes']:
|
||||
imf.size = size
|
||||
imf.load()
|
||||
im = imf.im
|
||||
im.save('out-%s-%s-%s.png' % size)
|
||||
im = Image.open(open(sys.argv[1], "rb"))
|
||||
im.save("out.png")
|
||||
if sys.platform == 'win32':
|
||||
os.startfile("out.png")
|
|
@ -1,233 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# Windows Icon support for PIL
|
||||
#
|
||||
# History:
|
||||
# 96-05-27 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1996.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis <casadebender@gmail.com>.
|
||||
# https://code.google.com/p/casadebender/wiki/Win32IconImagePlugin
|
||||
#
|
||||
# Icon format references:
|
||||
# * http://en.wikipedia.org/wiki/ICO_(file_format)
|
||||
# * http://msdn.microsoft.com/en-us/library/ms997538.aspx
|
||||
|
||||
|
||||
__version__ = "0.1"
|
||||
|
||||
from PIL import Image, ImageFile, BmpImagePlugin, PngImagePlugin, _binary
|
||||
from math import log, ceil
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
i8 = _binary.i8
|
||||
i16 = _binary.i16le
|
||||
i32 = _binary.i32le
|
||||
|
||||
_MAGIC = b"\0\0\1\0"
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:4] == _MAGIC
|
||||
|
||||
|
||||
class IcoFile:
|
||||
def __init__(self, buf):
|
||||
"""
|
||||
Parse image from file-like object containing ico file data
|
||||
"""
|
||||
|
||||
# check magic
|
||||
s = buf.read(6)
|
||||
if not _accept(s):
|
||||
raise SyntaxError("not an ICO file")
|
||||
|
||||
self.buf = buf
|
||||
self.entry = []
|
||||
|
||||
# Number of items in file
|
||||
self.nb_items = i16(s[4:])
|
||||
|
||||
# Get headers for each item
|
||||
for i in range(self.nb_items):
|
||||
s = buf.read(16)
|
||||
|
||||
icon_header = {
|
||||
'width': i8(s[0]),
|
||||
'height': i8(s[1]),
|
||||
'nb_color': i8(s[2]), # Number of colors in image (0 if >=8bpp)
|
||||
'reserved': i8(s[3]),
|
||||
'planes': i16(s[4:]),
|
||||
'bpp': i16(s[6:]),
|
||||
'size': i32(s[8:]),
|
||||
'offset': i32(s[12:])
|
||||
}
|
||||
|
||||
# See Wikipedia
|
||||
for j in ('width', 'height'):
|
||||
if not icon_header[j]:
|
||||
icon_header[j] = 256
|
||||
|
||||
# See Wikipedia notes about color depth.
|
||||
# We need this just to differentiate images with equal sizes
|
||||
icon_header['color_depth'] = (icon_header['bpp'] or (icon_header['nb_color'] != 0 and ceil(log(icon_header['nb_color'],2))) or 256)
|
||||
|
||||
icon_header['dim'] = (icon_header['width'], icon_header['height'])
|
||||
icon_header['square'] = icon_header['width'] * icon_header['height']
|
||||
|
||||
self.entry.append(icon_header)
|
||||
|
||||
self.entry = sorted(self.entry, key=lambda x: x['color_depth'])
|
||||
# ICO images are usually squares
|
||||
# self.entry = sorted(self.entry, key=lambda x: x['width'])
|
||||
self.entry = sorted(self.entry, key=lambda x: x['square'])
|
||||
self.entry.reverse()
|
||||
|
||||
def sizes(self):
|
||||
"""
|
||||
Get a list of all available icon sizes and color depths.
|
||||
"""
|
||||
return set((h['width'], h['height']) for h in self.entry)
|
||||
|
||||
def getimage(self, size, bpp=False):
|
||||
"""
|
||||
Get an image from the icon
|
||||
"""
|
||||
for (i, h) in enumerate(self.entry):
|
||||
if size == h['dim'] and (bpp == False or bpp == h['color_depth']):
|
||||
return self.frame(i)
|
||||
return self.frame(0)
|
||||
|
||||
def frame(self, idx):
|
||||
"""
|
||||
Get an image from frame idx
|
||||
"""
|
||||
|
||||
header = self.entry[idx]
|
||||
|
||||
self.buf.seek(header['offset'])
|
||||
data = self.buf.read(8)
|
||||
self.buf.seek(header['offset'])
|
||||
|
||||
if data[:8] == PngImagePlugin._MAGIC:
|
||||
# png frame
|
||||
im = PngImagePlugin.PngImageFile(self.buf)
|
||||
else:
|
||||
# XOR + AND mask bmp frame
|
||||
im = BmpImagePlugin.DibImageFile(self.buf)
|
||||
|
||||
# change tile dimension to only encompass XOR image
|
||||
im.size = (im.size[0], int(im.size[1] / 2))
|
||||
d, e, o, a = im.tile[0]
|
||||
im.tile[0] = d, (0,0) + im.size, o, a
|
||||
|
||||
# figure out where AND mask image starts
|
||||
mode = a[0]
|
||||
bpp = 8
|
||||
for k in BmpImagePlugin.BIT2MODE.keys():
|
||||
if mode == BmpImagePlugin.BIT2MODE[k][1]:
|
||||
bpp = k
|
||||
break
|
||||
|
||||
if 32 == bpp:
|
||||
# 32-bit color depth icon image allows semitransparent areas
|
||||
# PIL's DIB format ignores transparency bits, recover them
|
||||
# The DIB is packed in BGRX byte order where X is the alpha channel
|
||||
|
||||
# Back up to start of bmp data
|
||||
self.buf.seek(o)
|
||||
# extract every 4th byte (eg. 3,7,11,15,...)
|
||||
alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]
|
||||
|
||||
# convert to an 8bpp grayscale image
|
||||
mask = Image.frombuffer(
|
||||
'L', # 8bpp
|
||||
im.size, # (w, h)
|
||||
alpha_bytes, # source chars
|
||||
'raw', # raw decoder
|
||||
('L', 0, -1) # 8bpp inverted, unpadded, reversed
|
||||
)
|
||||
else:
|
||||
# get AND image from end of bitmap
|
||||
w = im.size[0]
|
||||
if (w % 32) > 0:
|
||||
# bitmap row data is aligned to word boundaries
|
||||
w += 32 - (im.size[0] % 32)
|
||||
|
||||
# the total mask data is padded row size * height / bits per char
|
||||
|
||||
and_mask_offset = o + int(im.size[0] * im.size[1] * (bpp / 8.0))
|
||||
total_bytes = int((w * im.size[1]) / 8)
|
||||
|
||||
self.buf.seek(and_mask_offset)
|
||||
maskData = self.buf.read(total_bytes)
|
||||
|
||||
# convert raw data to image
|
||||
mask = Image.frombuffer(
|
||||
'1', # 1 bpp
|
||||
im.size, # (w, h)
|
||||
maskData, # source chars
|
||||
'raw', # raw decoder
|
||||
('1;I', int(w/8), -1) # 1bpp inverted, padded, reversed
|
||||
)
|
||||
|
||||
# now we have two images, im is XOR image and mask is AND image
|
||||
|
||||
# apply mask image as alpha channel
|
||||
im = im.convert('RGBA')
|
||||
im.putalpha(mask)
|
||||
|
||||
return im
|
||||
|
||||
##
|
||||
# Image plugin for Windows Icon files.
|
||||
|
||||
class IcoImageFile(ImageFile.ImageFile):
|
||||
"""
|
||||
PIL read-only image support for Microsoft Windows .ico files.
|
||||
|
||||
By default the largest resolution image in the file will be loaded. This can
|
||||
be changed by altering the 'size' attribute before calling 'load'.
|
||||
|
||||
The info dictionary has a key 'sizes' that is a list of the sizes available
|
||||
in the icon file.
|
||||
|
||||
Handles classic, XP and Vista icon formats.
|
||||
|
||||
This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis <casadebender@gmail.com>.
|
||||
https://code.google.com/p/casadebender/wiki/Win32IconImagePlugin
|
||||
"""
|
||||
format = "ICO"
|
||||
format_description = "Windows Icon"
|
||||
|
||||
def _open(self):
|
||||
self.ico = IcoFile(self.fp)
|
||||
self.info['sizes'] = self.ico.sizes()
|
||||
self.size = self.ico.entry[0]['dim']
|
||||
self.load()
|
||||
|
||||
def load(self):
|
||||
im = self.ico.getimage(self.size)
|
||||
# if tile is PNG, it won't really be loaded yet
|
||||
im.load()
|
||||
self.im = im.im
|
||||
self.mode = im.mode
|
||||
self.size = im.size
|
||||
|
||||
|
||||
def load_seek(self):
|
||||
# Flag the ImageFile.Parser so that it just does all the decoding at the end.
|
||||
pass
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
Image.register_open("ICO", IcoImageFile, _accept)
|
||||
Image.register_extension("ICO", ".ico")
|
|
@ -1,342 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# IFUNC IM file handling for PIL
|
||||
#
|
||||
# history:
|
||||
# 1995-09-01 fl Created.
|
||||
# 1997-01-03 fl Save palette images
|
||||
# 1997-01-08 fl Added sequence support
|
||||
# 1997-01-23 fl Added P and RGB save support
|
||||
# 1997-05-31 fl Read floating point images
|
||||
# 1997-06-22 fl Save floating point images
|
||||
# 1997-08-27 fl Read and save 1-bit images
|
||||
# 1998-06-25 fl Added support for RGB+LUT images
|
||||
# 1998-07-02 fl Added support for YCC images
|
||||
# 1998-07-15 fl Renamed offset attribute to avoid name clash
|
||||
# 1998-12-29 fl Added I;16 support
|
||||
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
|
||||
# 2003-09-26 fl Added LA/PA support
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1995-2001 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
__version__ = "0.7"
|
||||
|
||||
import re
|
||||
from PIL import Image, ImageFile, ImagePalette
|
||||
from PIL._binary import i8, o8
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Standard tags
|
||||
|
||||
COMMENT = "Comment"
|
||||
DATE = "Date"
|
||||
EQUIPMENT = "Digitalization equipment"
|
||||
FRAMES = "File size (no of images)"
|
||||
LUT = "Lut"
|
||||
NAME = "Name"
|
||||
SCALE = "Scale (x,y)"
|
||||
SIZE = "Image size (x*y)"
|
||||
MODE = "Image type"
|
||||
|
||||
TAGS = { COMMENT:0, DATE:0, EQUIPMENT:0, FRAMES:0, LUT:0, NAME:0,
|
||||
SCALE:0, SIZE:0, MODE:0 }
|
||||
|
||||
OPEN = {
|
||||
# ifunc93/p3cfunc formats
|
||||
"0 1 image": ("1", "1"),
|
||||
"L 1 image": ("1", "1"),
|
||||
"Greyscale image": ("L", "L"),
|
||||
"Grayscale image": ("L", "L"),
|
||||
"RGB image": ("RGB", "RGB;L"),
|
||||
"RLB image": ("RGB", "RLB"),
|
||||
"RYB image": ("RGB", "RLB"),
|
||||
"B1 image": ("1", "1"),
|
||||
"B2 image": ("P", "P;2"),
|
||||
"B4 image": ("P", "P;4"),
|
||||
"X 24 image": ("RGB", "RGB"),
|
||||
"L 32 S image": ("I", "I;32"),
|
||||
"L 32 F image": ("F", "F;32"),
|
||||
# old p3cfunc formats
|
||||
"RGB3 image": ("RGB", "RGB;T"),
|
||||
"RYB3 image": ("RGB", "RYB;T"),
|
||||
# extensions
|
||||
"LA image": ("LA", "LA;L"),
|
||||
"RGBA image": ("RGBA", "RGBA;L"),
|
||||
"RGBX image": ("RGBX", "RGBX;L"),
|
||||
"CMYK image": ("CMYK", "CMYK;L"),
|
||||
"YCC image": ("YCbCr", "YCbCr;L"),
|
||||
}
|
||||
|
||||
# ifunc95 extensions
|
||||
for i in ["8", "8S", "16", "16S", "32", "32F"]:
|
||||
OPEN["L %s image" % i] = ("F", "F;%s" % i)
|
||||
OPEN["L*%s image" % i] = ("F", "F;%s" % i)
|
||||
for i in ["16", "16L", "16B"]:
|
||||
OPEN["L %s image" % i] = ("I;%s" % i, "I;%s" % i)
|
||||
OPEN["L*%s image" % i] = ("I;%s" % i, "I;%s" % i)
|
||||
for i in ["32S"]:
|
||||
OPEN["L %s image" % i] = ("I", "I;%s" % i)
|
||||
OPEN["L*%s image" % i] = ("I", "I;%s" % i)
|
||||
for i in range(2, 33):
|
||||
OPEN["L*%s image" % i] = ("F", "F;%s" % i)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Read IM directory
|
||||
|
||||
split = re.compile(br"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$")
|
||||
|
||||
def number(s):
|
||||
try:
|
||||
return int(s)
|
||||
except ValueError:
|
||||
return float(s)
|
||||
|
||||
##
|
||||
# Image plugin for the IFUNC IM file format.
|
||||
|
||||
class ImImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "IM"
|
||||
format_description = "IFUNC Image Memory"
|
||||
|
||||
def _open(self):
|
||||
|
||||
# Quick rejection: if there's not an LF among the first
|
||||
# 100 bytes, this is (probably) not a text header.
|
||||
|
||||
if b"\n" not in self.fp.read(100):
|
||||
raise SyntaxError("not an IM file")
|
||||
self.fp.seek(0)
|
||||
|
||||
n = 0
|
||||
|
||||
# Default values
|
||||
self.info[MODE] = "L"
|
||||
self.info[SIZE] = (512, 512)
|
||||
self.info[FRAMES] = 1
|
||||
|
||||
self.rawmode = "L"
|
||||
|
||||
while True:
|
||||
|
||||
s = self.fp.read(1)
|
||||
|
||||
# Some versions of IFUNC use \n\r instead of \r\n...
|
||||
if s == b"\r":
|
||||
continue
|
||||
|
||||
if not s or s == b'\0' or s == b'\x1A':
|
||||
break
|
||||
|
||||
# FIXME: this may read whole file if not a text file
|
||||
s = s + self.fp.readline()
|
||||
|
||||
if len(s) > 100:
|
||||
raise SyntaxError("not an IM file")
|
||||
|
||||
if s[-2:] == b'\r\n':
|
||||
s = s[:-2]
|
||||
elif s[-1:] == b'\n':
|
||||
s = s[:-1]
|
||||
|
||||
try:
|
||||
m = split.match(s)
|
||||
except re.error as v:
|
||||
raise SyntaxError("not an IM file")
|
||||
|
||||
if m:
|
||||
|
||||
k, v = m.group(1,2)
|
||||
|
||||
# Don't know if this is the correct encoding, but a decent guess
|
||||
# (I guess)
|
||||
k = k.decode('latin-1', 'replace')
|
||||
v = v.decode('latin-1', 'replace')
|
||||
|
||||
# Convert value as appropriate
|
||||
if k in [FRAMES, SCALE, SIZE]:
|
||||
v = v.replace("*", ",")
|
||||
v = tuple(map(number, v.split(",")))
|
||||
if len(v) == 1:
|
||||
v = v[0]
|
||||
elif k == MODE and v in OPEN:
|
||||
v, self.rawmode = OPEN[v]
|
||||
|
||||
# Add to dictionary. Note that COMMENT tags are
|
||||
# combined into a list of strings.
|
||||
if k == COMMENT:
|
||||
if k in self.info:
|
||||
self.info[k].append(v)
|
||||
else:
|
||||
self.info[k] = [v]
|
||||
else:
|
||||
self.info[k] = v
|
||||
|
||||
if k in TAGS:
|
||||
n += 1
|
||||
|
||||
else:
|
||||
|
||||
raise SyntaxError("Syntax error in IM header: " + s.decode('ascii', 'replace'))
|
||||
|
||||
if not n:
|
||||
raise SyntaxError("Not an IM file")
|
||||
|
||||
# Basic attributes
|
||||
self.size = self.info[SIZE]
|
||||
self.mode = self.info[MODE]
|
||||
|
||||
# Skip forward to start of image data
|
||||
while s and s[0:1] != b'\x1A':
|
||||
s = self.fp.read(1)
|
||||
if not s:
|
||||
raise SyntaxError("File truncated")
|
||||
|
||||
if LUT in self.info:
|
||||
# convert lookup table to palette or lut attribute
|
||||
palette = self.fp.read(768)
|
||||
greyscale = 1 # greyscale palette
|
||||
linear = 1 # linear greyscale palette
|
||||
for i in range(256):
|
||||
if palette[i] == palette[i+256] == palette[i+512]:
|
||||
if i8(palette[i]) != i:
|
||||
linear = 0
|
||||
else:
|
||||
greyscale = 0
|
||||
if self.mode == "L" or self.mode == "LA":
|
||||
if greyscale:
|
||||
if not linear:
|
||||
self.lut = [i8(c) for c in palette[:256]]
|
||||
else:
|
||||
if self.mode == "L":
|
||||
self.mode = self.rawmode = "P"
|
||||
elif self.mode == "LA":
|
||||
self.mode = self.rawmode = "PA"
|
||||
self.palette = ImagePalette.raw("RGB;L", palette)
|
||||
elif self.mode == "RGB":
|
||||
if not greyscale or not linear:
|
||||
self.lut = [i8(c) for c in palette]
|
||||
|
||||
self.frame = 0
|
||||
|
||||
self.__offset = offs = self.fp.tell()
|
||||
|
||||
self.__fp = self.fp # FIXME: hack
|
||||
|
||||
if self.rawmode[:2] == "F;":
|
||||
|
||||
# ifunc95 formats
|
||||
try:
|
||||
# use bit decoder (if necessary)
|
||||
bits = int(self.rawmode[2:])
|
||||
if bits not in [8, 16, 32]:
|
||||
self.tile = [("bit", (0,0)+self.size, offs,
|
||||
(bits, 8, 3, 0, -1))]
|
||||
return
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
if self.rawmode in ["RGB;T", "RYB;T"]:
|
||||
# Old LabEye/3PC files. Would be very surprised if anyone
|
||||
# ever stumbled upon such a file ;-)
|
||||
size = self.size[0] * self.size[1]
|
||||
self.tile = [("raw", (0,0)+self.size, offs, ("G", 0, -1)),
|
||||
("raw", (0,0)+self.size, offs+size, ("R", 0, -1)),
|
||||
("raw", (0,0)+self.size, offs+2*size, ("B", 0, -1))]
|
||||
else:
|
||||
# LabEye/IFUNC files
|
||||
self.tile = [("raw", (0,0)+self.size, offs, (self.rawmode, 0, -1))]
|
||||
|
||||
def seek(self, frame):
|
||||
|
||||
if frame < 0 or frame >= self.info[FRAMES]:
|
||||
raise EOFError("seek outside sequence")
|
||||
|
||||
if self.frame == frame:
|
||||
return
|
||||
|
||||
self.frame = frame
|
||||
|
||||
if self.mode == "1":
|
||||
bits = 1
|
||||
else:
|
||||
bits = 8 * len(self.mode)
|
||||
|
||||
size = ((self.size[0] * bits + 7) // 8) * self.size[1]
|
||||
offs = self.__offset + frame * size
|
||||
|
||||
self.fp = self.__fp
|
||||
|
||||
self.tile = [("raw", (0,0)+self.size, offs, (self.rawmode, 0, -1))]
|
||||
|
||||
def tell(self):
|
||||
|
||||
return self.frame
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Save IM files
|
||||
|
||||
SAVE = {
|
||||
# mode: (im type, raw mode)
|
||||
"1": ("0 1", "1"),
|
||||
"L": ("Greyscale", "L"),
|
||||
"LA": ("LA", "LA;L"),
|
||||
"P": ("Greyscale", "P"),
|
||||
"PA": ("LA", "PA;L"),
|
||||
"I": ("L 32S", "I;32S"),
|
||||
"I;16": ("L 16", "I;16"),
|
||||
"I;16L": ("L 16L", "I;16L"),
|
||||
"I;16B": ("L 16B", "I;16B"),
|
||||
"F": ("L 32F", "F;32F"),
|
||||
"RGB": ("RGB", "RGB;L"),
|
||||
"RGBA": ("RGBA", "RGBA;L"),
|
||||
"RGBX": ("RGBX", "RGBX;L"),
|
||||
"CMYK": ("CMYK", "CMYK;L"),
|
||||
"YCbCr": ("YCC", "YCbCr;L")
|
||||
}
|
||||
|
||||
def _save(im, fp, filename, check=0):
|
||||
|
||||
try:
|
||||
type, rawmode = SAVE[im.mode]
|
||||
except KeyError:
|
||||
raise ValueError("Cannot save %s images as IM" % im.mode)
|
||||
|
||||
try:
|
||||
frames = im.encoderinfo["frames"]
|
||||
except KeyError:
|
||||
frames = 1
|
||||
|
||||
if check:
|
||||
return check
|
||||
|
||||
fp.write(("Image type: %s image\r\n" % type).encode('ascii'))
|
||||
if filename:
|
||||
fp.write(("Name: %s\r\n" % filename).encode('ascii'))
|
||||
fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode('ascii'))
|
||||
fp.write(("File size (no of images): %d\r\n" % frames).encode('ascii'))
|
||||
if im.mode == "P":
|
||||
fp.write(b"Lut: 1\r\n")
|
||||
fp.write(b"\000" * (511-fp.tell()) + b"\032")
|
||||
if im.mode == "P":
|
||||
fp.write(im.im.getpalette("RGB", "RGB;L")) # 768 bytes
|
||||
ImageFile._save(im, fp, [("raw", (0,0)+im.size, 0, (rawmode, 0, -1))])
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Registry
|
||||
|
||||
Image.register_open("IM", ImImageFile)
|
||||
Image.register_save("IM", _save)
|
||||
|
||||
Image.register_extension("IM", ".im")
|
File diff suppressed because it is too large
|
@ -1,283 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# standard channel operations
|
||||
#
|
||||
# History:
|
||||
# 1996-03-24 fl Created
|
||||
# 1996-08-13 fl Added logical operations (for "1" images)
|
||||
# 2000-10-12 fl Added offset method (from Image.py)
|
||||
#
|
||||
# Copyright (c) 1997-2000 by Secret Labs AB
|
||||
# Copyright (c) 1996-2000 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
|
||||
|
||||
def constant(image, value):
|
||||
"""Fill a channel with a given grey level.
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
return Image.new("L", image.size, value)
|
||||
|
||||
|
||||
def duplicate(image):
|
||||
"""Copy a channel. Alias for :py:meth:`PIL.Image.Image.copy`.
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
return image.copy()
|
||||
|
||||
|
||||
def invert(image):
|
||||
"""
|
||||
Invert an image (channel).
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = MAX - image
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image.load()
|
||||
return image._new(image.im.chop_invert())
|
||||
|
||||
|
||||
def lighter(image1, image2):
|
||||
"""
|
||||
Compares the two images, pixel by pixel, and returns a new image containing
|
||||
the lighter values.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = max(image1, image2)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_lighter(image2.im))
|
||||
|
||||
|
||||
def darker(image1, image2):
|
||||
"""
|
||||
Compares the two images, pixel by pixel, and returns a new image
|
||||
containing the darker values.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = min(image1, image2)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_darker(image2.im))
|
||||
|
||||
|
||||
def difference(image1, image2):
|
||||
"""
|
||||
Returns the absolute value of the pixel-by-pixel difference between the two
|
||||
images.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = abs(image1 - image2)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_difference(image2.im))
|
||||
|
||||
|
||||
def multiply(image1, image2):
|
||||
"""
|
||||
Superimposes two images on top of each other.
|
||||
|
||||
If you multiply an image with a solid black image, the result is black. If
|
||||
you multiply with a solid white image, the image is unaffected.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = image1 * image2 / MAX
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_multiply(image2.im))
|
||||
|
||||
|
||||
def screen(image1, image2):
|
||||
"""
|
||||
Superimposes two inverted images on top of each other.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = MAX - ((MAX - image1) * (MAX - image2) / MAX)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_screen(image2.im))
|
||||
|
||||
|
||||
def add(image1, image2, scale=1.0, offset=0):
|
||||
"""
|
||||
Adds two images, dividing the result by scale and adding the
|
||||
offset. If omitted, scale defaults to 1.0, and offset to 0.0.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = ((image1 + image2) / scale + offset)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_add(image2.im, scale, offset))
|
||||
|
||||
|
||||
def subtract(image1, image2, scale=1.0, offset=0):
|
||||
"""
|
||||
Subtracts two images, dividing the result by scale and adding the
|
||||
offset. If omitted, scale defaults to 1.0, and offset to 0.0.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = ((image1 - image2) / scale + offset)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_subtract(image2.im, scale, offset))
|
||||
|
||||
|
||||
def add_modulo(image1, image2):
|
||||
"""Add two images, without clipping the result.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = ((image1 + image2) % MAX)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_add_modulo(image2.im))
|
||||
|
||||
|
||||
def subtract_modulo(image1, image2):
|
||||
"""Subtract two images, without clipping the result.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = ((image1 - image2) % MAX)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_subtract_modulo(image2.im))
|
||||
|
||||
|
||||
def logical_and(image1, image2):
|
||||
"""Logical AND between two images.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = ((image1 and image2) % MAX)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_and(image2.im))
|
||||
|
||||
|
||||
def logical_or(image1, image2):
|
||||
"""Logical OR between two images.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = ((image1 or image2) % MAX)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_or(image2.im))
|
||||
|
||||
|
||||
def logical_xor(image1, image2):
|
||||
"""Logical XOR between two images.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
out = ((bool(image1) != bool(image2)) % MAX)
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
image1.load()
|
||||
image2.load()
|
||||
return image1._new(image1.im.chop_xor(image2.im))
|
||||
|
||||
|
||||
def blend(image1, image2, alpha):
|
||||
"""Blend images using constant transparency weight. Alias for
|
||||
:py:meth:`PIL.Image.Image.blend`.
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
return Image.blend(image1, image2, alpha)
|
||||
|
||||
|
||||
def composite(image1, image2, mask):
|
||||
"""Create composite using transparency mask. Alias for
|
||||
:py:meth:`PIL.Image.Image.composite`.
|
||||
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
return Image.composite(image1, image2, mask)
|
||||
|
||||
|
||||
def offset(image, xoffset, yoffset=None):
|
||||
"""Returns a copy of the image where data has been offset by the given
|
||||
distances. Data wraps around the edges. If **yoffset** is omitted, it
|
||||
is assumed to be equal to **xoffset**.
|
||||
|
||||
:param xoffset: The horizontal distance.
|
||||
:param yoffset: The vertical distance. If omitted, both
|
||||
distances are set to the same value.
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
|
||||
if yoffset is None:
|
||||
yoffset = xoffset
|
||||
image.load()
|
||||
return image._new(image.im.offset(xoffset, yoffset))
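# A hypothetical sketch combining a few of the operations above on synthetic
# images (not part of the original module).

def _example_chops():
    im1 = Image.new("L", (64, 64), 64)
    im2 = Image.new("L", (64, 64), 192)
    diff = difference(im1, im2)       # constant 128 everywhere
    rolled = offset(diff, 8, 8)       # shifted copy, wrapping at the edges
    return lighter(diff, rolled)      # pixel-wise maximum of the two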
|
|
@ -1,952 +0,0 @@
|
|||
"""
|
||||
The Python Imaging Library.
|
||||
$Id$
|
||||
|
||||
Optional color management support, based on Kevin Cazabon's PyCMS
|
||||
library.
|
||||
|
||||
History:
|
||||
2009-03-08 fl Added to PIL.
|
||||
|
||||
Copyright (C) 2002-2003 Kevin Cazabon
|
||||
Copyright (c) 2009 by Fredrik Lundh
|
||||
|
||||
See the README file for information on usage and redistribution. See
|
||||
below for the original description.
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
DESCRIPTION = """
|
||||
pyCMS
|
||||
|
||||
a Python / PIL interface to the littleCMS ICC Color Management System
|
||||
Copyright (C) 2002-2003 Kevin Cazabon
|
||||
kevin@cazabon.com
|
||||
http://www.cazabon.com
|
||||
|
||||
pyCMS home page: http://www.cazabon.com/pyCMS
|
||||
littleCMS home page: http://www.littlecms.com
|
||||
(littleCMS is Copyright (C) 1998-2001 Marti Maria)
|
||||
|
||||
Originally released under LGPL. Graciously donated to PIL in
|
||||
March 2009, for distribution under the standard PIL license
|
||||
|
||||
The pyCMS.py module provides a "clean" interface between Python/PIL and
|
||||
pyCMSdll, taking care of some of the more complex handling of the direct
|
||||
pyCMSdll functions, as well as error-checking and making sure that all
|
||||
relevant data is kept together.
|
||||
|
||||
While it is possible to call pyCMSdll functions directly, it's not highly
|
||||
recommended.
|
||||
|
||||
Version History:
|
||||
|
||||
1.0.0 pil Oct 2013 Port to LCMS 2.
|
||||
|
||||
0.1.0 pil mod March 10, 2009
|
||||
|
||||
Renamed display profile to proof profile. The proof
|
||||
profile is the profile of the device that is being
|
||||
simulated, not the profile of the device which is
|
||||
actually used to display/print the final simulation
|
||||
(that'd be the output profile) - also see LCMSAPI.txt
|
||||
input colorspace -> using 'renderingIntent' -> proof
|
||||
colorspace -> using 'proofRenderingIntent' -> output
|
||||
colorspace
|
||||
|
||||
Added LCMS FLAGS support.
|
||||
Added FLAGS["SOFTPROOFING"] as default flag for
|
||||
buildProofTransform (otherwise the proof profile/intent
|
||||
would be ignored).
|
||||
|
||||
0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms
|
||||
|
||||
0.0.2 alpha Jan 6, 2002
|
||||
|
||||
Added try/except statements around type() checks of
|
||||
potential CObjects... Python won't let you use type()
|
||||
on them, and raises a TypeError (stupid, if you ask
|
||||
me!)
|
||||
|
||||
Added buildProofTransformFromOpenProfiles() function.
|
||||
Additional fixes in DLL, see DLL code for details.
|
||||
|
||||
0.0.1 alpha first public release, Dec. 26, 2002
|
||||
|
||||
Known to-do list with current version (of Python interface, not pyCMSdll):
|
||||
|
||||
none
|
||||
|
||||
"""
|
||||
|
||||
VERSION = "1.0.0 pil"
|
||||
|
||||
# --------------------------------------------------------------------.
|
||||
|
||||
from PIL import Image
|
||||
try:
|
||||
from PIL import _imagingcms
|
||||
except ImportError as ex:
|
||||
# Allow error import for doc purposes, but error out when accessing
|
||||
# anything in core.
|
||||
from PIL._util import import_err
|
||||
_imagingcms = import_err(ex)
|
||||
from PIL._util import isStringType
|
||||
|
||||
core = _imagingcms
|
||||
|
||||
#
|
||||
# intent/direction values
|
||||
|
||||
INTENT_PERCEPTUAL = 0
|
||||
INTENT_RELATIVE_COLORIMETRIC = 1
|
||||
INTENT_SATURATION = 2
|
||||
INTENT_ABSOLUTE_COLORIMETRIC = 3
|
||||
|
||||
DIRECTION_INPUT = 0
|
||||
DIRECTION_OUTPUT = 1
|
||||
DIRECTION_PROOF = 2
|
||||
|
||||
#
|
||||
# flags
|
||||
|
||||
FLAGS = {
|
||||
"MATRIXINPUT": 1,
|
||||
"MATRIXOUTPUT": 2,
|
||||
"MATRIXONLY": (1 | 2),
|
||||
"NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot
|
||||
# Don't create prelinearization tables on precalculated transforms
|
||||
# (internal use):
|
||||
"NOPRELINEARIZATION": 16,
|
||||
"GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink)
|
||||
"NOTCACHE": 64, # Inhibit 1-pixel cache
|
||||
"NOTPRECALC": 256,
|
||||
"NULLTRANSFORM": 512, # Don't transform anyway
|
||||
"HIGHRESPRECALC": 1024, # Use more memory to give better accurancy
|
||||
"LOWRESPRECALC": 2048, # Use less memory to minimize resouces
|
||||
"WHITEBLACKCOMPENSATION": 8192,
|
||||
"BLACKPOINTCOMPENSATION": 8192,
|
||||
"GAMUTCHECK": 4096, # Out of Gamut alarm
|
||||
"SOFTPROOFING": 16384, # Do softproofing
|
||||
"PRESERVEBLACK": 32768, # Black preservation
|
||||
"NODEFAULTRESOURCEDEF": 16777216, # CRD special
|
||||
"GRIDPOINTS": lambda n: ((n) & 0xFF) << 16 # Gridpoints
|
||||
}
|
||||
|
||||
_MAX_FLAG = 0
|
||||
for flag in FLAGS.values():
|
||||
if isinstance(flag, int):
|
||||
_MAX_FLAG = _MAX_FLAG | flag
|
||||
|
||||
|
||||
# --------------------------------------------------------------------.
|
||||
# Experimental PIL-level API
|
||||
# --------------------------------------------------------------------.
|
||||
|
||||
##
|
||||
# Profile.
|
||||
|
||||
class ImageCmsProfile:
|
||||
|
||||
def __init__(self, profile):
|
||||
# accepts a string (filename), a file-like object, or a low-level
|
||||
# profile object
|
||||
if isStringType(profile):
|
||||
self._set(core.profile_open(profile), profile)
|
||||
elif hasattr(profile, "read"):
|
||||
self._set(core.profile_frombytes(profile.read()))
|
||||
else:
|
||||
self._set(profile) # assume it's already a profile
|
||||
|
||||
def _set(self, profile, filename=None):
|
||||
self.profile = profile
|
||||
self.filename = filename
|
||||
if profile:
|
||||
self.product_name = None # profile.product_name
|
||||
self.product_info = None # profile.product_info
|
||||
else:
|
||||
self.product_name = None
|
||||
self.product_info = None
|
||||
|
||||
|
||||
class ImageCmsTransform(Image.ImagePointHandler):
|
||||
|
||||
"""Transform. This can be used with the procedural API, or with the
|
||||
standard Image.point() method.
|
||||
"""
|
||||
|
||||
def __init__(self, input, output, input_mode, output_mode,
|
||||
intent=INTENT_PERCEPTUAL, proof=None,
|
||||
proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, flags=0):
|
||||
if proof is None:
|
||||
self.transform = core.buildTransform(
|
||||
input.profile, output.profile,
|
||||
input_mode, output_mode,
|
||||
intent,
|
||||
flags
|
||||
)
|
||||
else:
|
||||
self.transform = core.buildProofTransform(
|
||||
input.profile, output.profile, proof.profile,
|
||||
input_mode, output_mode,
|
||||
intent, proof_intent,
|
||||
flags
|
||||
)
|
||||
# Note: inputMode and outputMode are for pyCMS compatibility only
|
||||
self.input_mode = self.inputMode = input_mode
|
||||
self.output_mode = self.outputMode = output_mode
|
||||
|
||||
def point(self, im):
|
||||
return self.apply(im)
|
||||
|
||||
def apply(self, im, imOut=None):
|
||||
im.load()
|
||||
if imOut is None:
|
||||
imOut = Image.new(self.output_mode, im.size, None)
|
||||
self.transform.apply(im.im.id, imOut.im.id)
|
||||
return imOut
|
||||
|
||||
def apply_in_place(self, im):
|
||||
im.load()
|
||||
if im.mode != self.output_mode:
|
||||
raise ValueError("mode mismatch") # wrong output mode
|
||||
self.transform.apply(im.im.id, im.im.id)
|
||||
return im
|
||||
|
||||
|
||||
def get_display_profile(handle=None):
|
||||
""" (experimental) Fetches the profile for the current display device.
|
||||
:returns: None if the profile is not known.
|
||||
"""
|
||||
|
||||
import sys
|
||||
if sys.platform == "win32":
|
||||
from PIL import ImageWin
|
||||
if isinstance(handle, ImageWin.HDC):
|
||||
profile = core.get_display_profile_win32(handle, 1)
|
||||
else:
|
||||
profile = core.get_display_profile_win32(handle or 0)
|
||||
else:
|
||||
try:
|
||||
get = _imagingcms.get_display_profile
|
||||
except AttributeError:
|
||||
return None
|
||||
else:
|
||||
profile = get()
|
||||
return ImageCmsProfile(profile)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------.
|
||||
# pyCMS compatible layer
|
||||
# --------------------------------------------------------------------.
|
||||
|
||||
class PyCMSError(Exception):
|
||||
|
||||
""" (pyCMS) Exception class.
|
||||
This is used for all errors in the pyCMS API. """
|
||||
pass
|
||||
|
||||
|
||||
def profileToProfile(
|
||||
im, inputProfile, outputProfile, renderingIntent=INTENT_PERCEPTUAL,
|
||||
outputMode=None, inPlace=0, flags=0):
|
||||
"""
|
||||
(pyCMS) Applies an ICC transformation to a given image, mapping from
|
||||
inputProfile to outputProfile.
|
||||
|
||||
If the input or output profiles specified are not valid filenames, a
|
||||
PyCMSError will be raised. If inPlace == TRUE and outputMode != im.mode,
|
||||
a PyCMSError will be raised. If an error occurs during application of
|
||||
the profiles, a PyCMSError will be raised. If outputMode is not a mode
|
||||
supported by the outputProfile (or by pyCMS), a PyCMSError will be
|
||||
raised.
|
||||
|
||||
This function applies an ICC transformation to im from inputProfile's
|
||||
color space to outputProfile's color space using the specified rendering
|
||||
intent to decide how to handle out-of-gamut colors.
|
||||
|
||||
OutputMode can be used to specify that a color mode conversion is to
|
||||
be done using these profiles, but the specified profiles must be able
|
||||
to handle that mode. I.e., if converting im from RGB to CMYK using
|
||||
profiles, the input profile must handle RGB data, and the output
|
||||
profile must handle CMYK data.
|
||||
|
||||
:param im: An open PIL image object (i.e. Image.new(...) or
|
||||
Image.open(...), etc.)
|
||||
:param inputProfile: String, as a valid filename path to the ICC input
|
||||
profile you wish to use for this image, or a profile object
|
||||
:param outputProfile: String, as a valid filename path to the ICC output
|
||||
profile you wish to use for this image, or a profile object
|
||||
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
||||
wish to use for the transform
|
||||
|
||||
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
|
||||
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
|
||||
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
|
||||
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
:param outputMode: A valid PIL mode for the output image (i.e. "RGB",
|
||||
"CMYK", etc.). Note: if rendering the image "inPlace", outputMode
|
||||
MUST be the same mode as the input, or omitted completely. If
|
||||
omitted, the outputMode will be the same as the mode of the input
|
||||
image (im.mode)
|
||||
:param inPlace: Boolean (1 = True, None or 0 = False). If True, the
|
||||
original image is modified in-place, and None is returned. If False
|
||||
(default), a new Image object is returned with the transform applied.
|
||||
:param flags: Integer (0-...) specifying additional flags
|
||||
:returns: Either None or a new PIL image object, depending on value of
|
||||
inPlace
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
if outputMode is None:
|
||||
outputMode = im.mode
|
||||
|
||||
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
|
||||
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
|
||||
|
||||
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
|
||||
raise PyCMSError(
|
||||
"flags must be an integer between 0 and %s" + _MAX_FLAG)
|
||||
|
||||
try:
|
||||
if not isinstance(inputProfile, ImageCmsProfile):
|
||||
inputProfile = ImageCmsProfile(inputProfile)
|
||||
if not isinstance(outputProfile, ImageCmsProfile):
|
||||
outputProfile = ImageCmsProfile(outputProfile)
|
||||
transform = ImageCmsTransform(
|
||||
inputProfile, outputProfile, im.mode, outputMode,
|
||||
renderingIntent, flags=flags
|
||||
)
|
||||
if inPlace:
|
||||
transform.apply_in_place(im)
|
||||
imOut = None
|
||||
else:
|
||||
imOut = transform.apply(im)
|
||||
except (IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
return imOut
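# A hypothetical usage sketch (not part of the original module). The ICC
# profile filenames are placeholders; real profile files are required for
# this to run.

def _example_profile_to_profile(im):
    return profileToProfile(
        im, "sRGB.icc", "printer_cmyk.icc",
        renderingIntent=INTENT_PERCEPTUAL, outputMode="CMYK")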
|
||||
|
||||
|
||||
def getOpenProfile(profileFilename):
|
||||
"""
|
||||
(pyCMS) Opens an ICC profile file.
|
||||
|
||||
The PyCMSProfile object can be passed back into pyCMS for use in creating
|
||||
transforms and such (as in ImageCms.buildTransformFromOpenProfiles()).
|
||||
|
||||
If profileFilename is not a valid filename for an ICC profile, a PyCMSError
|
||||
will be raised.
|
||||
|
||||
:param profileFilename: String, as a valid filename path to the ICC profile
|
||||
you wish to open, or a file-like object.
|
||||
:returns: A CmsProfile class object.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
try:
|
||||
return ImageCmsProfile(profileFilename)
|
||||
except (IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
|
||||
def buildTransform(
|
||||
inputProfile, outputProfile, inMode, outMode,
|
||||
renderingIntent=INTENT_PERCEPTUAL, flags=0):
|
||||
"""
|
||||
(pyCMS) Builds an ICC transform mapping from the inputProfile to the
|
||||
outputProfile. Use applyTransform to apply the transform to a given
|
||||
image.
|
||||
|
||||
If the input or output profiles specified are not valid filenames, a
|
||||
PyCMSError will be raised. If an error occurs during creation of the
|
||||
transform, a PyCMSError will be raised.
|
||||
|
||||
If inMode or outMode are not a mode supported by the outputProfile (or
|
||||
by pyCMS), a PyCMSError will be raised.
|
||||
|
||||
This function builds and returns an ICC transform from the inputProfile
|
||||
to the outputProfile using the renderingIntent to determine what to do
|
||||
with out-of-gamut colors. It will ONLY work for converting images that
|
||||
are in inMode to images that are in outMode color format (PIL mode,
|
||||
i.e. "RGB", "RGBA", "CMYK", etc.).
|
||||
|
||||
Building the transform is a fair part of the overhead in
|
||||
ImageCms.profileToProfile(), so if you're planning on converting multiple
|
||||
images using the same input/output settings, this can save you time.
|
||||
Once you have a transform object, it can be used with
|
||||
ImageCms.applyTransform() to convert images without the need to re-compute
|
||||
the lookup table for the transform.
|
||||
|
||||
The reason pyCMS returns a class object rather than a handle directly
|
||||
to the transform is that it needs to keep track of the PIL input/output
|
||||
modes that the transform is meant for. These attributes are stored in
|
||||
the "inMode" and "outMode" attributes of the object (which can be
|
||||
manually overridden if you really want to, but I don't know of any
|
||||
time that would be of use, or would even work).
|
||||
|
||||
:param inputProfile: String, as a valid filename path to the ICC input
|
||||
profile you wish to use for this transform, or a profile object
|
||||
:param outputProfile: String, as a valid filename path to the ICC output
|
||||
profile you wish to use for this transform, or a profile object
|
||||
:param inMode: String, as a valid PIL mode that the appropriate profile
|
||||
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
|
||||
:param outMode: String, as a valid PIL mode that the appropriate profile
|
||||
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
|
||||
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
||||
wish to use for the transform
|
||||
|
||||
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
|
||||
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
|
||||
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
|
||||
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
:param flags: Integer (0-...) specifying additional flags
|
||||
:returns: A CmsTransform class object.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
|
||||
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
|
||||
|
||||
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
|
||||
raise PyCMSError(
|
||||
"flags must be an integer between 0 and %s" + _MAX_FLAG)
|
||||
|
||||
try:
|
||||
if not isinstance(inputProfile, ImageCmsProfile):
|
||||
inputProfile = ImageCmsProfile(inputProfile)
|
||||
if not isinstance(outputProfile, ImageCmsProfile):
|
||||
outputProfile = ImageCmsProfile(outputProfile)
|
||||
return ImageCmsTransform(
|
||||
inputProfile, outputProfile, inMode, outMode,
|
||||
renderingIntent, flags=flags)
|
||||
except (IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
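# A hypothetical sketch of building one transform and reusing it across
# several images, as the docstring above recommends (not part of the original
# module). The profile filenames are placeholders; applyTransform() is
# defined further down in this module.

def _example_batch_convert(images):
    transform = buildTransform("sRGB.icc", "printer_cmyk.icc", "RGB", "CMYK")
    return [applyTransform(im, transform) for im in images]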
|
||||
|
||||
|
||||
def buildProofTransform(
|
||||
inputProfile, outputProfile, proofProfile, inMode, outMode,
|
||||
renderingIntent=INTENT_PERCEPTUAL,
|
||||
proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC,
|
||||
flags=FLAGS["SOFTPROOFING"]):
|
||||
"""
|
||||
(pyCMS) Builds an ICC transform mapping from the inputProfile to the
|
||||
outputProfile, but tries to simulate the result that would be
|
||||
obtained on the proofProfile device.
|
||||
|
||||
If the input, output, or proof profiles specified are not valid
|
||||
filenames, a PyCMSError will be raised.
|
||||
|
||||
If an error occurs during creation of the transform, a PyCMSError will
|
||||
be raised.
|
||||
|
||||
If inMode or outMode are not a mode supported by the outputProfile
|
||||
(or by pyCMS), a PyCMSError will be raised.
|
||||
|
||||
This function builds and returns an ICC transform from the inputProfile
|
||||
to the outputProfile, but tries to simulate the result that would be
|
||||
obtained on the proofProfile device using renderingIntent and
|
||||
proofRenderingIntent to determine what to do with out-of-gamut
|
||||
colors. This is known as "soft-proofing". It will ONLY work for
|
||||
converting images that are in inMode to images that are in outMode
|
||||
color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.).
|
||||
|
||||
Usage of the resulting transform object is exactly the same as with
|
||||
ImageCms.buildTransform().
|
||||
|
||||
Proof profiling is generally used when using an output device to get a
|
||||
good idea of what the final printed/displayed image would look like on
|
||||
the proofProfile device when it's quicker and easier to use the
|
||||
output device for judging color. Generally, this means that the
|
||||
output device is a monitor, or a dye-sub printer (etc.), and the simulated
|
||||
device is something more expensive, complicated, or time consuming
|
||||
(making it difficult to make a real print for color judgement purposes).
|
||||
|
||||
Soft-proofing basically functions by adjusting the colors on the
|
||||
output device to match the colors of the device being simulated. However,
|
||||
when the simulated device has a much wider gamut than the output
|
||||
device, you may obtain marginal results.
|
||||
|
||||
:param inputProfile: String, as a valid filename path to the ICC input
|
||||
profile you wish to use for this transform, or a profile object
|
||||
:param outputProfile: String, as a valid filename path to the ICC output
|
||||
(monitor, usually) profile you wish to use for this transform, or a
|
||||
profile object
|
||||
:param proofProfile: String, as a valid filename path to the ICC proof
|
||||
profile you wish to use for this transform, or a profile object
|
||||
:param inMode: String, as a valid PIL mode that the appropriate profile
|
||||
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
|
||||
:param outMode: String, as a valid PIL mode that the appropriate profile
|
||||
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
|
||||
:param renderingIntent: Integer (0-3) specifying the rendering intent you
|
||||
wish to use for the input->proof (simulated) transform
|
||||
|
||||
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
|
||||
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
|
||||
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
|
||||
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
:param proofRenderingIntent: Integer (0-3) specifying the rendering intent you
|
||||
wish to use for proof->output transform
|
||||
|
||||
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
|
||||
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
|
||||
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
|
||||
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
:param flags: Integer (0-...) specifying additional flags
|
||||
:returns: A CmsTransform class object.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
|
||||
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
|
||||
|
||||
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
|
||||
raise PyCMSError(
|
||||
"flags must be an integer between 0 and %s" + _MAX_FLAG)
|
||||
|
||||
try:
|
||||
if not isinstance(inputProfile, ImageCmsProfile):
|
||||
inputProfile = ImageCmsProfile(inputProfile)
|
||||
if not isinstance(outputProfile, ImageCmsProfile):
|
||||
outputProfile = ImageCmsProfile(outputProfile)
|
||||
if not isinstance(proofProfile, ImageCmsProfile):
|
||||
proofProfile = ImageCmsProfile(proofProfile)
|
||||
return ImageCmsTransform(
|
||||
inputProfile, outputProfile, inMode, outMode, renderingIntent,
|
||||
proofProfile, proofRenderingIntent, flags)
|
||||
except (IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
buildTransformFromOpenProfiles = buildTransform
|
||||
buildProofTransformFromOpenProfiles = buildProofTransform
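# Illustrative sketch (not part of the original module): how a soft-proofing
# transform built by buildProofTransform() might be used. The image and
# profile filenames are hypothetical placeholders.
def _example_soft_proofing():
    from PIL import Image
    im = Image.open("photo.jpg").convert("RGB")
    proof = buildProofTransform(
        "sRGB.icc", "monitor.icc", "printer.icc", "RGB", "RGB",
        renderingIntent=INTENT_PERCEPTUAL,
        proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC)
    # applyTransform() returns a new "RGB" image previewing the proof device
    return applyTransform(im, proof)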
|
||||
|
||||
|
||||
def applyTransform(im, transform, inPlace=0):
|
||||
"""
|
||||
(pyCMS) Applies a transform to a given image.
|
||||
|
||||
If im.mode != transform.inMode, a PyCMSError is raised.
|
||||
|
||||
If inPlace == True and transform.inMode != transform.outMode, a
|
||||
PyCMSError is raised.
|
||||
|
||||
If im.mode, transfer.inMode, or transfer.outMode is not supported by
|
||||
pyCMSdll or the profiles you used for the transform, a PyCMSError is
|
||||
raised.
|
||||
|
||||
If an error occurs while the transform is being applied, a PyCMSError
|
||||
is raised.
|
||||
|
||||
This function applies a pre-calculated transform (from
|
||||
ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles())
|
||||
to an image. The transform can be used for multiple images, saving
|
||||
considerable calculation time if doing the same conversion multiple times.
|
||||
|
||||
If you want to modify im in-place instead of receiving a new image as
|
||||
the return value, set inPlace to True. This can only be done if
|
||||
transform.inMode and transform.outMode are the same, because we can't
|
||||
change the mode in-place (the buffer sizes for some modes are
|
||||
different). The default behavior is to return a new Image object of
|
||||
the same dimensions in mode transform.outMode.
|
||||
|
||||
:param im: A PIL Image object, and im.mode must be the same as the inMode
|
||||
supported by the transform.
|
||||
:param transform: A valid CmsTransform class object
|
||||
:param inPlace: Bool (1 == True, 0 or None == False). If True, im is
|
||||
modified in place and None is returned; if False, a new Image object
|
||||
with the transform applied is returned (and im is not changed). The
|
||||
default is False.
|
||||
:returns: Either None, or a new PIL Image object, depending on the value of
|
||||
inPlace
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
try:
|
||||
if inPlace:
|
||||
transform.apply_in_place(im)
|
||||
imOut = None
|
||||
else:
|
||||
imOut = transform.apply(im)
|
||||
except (TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
return imOut
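# Illustrative sketch (not part of the original module): one transform reused
# for several images, plus the in-place variant. The .icc paths are
# hypothetical placeholders.
def _example_apply_transform(images):
    xform = buildTransform("sRGB.icc", "printer.icc", "RGB", "CMYK")
    converted = [applyTransform(im, xform) for im in images]
    # inPlace only works when inMode == outMode; it modifies each image
    # and returns None.
    rgb_xform = buildTransform("sRGB.icc", "adjusted-sRGB.icc", "RGB", "RGB")
    for im in images:
        applyTransform(im, rgb_xform, inPlace=1)
    return converted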
|
||||
|
||||
|
||||
def createProfile(colorSpace, colorTemp=-1):
|
||||
"""
|
||||
(pyCMS) Creates a profile.
|
||||
|
||||
If colorSpace not in ["LAB", "XYZ", "sRGB"], a PyCMSError is raised
|
||||
|
||||
If using LAB and colorTemp != a positive integer, a PyCMSError is raised.
|
||||
|
||||
If an error occurs while creating the profile, a PyCMSError is raised.
|
||||
|
||||
Use this function to create common profiles on-the-fly instead of
|
||||
having to supply a profile on disk and knowing the path to it. It
|
||||
returns a normal CmsProfile object that can be passed to
|
||||
ImageCms.buildTransformFromOpenProfiles() to create a transform to apply
|
||||
to images.
|
||||
|
||||
:param colorSpace: String, the color space of the profile you wish to
|
||||
create.
|
||||
Currently only "LAB", "XYZ", and "sRGB" are supported.
|
||||
:param colorTemp: Positive integer for the white point for the profile, in
|
||||
degrees Kelvin (e.g. 5000, 6500, 9600). If omitted, the default is
5000 K (the D50 illuminant). colorTemp is ONLY applied to LAB
profiles, and is ignored for XYZ and sRGB.
|
||||
:returns: A CmsProfile class object
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
if colorSpace not in ["LAB", "XYZ", "sRGB"]:
|
||||
raise PyCMSError(
|
||||
"Color space not supported for on-the-fly profile creation (%s)"
|
||||
% colorSpace)
|
||||
|
||||
if colorSpace == "LAB":
|
||||
try:
|
||||
colorTemp = float(colorTemp)
|
||||
except:
|
||||
raise PyCMSError(
|
||||
"Color temperature must be numeric, \"%s\" not valid"
|
||||
% colorTemp)
|
||||
|
||||
try:
|
||||
return core.createProfile(colorSpace, colorTemp)
|
||||
except (TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
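# Illustrative sketch (not part of the original module): creating profiles
# on the fly and passing them to buildTransformFromOpenProfiles(), as the
# docstring above suggests.
def _example_created_profiles():
    lab = createProfile("LAB", colorTemp=6500)  # LAB with a D65-ish white point
    srgb = createProfile("sRGB")                # colorTemp is ignored for sRGB
    return buildTransformFromOpenProfiles(srgb, lab, "RGB", "LAB")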
|
||||
|
||||
|
||||
def getProfileName(profile):
|
||||
"""
|
||||
|
||||
(pyCMS) Gets the internal product name for the given profile.
|
||||
|
||||
If profile isn't a valid CmsProfile object or filename to a profile,
|
||||
a PyCMSError is raised. If an error occurs while trying to obtain the
name tag, a PyCMSError is raised.
|
||||
|
||||
Use this function to obtain the INTERNAL name of the profile (stored
|
||||
in an ICC tag in the profile itself), usually the one used when the
|
||||
profile was originally created. Sometimes this tag also contains
|
||||
additional information supplied by the creator.
|
||||
|
||||
:param profile: EITHER a valid CmsProfile object, OR a string of the
|
||||
filename of an ICC profile.
|
||||
:returns: A string containing the internal name of the profile as stored
|
||||
in an ICC tag.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
try:
|
||||
# add an extra newline to preserve pyCMS compatibility
|
||||
if not isinstance(profile, ImageCmsProfile):
|
||||
profile = ImageCmsProfile(profile)
|
||||
# do it in Python, not C.
# In 1.x the name was "%s - %s" (model, manufacturer) or the description;
# but if the model and manufacturer were the same, or the model was
# long, just the model was used.
|
||||
model = profile.profile.product_model
|
||||
manufacturer = profile.profile.product_manufacturer
|
||||
|
||||
if not (model or manufacturer):
|
||||
return profile.profile.product_description + "\n"
|
||||
if not manufacturer or len(model) > 30:
|
||||
return model + "\n"
|
||||
return "%s - %s\n" % (model, manufacturer)
|
||||
|
||||
except (AttributeError, IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
|
||||
def getProfileInfo(profile):
|
||||
"""
|
||||
(pyCMS) Gets the internal product information for the given profile.
|
||||
|
||||
If profile isn't a valid CmsProfile object or filename to a profile,
|
||||
a PyCMSError is raised.
|
||||
|
||||
If an error occurs while trying to obtain the info tag, a PyCMSError
|
||||
is raised
|
||||
|
||||
Use this function to obtain the information stored in the profile's
|
||||
info tag. This often contains details about the profile, and how it
|
||||
was created, as supplied by the creator.
|
||||
|
||||
:param profile: EITHER a valid CmsProfile object, OR a string of the
|
||||
filename of an ICC profile.
|
||||
:returns: A string containing the internal profile information stored in
|
||||
an ICC tag.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
try:
|
||||
if not isinstance(profile, ImageCmsProfile):
|
||||
profile = ImageCmsProfile(profile)
|
||||
# add an extra newline to preserve pyCMS compatibility
|
||||
# Python, not C. The white point bits weren't working well,
# so we skip them.
|
||||
# // info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint
|
||||
description = profile.profile.product_description
|
||||
cpright = profile.profile.product_copyright
|
||||
arr = []
|
||||
for elt in (description, cpright):
|
||||
if elt:
|
||||
arr.append(elt)
|
||||
return "\r\n\r\n".join(arr) + "\r\n\r\n"
|
||||
|
||||
except (AttributeError, IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
|
||||
def getProfileCopyright(profile):
|
||||
"""
|
||||
(pyCMS) Gets the copyright for the given profile.
|
||||
|
||||
If profile isn't a valid CmsProfile object or filename to a profile,
|
||||
a PyCMSError is raised.
|
||||
|
||||
If an error occurs while trying to obtain the copyright tag, a PyCMSError
|
||||
is raised
|
||||
|
||||
Use this function to obtain the information stored in the profile's
|
||||
copyright tag.
|
||||
|
||||
:param profile: EITHER a valid CmsProfile object, OR a string of the
|
||||
filename of an ICC profile.
|
||||
:returns: A string containing the internal profile information stored in
|
||||
an ICC tag.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
try:
|
||||
# add an extra newline to preserve pyCMS compatibility
|
||||
if not isinstance(profile, ImageCmsProfile):
|
||||
profile = ImageCmsProfile(profile)
|
||||
return profile.profile.product_copyright + "\n"
|
||||
except (AttributeError, IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
|
||||
def getProfileManufacturer(profile):
|
||||
"""
|
||||
(pyCMS) Gets the manufacturer for the given profile.
|
||||
|
||||
If profile isn't a valid CmsProfile object or filename to a profile,
|
||||
a PyCMSError is raised.
|
||||
|
||||
If an error occurs while trying to obtain the manufacturer tag, a
|
||||
PyCMSError is raised
|
||||
|
||||
Use this function to obtain the information stored in the profile's
|
||||
manufacturer tag.
|
||||
|
||||
:param profile: EITHER a valid CmsProfile object, OR a string of the
|
||||
filename of an ICC profile.
|
||||
:returns: A string containing the internal profile information stored in
|
||||
an ICC tag.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
try:
|
||||
# add an extra newline to preserve pyCMS compatibility
|
||||
if not isinstance(profile, ImageCmsProfile):
|
||||
profile = ImageCmsProfile(profile)
|
||||
return profile.profile.product_manufacturer + "\n"
|
||||
except (AttributeError, IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
|
||||
def getProfileModel(profile):
|
||||
"""
|
||||
(pyCMS) Gets the model for the given profile.
|
||||
|
||||
If profile isn't a valid CmsProfile object or filename to a profile,
|
||||
a PyCMSError is raised.
|
||||
|
||||
If an error occurs while trying to obtain the model tag, a PyCMSError
|
||||
is raised
|
||||
|
||||
Use this function to obtain the information stored in the profile's
|
||||
model tag.
|
||||
|
||||
:param profile: EITHER a valid CmsProfile object, OR a string of the
|
||||
filename of an ICC profile.
|
||||
:returns: A string containing the internal profile information stored in
|
||||
an ICC tag.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
try:
|
||||
# add an extra newline to preserve pyCMS compatibility
|
||||
if not isinstance(profile, ImageCmsProfile):
|
||||
profile = ImageCmsProfile(profile)
|
||||
return profile.profile.product_model + "\n"
|
||||
except (AttributeError, IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
|
||||
def getProfileDescription(profile):
|
||||
"""
|
||||
(pyCMS) Gets the description for the given profile.
|
||||
|
||||
If profile isn't a valid CmsProfile object or filename to a profile,
|
||||
a PyCMSError is raised.
|
||||
|
||||
If an error occurs while trying to obtain the description tag, a PyCMSError
|
||||
is raised
|
||||
|
||||
Use this function to obtain the information stored in the profile's
|
||||
description tag.
|
||||
|
||||
:param profile: EITHER a valid CmsProfile object, OR a string of the
|
||||
filename of an ICC profile.
|
||||
:returns: A string containing the internal profile information stored in an
|
||||
ICC tag.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
try:
|
||||
# add an extra newline to preserve pyCMS compatibility
|
||||
if not isinstance(profile, ImageCmsProfile):
|
||||
profile = ImageCmsProfile(profile)
|
||||
return profile.profile.product_description + "\n"
|
||||
except (AttributeError, IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
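# Illustrative sketch (not part of the original module): the metadata getters
# above all accept either a filename or an ImageCmsProfile object and return
# a newline-terminated string. "printer.icc" is a hypothetical path.
def _example_profile_metadata(path="printer.icc"):
    profile = ImageCmsProfile(path)  # open once, query several tags
    return {
        "name": getProfileName(profile).strip(),
        "info": getProfileInfo(profile).strip(),
        "copyright": getProfileCopyright(profile).strip(),
        "description": getProfileDescription(profile).strip(),
    }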
|
||||
|
||||
|
||||
def getDefaultIntent(profile):
|
||||
"""
|
||||
(pyCMS) Gets the default intent name for the given profile.
|
||||
|
||||
If profile isn't a valid CmsProfile object or filename to a profile,
|
||||
a PyCMSError is raised.
|
||||
|
||||
If an error occurs while trying to obtain the default intent, a
|
||||
PyCMSError is raised.
|
||||
|
||||
Use this function to determine the default (and usually best optimized)
|
||||
rendering intent for this profile. Most profiles support multiple
|
||||
rendering intents, but are intended mostly for one type of conversion.
|
||||
If you wish to use a different intent than returned, use
|
||||
ImageCms.isIntentSupported() to verify it will work first.
|
||||
|
||||
:param profile: EITHER a valid CmsProfile object, OR a string of the
|
||||
filename of an ICC profile.
|
||||
:returns: Integer 0-3 specifying the default rendering intent for this
|
||||
profile.
|
||||
|
||||
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
|
||||
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
|
||||
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
|
||||
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
try:
|
||||
if not isinstance(profile, ImageCmsProfile):
|
||||
profile = ImageCmsProfile(profile)
|
||||
return profile.profile.rendering_intent
|
||||
except (AttributeError, IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
|
||||
|
||||
|
||||
def isIntentSupported(profile, intent, direction):
|
||||
"""
|
||||
(pyCMS) Checks if a given intent is supported.
|
||||
|
||||
Use this function to verify that you can use your desired
|
||||
renderingIntent with profile, and that profile can be used for the
|
||||
input/output/proof profile as you desire.
|
||||
|
||||
Some profiles are created specifically for one "direction", and cannot
|
||||
be used for others. Some profiles can only be used for certain
|
||||
rendering intents... so it's best to either verify this before trying
|
||||
to create a transform with them (using this function), or catch the
|
||||
potential PyCMSError that will occur if they don't support the modes
|
||||
you select.
|
||||
|
||||
:param profile: EITHER a valid CmsProfile object, OR a string of the
|
||||
filename of an ICC profile.
|
||||
:param intent: Integer (0-3) specifying the rendering intent you wish to
|
||||
use with this profile
|
||||
|
||||
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
|
||||
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
|
||||
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
|
||||
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
|
||||
|
||||
see the pyCMS documentation for details on rendering intents and what
|
||||
they do.
|
||||
:param direction: Integer specifying whether the profile is to be used for input,
|
||||
output, or proof
|
||||
|
||||
INPUT = 0 (or use ImageCms.DIRECTION_INPUT)
|
||||
OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT)
|
||||
PROOF = 2 (or use ImageCms.DIRECTION_PROOF)
|
||||
|
||||
:returns: 1 if the intent/direction are supported, -1 if they are not.
|
||||
:exception PyCMSError:
|
||||
"""
|
||||
|
||||
try:
|
||||
if not isinstance(profile, ImageCmsProfile):
|
||||
profile = ImageCmsProfile(profile)
|
||||
# FIXME: I get different results for the same data w. different
|
||||
# compilers. Bug in LittleCMS or in the binding?
|
||||
if profile.profile.is_intent_supported(intent, direction):
|
||||
return 1
|
||||
else:
|
||||
return -1
|
||||
except (AttributeError, IOError, TypeError, ValueError) as v:
|
||||
raise PyCMSError(v)
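# Illustrative sketch (not part of the original module): checking whether the
# desired intent is supported and falling back to the profile's default, as
# the docstrings above recommend. "printer.icc" is a hypothetical path.
def _example_pick_intent(path="printer.icc"):
    profile = ImageCmsProfile(path)
    wanted = INTENT_RELATIVE_COLORIMETRIC
    if isIntentSupported(profile, wanted, DIRECTION_OUTPUT) == 1:
        return wanted
    return getDefaultIntent(profile)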
|
||||
|
||||
|
||||
def versions():
|
||||
"""
|
||||
(pyCMS) Fetches the pyCMS, LittleCMS, Python, and PIL version numbers.
|
||||
"""
|
||||
|
||||
import sys
|
||||
return (
|
||||
VERSION, core.littlecms_version,
|
||||
sys.version.split()[0], Image.VERSION
|
||||
)
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
if __name__ == "__main__":
|
||||
# create a cheap manual from the __doc__ strings for the functions above
|
||||
|
||||
from PIL import ImageCms
|
||||
print(__doc__)
|
||||
|
||||
for f in dir(ImageCms):
|
||||
doc = None
|
||||
try:
|
||||
exec("doc = %s.__doc__" % (f))
|
||||
if "pyCMS" in doc:
|
||||
# so we don't get the __doc__ string for imported modules
|
||||
print("=" * 80)
|
||||
print("%s" % f)
|
||||
print(doc)
|
||||
except (AttributeError, TypeError):
|
||||
pass
|
||||
|
||||
# End of file
|
|
@ -1,276 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# map CSS3-style colour description strings to RGB
|
||||
#
|
||||
# History:
|
||||
# 2002-10-24 fl Added support for CSS-style color strings
|
||||
# 2002-12-15 fl Added RGBA support
|
||||
# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2
|
||||
# 2004-07-19 fl Fixed gray/grey spelling issues
|
||||
# 2009-03-05 fl Fixed rounding error in grayscale calculation
|
||||
#
|
||||
# Copyright (c) 2002-2004 by Secret Labs AB
|
||||
# Copyright (c) 2002-2004 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
import re
|
||||
|
||||
def getrgb(color):
|
||||
"""
|
||||
Convert a color string to an RGB tuple. If the string cannot be parsed,
|
||||
this function raises a :py:exc:`ValueError` exception.
|
||||
|
||||
.. versionadded:: 1.1.4
|
||||
|
||||
:param color: A color string
|
||||
:return: ``(red, green, blue[, alpha])``
|
||||
"""
|
||||
try:
|
||||
rgb = colormap[color]
|
||||
except KeyError:
|
||||
try:
|
||||
# fall back on case-insensitive lookup
|
||||
rgb = colormap[color.lower()]
|
||||
except KeyError:
|
||||
rgb = None
|
||||
# found color in cache
|
||||
if rgb:
|
||||
if isinstance(rgb, tuple):
|
||||
return rgb
|
||||
colormap[color] = rgb = getrgb(rgb)
|
||||
return rgb
|
||||
# check for known string formats
|
||||
m = re.match("#\w\w\w$", color)
|
||||
if m:
|
||||
return (
|
||||
int(color[1]*2, 16),
|
||||
int(color[2]*2, 16),
|
||||
int(color[3]*2, 16)
|
||||
)
|
||||
m = re.match("#\w\w\w\w\w\w$", color)
|
||||
if m:
|
||||
return (
|
||||
int(color[1:3], 16),
|
||||
int(color[3:5], 16),
|
||||
int(color[5:7], 16)
|
||||
)
|
||||
m = re.match("rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
|
||||
if m:
|
||||
return (
|
||||
int(m.group(1)),
|
||||
int(m.group(2)),
|
||||
int(m.group(3))
|
||||
)
|
||||
m = re.match("rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
|
||||
if m:
|
||||
return (
|
||||
int((int(m.group(1)) * 255) / 100.0 + 0.5),
|
||||
int((int(m.group(2)) * 255) / 100.0 + 0.5),
|
||||
int((int(m.group(3)) * 255) / 100.0 + 0.5)
|
||||
)
|
||||
m = re.match("hsl\(\s*(\d+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
|
||||
if m:
|
||||
from colorsys import hls_to_rgb
|
||||
rgb = hls_to_rgb(
|
||||
float(m.group(1)) / 360.0,
|
||||
float(m.group(3)) / 100.0,
|
||||
float(m.group(2)) / 100.0,
|
||||
)
|
||||
return (
|
||||
int(rgb[0] * 255 + 0.5),
|
||||
int(rgb[1] * 255 + 0.5),
|
||||
int(rgb[2] * 255 + 0.5)
|
||||
)
|
||||
m = re.match("rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
|
||||
if m:
|
||||
return (
|
||||
int(m.group(1)),
|
||||
int(m.group(2)),
|
||||
int(m.group(3)),
|
||||
int(m.group(4))
|
||||
)
|
||||
raise ValueError("unknown color specifier: %r" % color)
|
||||
|
||||
def getcolor(color, mode):
|
||||
"""
|
||||
Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a
|
||||
greyscale value if the mode is not color or a palette image. If the string
|
||||
cannot be parsed, this function raises a :py:exc:`ValueError` exception.
|
||||
|
||||
.. versionadded:: 1.1.4
|
||||
|
||||
:param color: A color string
|
||||
:return: ``(graylevel [, alpha]) or (red, green, blue[, alpha])``
|
||||
"""
|
||||
# same as getrgb, but converts the result to the given mode
|
||||
color, alpha = getrgb(color), 255
|
||||
if len(color) == 4:
|
||||
color, alpha = color[0:3], color[3]
|
||||
|
||||
if Image.getmodebase(mode) == "L":
|
||||
r, g, b = color
|
||||
color = (r*299 + g*587 + b*114)//1000
|
||||
if mode[-1] == 'A':
|
||||
return (color, alpha)
|
||||
else:
|
||||
if mode[-1] == 'A':
|
||||
return color + (alpha,)
|
||||
return color
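# Illustrative sketch (not part of the original module): the string formats
# accepted by getrgb(), and how getcolor() folds the result into a mode.
def _example_colors():
    assert getrgb("red") == (255, 0, 0)               # named colour
    assert getrgb("#f00") == (255, 0, 0)              # three-digit hex
    assert getrgb("#ff0000") == (255, 0, 0)           # six-digit hex
    assert getrgb("rgb(255, 0, 0)") == (255, 0, 0)
    assert getrgb("rgb(100%, 0%, 0%)") == (255, 0, 0)
    assert getrgb("hsl(0, 100%, 50%)") == (255, 0, 0)
    assert getcolor("red", "L") == 76                 # greyscale conversion
    assert getcolor("red", "RGBA") == (255, 0, 0, 255)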
|
||||
|
||||
colormap = {
|
||||
# X11 colour table (from "CSS3 module: Color working draft"), with
|
||||
# gray/grey spelling issues fixed. This is a superset of HTML 4.0
|
||||
# colour names used in CSS 1.
|
||||
"aliceblue": "#f0f8ff",
|
||||
"antiquewhite": "#faebd7",
|
||||
"aqua": "#00ffff",
|
||||
"aquamarine": "#7fffd4",
|
||||
"azure": "#f0ffff",
|
||||
"beige": "#f5f5dc",
|
||||
"bisque": "#ffe4c4",
|
||||
"black": "#000000",
|
||||
"blanchedalmond": "#ffebcd",
|
||||
"blue": "#0000ff",
|
||||
"blueviolet": "#8a2be2",
|
||||
"brown": "#a52a2a",
|
||||
"burlywood": "#deb887",
|
||||
"cadetblue": "#5f9ea0",
|
||||
"chartreuse": "#7fff00",
|
||||
"chocolate": "#d2691e",
|
||||
"coral": "#ff7f50",
|
||||
"cornflowerblue": "#6495ed",
|
||||
"cornsilk": "#fff8dc",
|
||||
"crimson": "#dc143c",
|
||||
"cyan": "#00ffff",
|
||||
"darkblue": "#00008b",
|
||||
"darkcyan": "#008b8b",
|
||||
"darkgoldenrod": "#b8860b",
|
||||
"darkgray": "#a9a9a9",
|
||||
"darkgrey": "#a9a9a9",
|
||||
"darkgreen": "#006400",
|
||||
"darkkhaki": "#bdb76b",
|
||||
"darkmagenta": "#8b008b",
|
||||
"darkolivegreen": "#556b2f",
|
||||
"darkorange": "#ff8c00",
|
||||
"darkorchid": "#9932cc",
|
||||
"darkred": "#8b0000",
|
||||
"darksalmon": "#e9967a",
|
||||
"darkseagreen": "#8fbc8f",
|
||||
"darkslateblue": "#483d8b",
|
||||
"darkslategray": "#2f4f4f",
|
||||
"darkslategrey": "#2f4f4f",
|
||||
"darkturquoise": "#00ced1",
|
||||
"darkviolet": "#9400d3",
|
||||
"deeppink": "#ff1493",
|
||||
"deepskyblue": "#00bfff",
|
||||
"dimgray": "#696969",
|
||||
"dimgrey": "#696969",
|
||||
"dodgerblue": "#1e90ff",
|
||||
"firebrick": "#b22222",
|
||||
"floralwhite": "#fffaf0",
|
||||
"forestgreen": "#228b22",
|
||||
"fuchsia": "#ff00ff",
|
||||
"gainsboro": "#dcdcdc",
|
||||
"ghostwhite": "#f8f8ff",
|
||||
"gold": "#ffd700",
|
||||
"goldenrod": "#daa520",
|
||||
"gray": "#808080",
|
||||
"grey": "#808080",
|
||||
"green": "#008000",
|
||||
"greenyellow": "#adff2f",
|
||||
"honeydew": "#f0fff0",
|
||||
"hotpink": "#ff69b4",
|
||||
"indianred": "#cd5c5c",
|
||||
"indigo": "#4b0082",
|
||||
"ivory": "#fffff0",
|
||||
"khaki": "#f0e68c",
|
||||
"lavender": "#e6e6fa",
|
||||
"lavenderblush": "#fff0f5",
|
||||
"lawngreen": "#7cfc00",
|
||||
"lemonchiffon": "#fffacd",
|
||||
"lightblue": "#add8e6",
|
||||
"lightcoral": "#f08080",
|
||||
"lightcyan": "#e0ffff",
|
||||
"lightgoldenrodyellow": "#fafad2",
|
||||
"lightgreen": "#90ee90",
|
||||
"lightgray": "#d3d3d3",
|
||||
"lightgrey": "#d3d3d3",
|
||||
"lightpink": "#ffb6c1",
|
||||
"lightsalmon": "#ffa07a",
|
||||
"lightseagreen": "#20b2aa",
|
||||
"lightskyblue": "#87cefa",
|
||||
"lightslategray": "#778899",
|
||||
"lightslategrey": "#778899",
|
||||
"lightsteelblue": "#b0c4de",
|
||||
"lightyellow": "#ffffe0",
|
||||
"lime": "#00ff00",
|
||||
"limegreen": "#32cd32",
|
||||
"linen": "#faf0e6",
|
||||
"magenta": "#ff00ff",
|
||||
"maroon": "#800000",
|
||||
"mediumaquamarine": "#66cdaa",
|
||||
"mediumblue": "#0000cd",
|
||||
"mediumorchid": "#ba55d3",
|
||||
"mediumpurple": "#9370db",
|
||||
"mediumseagreen": "#3cb371",
|
||||
"mediumslateblue": "#7b68ee",
|
||||
"mediumspringgreen": "#00fa9a",
|
||||
"mediumturquoise": "#48d1cc",
|
||||
"mediumvioletred": "#c71585",
|
||||
"midnightblue": "#191970",
|
||||
"mintcream": "#f5fffa",
|
||||
"mistyrose": "#ffe4e1",
|
||||
"moccasin": "#ffe4b5",
|
||||
"navajowhite": "#ffdead",
|
||||
"navy": "#000080",
|
||||
"oldlace": "#fdf5e6",
|
||||
"olive": "#808000",
|
||||
"olivedrab": "#6b8e23",
|
||||
"orange": "#ffa500",
|
||||
"orangered": "#ff4500",
|
||||
"orchid": "#da70d6",
|
||||
"palegoldenrod": "#eee8aa",
|
||||
"palegreen": "#98fb98",
|
||||
"paleturquoise": "#afeeee",
|
||||
"palevioletred": "#db7093",
|
||||
"papayawhip": "#ffefd5",
|
||||
"peachpuff": "#ffdab9",
|
||||
"peru": "#cd853f",
|
||||
"pink": "#ffc0cb",
|
||||
"plum": "#dda0dd",
|
||||
"powderblue": "#b0e0e6",
|
||||
"purple": "#800080",
|
||||
"red": "#ff0000",
|
||||
"rosybrown": "#bc8f8f",
|
||||
"royalblue": "#4169e1",
|
||||
"saddlebrown": "#8b4513",
|
||||
"salmon": "#fa8072",
|
||||
"sandybrown": "#f4a460",
|
||||
"seagreen": "#2e8b57",
|
||||
"seashell": "#fff5ee",
|
||||
"sienna": "#a0522d",
|
||||
"silver": "#c0c0c0",
|
||||
"skyblue": "#87ceeb",
|
||||
"slateblue": "#6a5acd",
|
||||
"slategray": "#708090",
|
||||
"slategrey": "#708090",
|
||||
"snow": "#fffafa",
|
||||
"springgreen": "#00ff7f",
|
||||
"steelblue": "#4682b4",
|
||||
"tan": "#d2b48c",
|
||||
"teal": "#008080",
|
||||
"thistle": "#d8bfd8",
|
||||
"tomato": "#ff6347",
|
||||
"turquoise": "#40e0d0",
|
||||
"violet": "#ee82ee",
|
||||
"wheat": "#f5deb3",
|
||||
"white": "#ffffff",
|
||||
"whitesmoke": "#f5f5f5",
|
||||
"yellow": "#ffff00",
|
||||
"yellowgreen": "#9acd32",
|
||||
}
|
|
@ -1,379 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# drawing interface operations
|
||||
#
|
||||
# History:
|
||||
# 1996-04-13 fl Created (experimental)
|
||||
# 1996-08-07 fl Filled polygons, ellipses.
|
||||
# 1996-08-13 fl Added text support
|
||||
# 1998-06-28 fl Handle I and F images
|
||||
# 1998-12-29 fl Added arc; use arc primitive to draw ellipses
|
||||
# 1999-01-10 fl Added shape stuff (experimental)
|
||||
# 1999-02-06 fl Added bitmap support
|
||||
# 1999-02-11 fl Changed all primitives to take options
|
||||
# 1999-02-20 fl Fixed backwards compatibility
|
||||
# 2000-10-12 fl Copy on write, when necessary
|
||||
# 2001-02-18 fl Use default ink for bitmap/text also in fill mode
|
||||
# 2002-10-24 fl Added support for CSS-style color strings
|
||||
# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
|
||||
# 2002-12-11 fl Refactored low-level drawing API (work in progress)
|
||||
# 2004-08-26 fl Made Draw() a factory function, added getdraw() support
|
||||
# 2004-09-04 fl Added width support to line primitive
|
||||
# 2004-09-10 fl Added font mode handling
|
||||
# 2006-06-19 fl Added font bearing support (getmask2)
|
||||
#
|
||||
# Copyright (c) 1997-2006 by Secret Labs AB
|
||||
# Copyright (c) 1996-2006 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
import numbers
|
||||
|
||||
from PIL import Image, ImageColor
|
||||
from PIL._util import isStringType
|
||||
|
||||
try:
|
||||
import warnings
|
||||
except ImportError:
|
||||
warnings = None
|
||||
|
||||
##
|
||||
# A simple 2D drawing interface for PIL images.
|
||||
# <p>
|
||||
# Application code should use the <b>Draw</b> factory function,
# instead of instantiating this class directly.
|
||||
|
||||
class ImageDraw:
|
||||
|
||||
##
|
||||
# Create a drawing instance.
|
||||
#
|
||||
# @param im The image to draw in.
|
||||
# @param mode Optional mode to use for color values. For RGB
|
||||
# images, this argument can be RGB or RGBA (to blend the
|
||||
# drawing into the image). For all other modes, this argument
|
||||
# must be the same as the image mode. If omitted, the mode
|
||||
# defaults to the mode of the image.
|
||||
|
||||
def __init__(self, im, mode=None):
|
||||
im.load()
|
||||
if im.readonly:
|
||||
im._copy() # make it writable
|
||||
blend = 0
|
||||
if mode is None:
|
||||
mode = im.mode
|
||||
if mode != im.mode:
|
||||
if mode == "RGBA" and im.mode == "RGB":
|
||||
blend = 1
|
||||
else:
|
||||
raise ValueError("mode mismatch")
|
||||
if mode == "P":
|
||||
self.palette = im.palette
|
||||
else:
|
||||
self.palette = None
|
||||
self.im = im.im
|
||||
self.draw = Image.core.draw(self.im, blend)
|
||||
self.mode = mode
|
||||
if mode in ("I", "F"):
|
||||
self.ink = self.draw.draw_ink(1, mode)
|
||||
else:
|
||||
self.ink = self.draw.draw_ink(-1, mode)
|
||||
if mode in ("1", "P", "I", "F"):
|
||||
# FIXME: fix Fill2 to properly support matte for I+F images
|
||||
self.fontmode = "1"
|
||||
else:
|
||||
self.fontmode = "L" # aliasing is okay for other modes
|
||||
self.fill = 0
|
||||
self.font = None
|
||||
|
||||
##
|
||||
# Set the default pen color.
|
||||
|
||||
def setink(self, ink):
|
||||
# compatibility
|
||||
if warnings:
|
||||
warnings.warn(
|
||||
"'setink' is deprecated; use keyword arguments instead",
|
||||
DeprecationWarning, stacklevel=2
|
||||
)
|
||||
if isStringType(ink):
|
||||
ink = ImageColor.getcolor(ink, self.mode)
|
||||
if self.palette and not isinstance(ink, numbers.Number):
|
||||
ink = self.palette.getcolor(ink)
|
||||
self.ink = self.draw.draw_ink(ink, self.mode)
|
||||
|
||||
##
|
||||
# Set the default background color.
|
||||
|
||||
def setfill(self, onoff):
|
||||
# compatibility
|
||||
if warnings:
|
||||
warnings.warn(
|
||||
"'setfill' is deprecated; use keyword arguments instead",
|
||||
DeprecationWarning, stacklevel=2
|
||||
)
|
||||
self.fill = onoff
|
||||
|
||||
##
|
||||
# Set the default font.
|
||||
|
||||
def setfont(self, font):
|
||||
# compatibility
|
||||
self.font = font
|
||||
|
||||
##
|
||||
# Get the current default font.
|
||||
|
||||
def getfont(self):
|
||||
if not self.font:
|
||||
# FIXME: should add a font repository
|
||||
from PIL import ImageFont
|
||||
self.font = ImageFont.load_default()
|
||||
return self.font
|
||||
|
||||
def _getink(self, ink, fill=None):
|
||||
if ink is None and fill is None:
|
||||
if self.fill:
|
||||
fill = self.ink
|
||||
else:
|
||||
ink = self.ink
|
||||
else:
|
||||
if ink is not None:
|
||||
if isStringType(ink):
|
||||
ink = ImageColor.getcolor(ink, self.mode)
|
||||
if self.palette and not isinstance(ink, numbers.Number):
|
||||
ink = self.palette.getcolor(ink)
|
||||
ink = self.draw.draw_ink(ink, self.mode)
|
||||
if fill is not None:
|
||||
if isStringType(fill):
|
||||
fill = ImageColor.getcolor(fill, self.mode)
|
||||
if self.palette and not isinstance(fill, numbers.Number):
|
||||
fill = self.palette.getcolor(fill)
|
||||
fill = self.draw.draw_ink(fill, self.mode)
|
||||
return ink, fill
|
||||
|
||||
##
|
||||
# Draw an arc.
|
||||
|
||||
def arc(self, xy, start, end, fill=None):
|
||||
ink, fill = self._getink(fill)
|
||||
if ink is not None:
|
||||
self.draw.draw_arc(xy, start, end, ink)
|
||||
|
||||
##
|
||||
# Draw a bitmap.
|
||||
|
||||
def bitmap(self, xy, bitmap, fill=None):
|
||||
bitmap.load()
|
||||
ink, fill = self._getink(fill)
|
||||
if ink is None:
|
||||
ink = fill
|
||||
if ink is not None:
|
||||
self.draw.draw_bitmap(xy, bitmap.im, ink)
|
||||
|
||||
##
|
||||
# Draw a chord.
|
||||
|
||||
def chord(self, xy, start, end, fill=None, outline=None):
|
||||
ink, fill = self._getink(outline, fill)
|
||||
if fill is not None:
|
||||
self.draw.draw_chord(xy, start, end, fill, 1)
|
||||
if ink is not None:
|
||||
self.draw.draw_chord(xy, start, end, ink, 0)
|
||||
|
||||
##
|
||||
# Draw an ellipse.
|
||||
|
||||
def ellipse(self, xy, fill=None, outline=None):
|
||||
ink, fill = self._getink(outline, fill)
|
||||
if fill is not None:
|
||||
self.draw.draw_ellipse(xy, fill, 1)
|
||||
if ink is not None:
|
||||
self.draw.draw_ellipse(xy, ink, 0)
|
||||
|
||||
##
|
||||
# Draw a line, or a connected sequence of line segments.
|
||||
|
||||
def line(self, xy, fill=None, width=0):
|
||||
ink, fill = self._getink(fill)
|
||||
if ink is not None:
|
||||
self.draw.draw_lines(xy, ink, width)
|
||||
|
||||
##
|
||||
# (Experimental) Draw a shape.
|
||||
|
||||
def shape(self, shape, fill=None, outline=None):
|
||||
# experimental
|
||||
shape.close()
|
||||
ink, fill = self._getink(outline, fill)
|
||||
if fill is not None:
|
||||
self.draw.draw_outline(shape, fill, 1)
|
||||
if ink is not None:
|
||||
self.draw.draw_outline(shape, ink, 0)
|
||||
|
||||
##
|
||||
# Draw a pieslice.
|
||||
|
||||
def pieslice(self, xy, start, end, fill=None, outline=None):
|
||||
ink, fill = self._getink(outline, fill)
|
||||
if fill is not None:
|
||||
self.draw.draw_pieslice(xy, start, end, fill, 1)
|
||||
if ink is not None:
|
||||
self.draw.draw_pieslice(xy, start, end, ink, 0)
|
||||
|
||||
##
|
||||
# Draw one or more individual pixels.
|
||||
|
||||
def point(self, xy, fill=None):
|
||||
ink, fill = self._getink(fill)
|
||||
if ink is not None:
|
||||
self.draw.draw_points(xy, ink)
|
||||
|
||||
##
|
||||
# Draw a polygon.
|
||||
|
||||
def polygon(self, xy, fill=None, outline=None):
|
||||
ink, fill = self._getink(outline, fill)
|
||||
if fill is not None:
|
||||
self.draw.draw_polygon(xy, fill, 1)
|
||||
if ink is not None:
|
||||
self.draw.draw_polygon(xy, ink, 0)
|
||||
|
||||
##
|
||||
# Draw a rectangle.
|
||||
|
||||
def rectangle(self, xy, fill=None, outline=None):
|
||||
ink, fill = self._getink(outline, fill)
|
||||
if fill is not None:
|
||||
self.draw.draw_rectangle(xy, fill, 1)
|
||||
if ink is not None:
|
||||
self.draw.draw_rectangle(xy, ink, 0)
|
||||
|
||||
##
|
||||
# Draw text.
|
||||
|
||||
def text(self, xy, text, fill=None, font=None, anchor=None):
|
||||
ink, fill = self._getink(fill)
|
||||
if font is None:
|
||||
font = self.getfont()
|
||||
if ink is None:
|
||||
ink = fill
|
||||
if ink is not None:
|
||||
try:
|
||||
mask, offset = font.getmask2(text, self.fontmode)
|
||||
xy = xy[0] + offset[0], xy[1] + offset[1]
|
||||
except AttributeError:
|
||||
try:
|
||||
mask = font.getmask(text, self.fontmode)
|
||||
except TypeError:
|
||||
mask = font.getmask(text)
|
||||
self.draw.draw_bitmap(xy, mask, ink)
|
||||
|
||||
##
|
||||
# Get the size of a given string, in pixels.
|
||||
|
||||
def textsize(self, text, font=None):
|
||||
if font is None:
|
||||
font = self.getfont()
|
||||
return font.getsize(text)
|
||||
|
||||
##
|
||||
# A simple 2D drawing interface for PIL images.
|
||||
#
|
||||
# @param im The image to draw in.
|
||||
# @param mode Optional mode to use for color values. For RGB
|
||||
# images, this argument can be RGB or RGBA (to blend the
|
||||
# drawing into the image). For all other modes, this argument
|
||||
# must be the same as the image mode. If omitted, the mode
|
||||
# defaults to the mode of the image.
|
||||
|
||||
def Draw(im, mode=None):
|
||||
try:
|
||||
return im.getdraw(mode)
|
||||
except AttributeError:
|
||||
return ImageDraw(im, mode)
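# Illustrative sketch (not part of the original module): typical use of the
# Draw() factory defined above.
def _example_draw():
    im = Image.new("RGB", (200, 100), "white")
    d = Draw(im)
    d.rectangle([10, 10, 190, 90], fill="lightblue", outline="navy")
    d.line([(10, 50), (190, 50)], fill="red", width=3)
    d.text((20, 20), "hello", fill="black")  # uses the default bitmap font
    return im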
|
||||
|
||||
# experimental access to the outline API
|
||||
try:
|
||||
Outline = Image.core.outline
|
||||
except AttributeError:
|
||||
Outline = None
|
||||
|
||||
##
|
||||
# (Experimental) A more advanced 2D drawing interface for PIL images,
|
||||
# based on the WCK interface.
|
||||
#
|
||||
# @param im The image to draw in.
|
||||
# @param hints An optional list of hints.
|
||||
# @return A (drawing context, drawing resource factory) tuple.
|
||||
|
||||
def getdraw(im=None, hints=None):
|
||||
# FIXME: this needs more work!
|
||||
# FIXME: come up with a better 'hints' scheme.
|
||||
handler = None
|
||||
if not hints or "nicest" in hints:
|
||||
try:
|
||||
from PIL import _imagingagg as handler
|
||||
except ImportError:
|
||||
pass
|
||||
if handler is None:
|
||||
from PIL import ImageDraw2 as handler
|
||||
if im:
|
||||
im = handler.Draw(im)
|
||||
return im, handler
|
||||
|
||||
##
|
||||
# (experimental) Fills a bounded region with a given color.
|
||||
#
|
||||
# @param image Target image.
|
||||
# @param xy Seed position (a 2-item coordinate tuple).
|
||||
# @param value Fill color.
|
||||
# @param border Optional border value. If given, the region consists of
|
||||
# pixels with a color different from the border color. If not given,
|
||||
# the region consists of pixels having the same color as the seed
|
||||
# pixel.
|
||||
|
||||
def floodfill(image, xy, value, border=None):
|
||||
"Fill bounded region."
|
||||
# based on an implementation by Eric S. Raymond
|
||||
pixel = image.load()
|
||||
x, y = xy
|
||||
try:
|
||||
background = pixel[x, y]
|
||||
if background == value:
|
||||
return # seed point already has fill color
|
||||
pixel[x, y] = value
|
||||
except IndexError:
|
||||
return # seed point outside image
|
||||
edge = [(x, y)]
|
||||
if border is None:
|
||||
while edge:
|
||||
newedge = []
|
||||
for (x, y) in edge:
|
||||
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
|
||||
try:
|
||||
p = pixel[s, t]
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
if p == background:
|
||||
pixel[s, t] = value
|
||||
newedge.append((s, t))
|
||||
edge = newedge
|
||||
else:
|
||||
while edge:
|
||||
newedge = []
|
||||
for (x, y) in edge:
|
||||
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
|
||||
try:
|
||||
p = pixel[s, t]
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
if p != value and p != border:
|
||||
pixel[s, t] = value
|
||||
newedge.append((s, t))
|
||||
edge = newedge
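# Illustrative sketch (not part of the original module): filling the inside of
# an outlined shape with floodfill(), seeded from a point inside the outline.
def _example_floodfill():
    im = Image.new("RGB", (100, 100), "white")
    d = Draw(im)
    d.ellipse([20, 20, 80, 80], outline="black")
    # fill everything enclosed by the black outline, starting at the centre
    floodfill(im, (50, 50), value=(255, 0, 0), border=(0, 0, 0))
    return im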
|
|
@ -1,106 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# WCK-style drawing interface operations
|
||||
#
|
||||
# History:
|
||||
# 2003-12-07 fl created
|
||||
# 2005-05-15 fl updated; added to PIL as ImageDraw2
|
||||
# 2005-05-15 fl added text support
|
||||
# 2005-05-20 fl added arc/chord/pieslice support
|
||||
#
|
||||
# Copyright (c) 2003-2005 by Secret Labs AB
|
||||
# Copyright (c) 2003-2005 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image, ImageColor, ImageDraw, ImageFont, ImagePath
|
||||
|
||||
class Pen:
|
||||
def __init__(self, color, width=1, opacity=255):
|
||||
self.color = ImageColor.getrgb(color)
|
||||
self.width = width
|
||||
|
||||
class Brush:
|
||||
def __init__(self, color, opacity=255):
|
||||
self.color = ImageColor.getrgb(color)
|
||||
|
||||
class Font:
|
||||
def __init__(self, color, file, size=12):
|
||||
# FIXME: add support for bitmap fonts
|
||||
self.color = ImageColor.getrgb(color)
|
||||
self.font = ImageFont.truetype(file, size)
|
||||
|
||||
class Draw:
|
||||
|
||||
def __init__(self, image, size=None, color=None):
|
||||
if not hasattr(image, "im"):
|
||||
image = Image.new(image, size, color)
|
||||
self.draw = ImageDraw.Draw(image)
|
||||
self.image = image
|
||||
self.transform = None
|
||||
|
||||
def flush(self):
|
||||
return self.image
|
||||
|
||||
def render(self, op, xy, pen, brush=None):
|
||||
# handle color arguments
|
||||
outline = fill = None; width = 1
|
||||
if isinstance(pen, Pen):
|
||||
outline = pen.color
|
||||
width = pen.width
|
||||
elif isinstance(brush, Pen):
|
||||
outline = brush.color
|
||||
width = brush.width
|
||||
if isinstance(brush, Brush):
|
||||
fill = brush.color
|
||||
elif isinstance(pen, Brush):
|
||||
fill = pen.color
|
||||
# handle transformation
|
||||
if self.transform:
|
||||
xy = ImagePath.Path(xy)
|
||||
xy.transform(self.transform)
|
||||
# render the item
|
||||
if op == "line":
|
||||
self.draw.line(xy, fill=outline, width=width)
|
||||
else:
|
||||
getattr(self.draw, op)(xy, fill=fill, outline=outline)
|
||||
|
||||
def settransform(self, offset):
|
||||
(xoffset, yoffset) = offset
|
||||
self.transform = (1, 0, xoffset, 0, 1, yoffset)
|
||||
|
||||
def arc(self, xy, start, end, *options):
|
||||
self.render("arc", xy, start, end, *options)
|
||||
|
||||
def chord(self, xy, start, end, *options):
|
||||
self.render("chord", xy, start, end, *options)
|
||||
|
||||
def ellipse(self, xy, *options):
|
||||
self.render("ellipse", xy, *options)
|
||||
|
||||
def line(self, xy, *options):
|
||||
self.render("line", xy, *options)
|
||||
|
||||
def pieslice(self, xy, start, end, *options):
|
||||
self.render("pieslice", xy, start, end, *options)
|
||||
|
||||
def polygon(self, xy, *options):
|
||||
self.render("polygon", xy, *options)
|
||||
|
||||
def rectangle(self, xy, *options):
|
||||
self.render("rectangle", xy, *options)
|
||||
|
||||
def symbol(self, xy, symbol, *options):
|
||||
raise NotImplementedError("not in this version")
|
||||
|
||||
def text(self, xy, text, font):
|
||||
if self.transform:
|
||||
xy = ImagePath.Path(xy)
|
||||
xy.transform(self.transform)
|
||||
self.draw.text(xy, text, font=font.font, fill=font.color)
|
||||
|
||||
def textsize(self, text, font):
|
||||
return self.draw.textsize(text, font=font.font)
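# Illustrative sketch (not part of the original module): the WCK-style
# interface above, which takes Pen and Brush objects instead of keyword
# arguments.
def _example_draw2():
    d = Draw("RGB", (200, 100), "white")   # creates a new image internally
    d.settransform((10, 10))               # all later coordinates are offset
    d.rectangle([0, 0, 180, 80], Pen("navy", width=2), Brush("lightblue"))
    d.ellipse([20, 10, 160, 70], Brush("white"), Pen("navy"))
    return d.flush()                       # returns the underlying image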
|
|
@ -1,87 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# image enhancement classes
|
||||
#
|
||||
# For a background, see "Image Processing By Interpolation and
|
||||
# Extrapolation", Paul Haeberli and Douglas Voorhies. Available
|
||||
# at http://www.graficaobscura.com/interp/index.html
|
||||
#
|
||||
# History:
|
||||
# 1996-03-23 fl Created
|
||||
# 2009-06-16 fl Fixed mean calculation
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1996.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image, ImageFilter, ImageStat
|
||||
|
||||
|
||||
class _Enhance:
|
||||
|
||||
def enhance(self, factor):
|
||||
"""
|
||||
Returns an enhanced image.
|
||||
|
||||
:param factor: A floating point value controlling the enhancement.
|
||||
Factor 1.0 always returns a copy of the original image,
|
||||
lower factors mean less color (brightness, contrast,
|
||||
etc), and higher values more. There are no restrictions
|
||||
on this value.
|
||||
:rtype: :py:class:`~PIL.Image.Image`
|
||||
"""
|
||||
return Image.blend(self.degenerate, self.image, factor)
|
||||
|
||||
|
||||
class Color(_Enhance):
|
||||
"""Adjust image color balance.
|
||||
|
||||
This class can be used to adjust the colour balance of an image, in
|
||||
a manner similar to the controls on a colour TV set. An enhancement
|
||||
factor of 0.0 gives a black and white image. A factor of 1.0 gives
|
||||
the original image.
|
||||
"""
|
||||
def __init__(self, image):
|
||||
self.image = image
|
||||
self.degenerate = image.convert("L").convert(image.mode)
|
||||
|
||||
|
||||
class Contrast(_Enhance):
|
||||
"""Adjust image contrast.
|
||||
|
||||
This class can be used to control the contrast of an image, similar
|
||||
to the contrast control on a TV set. An enhancement factor of 0.0
|
||||
gives a solid grey image. A factor of 1.0 gives the original image.
|
||||
"""
|
||||
def __init__(self, image):
|
||||
self.image = image
|
||||
mean = int(ImageStat.Stat(image.convert("L")).mean[0] + 0.5)
|
||||
self.degenerate = Image.new("L", image.size, mean).convert(image.mode)
|
||||
|
||||
|
||||
class Brightness(_Enhance):
|
||||
"""Adjust image brightness.
|
||||
|
||||
This class can be used to control the brightness of an image. An
|
||||
enhancement factor of 0.0 gives a black image. A factor of 1.0 gives the
|
||||
original image.
|
||||
"""
|
||||
def __init__(self, image):
|
||||
self.image = image
|
||||
self.degenerate = Image.new(image.mode, image.size, 0)
|
||||
|
||||
|
||||
class Sharpness(_Enhance):
|
||||
"""Adjust image sharpness.
|
||||
|
||||
This class can be used to adjust the sharpness of an image. An
|
||||
enhancement factor of 0.0 gives a blurred image, a factor of 1.0 gives the
|
||||
original image, and a factor of 2.0 gives a sharpened image.
|
||||
"""
|
||||
def __init__(self, image):
|
||||
self.image = image
|
||||
self.degenerate = image.filter(ImageFilter.SMOOTH)
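# Illustrative sketch (not part of the original module): chaining the enhancer
# classes above; a factor of 1.0 always returns the image unchanged.
def _example_enhance(im):
    im = Color(im).enhance(0.8)        # slightly desaturate
    im = Contrast(im).enhance(1.2)     # boost contrast
    im = Brightness(im).enhance(1.1)   # brighten a little
    return Sharpness(im).enhance(2.0)  # sharpen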
|
|
@ -1,506 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# base class for image file handlers
|
||||
#
|
||||
# history:
|
||||
# 1995-09-09 fl Created
|
||||
# 1996-03-11 fl Fixed load mechanism.
|
||||
# 1996-04-15 fl Added pcx/xbm decoders.
|
||||
# 1996-04-30 fl Added encoders.
|
||||
# 1996-12-14 fl Added load helpers
|
||||
# 1997-01-11 fl Use encode_to_file where possible
|
||||
# 1997-08-27 fl Flush output in _save
|
||||
# 1998-03-05 fl Use memory mapping for some modes
|
||||
# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B"
|
||||
# 1999-05-31 fl Added image parser
|
||||
# 2000-10-12 fl Set readonly flag on memory-mapped images
|
||||
# 2002-03-20 fl Use better messages for common decoder errors
|
||||
# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available
|
||||
# 2003-10-30 fl Added StubImageFile class
|
||||
# 2004-02-25 fl Made incremental parser more robust
|
||||
#
|
||||
# Copyright (c) 1997-2004 by Secret Labs AB
|
||||
# Copyright (c) 1995-2004 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
from PIL._util import isPath
|
||||
import traceback, os, sys
|
||||
import io
|
||||
|
||||
MAXBLOCK = 65536
|
||||
|
||||
SAFEBLOCK = 1024*1024
|
||||
|
||||
LOAD_TRUNCATED_IMAGES = False
|
||||
|
||||
ERRORS = {
|
||||
-1: "image buffer overrun error",
|
||||
-2: "decoding error",
|
||||
-3: "unknown error",
|
||||
-8: "bad configuration",
|
||||
-9: "out of memory error"
|
||||
}
|
||||
|
||||
def raise_ioerror(error):
|
||||
try:
|
||||
message = Image.core.getcodecstatus(error)
|
||||
except AttributeError:
|
||||
message = ERRORS.get(error)
|
||||
if not message:
|
||||
message = "decoder error %d" % error
|
||||
raise IOError(message + " when reading image file")
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# Helpers
|
||||
|
||||
def _tilesort(t):
|
||||
# sort on offset
|
||||
return t[2]
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
# ImageFile base class
|
||||
|
||||
class ImageFile(Image.Image):
|
||||
"Base class for image file format handlers."
|
||||
|
||||
def __init__(self, fp=None, filename=None):
|
||||
Image.Image.__init__(self)
|
||||
|
||||
self.tile = None
|
||||
self.readonly = 1 # until we know better
|
||||
|
||||
self.decoderconfig = ()
|
||||
self.decodermaxblock = MAXBLOCK
|
||||
|
||||
if isPath(fp):
|
||||
# filename
|
||||
self.fp = open(fp, "rb")
|
||||
self.filename = fp
|
||||
else:
|
||||
# stream
|
||||
self.fp = fp
|
||||
self.filename = filename
|
||||
|
||||
try:
|
||||
self._open()
|
||||
except IndexError as v: # end of data
|
||||
if Image.DEBUG > 1:
|
||||
traceback.print_exc()
|
||||
raise SyntaxError(v)
|
||||
except TypeError as v: # end of data (ord)
|
||||
if Image.DEBUG > 1:
|
||||
traceback.print_exc()
|
||||
raise SyntaxError(v)
|
||||
except KeyError as v: # unsupported mode
|
||||
if Image.DEBUG > 1:
|
||||
traceback.print_exc()
|
||||
raise SyntaxError(v)
|
||||
except EOFError as v: # got header but not the first frame
|
||||
if Image.DEBUG > 1:
|
||||
traceback.print_exc()
|
||||
raise SyntaxError(v)
|
||||
|
||||
if not self.mode or self.size[0] <= 0:
|
||||
raise SyntaxError("not identified by this driver")
|
||||
|
||||
def draft(self, mode, size):
|
||||
"Set draft mode"
|
||||
|
||||
pass
|
||||
|
||||
def verify(self):
|
||||
"Check file integrity"
|
||||
|
||||
# raise exception if something's wrong. must be called
|
||||
# directly after open, and closes file when finished.
|
||||
self.fp = None
|
||||
|
||||
def load(self):
|
||||
"Load image data based on tile list"
|
||||
|
||||
pixel = Image.Image.load(self)
|
||||
|
||||
if self.tile is None:
|
||||
raise IOError("cannot load this image")
|
||||
if not self.tile:
|
||||
return pixel
|
||||
|
||||
self.map = None
|
||||
|
||||
readonly = 0
|
||||
|
||||
if self.filename and len(self.tile) == 1 and not hasattr(sys, 'pypy_version_info'):
|
||||
# As of pypy 2.1.0, memory mapping was failing here.
|
||||
# try memory mapping
|
||||
d, e, o, a = self.tile[0]
|
||||
if d == "raw" and a[0] == self.mode and a[0] in Image._MAPMODES:
|
||||
try:
|
||||
if hasattr(Image.core, "map"):
|
||||
# use built-in mapper
|
||||
self.map = Image.core.map(self.filename)
|
||||
self.map.seek(o)
|
||||
self.im = self.map.readimage(
|
||||
self.mode, self.size, a[1], a[2]
|
||||
)
|
||||
else:
|
||||
# use mmap, if possible
|
||||
import mmap
|
||||
file = open(self.filename, "r+")
|
||||
size = os.path.getsize(self.filename)
|
||||
# FIXME: on Unix, use PROT_READ etc
|
||||
self.map = mmap.mmap(file.fileno(), size)
|
||||
self.im = Image.core.map_buffer(
|
||||
self.map, self.size, d, e, o, a
|
||||
)
|
||||
readonly = 1
|
||||
except (AttributeError, EnvironmentError, ImportError):
|
||||
self.map = None
|
||||
|
||||
self.load_prepare()
|
||||
|
||||
# look for read/seek overrides
|
||||
try:
|
||||
read = self.load_read
|
||||
except AttributeError:
|
||||
read = self.fp.read
|
||||
|
||||
try:
|
||||
seek = self.load_seek
|
||||
except AttributeError:
|
||||
seek = self.fp.seek
|
||||
|
||||
if not self.map:
|
||||
|
||||
# sort tiles in file order
|
||||
self.tile.sort(key=_tilesort)
|
||||
|
||||
try:
|
||||
# FIXME: This is a hack to handle TIFF's JpegTables tag.
|
||||
prefix = self.tile_prefix
|
||||
except AttributeError:
|
||||
prefix = b""
|
||||
|
||||
for d, e, o, a in self.tile:
|
||||
d = Image._getdecoder(self.mode, d, a, self.decoderconfig)
|
||||
seek(o)
|
||||
try:
|
||||
d.setimage(self.im, e)
|
||||
except ValueError:
|
||||
continue
|
||||
b = prefix
|
||||
t = len(b)
|
||||
while True:
|
||||
try:
|
||||
s = read(self.decodermaxblock)
|
||||
except IndexError as ie: # truncated png/gif
|
||||
if LOAD_TRUNCATED_IMAGES:
|
||||
break
|
||||
else:
|
||||
raise IndexError(ie)
|
||||
|
||||
if not s and not d.handles_eof: # truncated jpeg
|
||||
self.tile = []
|
||||
|
||||
# JpegDecode needs to clean things up here either way
|
||||
# If we don't destroy the decompressor, we have a memory leak.
|
||||
d.cleanup()
|
||||
|
||||
if LOAD_TRUNCATED_IMAGES:
|
||||
break
|
||||
else:
|
||||
raise IOError("image file is truncated (%d bytes not processed)" % len(b))
|
||||
|
||||
b = b + s
|
||||
n, e = d.decode(b)
|
||||
if n < 0:
|
||||
break
|
||||
b = b[n:]
|
||||
t = t + n
|
||||
|
||||
self.tile = []
|
||||
self.readonly = readonly
|
||||
|
||||
self.fp = None # might be shared
|
||||
|
||||
if not self.map and (not LOAD_TRUNCATED_IMAGES or t == 0) and e < 0:
|
||||
# still raised if decoder fails to return anything
|
||||
raise_ioerror(e)
|
||||
|
||||
# post processing
|
||||
if hasattr(self, "tile_post_rotate"):
|
||||
# FIXME: This is a hack to handle rotated PCD's
|
||||
self.im = self.im.rotate(self.tile_post_rotate)
|
||||
self.size = self.im.size
|
||||
|
||||
self.load_end()
|
||||
|
||||
return Image.Image.load(self)
|
||||
|
||||
def load_prepare(self):
|
||||
# create image memory if necessary
|
||||
if not self.im or\
|
||||
self.im.mode != self.mode or self.im.size != self.size:
|
||||
self.im = Image.core.new(self.mode, self.size)
|
||||
# create palette (optional)
|
||||
if self.mode == "P":
|
||||
Image.Image.load(self)
|
||||
|
||||
def load_end(self):
|
||||
# may be overridden
|
||||
pass
|
||||
|
||||
# may be defined for contained formats
|
||||
# def load_seek(self, pos):
|
||||
# pass
|
||||
|
||||
# may be defined for blocked formats (e.g. PNG)
|
||||
# def load_read(self, bytes):
|
||||
# pass
|
||||
|
||||
|
||||
class StubImageFile(ImageFile):
|
||||
"""
|
||||
Base class for stub image loaders.
|
||||
|
||||
A stub loader is an image loader that can identify files of a
|
||||
certain format, but relies on external code to load the file.
|
||||
"""
|
||||
|
||||
def _open(self):
|
||||
raise NotImplementedError(
|
||||
"StubImageFile subclass must implement _open"
|
||||
)
|
||||
|
||||
def load(self):
|
||||
loader = self._load()
|
||||
if loader is None:
|
||||
raise IOError("cannot find loader for this %s file" % self.format)
|
||||
image = loader.load(self)
|
||||
assert image is not None
|
||||
# become the other object (!)
|
||||
self.__class__ = image.__class__
|
||||
self.__dict__ = image.__dict__
|
||||
|
||||
def _load(self):
|
||||
"(Hook) Find actual image loader."
|
||||
raise NotImplementedError(
|
||||
"StubImageFile subclass must implement _load"
|
||||
)
|
||||
|
||||
|
||||
class Parser:
|
||||
"""
|
||||
Incremental image parser. This class implements the standard
|
||||
feed/close consumer interface.
|
||||
|
||||
In Python 2.x, this is an old-style class.
|
||||
"""
|
||||
incremental = None
|
||||
image = None
|
||||
data = None
|
||||
decoder = None
|
||||
finished = 0
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
(Consumer) Reset the parser. Note that you can only call this
|
||||
method immediately after you've created a parser; parser
|
||||
instances cannot be reused.
|
||||
"""
|
||||
assert self.data is None, "cannot reuse parsers"
|
||||
|
||||
def feed(self, data):
|
||||
"""
|
||||
(Consumer) Feed data to the parser.
|
||||
|
||||
:param data: A string buffer.
|
||||
:exception IOError: If the parser failed to parse the image file.
|
||||
"""
|
||||
# collect data
|
||||
|
||||
if self.finished:
|
||||
return
|
||||
|
||||
if self.data is None:
|
||||
self.data = data
|
||||
else:
|
||||
self.data = self.data + data
|
||||
|
||||
# parse what we have
|
||||
if self.decoder:
|
||||
|
||||
if self.offset > 0:
|
||||
# skip header
|
||||
skip = min(len(self.data), self.offset)
|
||||
self.data = self.data[skip:]
|
||||
self.offset = self.offset - skip
|
||||
if self.offset > 0 or not self.data:
|
||||
return
|
||||
|
||||
n, e = self.decoder.decode(self.data)
|
||||
|
||||
if n < 0:
|
||||
# end of stream
|
||||
self.data = None
|
||||
self.finished = 1
|
||||
if e < 0:
|
||||
# decoding error
|
||||
self.image = None
|
||||
raise_ioerror(e)
|
||||
else:
|
||||
# end of image
|
||||
return
|
||||
self.data = self.data[n:]
|
||||
|
||||
elif self.image:
|
||||
|
||||
# if we end up here with no decoder, this file cannot
|
||||
# be incrementally parsed. wait until we've gotten all
|
||||
# available data
|
||||
pass
|
||||
|
||||
else:
|
||||
|
||||
# attempt to open this file
|
||||
try:
|
||||
try:
|
||||
fp = io.BytesIO(self.data)
|
||||
im = Image.open(fp)
|
||||
finally:
|
||||
fp.close() # explicitly close the virtual file
|
||||
except IOError:
|
||||
# traceback.print_exc()
|
||||
pass # not enough data
|
||||
else:
|
||||
flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
|
||||
if flag or len(im.tile) != 1:
|
||||
# custom load code, or multiple tiles
|
||||
self.decode = None
|
||||
else:
|
||||
# initialize decoder
|
||||
im.load_prepare()
|
||||
d, e, o, a = im.tile[0]
|
||||
im.tile = []
|
||||
self.decoder = Image._getdecoder(
|
||||
im.mode, d, a, im.decoderconfig
|
||||
)
|
||||
self.decoder.setimage(im.im, e)
|
||||
|
||||
# calculate decoder offset
|
||||
self.offset = o
|
||||
if self.offset <= len(self.data):
|
||||
self.data = self.data[self.offset:]
|
||||
self.offset = 0
|
||||
|
||||
self.image = im
|
||||
|
||||
def close(self):
|
||||
"""
|
||||
(Consumer) Close the stream.
|
||||
|
||||
:returns: An image object.
|
||||
:exception IOError: If the parser failed to parse the image file either
|
||||
because it cannot be identified or cannot be
|
||||
decoded.
|
||||
"""
|
||||
# finish decoding
|
||||
if self.decoder:
|
||||
# get rid of what's left in the buffers
|
||||
self.feed(b"")
|
||||
self.data = self.decoder = None
|
||||
if not self.finished:
|
||||
raise IOError("image was incomplete")
|
||||
if not self.image:
|
||||
raise IOError("cannot parse this image")
|
||||
if self.data:
|
||||
# incremental parsing not possible; reopen the file
|
||||
# now that we have all data
|
||||
try:
|
||||
fp = io.BytesIO(self.data)
|
||||
self.image = Image.open(fp)
|
||||
finally:
|
||||
self.image.load()
|
||||
fp.close() # explicitly close the virtual file
|
||||
return self.image
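
For reference, the feed/close consumer interface implemented above is driven like this; a minimal sketch assuming the system-installed PIL that this commit switches to, with chunks standing in for any iterable of byte strings (hypothetical name):

from PIL import ImageFile

def parse_stream(chunks):
    # chunks: any iterable yielding bytes, e.g. reads from a socket (hypothetical)
    parser = ImageFile.Parser()
    for chunk in chunks:
        parser.feed(chunk)      # may raise IOError on a broken file
    return parser.close()       # returns a PIL.Image.Image
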
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
def _save(im, fp, tile, bufsize=0):
|
||||
"""Helper to save image based on tile list
|
||||
|
||||
:param im: Image object.
|
||||
:param fp: File object.
|
||||
:param tile: Tile list.
|
||||
:param bufsize: Optional buffer size
|
||||
"""
|
||||
|
||||
im.load()
|
||||
if not hasattr(im, "encoderconfig"):
|
||||
im.encoderconfig = ()
|
||||
tile.sort(key=_tilesort)
|
||||
# FIXME: make MAXBLOCK a configuration parameter
|
||||
# It would be great if we could have the encoder specify what it needs
|
||||
# But, it would need at least the image size in most cases. RawEncode is
|
||||
# a tricky case.
|
||||
bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c
|
||||
try:
|
||||
fh = fp.fileno()
|
||||
fp.flush()
|
||||
except (AttributeError, io.UnsupportedOperation):
|
||||
# compress to Python file-compatible object
|
||||
for e, b, o, a in tile:
|
||||
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
|
||||
if o > 0:
|
||||
fp.seek(o, 0)
|
||||
e.setimage(im.im, b)
|
||||
while True:
|
||||
l, s, d = e.encode(bufsize)
|
||||
fp.write(d)
|
||||
if s:
|
||||
break
|
||||
if s < 0:
|
||||
raise IOError("encoder error %d when writing image file" % s)
|
||||
else:
|
||||
# slight speedup: compress to real file object
|
||||
for e, b, o, a in tile:
|
||||
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
|
||||
if o > 0:
|
||||
fp.seek(o, 0)
|
||||
e.setimage(im.im, b)
|
||||
s = e.encode_to_file(fh, bufsize)
|
||||
if s < 0:
|
||||
raise IOError("encoder error %d when writing image file" % s)
|
||||
try:
|
||||
fp.flush()
|
||||
except: pass
|
||||
|
||||
|
||||
def _safe_read(fp, size):
|
||||
"""
|
||||
Reads large blocks in a safe way. Unlike fp.read(n), this function
|
||||
doesn't trust the user. If the requested size is larger than
|
||||
SAFEBLOCK, the file is read block by block.
|
||||
|
||||
:param fp: File handle. Must implement a **read** method.
|
||||
:param size: Number of bytes to read.
|
||||
:returns: A string containing up to *size* bytes of data.
|
||||
"""
|
||||
if size <= 0:
|
||||
return b""
|
||||
if size <= SAFEBLOCK:
|
||||
return fp.read(size)
|
||||
data = []
|
||||
while size > 0:
|
||||
block = fp.read(min(size, SAFEBLOCK))
|
||||
if not block:
|
||||
break
|
||||
data.append(block)
|
||||
size -= len(block)
|
||||
return b"".join(data)
@ -1,40 +0,0 @@
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# kludge to get basic ImageFileIO functionality
|
||||
#
|
||||
# History:
|
||||
# 1998-08-06 fl Recreated
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1998-2002.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
"""
|
||||
The **ImageFileIO** module can be used to read an image from a
|
||||
socket, or any other stream device.
|
||||
|
||||
Deprecated. New code should use the :class:`PIL.ImageFile.Parser`
|
||||
class in the :mod:`PIL.ImageFile` module instead.
|
||||
|
||||
.. seealso:: modules :class:`PIL.ImageFile.Parser`
|
||||
"""
|
||||
|
||||
from io import BytesIO
|
||||
|
||||
|
||||
class ImageFileIO(BytesIO):
|
||||
def __init__(self, fp):
|
||||
"""
|
||||
Adds buffering to a stream file object, in order to
|
||||
provide **seek** and **tell** methods required
|
||||
by the :func:`PIL.Image.Image.open` method. The stream object must
|
||||
implement **read** and **close** methods.
|
||||
|
||||
:param fp: Stream file handle.
|
||||
|
||||
.. seealso:: modules :func:`PIL.Image.open`
|
||||
"""
|
||||
data = fp.read()
|
||||
BytesIO.__init__(self, data)
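
Since this deprecated wrapper simply buffers the whole stream into a BytesIO, the replacement suggested in the module docstring amounts to the following sketch, where sock is assumed to be any object with a read() method (hypothetical name):

import io
from PIL import Image

def open_from_stream(sock):
    # Equivalent of ImageFileIO: buffer the stream, then open the buffer.
    data = sock.read()
    return Image.open(io.BytesIO(data))
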
@ -1,269 +0,0 @@
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# standard filters
|
||||
#
|
||||
# History:
|
||||
# 1995-11-27 fl Created
|
||||
# 2002-06-08 fl Added rank and mode filters
|
||||
# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1995-2002 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from functools import reduce
|
||||
|
||||
|
||||
class Filter(object):
|
||||
pass
|
||||
|
||||
|
||||
class Kernel(Filter):
|
||||
"""
|
||||
Create a convolution kernel. The current version only
|
||||
supports 3x3 and 5x5 integer and floating point kernels.
|
||||
|
||||
In the current version, kernels can only be applied to
|
||||
"L" and "RGB" images.
|
||||
|
||||
:param size: Kernel size, given as (width, height). In the current
|
||||
version, this must be (3,3) or (5,5).
|
||||
:param kernel: A sequence containing kernel weights.
|
||||
:param scale: Scale factor. If given, the result for each pixel is
|
||||
divided by this value. The default is the sum of the
|
||||
kernel weights.
|
||||
:param offset: Offset. If given, this value is added to the result,
|
||||
after it has been divided by the scale factor.
|
||||
"""
|
||||
|
||||
def __init__(self, size, kernel, scale=None, offset=0):
|
||||
if scale is None:
|
||||
# default scale is sum of kernel
|
||||
scale = reduce(lambda a,b: a+b, kernel)
|
||||
if size[0] * size[1] != len(kernel):
|
||||
raise ValueError("not enough coefficients in kernel")
|
||||
self.filterargs = size, scale, offset, kernel
|
||||
|
||||
def filter(self, image):
|
||||
if image.mode == "P":
|
||||
raise ValueError("cannot filter palette images")
|
||||
return image.filter(*self.filterargs)
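
A minimal sketch of the kernel parameters described in the Kernel docstring, applied through the system PIL; the 3x3 sharpening weights are purely illustrative:

from PIL import Image, ImageFilter

im = Image.new("L", (64, 64), 128)          # any "L" or "RGB" image works
# scale defaults to the sum of the weights (here 1)
sharpen = ImageFilter.Kernel((3, 3), [ 0, -1,  0,
                                      -1,  5, -1,
                                       0, -1,  0])
out = im.filter(sharpen)
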
|
||||
|
||||
|
||||
class BuiltinFilter(Kernel):
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
|
||||
class RankFilter(Filter):
|
||||
"""
|
||||
Create a rank filter. The rank filter sorts all pixels in
|
||||
a window of the given size, and returns the **rank**'th value.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
:param rank: What pixel value to pick. Use 0 for a min filter,
|
||||
``size * size / 2`` for a median filter, ``size * size - 1``
|
||||
for a max filter, etc.
|
||||
"""
|
||||
name = "Rank"
|
||||
|
||||
def __init__(self, size, rank):
|
||||
self.size = size
|
||||
self.rank = rank
|
||||
|
||||
def filter(self, image):
|
||||
if image.mode == "P":
|
||||
raise ValueError("cannot filter palette images")
|
||||
image = image.expand(self.size//2, self.size//2)
|
||||
return image.rankfilter(self.size, self.rank)
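
To illustrate the rank convention above (0 for a min filter, size*size//2 for a median filter, size*size-1 for a max filter), a short sketch:

from PIL import ImageFilter

# Equivalent to MinFilter(3), MedianFilter(3) and MaxFilter(3) respectively
lowest  = ImageFilter.RankFilter(3, 0)
median  = ImageFilter.RankFilter(3, 3 * 3 // 2)
highest = ImageFilter.RankFilter(3, 3 * 3 - 1)
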
|
||||
|
||||
|
||||
class MedianFilter(RankFilter):
|
||||
"""
|
||||
Create a median filter. Picks the median pixel value in a window with the
|
||||
given size.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
"""
|
||||
name = "Median"
|
||||
|
||||
def __init__(self, size=3):
|
||||
self.size = size
|
||||
self.rank = size*size//2
|
||||
|
||||
|
||||
class MinFilter(RankFilter):
|
||||
"""
|
||||
Create a min filter. Picks the lowest pixel value in a window with the
|
||||
given size.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
"""
|
||||
name = "Min"
|
||||
|
||||
def __init__(self, size=3):
|
||||
self.size = size
|
||||
self.rank = 0
|
||||
|
||||
|
||||
class MaxFilter(RankFilter):
|
||||
"""
|
||||
Create a max filter. Picks the largest pixel value in a window with the
|
||||
given size.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
"""
|
||||
name = "Max"
|
||||
|
||||
def __init__(self, size=3):
|
||||
self.size = size
|
||||
self.rank = size*size-1
|
||||
|
||||
|
||||
class ModeFilter(Filter):
|
||||
"""
|
||||
|
||||
Create a mode filter. Picks the most frequent pixel value in a box with the
|
||||
given size. Pixel values that occur only once or twice are ignored; if no
|
||||
pixel value occurs more than twice, the original pixel value is preserved.
|
||||
|
||||
:param size: The kernel size, in pixels.
|
||||
"""
|
||||
name = "Mode"
|
||||
|
||||
def __init__(self, size=3):
|
||||
self.size = size
|
||||
|
||||
def filter(self, image):
|
||||
return image.modefilter(self.size)
|
||||
|
||||
|
||||
class GaussianBlur(Filter):
|
||||
"""Gaussian blur filter.
|
||||
|
||||
:param radius: Blur radius.
|
||||
"""
|
||||
name = "GaussianBlur"
|
||||
|
||||
def __init__(self, radius=2):
|
||||
self.radius = radius
|
||||
|
||||
def filter(self, image):
|
||||
return image.gaussian_blur(self.radius)
|
||||
|
||||
|
||||
class UnsharpMask(Filter):
|
||||
"""Unsharp mask filter.
|
||||
|
||||
See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
|
||||
the parameters.
|
||||
|
||||
.. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
|
||||
"""
|
||||
name = "UnsharpMask"
|
||||
|
||||
def __init__(self, radius=2, percent=150, threshold=3):
|
||||
self.radius = radius
|
||||
self.percent = percent
|
||||
self.threshold = threshold
|
||||
|
||||
def filter(self, image):
|
||||
return image.unsharp_mask(self.radius, self.percent, self.threshold)
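
Typical use of the two filters above; the parameter values are illustrative, not recommendations:

from PIL import Image, ImageFilter

im = Image.new("RGB", (64, 64), (200, 120, 40))
blurred   = im.filter(ImageFilter.GaussianBlur(radius=2))
sharpened = im.filter(ImageFilter.UnsharpMask(radius=2, percent=150, threshold=3))
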
|
||||
|
||||
|
||||
class BLUR(BuiltinFilter):
|
||||
name = "Blur"
|
||||
filterargs = (5, 5), 16, 0, (
|
||||
1, 1, 1, 1, 1,
|
||||
1, 0, 0, 0, 1,
|
||||
1, 0, 0, 0, 1,
|
||||
1, 0, 0, 0, 1,
|
||||
1, 1, 1, 1, 1
|
||||
)
|
||||
|
||||
|
||||
class CONTOUR(BuiltinFilter):
|
||||
name = "Contour"
|
||||
filterargs = (3, 3), 1, 255, (
|
||||
-1, -1, -1,
|
||||
-1, 8, -1,
|
||||
-1, -1, -1
|
||||
)
|
||||
|
||||
|
||||
class DETAIL(BuiltinFilter):
|
||||
name = "Detail"
|
||||
filterargs = (3, 3), 6, 0, (
|
||||
0, -1, 0,
|
||||
-1, 10, -1,
|
||||
0, -1, 0
|
||||
)
|
||||
|
||||
|
||||
class EDGE_ENHANCE(BuiltinFilter):
|
||||
name = "Edge-enhance"
|
||||
filterargs = (3, 3), 2, 0, (
|
||||
-1, -1, -1,
|
||||
-1, 10, -1,
|
||||
-1, -1, -1
|
||||
)
|
||||
|
||||
|
||||
class EDGE_ENHANCE_MORE(BuiltinFilter):
|
||||
name = "Edge-enhance More"
|
||||
filterargs = (3, 3), 1, 0, (
|
||||
-1, -1, -1,
|
||||
-1, 9, -1,
|
||||
-1, -1, -1
|
||||
)
|
||||
|
||||
|
||||
class EMBOSS(BuiltinFilter):
|
||||
name = "Emboss"
|
||||
filterargs = (3, 3), 1, 128, (
|
||||
-1, 0, 0,
|
||||
0, 1, 0,
|
||||
0, 0, 0
|
||||
)
|
||||
|
||||
|
||||
class FIND_EDGES(BuiltinFilter):
|
||||
name = "Find Edges"
|
||||
filterargs = (3, 3), 1, 0, (
|
||||
-1, -1, -1,
|
||||
-1, 8, -1,
|
||||
-1, -1, -1
|
||||
)
|
||||
|
||||
|
||||
class SMOOTH(BuiltinFilter):
|
||||
name = "Smooth"
|
||||
filterargs = (3, 3), 13, 0, (
|
||||
1, 1, 1,
|
||||
1, 5, 1,
|
||||
1, 1, 1
|
||||
)
|
||||
|
||||
|
||||
class SMOOTH_MORE(BuiltinFilter):
|
||||
name = "Smooth More"
|
||||
filterargs = (5, 5), 100, 0, (
|
||||
1, 1, 1, 1, 1,
|
||||
1, 5, 5, 5, 1,
|
||||
1, 5, 44, 5, 1,
|
||||
1, 5, 5, 5, 1,
|
||||
1, 1, 1, 1, 1
|
||||
)
|
||||
|
||||
|
||||
class SHARPEN(BuiltinFilter):
|
||||
name = "Sharpen"
|
||||
filterargs = (3, 3), 16, 0, (
|
||||
-2, -2, -2,
|
||||
-2, 32, -2,
|
||||
-2, -2, -2
|
||||
)
@ -1,406 +0,0 @@
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# PIL raster font management
|
||||
#
|
||||
# History:
|
||||
# 1996-08-07 fl created (experimental)
|
||||
# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3
|
||||
# 1999-02-06 fl rewrote most font management stuff in C
|
||||
# 1999-03-17 fl take pth files into account in load_path (from Richard Jones)
|
||||
# 2001-02-17 fl added freetype support
|
||||
# 2001-05-09 fl added TransposedFont wrapper class
|
||||
# 2002-03-04 fl make sure we have a "L" or "1" font
|
||||
# 2002-12-04 fl skip non-directory entries in the system path
|
||||
# 2003-04-29 fl add embedded default font
|
||||
# 2003-09-27 fl added support for truetype charmap encodings
|
||||
#
|
||||
# Todo:
|
||||
# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB
|
||||
# Copyright (c) 1996-2003 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
from PIL import Image
|
||||
from PIL._util import isDirectory, isPath
|
||||
import os, sys
|
||||
|
||||
try:
|
||||
import warnings
|
||||
except ImportError:
|
||||
warnings = None
|
||||
|
||||
class _imagingft_not_installed:
|
||||
# module placeholder
|
||||
def __getattr__(self, id):
|
||||
raise ImportError("The _imagingft C module is not installed")
|
||||
|
||||
try:
|
||||
from PIL import _imagingft as core
|
||||
except ImportError:
|
||||
core = _imagingft_not_installed()
|
||||
|
||||
# FIXME: add support for pilfont2 format (see FontFile.py)
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Font metrics format:
|
||||
# "PILfont" LF
|
||||
# fontdescriptor LF
|
||||
# (optional) key=value... LF
|
||||
# "DATA" LF
|
||||
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
|
||||
#
|
||||
# To place a character, cut out srcbox and paste at dstbox,
|
||||
# relative to the character position. Then move the character
|
||||
# position according to dx, dy.
|
||||
# --------------------------------------------------------------------
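
A minimal sketch of reading just the header laid out above; the file path is hypothetical and the 256*10*2-byte metrics block is returned unparsed:

def read_pilfont_header(path):
    with open(path, "rb") as f:
        if f.readline() != b"PILfont\n":
            raise SyntaxError("not a PILfont file")
        descriptor = f.readline()          # fontdescriptor line
        options = []
        while True:
            line = f.readline()
            if not line or line == b"DATA\n":
                break
            options.append(line)           # optional key=value lines
        metrics = f.read(256 * 10 * 2)     # dx, dy, dstbox, srcbox per glyph
    return descriptor, options, metrics
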
|
||||
|
||||
|
||||
class ImageFont:
|
||||
"PIL font wrapper"
|
||||
|
||||
def _load_pilfont(self, filename):
|
||||
|
||||
file = open(filename, "rb")
|
||||
|
||||
for ext in (".png", ".gif", ".pbm"):
|
||||
try:
|
||||
fullname = os.path.splitext(filename)[0] + ext
|
||||
image = Image.open(fullname)
|
||||
except:
|
||||
pass
|
||||
else:
|
||||
if image and image.mode in ("1", "L"):
|
||||
break
|
||||
else:
|
||||
raise IOError("cannot find glyph data file")
|
||||
|
||||
self.file = fullname
|
||||
|
||||
return self._load_pilfont_data(file, image)
|
||||
|
||||
def _load_pilfont_data(self, file, image):
|
||||
|
||||
# read PILfont header
|
||||
if file.readline() != b"PILfont\n":
|
||||
raise SyntaxError("Not a PILfont file")
|
||||
d = file.readline().split(b";")
|
||||
self.info = [] # FIXME: should be a dictionary
|
||||
while True:
|
||||
s = file.readline()
|
||||
if not s or s == b"DATA\n":
|
||||
break
|
||||
self.info.append(s)
|
||||
|
||||
# read PILfont metrics
|
||||
data = file.read(256*20)
|
||||
|
||||
# check image
|
||||
if image.mode not in ("1", "L"):
|
||||
raise TypeError("invalid font image mode")
|
||||
|
||||
image.load()
|
||||
|
||||
self.font = Image.core.font(image.im, data)
|
||||
|
||||
# delegate critical operations to internal type
|
||||
self.getsize = self.font.getsize
|
||||
self.getmask = self.font.getmask
|
||||
|
||||
##
|
||||
# Wrapper for FreeType fonts. Application code should use the
|
||||
# <b>truetype</b> factory function to create font objects.
|
||||
|
||||
class FreeTypeFont:
|
||||
"FreeType font wrapper (requires _imagingft service)"
|
||||
|
||||
def __init__(self, font=None, size=10, index=0, encoding="", file=None):
|
||||
# FIXME: use service provider instead
|
||||
if file:
|
||||
if warnings:
|
||||
warnings.warn('file parameter deprecated, please use font parameter instead.', DeprecationWarning)
|
||||
font = file
|
||||
|
||||
if isPath(font):
|
||||
self.font = core.getfont(font, size, index, encoding)
|
||||
else:
|
||||
self.font_bytes = font.read()
|
||||
self.font = core.getfont("", size, index, encoding, self.font_bytes)
|
||||
|
||||
def getname(self):
|
||||
return self.font.family, self.font.style
|
||||
|
||||
def getmetrics(self):
|
||||
return self.font.ascent, self.font.descent
|
||||
|
||||
def getsize(self, text):
|
||||
return self.font.getsize(text)[0]
|
||||
|
||||
def getoffset(self, text):
|
||||
return self.font.getsize(text)[1]
|
||||
|
||||
def getmask(self, text, mode=""):
|
||||
return self.getmask2(text, mode)[0]
|
||||
|
||||
def getmask2(self, text, mode="", fill=Image.core.fill):
|
||||
size, offset = self.font.getsize(text)
|
||||
im = fill("L", size, 0)
|
||||
self.font.render(text, im.id, mode=="1")
|
||||
return im, offset
|
||||
|
||||
##
|
||||
# Wrapper that creates a transposed font from any existing font
|
||||
# object.
|
||||
#
|
||||
# @param font A font object.
|
||||
# @param orientation An optional orientation. If given, this should
|
||||
# be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
|
||||
# Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.
|
||||
|
||||
class TransposedFont:
|
||||
"Wrapper for writing rotated or mirrored text"
|
||||
|
||||
def __init__(self, font, orientation=None):
|
||||
self.font = font
|
||||
self.orientation = orientation # any 'transpose' argument, or None
|
||||
|
||||
def getsize(self, text):
|
||||
w, h = self.font.getsize(text)
|
||||
if self.orientation in (Image.ROTATE_90, Image.ROTATE_270):
|
||||
return h, w
|
||||
return w, h
|
||||
|
||||
def getmask(self, text, mode=""):
|
||||
im = self.font.getmask(text, mode)
|
||||
if self.orientation is not None:
|
||||
return im.transpose(self.orientation)
|
||||
return im
|
||||
|
||||
|
||||
def load(filename):
|
||||
"""
|
||||
Load a font file. This function loads a font object from the given
|
||||
bitmap font file, and returns the corresponding font object.
|
||||
|
||||
:param filename: Name of font file.
|
||||
:return: A font object.
|
||||
:exception IOError: If the file could not be read.
|
||||
"""
|
||||
f = ImageFont()
|
||||
f._load_pilfont(filename)
|
||||
return f
|
||||
|
||||
|
||||
def truetype(font=None, size=10, index=0, encoding="", filename=None):
|
||||
"""
|
||||
Load a TrueType or OpenType font file, and create a font object.
|
||||
This function loads a font object from the given file, and creates
|
||||
a font object for a font of the given size.
|
||||
|
||||
This function requires the _imagingft service.
|
||||
|
||||
:param font: A TrueType font file. Under Windows, if the file
             is not found under this name, the loader also looks in the
             Windows :file:`fonts/` directory.
|
||||
:param size: The requested size, in points.
|
||||
:param index: Which font face to load (default is first available face).
|
||||
:param encoding: Which font encoding to use (default is Unicode). Common
|
||||
encodings are "unic" (Unicode), "symb" (Microsoft
|
||||
Symbol), "ADOB" (Adobe Standard), "ADBE" (Adobe Expert),
|
||||
and "armn" (Apple Roman). See the FreeType documentation
|
||||
for more information.
|
||||
:return: A font object.
|
||||
:exception IOError: If the file could not be read.
|
||||
"""
|
||||
|
||||
if filename:
|
||||
if warnings:
|
||||
warnings.warn('filename parameter deprecated, please use font parameter instead.', DeprecationWarning)
|
||||
font = filename
|
||||
|
||||
try:
|
||||
return FreeTypeFont(font, size, index, encoding)
|
||||
except IOError:
|
||||
if sys.platform == "win32":
|
||||
# check the windows font repository
|
||||
# NOTE: must use uppercase WINDIR, to work around bugs in
|
||||
# 1.5.2's os.environ.get()
|
||||
windir = os.environ.get("WINDIR")
|
||||
if windir:
|
||||
filename = os.path.join(windir, "fonts", font)
|
||||
return FreeTypeFont(filename, size, index, encoding)
|
||||
raise
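
Typical call, assuming a FreeType-enabled build; the font file name is hypothetical and must exist on disk (or, on Windows, in the fonts directory mentioned above):

from PIL import ImageFont

font = ImageFont.truetype("DejaVuSans.ttf", size=24)
print(font.getname())          # (family, style)
print(font.getsize("hello"))   # (width, height) in pixels
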
|
||||
|
||||
|
||||
def load_path(filename):
|
||||
"""
|
||||
Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a
|
||||
bitmap font along the Python path.
|
||||
|
||||
:param filename: Name of font file.
|
||||
:return: A font object.
|
||||
:exception IOError: If the file could not be read.
|
||||
"""
|
||||
for dir in sys.path:
|
||||
if isDirectory(dir):
|
||||
if not isinstance(filename, str):
|
||||
if bytes is str:
|
||||
filename = filename.encode("utf-8")
|
||||
else:
|
||||
filename = filename.decode("utf-8")
|
||||
try:
|
||||
return load(os.path.join(dir, filename))
|
||||
except IOError:
|
||||
pass
|
||||
raise IOError("cannot find font file")
|
||||
|
||||
|
||||
def load_default():
|
||||
"""Load a "better than nothing" default font.
|
||||
|
||||
.. versionadded:: 1.1.4
|
||||
|
||||
:return: A font object.
|
||||
"""
|
||||
from io import BytesIO
|
||||
import base64
|
||||
f = ImageFont()
|
||||
f._load_pilfont_data(
|
||||
# courB08
|
||||
BytesIO(base64.decodestring(b'''
|
||||
UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA
|
||||
BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL
|
||||
AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA
|
||||
AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB
|
||||
ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A
|
||||
BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB
|
||||
//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA
|
||||
AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH
|
||||
AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA
|
||||
ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv
|
||||
AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/
|
||||
/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5
|
||||
AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA
|
||||
AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG
|
||||
AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA
|
||||
BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA
|
||||
AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA
|
||||
2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF
|
||||
AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////
|
||||
+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA
|
||||
////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA
|
||||
BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv
|
||||
AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA
|
||||
AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA
|
||||
AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA
|
||||
BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//
|
||||
//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA
|
||||
AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF
|
||||
AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB
|
||||
mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn
|
||||
AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA
|
||||
AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7
|
||||
AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA
|
||||
Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
|
||||
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB
|
||||
//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA
|
||||
AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ
|
||||
AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC
|
||||
DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ
|
||||
AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/
|
||||
+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5
|
||||
AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/
|
||||
///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG
|
||||
AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA
|
||||
BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA
|
||||
Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC
|
||||
eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG
|
||||
AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////
|
||||
+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA
|
||||
////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA
|
||||
BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT
|
||||
AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A
|
||||
AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA
|
||||
Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA
|
||||
Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//
|
||||
//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA
|
||||
AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ
|
||||
AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA
|
||||
LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5
|
||||
AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA
|
||||
AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5
|
||||
AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA
|
||||
AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG
|
||||
AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA
|
||||
EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK
|
||||
AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA
|
||||
pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG
|
||||
AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////
|
||||
+QAGAAIAzgAKANUAEw==
|
||||
''')), Image.open(BytesIO(base64.decodestring(b'''
|
||||
iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u
|
||||
Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9
|
||||
M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g
|
||||
LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F
|
||||
IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA
|
||||
Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791
|
||||
NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx
|
||||
in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9
|
||||
SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY
|
||||
AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt
|
||||
y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG
|
||||
ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY
|
||||
lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H
|
||||
/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3
|
||||
AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47
|
||||
c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/
|
||||
/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw
|
||||
pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv
|
||||
oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR
|
||||
evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA
|
||||
AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//
|
||||
Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR
|
||||
w7IkEbzhVQAAAABJRU5ErkJggg==
|
||||
'''))))
|
||||
return f
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# create font data chunk for embedding
|
||||
import base64, os, sys
|
||||
font = "../Tests/images/courB08"
|
||||
print(" f._load_pilfont_data(")
|
||||
print(" # %s" % os.path.basename(font))
|
||||
print(" BytesIO(base64.decodestring(b'''")
|
||||
base64.encode(open(font + ".pil", "rb"), sys.stdout)
|
||||
print("''')), Image.open(BytesIO(base64.decodestring(b'''")
|
||||
base64.encode(open(font + ".pbm", "rb"), sys.stdout)
|
||||
print("'''))))")
@ -1,49 +0,0 @@
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# screen grabber (windows only)
|
||||
#
|
||||
# History:
|
||||
# 2001-04-26 fl created
|
||||
# 2001-09-17 fl use builtin driver, if present
|
||||
# 2002-11-19 fl added grabclipboard support
|
||||
#
|
||||
# Copyright (c) 2001-2002 by Secret Labs AB
|
||||
# Copyright (c) 2001-2002 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
|
||||
|
||||
try:
|
||||
# built-in driver (1.1.3 and later)
|
||||
grabber = Image.core.grabscreen
|
||||
except AttributeError:
|
||||
# stand-alone driver (pil plus)
|
||||
import _grabscreen
|
||||
grabber = _grabscreen.grab
|
||||
|
||||
|
||||
def grab(bbox=None):
|
||||
size, data = grabber()
|
||||
im = Image.frombytes(
|
||||
"RGB", size, data,
|
||||
# RGB, 32-bit line padding, origin in lower left corner
|
||||
"raw", "BGR", (size[0]*3 + 3) & -4, -1
|
||||
)
|
||||
if bbox:
|
||||
im = im.crop(bbox)
|
||||
return im
|
||||
|
||||
|
||||
def grabclipboard():
|
||||
debug = 0 # temporary interface
|
||||
data = Image.core.grabclipboard(debug)
|
||||
if isinstance(data, bytes):
|
||||
from PIL import BmpImagePlugin
|
||||
import io
|
||||
return BmpImagePlugin.DibImageFile(io.BytesIO(data))
|
||||
return data
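
Usage of the two entry points above (Windows only, per the module header); the bounding box is illustrative:

from PIL import ImageGrab

screenshot = ImageGrab.grab()                     # full screen
region = ImageGrab.grab(bbox=(0, 0, 400, 300))    # left, upper, right, lower
clip = ImageGrab.grabclipboard()                  # an image, a list of file names, or None
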
@ -1,227 +0,0 @@
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# a simple math add-on for the Python Imaging Library
|
||||
#
|
||||
# History:
|
||||
# 1999-02-15 fl Original PIL Plus release
|
||||
# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6
|
||||
# 2005-09-12 fl Fixed int() and float() for Python 2.4.1
|
||||
#
|
||||
# Copyright (c) 1999-2005 by Secret Labs AB
|
||||
# Copyright (c) 2005 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
from PIL import _imagingmath
|
||||
|
||||
try:
|
||||
import builtins
|
||||
except ImportError:
|
||||
import __builtin__
|
||||
builtins = __builtin__
|
||||
|
||||
VERBOSE = 0
|
||||
|
||||
def _isconstant(v):
|
||||
return isinstance(v, int) or isinstance(v, float)
|
||||
|
||||
class _Operand:
|
||||
# wraps an image operand, providing standard operators
|
||||
|
||||
def __init__(self, im):
|
||||
self.im = im
|
||||
|
||||
def __fixup(self, im1):
|
||||
# convert image to suitable mode
|
||||
if isinstance(im1, _Operand):
|
||||
# argument was an image.
|
||||
if im1.im.mode in ("1", "L"):
|
||||
return im1.im.convert("I")
|
||||
elif im1.im.mode in ("I", "F"):
|
||||
return im1.im
|
||||
else:
|
||||
raise ValueError("unsupported mode: %s" % im1.im.mode)
|
||||
else:
|
||||
# argument was a constant
|
||||
if _isconstant(im1) and self.im.mode in ("1", "L", "I"):
|
||||
return Image.new("I", self.im.size, im1)
|
||||
else:
|
||||
return Image.new("F", self.im.size, im1)
|
||||
|
||||
def apply(self, op, im1, im2=None, mode=None):
|
||||
im1 = self.__fixup(im1)
|
||||
if im2 is None:
|
||||
# unary operation
|
||||
out = Image.new(mode or im1.mode, im1.size, None)
|
||||
im1.load()
|
||||
try:
|
||||
op = getattr(_imagingmath, op+"_"+im1.mode)
|
||||
except AttributeError:
|
||||
raise TypeError("bad operand type for '%s'" % op)
|
||||
_imagingmath.unop(op, out.im.id, im1.im.id)
|
||||
else:
|
||||
# binary operation
|
||||
im2 = self.__fixup(im2)
|
||||
if im1.mode != im2.mode:
|
||||
# convert both arguments to floating point
|
||||
if im1.mode != "F": im1 = im1.convert("F")
|
||||
if im2.mode != "F": im2 = im2.convert("F")
|
||||
if im1.mode != im2.mode:
|
||||
raise ValueError("mode mismatch")
|
||||
if im1.size != im2.size:
|
||||
# crop both arguments to a common size
|
||||
size = (min(im1.size[0], im2.size[0]),
|
||||
min(im1.size[1], im2.size[1]))
|
||||
if im1.size != size: im1 = im1.crop((0, 0) + size)
|
||||
if im2.size != size: im2 = im2.crop((0, 0) + size)
|
||||
out = Image.new(mode or im1.mode, size, None)
|
||||
else:
|
||||
out = Image.new(mode or im1.mode, im1.size, None)
|
||||
im1.load(); im2.load()
|
||||
try:
|
||||
op = getattr(_imagingmath, op+"_"+im1.mode)
|
||||
except AttributeError:
|
||||
raise TypeError("bad operand type for '%s'" % op)
|
||||
_imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id)
|
||||
return _Operand(out)
|
||||
|
||||
# unary operators
|
||||
def __bool__(self):
|
||||
# an image is "true" if it contains at least one non-zero pixel
|
||||
return self.im.getbbox() is not None
|
||||
|
||||
if bytes is str:
|
||||
# Provide __nonzero__ for pre-Py3k
|
||||
__nonzero__ = __bool__
|
||||
del __bool__
|
||||
|
||||
def __abs__(self):
|
||||
return self.apply("abs", self)
|
||||
def __pos__(self):
|
||||
return self
|
||||
def __neg__(self):
|
||||
return self.apply("neg", self)
|
||||
|
||||
# binary operators
|
||||
def __add__(self, other):
|
||||
return self.apply("add", self, other)
|
||||
def __radd__(self, other):
|
||||
return self.apply("add", other, self)
|
||||
def __sub__(self, other):
|
||||
return self.apply("sub", self, other)
|
||||
def __rsub__(self, other):
|
||||
return self.apply("sub", other, self)
|
||||
def __mul__(self, other):
|
||||
return self.apply("mul", self, other)
|
||||
def __rmul__(self, other):
|
||||
return self.apply("mul", other, self)
|
||||
def __truediv__(self, other):
|
||||
return self.apply("div", self, other)
|
||||
def __rtruediv__(self, other):
|
||||
return self.apply("div", other, self)
|
||||
def __mod__(self, other):
|
||||
return self.apply("mod", self, other)
|
||||
def __rmod__(self, other):
|
||||
return self.apply("mod", other, self)
|
||||
def __pow__(self, other):
|
||||
return self.apply("pow", self, other)
|
||||
def __rpow__(self, other):
|
||||
return self.apply("pow", other, self)
|
||||
|
||||
if bytes is str:
|
||||
# Provide __div__ and __rdiv__ for pre-Py3k
|
||||
__div__ = __truediv__
|
||||
__rdiv__ = __rtruediv__
|
||||
del __truediv__
|
||||
del __rtruediv__
|
||||
|
||||
# bitwise
|
||||
def __invert__(self):
|
||||
return self.apply("invert", self)
|
||||
def __and__(self, other):
|
||||
return self.apply("and", self, other)
|
||||
def __rand__(self, other):
|
||||
return self.apply("and", other, self)
|
||||
def __or__(self, other):
|
||||
return self.apply("or", self, other)
|
||||
def __ror__(self, other):
|
||||
return self.apply("or", other, self)
|
||||
def __xor__(self, other):
|
||||
return self.apply("xor", self, other)
|
||||
def __rxor__(self, other):
|
||||
return self.apply("xor", other, self)
|
||||
def __lshift__(self, other):
|
||||
return self.apply("lshift", self, other)
|
||||
def __rshift__(self, other):
|
||||
return self.apply("rshift", self, other)
|
||||
|
||||
# logical
|
||||
def __eq__(self, other):
|
||||
return self.apply("eq", self, other)
|
||||
def __ne__(self, other):
|
||||
return self.apply("ne", self, other)
|
||||
def __lt__(self, other):
|
||||
return self.apply("lt", self, other)
|
||||
def __le__(self, other):
|
||||
return self.apply("le", self, other)
|
||||
def __gt__(self, other):
|
||||
return self.apply("gt", self, other)
|
||||
def __ge__(self, other):
|
||||
return self.apply("ge", self, other)
|
||||
|
||||
# conversions
|
||||
def imagemath_int(self):
|
||||
return _Operand(self.im.convert("I"))
|
||||
def imagemath_float(self):
|
||||
return _Operand(self.im.convert("F"))
|
||||
|
||||
# logical
|
||||
def imagemath_equal(self, other):
|
||||
return self.apply("eq", self, other, mode="I")
|
||||
def imagemath_notequal(self, other):
|
||||
return self.apply("ne", self, other, mode="I")
|
||||
|
||||
def imagemath_min(self, other):
|
||||
return self.apply("min", self, other)
|
||||
def imagemath_max(self, other):
|
||||
return self.apply("max", self, other)
|
||||
|
||||
def imagemath_convert(self, mode):
|
||||
return _Operand(self.im.convert(mode))
|
||||
|
||||
ops = {}
|
||||
for k, v in list(globals().items()):
|
||||
if k[:10] == "imagemath_":
|
||||
ops[k[10:]] = v
|
||||
|
||||
|
||||
def eval(expression, _dict={}, **kw):
|
||||
"""
|
||||
Evaluates an image expression.
|
||||
|
||||
:param expression: A string containing a Python-style expression.
|
||||
:param options: Values to add to the evaluation context. You
|
||||
can either use a dictionary, or one or more keyword
|
||||
arguments.
|
||||
:return: The evaluated expression. This is usually an image object, but can
|
||||
also be an integer, a floating point value, or a pixel tuple,
|
||||
depending on the expression.
|
||||
"""
|
||||
|
||||
# build execution namespace
|
||||
args = ops.copy()
|
||||
args.update(_dict)
|
||||
args.update(kw)
|
||||
for k, v in list(args.items()):
|
||||
if hasattr(v, "im"):
|
||||
args[k] = _Operand(v)
|
||||
|
||||
out = builtins.eval(expression, args)
|
||||
try:
|
||||
return out.im
|
||||
except AttributeError:
|
||||
return out
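
The expression language accepts the wrapped operators plus the imagemath_* helpers (without the prefix); a short sketch averaging two images:

from PIL import Image, ImageMath

a = Image.new("L", (32, 32), 100)
b = Image.new("L", (32, 32), 50)
# operands are promoted to "I"/"F" internally; convert back to "L" at the end
out = ImageMath.eval("convert((a + b) / 2, 'L')", a=a, b=b)
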
@ -1,50 +0,0 @@
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# standard mode descriptors
|
||||
#
|
||||
# History:
|
||||
# 2006-03-20 fl Added
|
||||
#
|
||||
# Copyright (c) 2006 by Secret Labs AB.
|
||||
# Copyright (c) 2006 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
# mode descriptor cache
|
||||
_modes = {}
|
||||
|
||||
##
|
||||
# Wrapper for mode strings.
|
||||
|
||||
class ModeDescriptor:
|
||||
|
||||
def __init__(self, mode, bands, basemode, basetype):
|
||||
self.mode = mode
|
||||
self.bands = bands
|
||||
self.basemode = basemode
|
||||
self.basetype = basetype
|
||||
|
||||
def __str__(self):
|
||||
return self.mode
|
||||
|
||||
##
|
||||
# Gets a mode descriptor for the given mode.
|
||||
|
||||
def getmode(mode):
|
||||
if not _modes:
|
||||
# initialize mode cache
|
||||
from PIL import Image
|
||||
# core modes
|
||||
for m, (basemode, basetype, bands) in Image._MODEINFO.items():
|
||||
_modes[m] = ModeDescriptor(m, bands, basemode, basetype)
|
||||
# extra experimental modes
|
||||
_modes["LA"] = ModeDescriptor("LA", ("L", "A"), "L", "L")
|
||||
_modes["PA"] = ModeDescriptor("PA", ("P", "A"), "RGB", "L")
|
||||
# mapping modes
|
||||
_modes["I;16"] = ModeDescriptor("I;16", "I", "L", "L")
|
||||
_modes["I;16L"] = ModeDescriptor("I;16L", "I", "L", "L")
|
||||
_modes["I;16B"] = ModeDescriptor("I;16B", "I", "L", "L")
|
||||
return _modes[mode]
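
Example of querying the descriptor cache above:

from PIL import ImageMode

desc = ImageMode.getmode("RGB")
print(desc.mode, desc.bands, desc.basemode, desc.basetype)   # RGB ('R', 'G', 'B') RGB L
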
@ -1,244 +0,0 @@
# A binary morphology add-on for the Python Imaging Library
|
||||
#
|
||||
# History:
|
||||
# 2014-06-04 Initial version.
|
||||
#
|
||||
# Copyright (c) 2014 Dov Grobgeld <dov.grobgeld@gmail.com>
|
||||
|
||||
from PIL import Image
|
||||
from PIL import _imagingmorph
|
||||
import re
|
||||
|
||||
LUT_SIZE = 1 << 9
|
||||
|
||||
|
||||
class LutBuilder:
|
||||
"""A class for building a MorphLut from a descriptive language
|
||||
|
||||
The input patterns are given as a list of strings like this:
|
||||
|
||||
4:(...
|
||||
.1.
|
||||
111)->1
|
||||
|
||||
(whitespaces including linebreaks are ignored). The option 4
|
||||
describes a series of symmetry operations (in this case a
|
||||
4-rotation), the pattern is described by:
|
||||
|
||||
. or X - Ignore
|
||||
1 - Pixel is on
|
||||
0 - Pixel is off
|
||||
|
||||
The result of the operation is described after "->" string.
|
||||
|
||||
The default is to return the current pixel value, which is
|
||||
returned if no other match is found.
|
||||
|
||||
Operations:
|
||||
4 - 4 way rotation
|
||||
N - Negate
|
||||
1 - Dummy op for no other operation (an op must always be given)
|
||||
M - Mirroring
|
||||
|
||||
Example:
|
||||
|
||||
lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
|
||||
lut = lb.build_lut()
|
||||
|
||||
"""
|
||||
def __init__(self, patterns=None, op_name=None):
|
||||
if patterns is not None:
|
||||
self.patterns = patterns
|
||||
else:
|
||||
self.patterns = []
|
||||
self.lut = None
|
||||
if op_name is not None:
|
||||
known_patterns = {
|
||||
'corner': ['1:(... ... ...)->0',
|
||||
'4:(00. 01. ...)->1'],
|
||||
'dilation4': ['4:(... .0. .1.)->1'],
|
||||
'dilation8': ['4:(... .0. .1.)->1',
|
||||
'4:(... .0. ..1)->1'],
|
||||
'erosion4': ['4:(... .1. .0.)->0'],
|
||||
'erosion8': ['4:(... .1. .0.)->0',
|
||||
'4:(... .1. ..0)->0'],
|
||||
'edge': ['1:(... ... ...)->0',
|
||||
'4:(.0. .1. ...)->1',
|
||||
'4:(01. .1. ...)->1']
|
||||
}
|
||||
if op_name not in known_patterns:
|
||||
raise Exception('Unknown pattern '+op_name+'!')
|
||||
|
||||
self.patterns = known_patterns[op_name]
|
||||
|
||||
def add_patterns(self, patterns):
|
||||
self.patterns += patterns
|
||||
|
||||
def build_default_lut(self):
|
||||
symbols = [0, 1]
|
||||
m = 1 << 4 # pos of current pixel
|
||||
self.lut = bytearray([symbols[(i & m) > 0] for i in range(LUT_SIZE)])
|
||||
|
||||
def get_lut(self):
|
||||
return self.lut
|
||||
|
||||
def _string_permute(self, pattern, permutation):
|
||||
"""string_permute takes a pattern and a permutation and returns the
|
||||
string permuted according to the permutation list.
|
||||
"""
|
||||
assert(len(permutation) == 9)
|
||||
return ''.join([pattern[p] for p in permutation])
|
||||
|
||||
def _pattern_permute(self, basic_pattern, options, basic_result):
|
||||
"""pattern_permute takes a basic pattern and its result and clones
|
||||
the pattern according to the modifications described in the $options
|
||||
parameter. It returns a list of all cloned patterns."""
|
||||
patterns = [(basic_pattern, basic_result)]
|
||||
|
||||
# rotations
|
||||
if '4' in options:
|
||||
res = patterns[-1][1]
|
||||
for i in range(4):
|
||||
patterns.append(
|
||||
(self._string_permute(patterns[-1][0], [6, 3, 0,
|
||||
7, 4, 1,
|
||||
8, 5, 2]), res))
|
||||
# mirror
|
||||
if 'M' in options:
|
||||
n = len(patterns)
|
||||
for pattern, res in patterns[0:n]:
|
||||
patterns.append(
|
||||
(self._string_permute(pattern, [2, 1, 0,
|
||||
5, 4, 3,
|
||||
8, 7, 6]), res))
|
||||
|
||||
# negate
|
||||
if 'N' in options:
|
||||
n = len(patterns)
|
||||
for pattern, res in patterns[0:n]:
|
||||
# Swap 0 and 1
|
||||
pattern = (pattern
|
||||
.replace('0', 'Z')
|
||||
.replace('1', '0')
|
||||
.replace('Z', '1'))
|
||||
res = '%d' % (1-int(res))
|
||||
patterns.append((pattern, res))
|
||||
|
||||
return patterns
|
||||
|
||||
def build_lut(self):
|
||||
"""Compile all patterns into a morphology lut.
|
||||
|
||||
TBD :Build based on (file) morphlut:modify_lut
|
||||
"""
|
||||
self.build_default_lut()
|
||||
patterns = []
|
||||
|
||||
# Parse and create symmetries of the patterns strings
|
||||
for p in self.patterns:
|
||||
m = re.search(
|
||||
r'(\w*):?\s*\((.+?)\)\s*->\s*(\d)', p.replace('\n', ''))
|
||||
if not m:
|
||||
raise Exception('Syntax error in pattern "'+p+'"')
|
||||
options = m.group(1)
|
||||
pattern = m.group(2)
|
||||
result = int(m.group(3))
|
||||
|
||||
# Get rid of spaces
|
||||
pattern = pattern.replace(' ', '').replace('\n', '')
|
||||
|
||||
patterns += self._pattern_permute(pattern, options, result)
|
||||
|
||||
# # Debugging
|
||||
# for p,r in patterns:
|
||||
# print p,r
|
||||
# print '--'
|
||||
|
||||
# compile the patterns into regular expressions for speed
|
||||
for i in range(len(patterns)):
|
||||
p = patterns[i][0].replace('.', 'X').replace('X', '[01]')
|
||||
p = re.compile(p)
|
||||
patterns[i] = (p, patterns[i][1])
|
||||
|
||||
# Step through table and find patterns that match.
|
||||
# Note that all the patterns are searched. The last one
|
||||
# caught overrides
|
||||
for i in range(LUT_SIZE):
|
||||
# Build the bit pattern
|
||||
bitpattern = bin(i)[2:]
|
||||
bitpattern = ('0'*(9-len(bitpattern)) + bitpattern)[::-1]
|
||||
|
||||
for p, r in patterns:
|
||||
if p.match(bitpattern):
|
||||
self.lut[i] = [0, 1][r]
|
||||
|
||||
return self.lut
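
Building a LUT from the pattern language described in the class docstring; this mirrors the docstring's own example:

from PIL import ImageMorph

lb = ImageMorph.LutBuilder(patterns=["4:(... .1. 111)->1"])
lut = lb.build_lut()    # bytearray with one entry per 3x3 neighbourhood (512 entries)
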
|
||||
|
||||
|
||||
class MorphOp:
|
||||
"""A class for binary morphological operators"""
|
||||
|
||||
def __init__(self,
|
||||
lut=None,
|
||||
op_name=None,
|
||||
patterns=None):
|
||||
"""Create a binary morphological operator"""
|
||||
self.lut = lut
|
||||
if op_name is not None:
|
||||
self.lut = LutBuilder(op_name=op_name).build_lut()
|
||||
elif patterns is not None:
|
||||
self.lut = LutBuilder(patterns=patterns).build_lut()
|
||||
|
||||
def apply(self, image):
|
||||
"""Run a single morphological operation on an image
|
||||
|
||||
Returns a tuple of the number of changed pixels and the
|
||||
morphed image"""
|
||||
if self.lut is None:
|
||||
raise Exception('No operator loaded')
|
||||
|
||||
outimage = Image.new(image.mode, image.size, None)
|
||||
count = _imagingmorph.apply(
|
||||
bytes(self.lut), image.im.id, outimage.im.id)
|
||||
return count, outimage
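
Applying one of the named operators; the input is expected to be a binary image (mode "L", pixel values 0 or 255), created here purely for illustration:

from PIL import Image, ImageMorph

im = Image.new("L", (16, 16), 0)
im.putpixel((8, 8), 255)

op = ImageMorph.MorphOp(op_name="dilation8")
changed, dilated = op.apply(im)   # (number of changed pixels, morphed image)
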
|
||||
|
||||
def match(self, image):
|
||||
"""Get a list of coordinates matching the morphological operation on
|
||||
an image.
|
||||
|
||||
Returns a list of tuples of (x,y) coordinates
|
||||
of all matching pixels."""
|
||||
if self.lut is None:
|
||||
raise Exception('No operator loaded')
|
||||
|
||||
return _imagingmorph.match(bytes(self.lut), image.im.id)
|
||||
|
||||
def get_on_pixels(self, image):
|
||||
"""Get a list of all turned on pixels in a binary image
|
||||
|
||||
Returns a list of tuples of (x,y) coordinates
|
||||
of all matching pixels."""
|
||||
|
||||
return _imagingmorph.get_on_pixels(image.im.id)
|
||||
|
||||
def load_lut(self, filename):
|
||||
"""Load an operator from an mrl file"""
|
||||
with open(filename, 'rb') as f:
|
||||
self.lut = bytearray(f.read())
|
||||
|
||||
if len(self.lut) != 8192:
|
||||
self.lut = None
|
||||
raise Exception('Wrong size operator file!')
|
||||
|
||||
def save_lut(self, filename):
|
||||
"""Save an operator to an mrl file"""
|
||||
if self.lut is None:
|
||||
raise Exception('No operator loaded')
|
||||
with open(filename, 'wb') as f:
|
||||
f.write(self.lut)
|
||||
|
||||
def set_lut(self, lut):
|
||||
"""Set the lut from an external source"""
|
||||
self.lut = lut
|
||||
|
||||
# End of file
@ -1,436 +0,0 @@
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# standard image operations
|
||||
#
|
||||
# History:
|
||||
# 2001-10-20 fl Created
|
||||
# 2001-10-23 fl Added autocontrast operator
|
||||
# 2001-12-18 fl Added Kevin's fit operator
|
||||
# 2004-03-14 fl Fixed potential division by zero in equalize
|
||||
# 2005-05-05 fl Fixed equalize for low number of values
|
||||
#
|
||||
# Copyright (c) 2001-2004 by Secret Labs AB
|
||||
# Copyright (c) 2001-2004 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
from PIL._util import isStringType
|
||||
import operator
|
||||
from functools import reduce
|
||||
|
||||
#
|
||||
# helpers
|
||||
|
||||
def _border(border):
|
||||
if isinstance(border, tuple):
|
||||
if len(border) == 2:
|
||||
left, top = right, bottom = border
|
||||
elif len(border) == 4:
|
||||
left, top, right, bottom = border
|
||||
else:
|
||||
left = top = right = bottom = border
|
||||
return left, top, right, bottom
|
||||
|
||||
def _color(color, mode):
|
||||
if isStringType(color):
|
||||
from PIL import ImageColor
|
||||
color = ImageColor.getcolor(color, mode)
|
||||
return color
|
||||
|
||||
def _lut(image, lut):
|
||||
if image.mode == "P":
|
||||
# FIXME: apply to lookup table, not image data
|
||||
raise NotImplementedError("mode P support coming soon")
|
||||
elif image.mode in ("L", "RGB"):
|
||||
if image.mode == "RGB" and len(lut) == 256:
|
||||
lut = lut + lut + lut
|
||||
return image.point(lut)
|
||||
else:
|
||||
raise IOError("not supported for this image mode")
|
||||
|
||||
#
|
||||
# actions
|
||||
|
||||
|
||||
def autocontrast(image, cutoff=0, ignore=None):
|
||||
"""
|
||||
Maximize (normalize) image contrast. This function calculates a
|
||||
histogram of the input image, removes **cutoff** percent of the
|
||||
lightest and darkest pixels from the histogram, and remaps the image
|
||||
so that the darkest pixel becomes black (0), and the lightest
|
||||
becomes white (255).
|
||||
|
||||
:param image: The image to process.
|
||||
:param cutoff: How many percent to cut off from the histogram.
|
||||
:param ignore: The background pixel value (use None for no background).
|
||||
:return: An image.
|
||||
"""
|
||||
histogram = image.histogram()
|
||||
lut = []
|
||||
for layer in range(0, len(histogram), 256):
|
||||
h = histogram[layer:layer+256]
|
||||
if ignore is not None:
|
||||
# get rid of outliers
|
||||
try:
|
||||
h[ignore] = 0
|
||||
except TypeError:
|
||||
# assume sequence
|
||||
for ix in ignore:
|
||||
h[ix] = 0
|
||||
if cutoff:
|
||||
# cut off pixels from both ends of the histogram
|
||||
# get number of pixels
|
||||
n = 0
|
||||
for ix in range(256):
|
||||
n = n + h[ix]
|
||||
# remove cutoff% pixels from the low end
|
||||
cut = n * cutoff // 100
|
||||
for lo in range(256):
|
||||
if cut > h[lo]:
|
||||
cut = cut - h[lo]
|
||||
h[lo] = 0
|
||||
else:
|
||||
h[lo] -= cut
|
||||
cut = 0
|
||||
if cut <= 0:
|
||||
break
|
||||
# remove cutoff% samples from the hi end
|
||||
cut = n * cutoff // 100
|
||||
for hi in range(255, -1, -1):
|
||||
if cut > h[hi]:
|
||||
cut = cut - h[hi]
|
||||
h[hi] = 0
|
||||
else:
|
||||
h[hi] -= cut
|
||||
cut = 0
|
||||
if cut <= 0:
|
||||
break
|
||||
# find lowest/highest samples after preprocessing
|
||||
for lo in range(256):
|
||||
if h[lo]:
|
||||
break
|
||||
for hi in range(255, -1, -1):
|
||||
if h[hi]:
|
||||
break
|
||||
if hi <= lo:
|
||||
# don't bother
|
||||
lut.extend(list(range(256)))
|
||||
else:
|
||||
scale = 255.0 / (hi - lo)
|
||||
offset = -lo * scale
|
||||
for ix in range(256):
|
||||
ix = int(ix * scale + offset)
|
||||
if ix < 0:
|
||||
ix = 0
|
||||
elif ix > 255:
|
||||
ix = 255
|
||||
lut.append(ix)
|
||||
return _lut(image, lut)
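
Typical call for the operator above; the cutoff value is illustrative:

from PIL import Image, ImageOps

im = Image.new("L", (64, 64), 128)
stretched = ImageOps.autocontrast(im, cutoff=2)   # clip 2% from each end of the histogram
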
|
||||
|
||||
|
||||
def colorize(image, black, white):
|
||||
"""
|
||||
Colorize grayscale image. The **black** and **white**
|
||||
arguments should be RGB tuples; this function calculates a color
|
||||
wedge mapping all black pixels in the source image to the first
|
||||
color, and all white pixels to the second color.
|
||||
|
||||
:param image: The image to colorize.
|
||||
:param black: The color to use for black input pixels.
|
||||
:param white: The color to use for white input pixels.
|
||||
:return: An image.
|
||||
"""
|
||||
assert image.mode == "L"
|
||||
black = _color(black, "RGB")
|
||||
white = _color(white, "RGB")
|
||||
red = []; green = []; blue = []
|
||||
for i in range(256):
|
||||
red.append(black[0]+i*(white[0]-black[0])//255)
|
||||
green.append(black[1]+i*(white[1]-black[1])//255)
|
||||
blue.append(black[2]+i*(white[2]-black[2])//255)
|
||||
image = image.convert("RGB")
|
||||
return _lut(image, red + green + blue)
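
Example of the black/white wedge mapping described above; the two colors are arbitrary:

from PIL import Image, ImageOps

gray = Image.new("L", (64, 64), 128)
tinted = ImageOps.colorize(gray, black=(0, 0, 64), white=(255, 240, 200))
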
|
||||
|
||||
|
||||
def crop(image, border=0):
|
||||
"""
|
||||
Remove border from image. The same amount of pixels are removed
|
||||
from all four sides. This function works on all image modes.
|
||||
|
||||
.. seealso:: :py:meth:`~PIL.Image.Image.crop`
|
||||
|
||||
:param image: The image to crop.
|
||||
:param border: The number of pixels to remove.
|
||||
:return: An image.
|
||||
"""
|
||||
left, top, right, bottom = _border(border)
|
||||
return image.crop(
|
||||
(left, top, image.size[0]-right, image.size[1]-bottom)
|
||||
)
|
||||
|
||||
|
||||
def deform(image, deformer, resample=Image.BILINEAR):
|
||||
"""
|
||||
Deform the image.
|
||||
|
||||
:param image: The image to deform.
|
||||
:param deformer: A deformer object. Any object that implements a
|
||||
**getmesh** method can be used.
|
||||
:param resample: What resampling filter to use.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.transform(
|
||||
image.size, Image.MESH, deformer.getmesh(image), resample
|
||||
)
|
||||
|
||||
|
||||
def equalize(image, mask=None):
|
||||
"""
|
||||
Equalize the image histogram. This function applies a non-linear
|
||||
mapping to the input image, in order to create a uniform
|
||||
distribution of grayscale values in the output image.
|
||||
|
||||
:param image: The image to equalize.
|
||||
:param mask: An optional mask. If given, only the pixels selected by
|
||||
the mask are included in the analysis.
|
||||
:return: An image.
|
||||
"""
|
||||
if image.mode == "P":
|
||||
image = image.convert("RGB")
|
||||
h = image.histogram(mask)
|
||||
lut = []
|
||||
for b in range(0, len(h), 256):
|
||||
histo = [_f for _f in h[b:b+256] if _f]
|
||||
if len(histo) <= 1:
|
||||
lut.extend(list(range(256)))
|
||||
else:
|
||||
step = (reduce(operator.add, histo) - histo[-1]) // 255
|
||||
if not step:
|
||||
lut.extend(list(range(256)))
|
||||
else:
|
||||
n = step // 2
|
||||
for i in range(256):
|
||||
lut.append(n // step)
|
||||
n = n + h[i+b]
|
||||
return _lut(image, lut)
|
||||
|
||||
|
||||
def expand(image, border=0, fill=0):
|
||||
"""
|
||||
Add border to the image
|
||||
|
||||
:param image: The image to expand.
|
||||
:param border: Border width, in pixels.
|
||||
:param fill: Pixel fill value (a color value). Default is 0 (black).
|
||||
:return: An image.
|
||||
"""
|
||||
"Add border to image"
|
||||
left, top, right, bottom = _border(border)
|
||||
width = left + image.size[0] + right
|
||||
height = top + image.size[1] + bottom
|
||||
out = Image.new(image.mode, (width, height), _color(fill, image.mode))
|
||||
out.paste(image, (left, top))
|
||||
return out
|
||||
|
||||
|
||||
def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)):
|
||||
"""
|
||||
Returns a sized and cropped version of the image, cropped to the
|
||||
requested aspect ratio and size.
|
||||
|
||||
This function was contributed by Kevin Cazabon.
|
||||
|
||||
:param size: The requested output size in pixels, given as a
|
||||
(width, height) tuple.
|
||||
:param method: What resampling method to use. Default is
|
||||
:py:attr:`PIL.Image.NEAREST`.
|
||||
:param bleed: Remove a border around the outside of the image (from all
|
||||
four edges). The value is a decimal percentage (use 0.01 for
|
||||
one percent). The default value is 0 (no border).
|
||||
:param centering: Control the cropping position. Use (0.5, 0.5) for
|
||||
center cropping (e.g. if cropping the width, take 50% off
|
||||
of the left side, and therefore 50% off the right side).
|
||||
(0.0, 0.0) will crop from the top left corner (i.e. if
|
||||
cropping the width, take all of the crop off of the right
|
||||
side, and if cropping the height, take all of it off the
|
||||
bottom). (1.0, 0.0) will crop from the top right
|
||||
corner, etc. (i.e. if cropping the width, take all of the
|
||||
crop off the left side, and if cropping the height take
|
||||
none from the top, and therefore all off the bottom).
|
||||
:return: An image.
|
||||
"""
|
||||
|
||||
# by Kevin Cazabon, Feb 17/2000
|
||||
# kevin@cazabon.com
|
||||
# http://www.cazabon.com
|
||||
|
||||
# ensure inputs are valid
|
||||
if not isinstance(centering, list):
|
||||
centering = [centering[0], centering[1]]
|
||||
|
||||
if centering[0] > 1.0 or centering[0] < 0.0:
|
||||
centering[0] = 0.50
|
||||
if centering[1] > 1.0 or centering[1] < 0.0:
|
||||
centering[1] = 0.50
|
||||
|
||||
if bleed > 0.49999 or bleed < 0.0:
|
||||
bleed = 0.0
|
||||
|
||||
# calculate the area to use for resizing and cropping, subtracting
|
||||
# the 'bleed' around the edges
|
||||
|
||||
# number of pixels to trim off on Top and Bottom, Left and Right
|
||||
bleedPixels = (
|
||||
int((float(bleed) * float(image.size[0])) + 0.5),
|
||||
int((float(bleed) * float(image.size[1])) + 0.5)
|
||||
)
|
||||
|
||||
liveArea = (0, 0, image.size[0], image.size[1])
|
||||
if bleed > 0.0:
|
||||
liveArea = (
|
||||
bleedPixels[0], bleedPixels[1], image.size[0] - bleedPixels[0] - 1,
|
||||
image.size[1] - bleedPixels[1] - 1
|
||||
)
|
||||
|
||||
liveSize = (liveArea[2] - liveArea[0], liveArea[3] - liveArea[1])
|
||||
|
||||
# calculate the aspect ratio of the liveArea
|
||||
liveAreaAspectRatio = float(liveSize[0])/float(liveSize[1])
|
||||
|
||||
# calculate the aspect ratio of the output image
|
||||
aspectRatio = float(size[0]) / float(size[1])
|
||||
|
||||
# figure out if the sides or top/bottom will be cropped off
|
||||
if liveAreaAspectRatio >= aspectRatio:
|
||||
# liveArea is wider than what's needed, crop the sides
|
||||
cropWidth = int((aspectRatio * float(liveSize[1])) + 0.5)
|
||||
cropHeight = liveSize[1]
|
||||
else:
|
||||
# liveArea is taller than what's needed, crop the top and bottom
|
||||
cropWidth = liveSize[0]
|
||||
cropHeight = int((float(liveSize[0])/aspectRatio) + 0.5)
|
||||
|
||||
# make the crop
|
||||
leftSide = int(liveArea[0] + (float(liveSize[0]-cropWidth) * centering[0]))
|
||||
if leftSide < 0:
|
||||
leftSide = 0
|
||||
topSide = int(liveArea[1] + (float(liveSize[1]-cropHeight) * centering[1]))
|
||||
if topSide < 0:
|
||||
topSide = 0
|
||||
|
||||
out = image.crop(
|
||||
(leftSide, topSide, leftSide + cropWidth, topSide + cropHeight)
|
||||
)
|
||||
|
||||
# resize the image and return it
|
||||
return out.resize(size, method)
|
||||
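# A worked sketch of the crop geometry above, assuming a hypothetical
# 1600x900 source fitted into a square 500x500 target with centre cropping
# and no bleed; only the arithmetic from the function is reproduced.
def _fit_crop_box(src_size, out_size, centering=(0.5, 0.5)):
    live_w, live_h = src_size
    live_ratio = float(live_w) / float(live_h)
    out_ratio = float(out_size[0]) / float(out_size[1])
    if live_ratio >= out_ratio:
        crop_w = int(out_ratio * live_h + 0.5)   # source is too wide, trim sides
        crop_h = live_h
    else:
        crop_w = live_w                          # source is too tall, trim top/bottom
        crop_h = int(live_w / out_ratio + 0.5)
    left = int((live_w - crop_w) * centering[0])
    top = int((live_h - crop_h) * centering[1])
    return left, top, left + crop_w, top + crop_h

print(_fit_crop_box((1600, 900), (500, 500)))   # (350, 0, 1250, 900)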
|
||||
|
||||
def flip(image):
|
||||
"""
|
||||
Flip the image vertically (top to bottom).
|
||||
|
||||
:param image: The image to flip.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.transpose(Image.FLIP_TOP_BOTTOM)
|
||||
|
||||
|
||||
def grayscale(image):
|
||||
"""
|
||||
Convert the image to grayscale.
|
||||
|
||||
:param image: The image to convert.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.convert("L")
|
||||
|
||||
|
||||
def invert(image):
|
||||
"""
|
||||
Invert (negate) the image.
|
||||
|
||||
:param image: The image to invert.
|
||||
:return: An image.
|
||||
"""
|
||||
lut = []
|
||||
for i in range(256):
|
||||
lut.append(255-i)
|
||||
return _lut(image, lut)
|
||||
|
||||
|
||||
def mirror(image):
|
||||
"""
|
||||
Flip image horizontally (left to right).
|
||||
|
||||
:param image: The image to mirror.
|
||||
:return: An image.
|
||||
"""
|
||||
return image.transpose(Image.FLIP_LEFT_RIGHT)
|
||||
|
||||
|
||||
def posterize(image, bits):
|
||||
"""
|
||||
Reduce the number of bits for each color channel.
|
||||
|
||||
:param image: The image to posterize.
|
||||
:param bits: The number of bits to keep for each channel (1-8).
|
||||
:return: An image.
|
||||
"""
|
||||
lut = []
|
||||
mask = ~(2**(8-bits)-1)
|
||||
for i in range(256):
|
||||
lut.append(i & mask)
|
||||
return _lut(image, lut)
|
||||
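# A brief sketch of the bit mask used above, with bits=2 as an assumed
# example: only the top two bits of each 8-bit channel value survive,
# leaving four possible output levels per channel.
_bits = 2
_mask = ~(2 ** (8 - _bits) - 1)
print([v & _mask for v in (0, 63, 64, 128, 200, 255)])   # [0, 0, 64, 128, 192, 192]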
|
||||
|
||||
def solarize(image, threshold=128):
|
||||
"""
|
||||
Invert all pixel values above a threshold.
|
||||
|
||||
:param image: The image to solarize.
|
||||
:param threshold: All pixels above this greyscale level are inverted.
|
||||
:return: An image.
|
||||
"""
|
||||
lut = []
|
||||
for i in range(256):
|
||||
if i < threshold:
|
||||
lut.append(i)
|
||||
else:
|
||||
lut.append(255-i)
|
||||
return _lut(image, lut)
|
||||
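# The corresponding lookup table for the default threshold of 128, written
# out as a one-liner for illustration: values below the threshold pass
# through unchanged, everything else is inverted.
_solarize_demo = [i if i < 128 else 255 - i for i in range(256)]
assert _solarize_demo[127] == 127 and _solarize_demo[128] == 127 and _solarize_demo[255] == 0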
|
||||
# --------------------------------------------------------------------
|
||||
# PIL USM components, from Kevin Cazabon.
|
||||
|
||||
def gaussian_blur(im, radius=None):
|
||||
""" PIL_usm.gblur(im, [radius])"""
|
||||
|
||||
if radius is None:
|
||||
radius = 5.0
|
||||
|
||||
im.load()
|
||||
|
||||
return im.im.gaussian_blur(radius)
|
||||
|
||||
gblur = gaussian_blur
|
||||
|
||||
def unsharp_mask(im, radius=None, percent=None, threshold=None):
|
||||
""" PIL_usm.usm(im, [radius, percent, threshold])"""
|
||||
|
||||
if radius is None:
|
||||
radius = 5.0
|
||||
if percent is None:
|
||||
percent = 150
|
||||
if threshold is None:
|
||||
threshold = 3
|
||||
|
||||
im.load()
|
||||
|
||||
return im.im.unsharp_mask(radius, percent, threshold)
|
||||
|
||||
usm = unsharp_mask
|
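# Both helpers above call private C-level methods (im.im.gaussian_blur and
# im.im.unsharp_mask). A hedged sketch of the equivalent public route in
# current Pillow goes through ImageFilter; the radius/percent/threshold
# values below simply mirror the defaults used above.
from PIL import ImageFilter

def blur_and_sharpen(im):
    blurred = im.filter(ImageFilter.GaussianBlur(radius=5.0))
    sharpened = im.filter(ImageFilter.UnsharpMask(radius=5.0, percent=150, threshold=3))
    return blurred, sharpened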
|
@ -1,209 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# image palette object
|
||||
#
|
||||
# History:
|
||||
# 1996-03-11 fl Rewritten.
|
||||
# 1997-01-03 fl Up and running.
|
||||
# 1997-08-23 fl Added load hack
|
||||
# 2001-04-16 fl Fixed randint shadow bug in random()
|
||||
#
|
||||
# Copyright (c) 1997-2001 by Secret Labs AB
|
||||
# Copyright (c) 1996-1997 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
import array
|
||||
from PIL import Image, ImageColor
|
||||
|
||||
|
||||
class ImagePalette:
|
||||
"Color palette for palette mapped images"
|
||||
|
||||
def __init__(self, mode="RGB", palette=None, size=0):
|
||||
self.mode = mode
|
||||
self.rawmode = None # if set, palette contains raw data
|
||||
self.palette = palette or list(range(256))*len(self.mode)
|
||||
self.colors = {}
|
||||
self.dirty = None
|
||||
if ((size == 0 and len(self.mode)*256 != len(self.palette)) or
|
||||
(size != 0 and size != len(self.palette))):
|
||||
raise ValueError("wrong palette size")
|
||||
|
||||
def getdata(self):
|
||||
"""
|
||||
Get palette contents in format suitable for the low-level
|
||||
``im.putpalette`` primitive.
|
||||
|
||||
.. warning:: This method is experimental.
|
||||
"""
|
||||
if self.rawmode:
|
||||
return self.rawmode, self.palette
|
||||
return self.mode + ";L", self.tobytes()
|
||||
|
||||
def tobytes(self):
|
||||
"""Convert palette to bytes.
|
||||
|
||||
.. warning:: This method is experimental.
|
||||
"""
|
||||
if self.rawmode:
|
||||
raise ValueError("palette contains raw palette data")
|
||||
if isinstance(self.palette, bytes):
|
||||
return self.palette
|
||||
arr = array.array("B", self.palette)
|
||||
if hasattr(arr, 'tobytes'):
|
||||
#py3k has a tobytes, tostring is deprecated.
|
||||
return arr.tobytes()
|
||||
return arr.tostring()
|
||||
|
||||
# Declare tostring as an alias for tobytes
|
||||
tostring = tobytes
|
||||
|
||||
def getcolor(self, color):
|
||||
"""Given an rgb tuple, allocate palette entry.
|
||||
|
||||
.. warning:: This method is experimental.
|
||||
"""
|
||||
if self.rawmode:
|
||||
raise ValueError("palette contains raw palette data")
|
||||
if isinstance(color, tuple):
|
||||
try:
|
||||
return self.colors[color]
|
||||
except KeyError:
|
||||
# allocate new color slot
|
||||
if isinstance(self.palette, bytes):
|
||||
self.palette = [int(x) for x in self.palette]
|
||||
index = len(self.colors)
|
||||
if index >= 256:
|
||||
raise ValueError("cannot allocate more than 256 colors")
|
||||
self.colors[color] = index
|
||||
self.palette[index] = color[0]
|
||||
self.palette[index+256] = color[1]
|
||||
self.palette[index+512] = color[2]
|
||||
self.dirty = 1
|
||||
return index
|
||||
else:
|
||||
raise ValueError("unknown color specifier: %r" % color)
|
||||
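# A minimal usage sketch of getcolor(), assuming the class above is imported
# as PIL.ImagePalette: slots are handed out in the order colors are first
# requested and cached in self.colors, so a repeated color returns the same
# index.
from PIL import Image, ImagePalette

pal = ImagePalette.ImagePalette("RGB")
red = pal.getcolor((255, 0, 0))      # index 0
blue = pal.getcolor((0, 0, 255))     # index 1
assert pal.getcolor((255, 0, 0)) == red

im = Image.new("P", (16, 16), red)
im.putpalette(pal.palette)           # push the 768-entry list into the image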
|
||||
def save(self, fp):
|
||||
"""Save palette to text file.
|
||||
|
||||
.. warning:: This method is experimental.
|
||||
"""
|
||||
if self.rawmode:
|
||||
raise ValueError("palette contains raw palette data")
|
||||
if isinstance(fp, str):
|
||||
fp = open(fp, "w")
|
||||
fp.write("# Palette\n")
|
||||
fp.write("# Mode: %s\n" % self.mode)
|
||||
for i in range(256):
|
||||
fp.write("%d" % i)
|
||||
for j in range(i*len(self.mode), (i+1)*len(self.mode)):
|
||||
try:
|
||||
fp.write(" %d" % self.palette[j])
|
||||
except IndexError:
|
||||
fp.write(" 0")
|
||||
fp.write("\n")
|
||||
fp.close()
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Internal
|
||||
|
||||
def raw(rawmode, data):
|
||||
palette = ImagePalette()
|
||||
palette.rawmode = rawmode
|
||||
palette.palette = data
|
||||
palette.dirty = 1
|
||||
return palette
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Factories
|
||||
|
||||
def _make_linear_lut(black, white):
|
||||
lut = []
|
||||
if black == 0:
|
||||
for i in range(256):
|
||||
lut.append(white*i//255)
|
||||
else:
|
||||
raise NotImplementedError # FIXME
|
||||
return lut
|
||||
|
||||
def _make_gamma_lut(exp, mode="RGB"):
|
||||
lut = []
|
||||
for i in range(256):
|
||||
lut.append(int(((i / 255.0) ** exp) * 255.0 + 0.5))
|
||||
return lut
|
||||
|
||||
def new(mode, data):
|
||||
return Image.core.new_palette(mode, data)
|
||||
|
||||
def negative(mode="RGB"):
|
||||
palette = list(range(256))
|
||||
palette.reverse()
|
||||
return ImagePalette(mode, palette * len(mode))
|
||||
|
||||
def random(mode="RGB"):
|
||||
from random import randint
|
||||
palette = []
|
||||
for i in range(256*len(mode)):
|
||||
palette.append(randint(0, 255))
|
||||
return ImagePalette(mode, palette)
|
||||
|
||||
def sepia(white="#fff0c0"):
|
||||
r, g, b = ImageColor.getrgb(white)
|
||||
r = _make_linear_lut(0, r)
|
||||
g = _make_linear_lut(0, g)
|
||||
b = _make_linear_lut(0, b)
|
||||
return ImagePalette("RGB", r + g + b)
|
||||
|
||||
def wedge(mode="RGB"):
|
||||
return ImagePalette(mode, list(range(256)) * len(mode))
|
||||
|
||||
def load(filename):
|
||||
|
||||
# FIXME: supports GIMP gradients only
|
||||
|
||||
fp = open(filename, "rb")
|
||||
|
||||
lut = None
|
||||
|
||||
if not lut:
|
||||
try:
|
||||
from PIL import GimpPaletteFile
|
||||
fp.seek(0)
|
||||
p = GimpPaletteFile.GimpPaletteFile(fp)
|
||||
lut = p.getpalette()
|
||||
except (SyntaxError, ValueError):
|
||||
#import traceback
|
||||
#traceback.print_exc()
|
||||
pass
|
||||
|
||||
if not lut:
|
||||
try:
|
||||
from PIL import GimpGradientFile
|
||||
fp.seek(0)
|
||||
p = GimpGradientFile.GimpGradientFile(fp)
|
||||
lut = p.getpalette()
|
||||
except (SyntaxError, ValueError):
|
||||
#import traceback
|
||||
#traceback.print_exc()
|
||||
pass
|
||||
|
||||
if not lut:
|
||||
try:
|
||||
from PIL import PaletteFile
|
||||
fp.seek(0)
|
||||
p = PaletteFile.PaletteFile(fp)
|
||||
lut = p.getpalette()
|
||||
except (SyntaxError, ValueError):
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
pass
|
||||
|
||||
if not lut:
|
||||
raise IOError("cannot load palette")
|
||||
|
||||
return lut # data, rawmode
|
|
@ -1,66 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# path interface
|
||||
#
|
||||
# History:
|
||||
# 1996-11-04 fl Created
|
||||
# 2002-04-14 fl Added documentation stub class
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1996.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
|
||||
|
||||
# the Python class below is overridden by the C implementation.
|
||||
|
||||
|
||||
class Path:
|
||||
|
||||
def __init__(self, xy):
|
||||
pass
|
||||
|
||||
##
|
||||
# Compacts the path, by removing points that are close to each
|
||||
# other. This method modifies the path in place.
|
||||
|
||||
def compact(self, distance=2):
|
||||
pass
|
||||
|
||||
##
|
||||
# Gets the bounding box.
|
||||
|
||||
def getbbox(self):
|
||||
pass
|
||||
|
||||
##
|
||||
# Maps the path through a function.
|
||||
|
||||
def map(self, function):
|
||||
pass
|
||||
|
||||
##
|
||||
# Converts the path to a Python list.
|
||||
#
|
||||
# @param flat By default, this function returns a list of 2-tuples
|
||||
# [(x, y), ...]. If this argument is true, it returns a flat
|
||||
# list [x, y, ...] instead.
|
||||
# @return A list of coordinates.
|
||||
|
||||
def tolist(self, flat=0):
|
||||
pass
|
||||
|
||||
##
|
||||
# Transforms the path.
|
||||
|
||||
def transform(self, matrix):
|
||||
pass
|
||||
|
||||
|
||||
# override with C implementation
|
||||
Path = Image.core.path
|
|
@ -1,89 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# a simple Qt image interface.
|
||||
#
|
||||
# history:
|
||||
# 2006-06-03 fl: created
|
||||
# 2006-06-04 fl: inherit from QImage instead of wrapping it
|
||||
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
|
||||
# 2013-11-13 fl: add support for Qt5 (aurelien.ballier@cyclonit.com)
|
||||
#
|
||||
# Copyright (c) 2006 by Secret Labs AB
|
||||
# Copyright (c) 2006 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
from PIL._util import isPath
|
||||
|
||||
try:
|
||||
from PyQt5.QtGui import QImage, qRgba
|
||||
except ImportError:
|
||||
from PyQt4.QtGui import QImage, qRgba
|
||||
|
||||
##
|
||||
# (Internal) Turns an RGB color into a Qt compatible color integer.
|
||||
|
||||
def rgb(r, g, b, a=255):
|
||||
# use qRgb to pack the colors, and then turn the resulting long
|
||||
# into a negative integer with the same bitpattern.
|
||||
return (qRgba(r, g, b, a) & 0xffffffff)
|
||||
|
||||
##
|
||||
# A PIL image wrapper for Qt. This is a subclass of Qt's QImage
|
||||
# class.
|
||||
#
|
||||
# @param im A PIL Image object, or a file name (given either as Python
|
||||
# string or a PyQt string object).
|
||||
|
||||
class ImageQt(QImage):
|
||||
|
||||
def __init__(self, im):
|
||||
|
||||
data = None
|
||||
colortable = None
|
||||
|
||||
# handle filename, if given instead of image name
|
||||
if hasattr(im, "toUtf8"):
|
||||
# FIXME - is this really the best way to do this?
|
||||
im = unicode(im.toUtf8(), "utf-8")
|
||||
if isPath(im):
|
||||
im = Image.open(im)
|
||||
|
||||
if im.mode == "1":
|
||||
format = QImage.Format_Mono
|
||||
elif im.mode == "L":
|
||||
format = QImage.Format_Indexed8
|
||||
colortable = []
|
||||
for i in range(256):
|
||||
colortable.append(rgb(i, i, i))
|
||||
elif im.mode == "P":
|
||||
format = QImage.Format_Indexed8
|
||||
colortable = []
|
||||
palette = im.getpalette()
|
||||
for i in range(0, len(palette), 3):
|
||||
colortable.append(rgb(*palette[i:i+3]))
|
||||
elif im.mode == "RGB":
|
||||
data = im.tobytes("raw", "BGRX")
|
||||
format = QImage.Format_RGB32
|
||||
elif im.mode == "RGBA":
|
||||
try:
|
||||
data = im.tobytes("raw", "BGRA")
|
||||
except SystemError:
|
||||
# workaround for earlier versions
|
||||
r, g, b, a = im.split()
|
||||
im = Image.merge("RGBA", (b, g, r, a))
|
||||
format = QImage.Format_ARGB32
|
||||
else:
|
||||
raise ValueError("unsupported image mode %r" % im.mode)
|
||||
|
||||
# must keep a reference, or Qt will crash!
|
||||
self.__data = data or im.tobytes()
|
||||
|
||||
QImage.__init__(self, self.__data, im.size[0], im.size[1], format)
|
||||
|
||||
if colortable:
|
||||
self.setColorTable(colortable)
|
|
@ -1,41 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# sequence support classes
|
||||
#
|
||||
# history:
|
||||
# 1997-02-20 fl Created
|
||||
#
|
||||
# Copyright (c) 1997 by Secret Labs AB.
|
||||
# Copyright (c) 1997 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
##
|
||||
|
||||
class Iterator:
|
||||
"""
|
||||
This class implements an iterator object that can be used to loop
|
||||
over an image sequence.
|
||||
|
||||
You can use the ``[]`` operator to access elements by index. This operator
|
||||
will raise an :py:exc:`IndexError` if you try to access a nonexistent
|
||||
frame.
|
||||
|
||||
:param im: An image object.
|
||||
"""
|
||||
|
||||
def __init__(self, im):
|
||||
if not hasattr(im, "seek"):
|
||||
raise AttributeError("im must have seek method")
|
||||
self.im = im
|
||||
|
||||
def __getitem__(self, ix):
|
||||
try:
|
||||
if ix:
|
||||
self.im.seek(ix)
|
||||
return self.im
|
||||
except EOFError:
|
||||
raise IndexError # end of sequence
|
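# A short usage sketch, assuming the class is reached as
# PIL.ImageSequence.Iterator and "animation.gif" is just an example of a
# multi-frame file: indexing seeks the shared image object, and the
# IndexError raised above is what ends a plain for-loop.
from PIL import Image, ImageSequence

im = Image.open("animation.gif")
for frame in ImageSequence.Iterator(im):
    print(frame.tell(), frame.size)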
|
@ -1,171 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# im.show() drivers
|
||||
#
|
||||
# History:
|
||||
# 2008-04-06 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 2008.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
from PIL import Image
|
||||
import os, sys
|
||||
|
||||
if sys.version_info >= (3, 3):
|
||||
from shlex import quote
|
||||
else:
|
||||
from pipes import quote
|
||||
|
||||
_viewers = []
|
||||
|
||||
def register(viewer, order=1):
|
||||
try:
|
||||
if issubclass(viewer, Viewer):
|
||||
viewer = viewer()
|
||||
except TypeError:
|
||||
pass # raised if viewer wasn't a class
|
||||
if order > 0:
|
||||
_viewers.append(viewer)
|
||||
elif order < 0:
|
||||
_viewers.insert(0, viewer)
|
||||
|
||||
##
|
||||
# Displays a given image.
|
||||
#
|
||||
# @param image An image object.
|
||||
# @param title Optional title. Not all viewers can display the title.
|
||||
# @param **options Additional viewer options.
|
||||
# @return True if a suitable viewer was found, false otherwise.
|
||||
|
||||
def show(image, title=None, **options):
|
||||
for viewer in _viewers:
|
||||
if viewer.show(image, title=title, **options):
|
||||
return 1
|
||||
return 0
|
||||
|
||||
##
|
||||
# Base class for viewers.
|
||||
|
||||
class Viewer:
|
||||
|
||||
# main api
|
||||
|
||||
def show(self, image, **options):
|
||||
|
||||
# save temporary image to disk
|
||||
if image.mode[:4] == "I;16":
|
||||
# @PIL88 @PIL101
|
||||
# "I;16" isn't an 'official' mode, but we still want to
|
||||
# provide a simple way to show 16-bit images.
|
||||
base = "L"
|
||||
# FIXME: auto-contrast if max() > 255?
|
||||
else:
|
||||
base = Image.getmodebase(image.mode)
|
||||
if base != image.mode and image.mode != "1":
|
||||
image = image.convert(base)
|
||||
|
||||
return self.show_image(image, **options)
|
||||
|
||||
# hook methods
|
||||
|
||||
format = None
|
||||
|
||||
def get_format(self, image):
|
||||
# return format name, or None to save as PGM/PPM
|
||||
return self.format
|
||||
|
||||
def get_command(self, file, **options):
|
||||
raise NotImplementedError
|
||||
|
||||
def save_image(self, image):
|
||||
# save to temporary file, and return filename
|
||||
return image._dump(format=self.get_format(image))
|
||||
|
||||
def show_image(self, image, **options):
|
||||
# display given image
|
||||
return self.show_file(self.save_image(image), **options)
|
||||
|
||||
def show_file(self, file, **options):
|
||||
# display given file
|
||||
os.system(self.get_command(file, **options))
|
||||
return 1
|
||||
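# A hedged sketch of wiring a custom viewer into the registry above; "feh"
# is only an assumed example of an external image viewer binary.
class FehViewer(Viewer):
    format = "PNG"

    def get_command(self, file, **options):
        # quote() is imported at the top of this module
        return "feh %s" % quote(file)

register(FehViewer, order=-1)   # insert ahead of the built-in viewers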
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
if sys.platform == "win32":
|
||||
|
||||
class WindowsViewer(Viewer):
|
||||
format = "BMP"
|
||||
def get_command(self, file, **options):
|
||||
return ('start "Pillow" /WAIT "%s" '
|
||||
'&& ping -n 2 127.0.0.1 >NUL '
|
||||
'&& del /f "%s"' % (file, file))
|
||||
|
||||
register(WindowsViewer)
|
||||
|
||||
elif sys.platform == "darwin":
|
||||
|
||||
class MacViewer(Viewer):
|
||||
format = "BMP"
|
||||
def get_command(self, file, **options):
|
||||
# on darwin open returns immediately resulting in the temp
|
||||
# file removal while app is opening
|
||||
command = "open -a /Applications/Preview.app"
|
||||
command = "(%s %s; sleep 20; rm -f %s)&" % (command, quote(file), quote(file))
|
||||
return command
|
||||
|
||||
register(MacViewer)
|
||||
|
||||
else:
|
||||
|
||||
# unixoids
|
||||
|
||||
def which(executable):
|
||||
path = os.environ.get("PATH")
|
||||
if not path:
|
||||
return None
|
||||
for dirname in path.split(os.pathsep):
|
||||
filename = os.path.join(dirname, executable)
|
||||
if os.path.isfile(filename):
|
||||
# FIXME: make sure it's executable
|
||||
return filename
|
||||
return None
|
||||
|
||||
class UnixViewer(Viewer):
|
||||
def show_file(self, file, **options):
|
||||
command, executable = self.get_command_ex(file, **options)
|
||||
command = "(%s %s; rm -f %s)&" % (command, quote(file), quote(file))
|
||||
os.system(command)
|
||||
return 1
|
||||
|
||||
# implementations
|
||||
|
||||
class DisplayViewer(UnixViewer):
|
||||
def get_command_ex(self, file, **options):
|
||||
command = executable = "display"
|
||||
return command, executable
|
||||
|
||||
if which("display"):
|
||||
register(DisplayViewer)
|
||||
|
||||
class XVViewer(UnixViewer):
|
||||
def get_command_ex(self, file, title=None, **options):
|
||||
# note: xv is pretty outdated. most modern systems have
|
||||
# imagemagick's display command instead.
|
||||
command = executable = "xv"
|
||||
if title:
|
||||
command += " -name %s" % quote(title)
|
||||
return command, executable
|
||||
|
||||
if which("xv"):
|
||||
register(XVViewer)
|
||||
|
||||
if __name__ == "__main__":
|
||||
# usage: python ImageShow.py imagefile [title]
|
||||
print(show(Image.open(sys.argv[1]), *sys.argv[2:]))
|
|
@ -1,147 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# global image statistics
|
||||
#
|
||||
# History:
|
||||
# 1996-04-05 fl Created
|
||||
# 1997-05-21 fl Added mask; added rms, var, stddev attributes
|
||||
# 1997-08-05 fl Added median
|
||||
# 1998-07-05 hk Fixed integer overflow error
|
||||
#
|
||||
# Notes:
|
||||
# This class shows how to implement delayed evaluation of attributes.
|
||||
# To get a certain value, simply access the corresponding attribute.
|
||||
# The __getattr__ dispatcher takes care of the rest.
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1996-97.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
import operator, math
|
||||
from functools import reduce
|
||||
|
||||
|
||||
class Stat:
|
||||
|
||||
def __init__(self, image_or_list, mask=None):
|
||||
try:
|
||||
if mask:
|
||||
self.h = image_or_list.histogram(mask)
|
||||
else:
|
||||
self.h = image_or_list.histogram()
|
||||
except AttributeError:
|
||||
self.h = image_or_list # assume it to be a histogram list
|
||||
if not isinstance(self.h, list):
|
||||
raise TypeError("first argument must be image or list")
|
||||
self.bands = list(range(len(self.h) // 256))
|
||||
|
||||
def __getattr__(self, id):
|
||||
"Calculate missing attribute"
|
||||
if id[:4] == "_get":
|
||||
raise AttributeError(id)
|
||||
# calculate missing attribute
|
||||
v = getattr(self, "_get" + id)()
|
||||
setattr(self, id, v)
|
||||
return v
|
||||
|
||||
def _getextrema(self):
|
||||
"Get min/max values for each band in the image"
|
||||
|
||||
def minmax(histogram):
|
||||
n = 255
|
||||
x = 0
|
||||
for i in range(256):
|
||||
if histogram[i]:
|
||||
n = min(n, i)
|
||||
x = max(x, i)
|
||||
return n, x # returns (255, 0) if there's no data in the histogram
|
||||
|
||||
v = []
|
||||
for i in range(0, len(self.h), 256):
|
||||
v.append(minmax(self.h[i:]))
|
||||
return v
|
||||
|
||||
def _getcount(self):
|
||||
"Get total number of pixels in each layer"
|
||||
|
||||
v = []
|
||||
for i in range(0, len(self.h), 256):
|
||||
v.append(reduce(operator.add, self.h[i:i+256]))
|
||||
return v
|
||||
|
||||
def _getsum(self):
|
||||
"Get sum of all pixels in each layer"
|
||||
|
||||
v = []
|
||||
for i in range(0, len(self.h), 256):
|
||||
sum = 0.0
|
||||
for j in range(256):
|
||||
sum += j * self.h[i + j]
|
||||
v.append(sum)
|
||||
return v
|
||||
|
||||
def _getsum2(self):
|
||||
"Get squared sum of all pixels in each layer"
|
||||
|
||||
v = []
|
||||
for i in range(0, len(self.h), 256):
|
||||
sum2 = 0.0
|
||||
for j in range(256):
|
||||
sum2 += (j ** 2) * float(self.h[i + j])
|
||||
v.append(sum2)
|
||||
return v
|
||||
|
||||
def _getmean(self):
|
||||
"Get average pixel level for each layer"
|
||||
|
||||
v = []
|
||||
for i in self.bands:
|
||||
v.append(self.sum[i] / self.count[i])
|
||||
return v
|
||||
|
||||
def _getmedian(self):
|
||||
"Get median pixel level for each layer"
|
||||
|
||||
v = []
|
||||
for i in self.bands:
|
||||
s = 0
|
||||
l = self.count[i]//2
|
||||
b = i * 256
|
||||
for j in range(256):
|
||||
s = s + self.h[b+j]
|
||||
if s > l:
|
||||
break
|
||||
v.append(j)
|
||||
return v
|
||||
|
||||
def _getrms(self):
|
||||
"Get RMS for each layer"
|
||||
|
||||
v = []
|
||||
for i in self.bands:
|
||||
v.append(math.sqrt(self.sum2[i] / self.count[i]))
|
||||
return v
|
||||
|
||||
|
||||
def _getvar(self):
|
||||
"Get variance for each layer"
|
||||
|
||||
v = []
|
||||
for i in self.bands:
|
||||
n = self.count[i]
|
||||
v.append((self.sum2[i]-(self.sum[i]**2.0)/n)/n)
|
||||
return v
|
||||
|
||||
def _getstddev(self):
|
||||
"Get standard deviation for each layer"
|
||||
|
||||
v = []
|
||||
for i in self.bands:
|
||||
v.append(math.sqrt(self.var[i]))
|
||||
return v
|
||||
|
||||
Global = Stat # compatibility
|
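# A minimal usage sketch of the Stat class above, assuming it is available
# as PIL.ImageStat; the flat 64x64 test image makes the lazily computed
# attributes easy to predict.
from PIL import Image, ImageStat

im = Image.new("RGB", (64, 64), (10, 20, 30))
stat = ImageStat.Stat(im)
print(stat.mean)      # [10.0, 20.0, 30.0]
print(stat.extrema)   # [(10, 10), (20, 20), (30, 30)]
print(stat.stddev)    # [0.0, 0.0, 0.0]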
|
@ -1,296 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# a Tk display interface
|
||||
#
|
||||
# History:
|
||||
# 96-04-08 fl Created
|
||||
# 96-09-06 fl Added getimage method
|
||||
# 96-11-01 fl Rewritten, removed image attribute and crop method
|
||||
# 97-05-09 fl Use PyImagingPaste method instead of image type
|
||||
# 97-05-12 fl Minor tweaks to match the IFUNC95 interface
|
||||
# 97-05-17 fl Support the "pilbitmap" booster patch
|
||||
# 97-06-05 fl Added file= and data= argument to image constructors
|
||||
# 98-03-09 fl Added width and height methods to Image classes
|
||||
# 98-07-02 fl Use default mode for "P" images without palette attribute
|
||||
# 98-07-02 fl Explicitly destroy Tkinter image objects
|
||||
# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)
|
||||
# 99-07-26 fl Automatically hook into Tkinter (if possible)
|
||||
# 99-08-15 fl Hook uses _imagingtk instead of _imaging
|
||||
#
|
||||
# Copyright (c) 1997-1999 by Secret Labs AB
|
||||
# Copyright (c) 1996-1997 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
try:
|
||||
import tkinter
|
||||
except ImportError:
|
||||
import Tkinter
|
||||
tkinter = Tkinter
|
||||
del Tkinter
|
||||
|
||||
from PIL import Image
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Check for Tkinter interface hooks
|
||||
|
||||
_pilbitmap_ok = None
|
||||
|
||||
def _pilbitmap_check():
|
||||
global _pilbitmap_ok
|
||||
if _pilbitmap_ok is None:
|
||||
try:
|
||||
im = Image.new("1", (1,1))
|
||||
tkinter.BitmapImage(data="PIL:%d" % im.im.id)
|
||||
_pilbitmap_ok = 1
|
||||
except tkinter.TclError:
|
||||
_pilbitmap_ok = 0
|
||||
return _pilbitmap_ok
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# PhotoImage
|
||||
|
||||
class PhotoImage:
|
||||
"""
|
||||
A Tkinter-compatible photo image. This can be used
|
||||
everywhere Tkinter expects an image object. If the image is an RGBA
|
||||
image, pixels having alpha 0 are treated as transparent.
|
||||
|
||||
The constructor takes either a PIL image, or a mode and a size.
|
||||
Alternatively, you can use the **file** or **data** options to initialize
|
||||
the photo image object.
|
||||
|
||||
:param image: Either a PIL image, or a mode string. If a mode string is
|
||||
used, a size must also be given.
|
||||
:param size: If the first argument is a mode string, this defines the size
|
||||
of the image.
|
||||
:keyword file: A filename to load the image from (using
|
||||
``Image.open(file)``).
|
||||
:keyword data: An 8-bit string containing image data (as loaded from an
|
||||
image file).
|
||||
"""
|
||||
|
||||
def __init__(self, image=None, size=None, **kw):
|
||||
|
||||
# Tk compatibility: file or data
|
||||
if image is None:
|
||||
if "file" in kw:
|
||||
image = Image.open(kw["file"])
|
||||
del kw["file"]
|
||||
elif "data" in kw:
|
||||
from io import BytesIO
|
||||
image = Image.open(BytesIO(kw["data"]))
|
||||
del kw["data"]
|
||||
|
||||
if hasattr(image, "mode") and hasattr(image, "size"):
|
||||
# got an image instead of a mode
|
||||
mode = image.mode
|
||||
if mode == "P":
|
||||
# palette mapped data
|
||||
image.load()
|
||||
try:
|
||||
mode = image.palette.mode
|
||||
except AttributeError:
|
||||
mode = "RGB" # default
|
||||
size = image.size
|
||||
kw["width"], kw["height"] = size
|
||||
else:
|
||||
mode = image
|
||||
image = None
|
||||
|
||||
if mode not in ["1", "L", "RGB", "RGBA"]:
|
||||
mode = Image.getmodebase(mode)
|
||||
|
||||
self.__mode = mode
|
||||
self.__size = size
|
||||
self.__photo = tkinter.PhotoImage(**kw)
|
||||
self.tk = self.__photo.tk
|
||||
if image:
|
||||
self.paste(image)
|
||||
|
||||
def __del__(self):
|
||||
name = self.__photo.name
|
||||
self.__photo.name = None
|
||||
try:
|
||||
self.__photo.tk.call("image", "delete", name)
|
||||
except Exception:
|
||||
pass # ignore internal errors
|
||||
|
||||
|
||||
def __str__(self):
|
||||
"""
|
||||
Get the Tkinter photo image identifier. This method is automatically
|
||||
called by Tkinter whenever a PhotoImage object is passed to a Tkinter
|
||||
method.
|
||||
|
||||
:return: A Tkinter photo image identifier (a string).
|
||||
"""
|
||||
return str(self.__photo)
|
||||
|
||||
|
||||
def width(self):
|
||||
"""
|
||||
Get the width of the image.
|
||||
|
||||
:return: The width, in pixels.
|
||||
"""
|
||||
return self.__size[0]
|
||||
|
||||
|
||||
def height(self):
|
||||
"""
|
||||
Get the height of the image.
|
||||
|
||||
:return: The height, in pixels.
|
||||
"""
|
||||
return self.__size[1]
|
||||
|
||||
|
||||
def paste(self, im, box=None):
|
||||
"""
|
||||
Paste a PIL image into the photo image. Note that this can
|
||||
be very slow if the photo image is displayed.
|
||||
|
||||
:param im: A PIL image. The size must match the target region. If the
|
||||
mode does not match, the image is converted to the mode of
|
||||
the bitmap image.
|
||||
:param box: A 4-tuple defining the left, upper, right, and lower pixel
|
||||
coordinate. If None is given instead of a tuple, all of
|
||||
the image is assumed.
|
||||
"""
|
||||
|
||||
# convert to blittable
|
||||
im.load()
|
||||
image = im.im
|
||||
if image.isblock() and im.mode == self.__mode:
|
||||
block = image
|
||||
else:
|
||||
block = image.new_block(self.__mode, im.size)
|
||||
image.convert2(block, image) # convert directly between buffers
|
||||
|
||||
tk = self.__photo.tk
|
||||
|
||||
try:
|
||||
tk.call("PyImagingPhoto", self.__photo, block.id)
|
||||
except tkinter.TclError as v:
|
||||
# activate Tkinter hook
|
||||
try:
|
||||
from PIL import _imagingtk
|
||||
try:
|
||||
_imagingtk.tkinit(tk.interpaddr(), 1)
|
||||
except AttributeError:
|
||||
_imagingtk.tkinit(id(tk), 0)
|
||||
tk.call("PyImagingPhoto", self.__photo, block.id)
|
||||
except (ImportError, AttributeError, tkinter.TclError):
|
||||
raise # configuration problem; cannot attach to Tkinter
|
||||
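# A brief usage sketch of the PhotoImage wrapper above, assuming it is
# imported as PIL.ImageTk, that a display is available for Tk, and that
# "photo.png" is only an example filename.
try:
    import tkinter
except ImportError:
    import Tkinter as tkinter
from PIL import Image, ImageTk

root = tkinter.Tk()
photo = ImageTk.PhotoImage(Image.open("photo.png"))   # keep a reference alive
tkinter.Label(root, image=photo).pack()
root.mainloop()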
|
||||
# --------------------------------------------------------------------
|
||||
# BitmapImage
|
||||
|
||||
|
||||
class BitmapImage:
|
||||
"""
|
||||
|
||||
A Tkinter-compatible bitmap image. This can be used everywhere Tkinter
|
||||
expects an image object.
|
||||
|
||||
The given image must have mode "1". Pixels having value 0 are treated as
|
||||
transparent. Options, if any, are passed on to Tkinter. The most commonly
|
||||
used option is **foreground**, which is used to specify the color for the
|
||||
non-transparent parts. See the Tkinter documentation for information on
|
||||
how to specify colours.
|
||||
|
||||
:param image: A PIL image.
|
||||
"""
|
||||
|
||||
def __init__(self, image=None, **kw):
|
||||
|
||||
# Tk compatibility: file or data
|
||||
if image is None:
|
||||
if "file" in kw:
|
||||
image = Image.open(kw["file"])
|
||||
del kw["file"]
|
||||
elif "data" in kw:
|
||||
from io import BytesIO
|
||||
image = Image.open(BytesIO(kw["data"]))
|
||||
del kw["data"]
|
||||
|
||||
self.__mode = image.mode
|
||||
self.__size = image.size
|
||||
|
||||
if _pilbitmap_check():
|
||||
# fast way (requires the pilbitmap booster patch)
|
||||
image.load()
|
||||
kw["data"] = "PIL:%d" % image.im.id
|
||||
self.__im = image # must keep a reference
|
||||
else:
|
||||
# slow but safe way
|
||||
kw["data"] = image.tobitmap()
|
||||
self.__photo = tkinter.BitmapImage(**kw)
|
||||
|
||||
def __del__(self):
|
||||
name = self.__photo.name
|
||||
self.__photo.name = None
|
||||
try:
|
||||
self.__photo.tk.call("image", "delete", name)
|
||||
except Exception:
|
||||
pass # ignore internal errors
|
||||
|
||||
|
||||
def width(self):
|
||||
"""
|
||||
Get the width of the image.
|
||||
|
||||
:return: The width, in pixels.
|
||||
"""
|
||||
return self.__size[0]
|
||||
|
||||
|
||||
def height(self):
|
||||
"""
|
||||
Get the height of the image.
|
||||
|
||||
:return: The height, in pixels.
|
||||
"""
|
||||
return self.__size[1]
|
||||
|
||||
|
||||
def __str__(self):
|
||||
"""
|
||||
Get the Tkinter bitmap image identifier. This method is automatically
|
||||
called by Tkinter whenever a BitmapImage object is passed to a Tkinter
|
||||
method.
|
||||
|
||||
:return: A Tkinter bitmap image identifier (a string).
|
||||
"""
|
||||
return str(self.__photo)
|
||||
|
||||
|
||||
def getimage(photo):
|
||||
"""Copies the contents of a PhotoImage to a PIL image memory."""
|
||||
photo.tk.call("PyImagingPhotoGet", photo)
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Helper for the Image.show method.
|
||||
|
||||
def _show(image, title):
|
||||
|
||||
class UI(tkinter.Label):
|
||||
def __init__(self, master, im):
|
||||
if im.mode == "1":
|
||||
self.image = BitmapImage(im, foreground="white", master=master)
|
||||
else:
|
||||
self.image = PhotoImage(im, master=master)
|
||||
tkinter.Label.__init__(self, master, image=self.image,
|
||||
bg="black", bd=0)
|
||||
|
||||
if not tkinter._default_root:
|
||||
raise IOError("tkinter not initialized")
|
||||
top = tkinter.Toplevel()
|
||||
if title:
|
||||
top.title(title)
|
||||
UI(top, image).pack()
|
|
@ -1,95 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# transform wrappers
|
||||
#
|
||||
# History:
|
||||
# 2002-04-08 fl Created
|
||||
#
|
||||
# Copyright (c) 2002 by Secret Labs AB
|
||||
# Copyright (c) 2002 by Fredrik Lundh
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from PIL import Image
|
||||
|
||||
class Transform(Image.ImageTransformHandler):
|
||||
def __init__(self, data):
|
||||
self.data = data
|
||||
def getdata(self):
|
||||
return self.method, self.data
|
||||
def transform(self, size, image, **options):
|
||||
# can be overridden
|
||||
method, data = self.getdata()
|
||||
return image.transform(size, method, data, **options)
|
||||
|
||||
##
|
||||
# Define an affine image transform.
|
||||
# <p>
|
||||
# This function takes a 6-tuple (<i>a, b, c, d, e, f</i>) which
|
||||
# contain the first two rows from an affine transform matrix. For
|
||||
# each pixel (<i>x, y</i>) in the output image, the new value is
|
||||
# taken from a position (a <i>x</i> + b <i>y</i> + c,
|
||||
# d <i>x</i> + e <i>y</i> + f) in the input image, rounded to
|
||||
# nearest pixel.
|
||||
# <p>
|
||||
# This function can be used to scale, translate, rotate, and shear the
|
||||
# original image.
|
||||
#
|
||||
# @def AffineTransform(matrix)
|
||||
# @param matrix A 6-tuple (<i>a, b, c, d, e, f</i>) containing
|
||||
# the first two rows from an affine transform matrix.
|
||||
# @see Image#Image.transform
|
||||
|
||||
class AffineTransform(Transform):
|
||||
method = Image.AFFINE
|
||||
|
||||
##
|
||||
# Define a transform to extract a subregion from an image.
|
||||
# <p>
|
||||
# Maps a rectangle (defined by two corners) from the image to a
|
||||
# rectangle of the given size. The resulting image will contain
|
||||
# data sampled from between the corners, such that (<i>x0, y0</i>)
|
||||
# in the input image will end up at (0,0) in the output image,
|
||||
# and (<i>x1, y1</i>) at <i>size</i>.
|
||||
# <p>
|
||||
# This method can be used to crop, stretch, shrink, or mirror an
|
||||
# arbitrary rectangle in the current image. It is slightly slower than
|
||||
# <b>crop</b>, but about as fast as a corresponding <b>resize</b>
|
||||
# operation.
|
||||
#
|
||||
# @def ExtentTransform(bbox)
|
||||
# @param bbox A 4-tuple (<i>x0, y0, x1, y1</i>) which specifies
|
||||
# two points in the input image's coordinate system.
|
||||
# @see Image#Image.transform
|
||||
|
||||
class ExtentTransform(Transform):
|
||||
method = Image.EXTENT
|
||||
|
||||
##
|
||||
# Define a quad image transform.
|
||||
# <p>
|
||||
# Maps a quadrilateral (a region defined by four corners) from the
|
||||
# image to a rectangle of the given size.
|
||||
#
|
||||
# @def QuadTransform(xy)
|
||||
# @param xy An 8-tuple (<i>x0, y0, x1, y1, x2, y2, x3, y3</i>) which
|
||||
# contain the upper left, lower left, lower right, and upper right
|
||||
# corner of the source quadrilateral.
|
||||
# @see Image#Image.transform
|
||||
|
||||
class QuadTransform(Transform):
|
||||
method = Image.QUAD
|
||||
|
||||
##
|
||||
# Define a mesh image transform. A mesh transform consists of one
|
||||
# or more individual quad transforms.
|
||||
#
|
||||
# @def MeshTransform(data)
|
||||
# @param data A list of (bbox, quad) tuples.
|
||||
# @see Image#Image.transform
|
||||
|
||||
class MeshTransform(Transform):
|
||||
method = Image.MESH
|
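# A short sketch of using these wrappers: Image.transform accepts an
# ImageTransformHandler instance directly, so the data tuple travels with
# the method. The 400x300 source, 200x200 output size and the coefficients
# are illustrative values only.
from PIL import Image

im = Image.new("RGB", (400, 300), "white")

# sample the source shifted 50 pixels right and 20 pixels down
shifted = im.transform((200, 200), AffineTransform((1, 0, 50, 0, 1, 20)))

# map the top-left 100x100 region of the source onto the whole output
cropped = im.transform((200, 200), ExtentTransform((0, 0, 100, 100)))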
|
@ -1,251 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# a Windows DIB display interface
|
||||
#
|
||||
# History:
|
||||
# 1996-05-20 fl Created
|
||||
# 1996-09-20 fl Fixed subregion exposure
|
||||
# 1997-09-21 fl Added draw primitive (for tzPrint)
|
||||
# 2003-05-21 fl Added experimental Window/ImageWindow classes
|
||||
# 2003-09-05 fl Added fromstring/tostring methods
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997-2003.
|
||||
# Copyright (c) Fredrik Lundh 1996-2003.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
import warnings
|
||||
from PIL import Image
|
||||
|
||||
|
||||
class HDC:
|
||||
"""
|
||||
Wraps a HDC integer. The resulting object can be passed to the
|
||||
:py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
|
||||
methods.
|
||||
"""
|
||||
def __init__(self, dc):
|
||||
self.dc = dc
|
||||
def __int__(self):
|
||||
return self.dc
|
||||
|
||||
class HWND:
|
||||
"""
|
||||
Wraps a HWND integer. The resulting object can be passed to the
|
||||
:py:meth:`~PIL.ImageWin.Dib.draw` and :py:meth:`~PIL.ImageWin.Dib.expose`
|
||||
methods, instead of a DC.
|
||||
"""
|
||||
def __init__(self, wnd):
|
||||
self.wnd = wnd
|
||||
def __int__(self):
|
||||
return self.wnd
|
||||
|
||||
|
||||
class Dib:
|
||||
"""
|
||||
A Windows bitmap with the given mode and size. The mode can be one of "1",
|
||||
"L", "P", or "RGB".
|
||||
|
||||
If the display requires a palette, this constructor creates a suitable
|
||||
palette and associates it with the image. For an "L" image, 128 greylevels
|
||||
are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together
|
||||
with 20 greylevels.
|
||||
|
||||
To make sure that palettes work properly under Windows, you must call the
|
||||
**palette** method upon certain events from Windows.
|
||||
|
||||
:param image: Either a PIL image, or a mode string. If a mode string is
|
||||
used, a size must also be given. The mode can be one of "1",
|
||||
"L", "P", or "RGB".
|
||||
:param size: If the first argument is a mode string, this
|
||||
defines the size of the image.
|
||||
"""
|
||||
|
||||
def __init__(self, image, size=None):
|
||||
if hasattr(image, "mode") and hasattr(image, "size"):
|
||||
mode = image.mode
|
||||
size = image.size
|
||||
else:
|
||||
mode = image
|
||||
image = None
|
||||
if mode not in ["1", "L", "P", "RGB"]:
|
||||
mode = Image.getmodebase(mode)
|
||||
self.image = Image.core.display(mode, size)
|
||||
self.mode = mode
|
||||
self.size = size
|
||||
if image:
|
||||
self.paste(image)
|
||||
|
||||
|
||||
def expose(self, handle):
|
||||
"""
|
||||
Copy the bitmap contents to a device context.
|
||||
|
||||
:param handle: Device context (HDC), cast to a Python integer, or a HDC
|
||||
or HWND instance. In PythonWin, you can use the
|
||||
:py:meth:`CDC.GetHandleAttrib` to get a suitable handle.
|
||||
"""
|
||||
if isinstance(handle, HWND):
|
||||
dc = self.image.getdc(handle)
|
||||
try:
|
||||
result = self.image.expose(dc)
|
||||
finally:
|
||||
self.image.releasedc(handle, dc)
|
||||
else:
|
||||
result = self.image.expose(handle)
|
||||
return result
|
||||
|
||||
def draw(self, handle, dst, src=None):
|
||||
"""
|
||||
Same as expose, but allows you to specify where to draw the image, and
|
||||
what part of it to draw.
|
||||
|
||||
The destination and source areas are given as 4-tuple rectangles. If
|
||||
the source is omitted, the entire image is copied. If the source and
|
||||
the destination have different sizes, the image is resized as
|
||||
necessary.
|
||||
"""
|
||||
if not src:
|
||||
src = (0,0) + self.size
|
||||
if isinstance(handle, HWND):
|
||||
dc = self.image.getdc(handle)
|
||||
try:
|
||||
result = self.image.draw(dc, dst, src)
|
||||
finally:
|
||||
self.image.releasedc(handle, dc)
|
||||
else:
|
||||
result = self.image.draw(handle, dst, src)
|
||||
return result
|
||||
|
||||
|
||||
def query_palette(self, handle):
|
||||
"""
|
||||
Installs the palette associated with the image in the given device
|
||||
context.
|
||||
|
||||
This method should be called upon **QUERYNEWPALETTE** and
|
||||
**PALETTECHANGED** events from Windows. If this method returns a
|
||||
non-zero value, one or more display palette entries were changed, and
|
||||
the image should be redrawn.
|
||||
|
||||
:param handle: Device context (HDC), cast to a Python integer, or an
|
||||
HDC or HWND instance.
|
||||
:return: A true value if one or more entries were changed (this
|
||||
indicates that the image should be redrawn).
|
||||
"""
|
||||
if isinstance(handle, HWND):
|
||||
handle = self.image.getdc(handle)
|
||||
try:
|
||||
result = self.image.query_palette(handle)
|
||||
finally:
|
||||
self.image.releasedc(handle, handle)
|
||||
else:
|
||||
result = self.image.query_palette(handle)
|
||||
return result
|
||||
|
||||
|
||||
def paste(self, im, box=None):
|
||||
"""
|
||||
Paste a PIL image into the bitmap image.
|
||||
|
||||
:param im: A PIL image. The size must match the target region.
|
||||
If the mode does not match, the image is converted to the
|
||||
mode of the bitmap image.
|
||||
:param box: A 4-tuple defining the left, upper, right, and
|
||||
lower pixel coordinate. If None is given instead of a
|
||||
tuple, all of the image is assumed.
|
||||
"""
|
||||
im.load()
|
||||
if self.mode != im.mode:
|
||||
im = im.convert(self.mode)
|
||||
if box:
|
||||
self.image.paste(im.im, box)
|
||||
else:
|
||||
self.image.paste(im.im)
|
||||
|
||||
|
||||
def frombytes(self, buffer):
|
||||
"""
|
||||
Load display memory contents from byte data.
|
||||
|
||||
:param buffer: A buffer containing display data (usually
|
||||
data returned from :py:meth:`tobytes`)
|
||||
"""
|
||||
return self.image.frombytes(buffer)
|
||||
|
||||
|
||||
def tobytes(self):
|
||||
"""
|
||||
Copy display memory contents to bytes object.
|
||||
|
||||
:return: A bytes object containing display data.
|
||||
"""
|
||||
return self.image.tobytes()
|
||||
|
||||
##
|
||||
# Deprecated aliases to frombytes & tobytes.
|
||||
|
||||
def fromstring(self, *args, **kw):
|
||||
warnings.warn(
|
||||
'fromstring() is deprecated. Please call frombytes() instead.',
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
return self.frombytes(*args, **kw)
|
||||
|
||||
def tostring(self):
|
||||
warnings.warn(
|
||||
'tostring() is deprecated. Please call tobytes() instead.',
|
||||
DeprecationWarning,
|
||||
stacklevel=2
|
||||
)
|
||||
return self.tobytes()
|
||||
|
||||
##
|
||||
# Create a Window with the given title and size.
|
||||
|
||||
class Window:
|
||||
|
||||
def __init__(self, title="PIL", width=None, height=None):
|
||||
self.hwnd = Image.core.createwindow(
|
||||
title, self.__dispatcher, width or 0, height or 0
|
||||
)
|
||||
|
||||
def __dispatcher(self, action, *args):
|
||||
return getattr(self, "ui_handle_" + action)(*args)
|
||||
|
||||
def ui_handle_clear(self, dc, x0, y0, x1, y1):
|
||||
pass
|
||||
|
||||
def ui_handle_damage(self, x0, y0, x1, y1):
|
||||
pass
|
||||
|
||||
def ui_handle_destroy(self):
|
||||
pass
|
||||
|
||||
def ui_handle_repair(self, dc, x0, y0, x1, y1):
|
||||
pass
|
||||
|
||||
def ui_handle_resize(self, width, height):
|
||||
pass
|
||||
|
||||
def mainloop(self):
|
||||
Image.core.eventloop()
|
||||
|
||||
##
|
||||
# Create an image window which displays the given image.
|
||||
|
||||
class ImageWindow(Window):
|
||||
|
||||
def __init__(self, image, title="PIL"):
|
||||
if not isinstance(image, Dib):
|
||||
image = Dib(image)
|
||||
self.image = image
|
||||
width, height = image.size
|
||||
Window.__init__(self, title, width=width, height=height)
|
||||
|
||||
def ui_handle_repair(self, dc, x0, y0, x1, y1):
|
||||
self.image.draw(dc, (x0, y0, x1, y1))
|
|
@ -1,93 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# IM Tools support for PIL
|
||||
#
|
||||
# history:
|
||||
# 1996-05-27 fl Created (read 8-bit images only)
|
||||
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.2)
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997-2001.
|
||||
# Copyright (c) Fredrik Lundh 1996-2001.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
__version__ = "0.2"
|
||||
|
||||
import re
|
||||
|
||||
from PIL import Image, ImageFile
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
field = re.compile(br"([a-z]*) ([^ \r\n]*)")
|
||||
|
||||
##
|
||||
# Image plugin for IM Tools images.
|
||||
|
||||
class ImtImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "IMT"
|
||||
format_description = "IM Tools"
|
||||
|
||||
def _open(self):
|
||||
|
||||
# Quick rejection: if there's not a LF among the first
|
||||
# 100 bytes, this is (probably) not a text header.
|
||||
|
||||
if b"\n" not in self.fp.read(100):
|
||||
raise SyntaxError("not an IM file")
|
||||
self.fp.seek(0)
|
||||
|
||||
xsize = ysize = 0
|
||||
|
||||
while True:
|
||||
|
||||
s = self.fp.read(1)
|
||||
if not s:
|
||||
break
|
||||
|
||||
if s == b'\x0C':
|
||||
|
||||
# image data begins
|
||||
self.tile = [("raw", (0,0)+self.size,
|
||||
self.fp.tell(),
|
||||
(self.mode, 0, 1))]
|
||||
|
||||
break
|
||||
|
||||
else:
|
||||
|
||||
# read key/value pair
|
||||
# FIXME: dangerous, may read whole file
|
||||
s = s + self.fp.readline()
|
||||
if len(s) == 1 or len(s) > 100:
|
||||
break
|
||||
if s[:1] == b"*":
|
||||
continue # comment
|
||||
|
||||
m = field.match(s)
|
||||
if not m:
|
||||
break
|
||||
k, v = m.group(1,2)
|
||||
if k == b"width":
|
||||
xsize = int(v)
|
||||
self.size = xsize, ysize
|
||||
elif k == b"height":
|
||||
ysize = int(v)
|
||||
self.size = xsize, ysize
|
||||
elif k == b"pixel" and v == b"n8":
|
||||
self.mode = "L"
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
Image.register_open("IMT", ImtImageFile)
|
||||
|
||||
#
|
||||
# no extension registered (".im" is simply too common)
|
|
@ -1,287 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# IPTC/NAA file handling
|
||||
#
|
||||
# history:
|
||||
# 1995-10-01 fl Created
|
||||
# 1998-03-09 fl Cleaned up and added to PIL
|
||||
# 2002-06-18 fl Added getiptcinfo helper
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997-2002.
|
||||
# Copyright (c) Fredrik Lundh 1995.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
__version__ = "0.3"
|
||||
|
||||
|
||||
from PIL import Image, ImageFile, _binary
|
||||
import os, tempfile
|
||||
|
||||
i8 = _binary.i8
|
||||
i16 = _binary.i16be
|
||||
i32 = _binary.i32be
|
||||
o8 = _binary.o8
|
||||
|
||||
COMPRESSION = {
|
||||
1: "raw",
|
||||
5: "jpeg"
|
||||
}
|
||||
|
||||
PAD = o8(0) * 4
|
||||
|
||||
#
|
||||
# Helpers
|
||||
|
||||
def i(c):
|
||||
return i32((PAD + c)[-4:])
|
||||
|
||||
def dump(c):
|
||||
for i in c:
|
||||
print("%02x" % i8(i), end=' ')
|
||||
print()
|
||||
|
||||
##
|
||||
# Image plugin for IPTC/NAA datastreams. To read IPTC/NAA fields
|
||||
# from TIFF and JPEG files, use the <b>getiptcinfo</b> function.
|
||||
|
||||
class IptcImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "IPTC"
|
||||
format_description = "IPTC/NAA"
|
||||
|
||||
def getint(self, key):
|
||||
return i(self.info[key])
|
||||
|
||||
def field(self):
|
||||
#
|
||||
# get a IPTC field header
|
||||
s = self.fp.read(5)
|
||||
if not len(s):
|
||||
return None, 0
|
||||
|
||||
tag = i8(s[1]), i8(s[2])
|
||||
|
||||
# syntax
|
||||
if i8(s[0]) != 0x1C or tag[0] < 1 or tag[0] > 9:
|
||||
raise SyntaxError("invalid IPTC/NAA file")
|
||||
|
||||
# field size
|
||||
size = i8(s[3])
|
||||
if size > 132:
|
||||
raise IOError("illegal field length in IPTC/NAA file")
|
||||
elif size == 128:
|
||||
size = 0
|
||||
elif size > 128:
|
||||
size = i(self.fp.read(size-128))
|
||||
else:
|
||||
size = i16(s[3:])
|
||||
|
||||
return tag, size
|
||||
|
||||
def _is_raw(self, offset, size):
|
||||
#
|
||||
# check if the file can be mapped
|
||||
|
||||
# DISABLED: the following only slows things down...
|
||||
return 0
|
||||
|
||||
self.fp.seek(offset)
|
||||
t, sz = self.field()
|
||||
if sz != size[0]:
|
||||
return 0
|
||||
y = 1
|
||||
while True:
|
||||
self.fp.seek(sz, 1)
|
||||
t, s = self.field()
|
||||
if t != (8, 10):
|
||||
break
|
||||
if s != sz:
|
||||
return 0
|
||||
y += 1
|
||||
return y == size[1]
|
||||
|
||||
def _open(self):
|
||||
|
||||
# load descriptive fields
|
||||
while True:
|
||||
offset = self.fp.tell()
|
||||
tag, size = self.field()
|
||||
if not tag or tag == (8,10):
|
||||
break
|
||||
if size:
|
||||
tagdata = self.fp.read(size)
|
||||
else:
|
||||
tagdata = None
|
||||
if tag in list(self.info.keys()):
|
||||
if isinstance(self.info[tag], list):
|
||||
self.info[tag].append(tagdata)
|
||||
else:
|
||||
self.info[tag] = [self.info[tag], tagdata]
|
||||
else:
|
||||
self.info[tag] = tagdata
|
||||
|
||||
# print tag, self.info[tag]
|
||||
|
||||
# mode
|
||||
layers = i8(self.info[(3,60)][0])
|
||||
component = i8(self.info[(3,60)][1])
|
||||
if (3,65) in self.info:
|
||||
id = i8(self.info[(3,65)][0])-1
|
||||
else:
|
||||
id = 0
|
||||
if layers == 1 and not component:
|
||||
self.mode = "L"
|
||||
elif layers == 3 and component:
|
||||
self.mode = "RGB"[id]
|
||||
elif layers == 4 and component:
|
||||
self.mode = "CMYK"[id]
|
||||
|
||||
# size
|
||||
self.size = self.getint((3,20)), self.getint((3,30))
|
||||
|
||||
# compression
|
||||
try:
|
||||
compression = COMPRESSION[self.getint((3,120))]
|
||||
except KeyError:
|
||||
raise IOError("Unknown IPTC image compression")
|
||||
|
||||
# tile
|
||||
if tag == (8,10):
|
||||
if compression == "raw" and self._is_raw(offset, self.size):
|
||||
self.tile = [(compression, (offset, size + 5, -1),
|
||||
(0, 0, self.size[0], self.size[1]))]
|
||||
else:
|
||||
self.tile = [("iptc", (compression, offset),
|
||||
(0, 0, self.size[0], self.size[1]))]
|
||||
|
||||
def load(self):
|
||||
|
||||
if len(self.tile) != 1 or self.tile[0][0] != "iptc":
|
||||
return ImageFile.ImageFile.load(self)
|
||||
|
||||
type, tile, box = self.tile[0]
|
||||
|
||||
encoding, offset = tile
|
||||
|
||||
self.fp.seek(offset)
|
||||
|
||||
# Copy image data to temporary file
|
||||
o_fd, outfile = tempfile.mkstemp(text=False)
|
||||
o = os.fdopen(o_fd)
|
||||
if encoding == "raw":
|
||||
# To simplify access to the extracted file,
|
||||
# prepend a PPM header
|
||||
o.write("P5\n%d %d\n255\n" % self.size)
|
||||
while True:
|
||||
type, size = self.field()
|
||||
if type != (8, 10):
|
||||
break
|
||||
while size > 0:
|
||||
s = self.fp.read(min(size, 8192))
|
||||
if not s:
|
||||
break
|
||||
o.write(s)
|
||||
size -= len(s)
|
||||
o.close()
|
||||
|
||||
try:
|
||||
try:
|
||||
# fast
|
||||
self.im = Image.core.open_ppm(outfile)
|
||||
except Exception:
|
||||
# slightly slower
|
||||
im = Image.open(outfile)
|
||||
im.load()
|
||||
self.im = im.im
|
||||
finally:
|
||||
try: os.unlink(outfile)
|
||||
except OSError: pass
|
||||
|
||||
|
||||
Image.register_open("IPTC", IptcImageFile)
|
||||
|
||||
Image.register_extension("IPTC", ".iim")
|
||||
|
||||
##
|
||||
# Get IPTC information from TIFF, JPEG, or IPTC file.
|
||||
#
|
||||
# @param im An image containing IPTC data.
|
||||
# @return A dictionary containing IPTC information, or None if
|
||||
# no IPTC information block was found.
|
||||
|
||||
def getiptcinfo(im):
|
||||
|
||||
from PIL import TiffImagePlugin, JpegImagePlugin
|
||||
import io
|
||||
|
||||
data = None
|
||||
|
||||
if isinstance(im, IptcImageFile):
|
||||
# return info dictionary right away
|
||||
return im.info
|
||||
|
||||
elif isinstance(im, JpegImagePlugin.JpegImageFile):
|
||||
# extract the IPTC/NAA resource
|
||||
try:
|
||||
app = im.app["APP13"]
|
||||
if app[:14] == b"Photoshop 3.0\x00":
|
||||
app = app[14:]
|
||||
# parse the image resource block
|
||||
offset = 0
|
||||
while app[offset:offset+4] == b"8BIM":
|
||||
offset += 4
|
||||
# resource code
|
||||
code = JpegImagePlugin.i16(app, offset)
|
||||
offset += 2
|
||||
# resource name (usually empty)
|
||||
name_len = i8(app[offset])
|
||||
name = app[offset+1:offset+1+name_len]
|
||||
offset = 1 + offset + name_len
|
||||
if offset & 1:
|
||||
offset += 1
|
||||
# resource data block
|
||||
size = JpegImagePlugin.i32(app, offset)
|
||||
offset += 4
|
||||
if code == 0x0404:
|
||||
# 0x0404 contains IPTC/NAA data
|
||||
data = app[offset:offset+size]
|
||||
break
|
||||
offset = offset + size
|
||||
if offset & 1:
|
||||
offset += 1
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
|
||||
elif isinstance(im, TiffImagePlugin.TiffImageFile):
|
||||
# get raw data from the IPTC/NAA tag (PhotoShop tags the data
|
||||
# as 4-byte integers, so we cannot use the get method...)
|
||||
try:
|
||||
data = im.tag.tagdata[TiffImagePlugin.IPTC_NAA_CHUNK]
|
||||
except (AttributeError, KeyError):
|
||||
pass
|
||||
|
||||
if data is None:
|
||||
return None # no properties
|
||||
|
||||
# create an IptcImagePlugin object without initializing it
|
||||
class FakeImage:
|
||||
pass
|
||||
im = FakeImage()
|
||||
im.__class__ = IptcImageFile
|
||||
|
||||
# parse the IPTC information chunk
|
||||
im.info = {}
|
||||
im.fp = io.BytesIO(data)
|
||||
|
||||
try:
|
||||
im._open()
|
||||
except (IndexError, KeyError):
|
||||
pass # expected failure
|
||||
|
||||
return im.info
|
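# A minimal usage sketch of getiptcinfo, assuming the module is reached as
# PIL.IptcImagePlugin and "press_photo.jpg" is only an example filename;
# keys are (record, dataset) tuples such as (2, 120) for the caption.
from PIL import Image, IptcImagePlugin

im = Image.open("press_photo.jpg")
info = IptcImagePlugin.getiptcinfo(im)
if info is None:
    print("no IPTC block found")
else:
    for tag, value in sorted(info.items()):
        print(tag, value)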
|
@ -1,277 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library
|
||||
# $Id$
|
||||
#
|
||||
# JPEG2000 file handling
|
||||
#
|
||||
# History:
|
||||
# 2014-03-12 ajh Created
|
||||
#
|
||||
# Copyright (c) 2014 Coriolis Systems Limited
|
||||
# Copyright (c) 2014 Alastair Houghton
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
__version__ = "0.1"
|
||||
|
||||
from PIL import Image, ImageFile
|
||||
import struct
|
||||
import os
|
||||
import io
|
||||
|
||||
|
||||
def _parse_codestream(fp):
|
||||
"""Parse the JPEG 2000 codestream to extract the size and component
|
||||
count from the SIZ marker segment, returning a PIL (size, mode) tuple."""
|
||||
|
||||
hdr = fp.read(2)
|
||||
lsiz = struct.unpack('>H', hdr)[0]
|
||||
siz = hdr + fp.read(lsiz - 2)
|
||||
lsiz, rsiz, xsiz, ysiz, xosiz, yosiz, xtsiz, ytsiz, \
|
||||
xtosiz, ytosiz, csiz \
|
||||
= struct.unpack('>HHIIIIIIIIH', siz[:38])
|
||||
ssiz = [None]*csiz
|
||||
xrsiz = [None]*csiz
|
||||
yrsiz = [None]*csiz
|
||||
for i in range(csiz):
|
||||
ssiz[i], xrsiz[i], yrsiz[i] \
|
||||
= struct.unpack('>BBB', siz[36 + 3 * i:39 + 3 * i])
|
||||
|
||||
size = (xsiz - xosiz, ysiz - yosiz)
|
||||
if csiz == 1:
|
||||
if (yrsiz[0] & 0x7f) > 8:
|
||||
mode = 'I;16'
|
||||
else:
|
||||
mode = 'L'
|
||||
elif csiz == 2:
|
||||
mode = 'LA'
|
||||
elif csiz == 3:
|
||||
mode = 'RGB'
|
||||
elif csiz == 4:
|
||||
mode = 'RGBA'
|
||||
else:
|
||||
mode = None
|
||||
|
||||
return (size, mode)
|
||||
|
||||
|
||||
def _parse_jp2_header(fp):
|
||||
"""Parse the JP2 header box to extract size, component count and
|
||||
color space information, returning a PIL (size, mode) tuple."""
|
||||
|
||||
# Find the JP2 header box
|
||||
header = None
|
||||
while True:
|
||||
lbox, tbox = struct.unpack('>I4s', fp.read(8))
|
||||
if lbox == 1:
|
||||
lbox = struct.unpack('>Q', fp.read(8))[0]
|
||||
hlen = 16
|
||||
else:
|
||||
hlen = 8
|
||||
|
||||
if lbox < hlen:
|
||||
raise SyntaxError('Invalid JP2 header length')
|
||||
|
||||
if tbox == b'jp2h':
|
||||
header = fp.read(lbox - hlen)
|
||||
break
|
||||
else:
|
||||
fp.seek(lbox - hlen, os.SEEK_CUR)
|
||||
|
||||
if header is None:
|
||||
raise SyntaxError('could not find JP2 header')
|
||||
|
||||
size = None
|
||||
mode = None
|
||||
bpc = None
|
||||
|
||||
hio = io.BytesIO(header)
|
||||
while True:
|
||||
lbox, tbox = struct.unpack('>I4s', hio.read(8))
|
||||
if lbox == 1:
|
||||
lbox = struct.unpack('>Q', hio.read(8))[0]
|
||||
hlen = 16
|
||||
else:
|
||||
hlen = 8
|
||||
|
||||
content = hio.read(lbox - hlen)
|
||||
|
||||
if tbox == b'ihdr':
|
||||
height, width, nc, bpc, c, unkc, ipr \
|
||||
= struct.unpack('>IIHBBBB', content)
|
||||
size = (width, height)
|
||||
if unkc:
|
||||
if nc == 1 and (bpc & 0x7f) > 8:
|
||||
mode = 'I;16'
|
||||
elif nc == 1:
|
||||
mode = 'L'
|
||||
elif nc == 2:
|
||||
mode = 'LA'
|
||||
elif nc == 3:
|
||||
mode = 'RGB'
|
||||
elif nc == 4:
|
||||
mode = 'RGBA'
|
||||
break
|
||||
elif tbox == b'colr':
|
||||
meth, prec, approx = struct.unpack('>BBB', content[:3])
|
||||
if meth == 1:
|
||||
cs = struct.unpack('>I', content[3:7])[0]
|
||||
if cs == 16: # sRGB
|
||||
if nc == 1 and (bpc & 0x7f) > 8:
|
||||
mode = 'I;16'
|
||||
elif nc == 1:
|
||||
mode = 'L'
|
||||
elif nc == 3:
|
||||
mode = 'RGB'
|
||||
elif nc == 4:
|
||||
mode = 'RGBA'
|
||||
break
|
||||
elif cs == 17: # grayscale
|
||||
if nc == 1 and (bpc & 0x7f) > 8:
|
||||
mode = 'I;16'
|
||||
elif nc == 1:
|
||||
mode = 'L'
|
||||
elif nc == 2:
|
||||
mode = 'LA'
|
||||
break
|
||||
elif cs == 18: # sYCC
|
||||
if nc == 3:
|
||||
mode = 'RGB'
|
||||
elif nc == 4:
|
||||
mode = 'RGBA'
|
||||
break
|
||||
|
||||
return (size, mode)
|
||||
|
||||
##
|
||||
# Image plugin for JPEG2000 images.
|
||||
|
||||
|
||||
class Jpeg2KImageFile(ImageFile.ImageFile):
|
||||
format = "JPEG2000"
|
||||
format_description = "JPEG 2000 (ISO 15444)"
|
||||
|
||||
def _open(self):
|
||||
sig = self.fp.read(4)
|
||||
if sig == b'\xff\x4f\xff\x51':
|
||||
self.codec = "j2k"
|
||||
self.size, self.mode = _parse_codestream(self.fp)
|
||||
else:
|
||||
sig = sig + self.fp.read(8)
|
||||
|
||||
if sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a':
|
||||
self.codec = "jp2"
|
||||
self.size, self.mode = _parse_jp2_header(self.fp)
|
||||
else:
|
||||
raise SyntaxError('not a JPEG 2000 file')
|
||||
|
||||
if self.size is None or self.mode is None:
|
||||
raise SyntaxError('unable to determine size/mode')
|
||||
|
||||
self.reduce = 0
|
||||
self.layers = 0
|
||||
|
||||
fd = -1
|
||||
length = -1
|
||||
|
||||
try:
|
||||
fd = self.fp.fileno()
|
||||
length = os.fstat(fd).st_size
|
||||
except:
|
||||
fd = -1
|
||||
try:
|
||||
pos = self.fp.tell()
|
||||
self.fp.seek(0, 2)
|
||||
length = self.fp.tell()
|
||||
self.fp.seek(pos, 0)
|
||||
except:
|
||||
length = -1
|
||||
|
||||
self.tile = [('jpeg2k', (0, 0) + self.size, 0,
|
||||
(self.codec, self.reduce, self.layers, fd, length))]
|
||||
|
||||
def load(self):
|
||||
if self.reduce:
|
||||
power = 1 << self.reduce
|
||||
adjust = power >> 1
|
||||
self.size = (int((self.size[0] + adjust) / power),
|
||||
int((self.size[1] + adjust) / power))
|
||||
|
||||
if self.tile:
|
||||
# Update the reduce and layers settings
|
||||
t = self.tile[0]
|
||||
t3 = (t[3][0], self.reduce, self.layers, t[3][3], t[3][4])
|
||||
self.tile = [(t[0], (0, 0) + self.size, t[2], t3)]
|
||||
|
||||
ImageFile.ImageFile.load(self)
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return (prefix[:4] == b'\xff\x4f\xff\x51'
|
||||
or prefix[:12] == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a')
|
||||
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Save support
|
||||
|
||||
def _save(im, fp, filename):
|
||||
if filename.endswith('.j2k'):
|
||||
kind = 'j2k'
|
||||
else:
|
||||
kind = 'jp2'
|
||||
|
||||
# Get the keyword arguments
|
||||
info = im.encoderinfo
|
||||
|
||||
offset = info.get('offset', None)
|
||||
tile_offset = info.get('tile_offset', None)
|
||||
tile_size = info.get('tile_size', None)
|
||||
quality_mode = info.get('quality_mode', 'rates')
|
||||
quality_layers = info.get('quality_layers', None)
|
||||
num_resolutions = info.get('num_resolutions', 0)
|
||||
cblk_size = info.get('codeblock_size', None)
|
||||
precinct_size = info.get('precinct_size', None)
|
||||
irreversible = info.get('irreversible', False)
|
||||
progression = info.get('progression', 'LRCP')
|
||||
cinema_mode = info.get('cinema_mode', 'no')
|
||||
fd = -1
|
||||
|
||||
if hasattr(fp, "fileno"):
|
||||
try:
|
||||
fd = fp.fileno()
|
||||
except:
|
||||
fd = -1
|
||||
|
||||
im.encoderconfig = (
|
||||
offset,
|
||||
tile_offset,
|
||||
tile_size,
|
||||
quality_mode,
|
||||
quality_layers,
|
||||
num_resolutions,
|
||||
cblk_size,
|
||||
precinct_size,
|
||||
irreversible,
|
||||
progression,
|
||||
cinema_mode,
|
||||
fd
|
||||
)
|
||||
|
||||
ImageFile._save(im, fp, [('jpeg2k', (0, 0)+im.size, 0, kind)])
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Registry stuff
|
||||
|
||||
Image.register_open('JPEG2000', Jpeg2KImageFile, _accept)
|
||||
Image.register_save('JPEG2000', _save)
|
||||
|
||||
Image.register_extension('JPEG2000', '.jp2')
|
||||
Image.register_extension('JPEG2000', '.j2k')
|
||||
Image.register_extension('JPEG2000', '.jpc')
|
||||
Image.register_extension('JPEG2000', '.jpf')
|
||||
Image.register_extension('JPEG2000', '.jpx')
|
||||
Image.register_extension('JPEG2000', '.j2c')
|
||||
|
||||
Image.register_mime('JPEG2000', 'image/jp2')
|
||||
Image.register_mime('JPEG2000', 'image/jpx')
|
|
@ -1,625 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# JPEG (JFIF) file handling
|
||||
#
|
||||
# See "Digital Compression and Coding of Continous-Tone Still Images,
|
||||
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
|
||||
#
|
||||
# History:
|
||||
# 1995-09-09 fl Created
|
||||
# 1995-09-13 fl Added full parser
|
||||
# 1996-03-25 fl Added hack to use the IJG command line utilities
|
||||
# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
|
||||
# 1996-05-28 fl Added draft support, JFIF version (0.1)
|
||||
# 1996-12-30 fl Added encoder options, added progression property (0.2)
|
||||
# 1997-08-27 fl Save mode 1 images as BW (0.3)
|
||||
# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)
|
||||
# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)
|
||||
# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)
|
||||
# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)
|
||||
# 2003-04-25 fl Added experimental EXIF decoder (0.5)
|
||||
# 2003-06-06 fl Added experimental EXIF GPSinfo decoder
|
||||
# 2003-09-13 fl Extract COM markers
|
||||
# 2009-09-06 fl Added icc_profile support (from Florian Hoech)
|
||||
# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)
|
||||
# 2009-03-08 fl Added subsampling support (from Justin Huff).
|
||||
#
|
||||
# Copyright (c) 1997-2003 by Secret Labs AB.
|
||||
# Copyright (c) 1995-1996 by Fredrik Lundh.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
__version__ = "0.6"
|
||||
|
||||
import array
|
||||
import struct
|
||||
from PIL import Image, ImageFile, _binary
|
||||
from PIL.JpegPresets import presets
|
||||
from PIL._util import isStringType
|
||||
|
||||
i8 = _binary.i8
|
||||
o8 = _binary.o8
|
||||
i16 = _binary.i16be
|
||||
i32 = _binary.i32be
|
||||
|
||||
|
||||
#
|
||||
# Parser
|
||||
|
||||
def Skip(self, marker):
|
||||
n = i16(self.fp.read(2))-2
|
||||
ImageFile._safe_read(self.fp, n)
|
||||
|
||||
|
||||
def APP(self, marker):
|
||||
#
|
||||
# Application marker. Store these in the APP dictionary.
|
||||
# Also look for well-known application markers.
|
||||
|
||||
n = i16(self.fp.read(2))-2
|
||||
s = ImageFile._safe_read(self.fp, n)
|
||||
|
||||
app = "APP%d" % (marker & 15)
|
||||
|
||||
self.app[app] = s # compatibility
|
||||
self.applist.append((app, s))
|
||||
|
||||
if marker == 0xFFE0 and s[:4] == b"JFIF":
|
||||
# extract JFIF information
|
||||
self.info["jfif"] = version = i16(s, 5) # version
|
||||
self.info["jfif_version"] = divmod(version, 256)
|
||||
# extract JFIF properties
|
||||
try:
|
||||
jfif_unit = i8(s[7])
|
||||
jfif_density = i16(s, 8), i16(s, 10)
|
||||
except:
|
||||
pass
|
||||
else:
|
||||
if jfif_unit == 1:
|
||||
self.info["dpi"] = jfif_density
|
||||
self.info["jfif_unit"] = jfif_unit
|
||||
self.info["jfif_density"] = jfif_density
|
||||
elif marker == 0xFFE1 and s[:5] == b"Exif\0":
|
||||
# extract Exif information (incomplete)
|
||||
self.info["exif"] = s # FIXME: value will change
|
||||
elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
|
||||
# extract FlashPix information (incomplete)
|
||||
self.info["flashpix"] = s # FIXME: value will change
|
||||
elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
|
||||
# Since an ICC profile can be larger than the maximum size of
|
||||
# a JPEG marker (64K), we need provisions to split it into
|
||||
# multiple markers. The format defined by the ICC specifies
|
||||
# one or more APP2 markers containing the following data:
|
||||
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
|
||||
# Marker sequence number 1, 2, etc (1 byte)
|
||||
# Number of markers Total of APP2's used (1 byte)
|
||||
# Profile data (remainder of APP2 data)
|
||||
# Decoders should use the marker sequence numbers to
|
||||
# reassemble the profile, rather than assuming that the APP2
|
||||
# markers appear in the correct sequence.
|
||||
self.icclist.append(s)
|
||||
elif marker == 0xFFEE and s[:5] == b"Adobe":
|
||||
self.info["adobe"] = i16(s, 5)
|
||||
# extract Adobe custom properties
|
||||
try:
|
||||
adobe_transform = i8(s[1])
|
||||
except:
|
||||
pass
|
||||
else:
|
||||
self.info["adobe_transform"] = adobe_transform
|
||||
|
||||
|
||||
def COM(self, marker):
|
||||
#
|
||||
# Comment marker. Store these in the APP dictionary.
|
||||
n = i16(self.fp.read(2))-2
|
||||
s = ImageFile._safe_read(self.fp, n)
|
||||
|
||||
self.app["COM"] = s # compatibility
|
||||
self.applist.append(("COM", s))
|
||||
|
||||
|
||||
def SOF(self, marker):
|
||||
#
|
||||
# Start of frame marker. Defines the size and mode of the
|
||||
# image. JPEG is colour blind, so we use some simple
|
||||
# heuristics to map the number of layers to an appropriate
|
||||
# mode. Note that this could be made a bit brighter, by
|
||||
# looking for JFIF and Adobe APP markers.
|
||||
|
||||
n = i16(self.fp.read(2))-2
|
||||
s = ImageFile._safe_read(self.fp, n)
|
||||
self.size = i16(s[3:]), i16(s[1:])
|
||||
|
||||
self.bits = i8(s[0])
|
||||
if self.bits != 8:
|
||||
raise SyntaxError("cannot handle %d-bit layers" % self.bits)
|
||||
|
||||
self.layers = i8(s[5])
|
||||
if self.layers == 1:
|
||||
self.mode = "L"
|
||||
elif self.layers == 3:
|
||||
self.mode = "RGB"
|
||||
elif self.layers == 4:
|
||||
self.mode = "CMYK"
|
||||
else:
|
||||
raise SyntaxError("cannot handle %d-layer images" % self.layers)
|
||||
|
||||
if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
|
||||
self.info["progressive"] = self.info["progression"] = 1
|
||||
|
||||
if self.icclist:
|
||||
# fixup icc profile
|
||||
self.icclist.sort() # sort by sequence number
|
||||
if i8(self.icclist[0][13]) == len(self.icclist):
|
||||
profile = []
|
||||
for p in self.icclist:
|
||||
profile.append(p[14:])
|
||||
icc_profile = b"".join(profile)
|
||||
else:
|
||||
icc_profile = None # wrong number of fragments
|
||||
self.info["icc_profile"] = icc_profile
|
||||
self.icclist = None
|
||||
|
||||
for i in range(6, len(s), 3):
|
||||
t = s[i:i+3]
|
||||
# 4-tuples: id, vsamp, hsamp, qtable
|
||||
self.layer.append((t[0], i8(t[1])//16, i8(t[1]) & 15, i8(t[2])))
|
||||
|
||||
|
||||
def DQT(self, marker):
|
||||
#
|
||||
# Define quantization table. Support baseline 8-bit tables
|
||||
# only. Note that there might be more than one table in
|
||||
# each marker.
|
||||
|
||||
# FIXME: The quantization tables can be used to estimate the
|
||||
# compression quality.
|
||||
|
||||
n = i16(self.fp.read(2))-2
|
||||
s = ImageFile._safe_read(self.fp, n)
|
||||
while len(s):
|
||||
if len(s) < 65:
|
||||
raise SyntaxError("bad quantization table marker")
|
||||
v = i8(s[0])
|
||||
if v//16 == 0:
|
||||
self.quantization[v & 15] = array.array("b", s[1:65])
|
||||
s = s[65:]
|
||||
else:
|
||||
return # FIXME: add code to read 16-bit tables!
|
||||
# raise SyntaxError, "bad quantization table element size"
|
||||
|
||||
|
||||
#
|
||||
# JPEG marker table
|
||||
|
||||
MARKER = {
|
||||
0xFFC0: ("SOF0", "Baseline DCT", SOF),
|
||||
0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
|
||||
0xFFC2: ("SOF2", "Progressive DCT", SOF),
|
||||
0xFFC3: ("SOF3", "Spatial lossless", SOF),
|
||||
0xFFC4: ("DHT", "Define Huffman table", Skip),
|
||||
0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
|
||||
0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
|
||||
0xFFC7: ("SOF7", "Differential spatial", SOF),
|
||||
0xFFC8: ("JPG", "Extension", None),
|
||||
0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
|
||||
0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
|
||||
0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
|
||||
0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
|
||||
0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
|
||||
0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
|
||||
0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
|
||||
0xFFD0: ("RST0", "Restart 0", None),
|
||||
0xFFD1: ("RST1", "Restart 1", None),
|
||||
0xFFD2: ("RST2", "Restart 2", None),
|
||||
0xFFD3: ("RST3", "Restart 3", None),
|
||||
0xFFD4: ("RST4", "Restart 4", None),
|
||||
0xFFD5: ("RST5", "Restart 5", None),
|
||||
0xFFD6: ("RST6", "Restart 6", None),
|
||||
0xFFD7: ("RST7", "Restart 7", None),
|
||||
0xFFD8: ("SOI", "Start of image", None),
|
||||
0xFFD9: ("EOI", "End of image", None),
|
||||
0xFFDA: ("SOS", "Start of scan", Skip),
|
||||
0xFFDB: ("DQT", "Define quantization table", DQT),
|
||||
0xFFDC: ("DNL", "Define number of lines", Skip),
|
||||
0xFFDD: ("DRI", "Define restart interval", Skip),
|
||||
0xFFDE: ("DHP", "Define hierarchical progression", SOF),
|
||||
0xFFDF: ("EXP", "Expand reference component", Skip),
|
||||
0xFFE0: ("APP0", "Application segment 0", APP),
|
||||
0xFFE1: ("APP1", "Application segment 1", APP),
|
||||
0xFFE2: ("APP2", "Application segment 2", APP),
|
||||
0xFFE3: ("APP3", "Application segment 3", APP),
|
||||
0xFFE4: ("APP4", "Application segment 4", APP),
|
||||
0xFFE5: ("APP5", "Application segment 5", APP),
|
||||
0xFFE6: ("APP6", "Application segment 6", APP),
|
||||
0xFFE7: ("APP7", "Application segment 7", APP),
|
||||
0xFFE8: ("APP8", "Application segment 8", APP),
|
||||
0xFFE9: ("APP9", "Application segment 9", APP),
|
||||
0xFFEA: ("APP10", "Application segment 10", APP),
|
||||
0xFFEB: ("APP11", "Application segment 11", APP),
|
||||
0xFFEC: ("APP12", "Application segment 12", APP),
|
||||
0xFFED: ("APP13", "Application segment 13", APP),
|
||||
0xFFEE: ("APP14", "Application segment 14", APP),
|
||||
0xFFEF: ("APP15", "Application segment 15", APP),
|
||||
0xFFF0: ("JPG0", "Extension 0", None),
|
||||
0xFFF1: ("JPG1", "Extension 1", None),
|
||||
0xFFF2: ("JPG2", "Extension 2", None),
|
||||
0xFFF3: ("JPG3", "Extension 3", None),
|
||||
0xFFF4: ("JPG4", "Extension 4", None),
|
||||
0xFFF5: ("JPG5", "Extension 5", None),
|
||||
0xFFF6: ("JPG6", "Extension 6", None),
|
||||
0xFFF7: ("JPG7", "Extension 7", None),
|
||||
0xFFF8: ("JPG8", "Extension 8", None),
|
||||
0xFFF9: ("JPG9", "Extension 9", None),
|
||||
0xFFFA: ("JPG10", "Extension 10", None),
|
||||
0xFFFB: ("JPG11", "Extension 11", None),
|
||||
0xFFFC: ("JPG12", "Extension 12", None),
|
||||
0xFFFD: ("JPG13", "Extension 13", None),
|
||||
0xFFFE: ("COM", "Comment", COM)
|
||||
}
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[0:1] == b"\377"
|
||||
|
||||
|
||||
##
|
||||
# Image plugin for JPEG and JFIF images.
|
||||
|
||||
class JpegImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "JPEG"
|
||||
format_description = "JPEG (ISO 10918)"
|
||||
|
||||
def _open(self):
|
||||
|
||||
s = self.fp.read(1)
|
||||
|
||||
if i8(s[0]) != 255:
|
||||
raise SyntaxError("not a JPEG file")
|
||||
|
||||
# Create attributes
|
||||
self.bits = self.layers = 0
|
||||
|
||||
# JPEG specifics (internal)
|
||||
self.layer = []
|
||||
self.huffman_dc = {}
|
||||
self.huffman_ac = {}
|
||||
self.quantization = {}
|
||||
self.app = {} # compatibility
|
||||
self.applist = []
|
||||
self.icclist = []
|
||||
|
||||
while True:
|
||||
|
||||
i = i8(s)
|
||||
if i == 0xFF:
|
||||
s = s + self.fp.read(1)
|
||||
i = i16(s)
|
||||
else:
|
||||
# Skip non-0xFF junk
|
||||
s = b"\xff"
|
||||
continue
|
||||
|
||||
if i in MARKER:
|
||||
name, description, handler = MARKER[i]
|
||||
# print hex(i), name, description
|
||||
if handler is not None:
|
||||
handler(self, i)
|
||||
if i == 0xFFDA: # start of scan
|
||||
rawmode = self.mode
|
||||
if self.mode == "CMYK":
|
||||
rawmode = "CMYK;I" # assume adobe conventions
|
||||
self.tile = [("jpeg", (0, 0) + self.size, 0, (rawmode, ""))]
|
||||
# self.__offset = self.fp.tell()
|
||||
break
|
||||
s = self.fp.read(1)
|
||||
elif i == 0 or i == 0xFFFF:
|
||||
# padded marker or junk; move on
|
||||
s = b"\xff"
|
||||
else:
|
||||
raise SyntaxError("no marker found")
|
||||
|
||||
def draft(self, mode, size):
|
||||
|
||||
if len(self.tile) != 1:
|
||||
return
|
||||
|
||||
d, e, o, a = self.tile[0]
|
||||
scale = 0
|
||||
|
||||
if a[0] == "RGB" and mode in ["L", "YCbCr"]:
|
||||
self.mode = mode
|
||||
a = mode, ""
|
||||
|
||||
if size:
|
||||
scale = max(self.size[0] // size[0], self.size[1] // size[1])
|
||||
for s in [8, 4, 2, 1]:
|
||||
if scale >= s:
|
||||
break
|
||||
e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1]
|
||||
self.size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s)
|
||||
scale = s
|
||||
|
||||
self.tile = [(d, e, o, a)]
|
||||
self.decoderconfig = (scale, 1)
|
||||
|
||||
return self
|
||||
|
||||
def load_djpeg(self):
|
||||
|
||||
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
|
||||
|
||||
import subprocess
|
||||
import tempfile
|
||||
import os
|
||||
f, path = tempfile.mkstemp()
|
||||
os.close(f)
|
||||
if os.path.exists(self.filename):
|
||||
subprocess.check_call(["djpeg", "-outfile", path, self.filename])
|
||||
else:
|
||||
raise ValueError("Invalid Filename")
|
||||
|
||||
try:
|
||||
self.im = Image.core.open_ppm(path)
|
||||
finally:
|
||||
try:
|
||||
os.unlink(path)
|
||||
except:
|
||||
pass
|
||||
|
||||
self.mode = self.im.mode
|
||||
self.size = self.im.size
|
||||
|
||||
self.tile = []
|
||||
|
||||
def _getexif(self):
|
||||
return _getexif(self)
|
||||
|
||||
|
||||
def _getexif(self):
|
||||
# Extract EXIF information. This method is highly experimental,
|
||||
# and is likely to be replaced with something better in a future
|
||||
# version.
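#
# A rough usage sketch ("photo.jpg" is a placeholder; 0x010F is the
# standard EXIF "Make" tag):
#
#     im = Image.open("photo.jpg")
#     exif = im._getexif()
#     if exif:
#         print(exif.get(0x010F))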
|
||||
from PIL import TiffImagePlugin
|
||||
import io
|
||||
|
||||
def fixup(value):
|
||||
if len(value) == 1:
|
||||
return value[0]
|
||||
return value
|
||||
# The EXIF record consists of a TIFF file embedded in a JPEG
|
||||
# application marker (!).
|
||||
try:
|
||||
data = self.info["exif"]
|
||||
except KeyError:
|
||||
return None
|
||||
file = io.BytesIO(data[6:])
|
||||
head = file.read(8)
|
||||
exif = {}
|
||||
# process dictionary
|
||||
info = TiffImagePlugin.ImageFileDirectory(head)
|
||||
info.load(file)
|
||||
for key, value in info.items():
|
||||
exif[key] = fixup(value)
|
||||
# get exif extension
|
||||
try:
|
||||
file.seek(exif[0x8769])
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
info = TiffImagePlugin.ImageFileDirectory(head)
|
||||
info.load(file)
|
||||
for key, value in info.items():
|
||||
exif[key] = fixup(value)
|
||||
# get gpsinfo extension
|
||||
try:
|
||||
file.seek(exif[0x8825])
|
||||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
info = TiffImagePlugin.ImageFileDirectory(head)
|
||||
info.load(file)
|
||||
exif[0x8825] = gps = {}
|
||||
for key, value in info.items():
|
||||
gps[key] = fixup(value)
|
||||
return exif
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# stuff to save JPEG files
|
||||
|
||||
RAWMODE = {
|
||||
"1": "L",
|
||||
"L": "L",
|
||||
"RGB": "RGB",
|
||||
"RGBA": "RGB",
|
||||
"RGBX": "RGB",
|
||||
"CMYK": "CMYK;I", # assume adobe conventions
|
||||
"YCbCr": "YCbCr",
|
||||
}
|
||||
|
||||
zigzag_index = ( 0, 1, 5, 6, 14, 15, 27, 28,
|
||||
2, 4, 7, 13, 16, 26, 29, 42,
|
||||
3, 8, 12, 17, 25, 30, 41, 43,
|
||||
9, 11, 18, 24, 31, 40, 44, 53,
|
||||
10, 19, 23, 32, 39, 45, 52, 54,
|
||||
20, 22, 33, 38, 46, 51, 55, 60,
|
||||
21, 34, 37, 47, 50, 56, 59, 61,
|
||||
35, 36, 48, 49, 57, 58, 62, 63)
|
||||
|
||||
samplings = {
|
||||
(1, 1, 1, 1, 1, 1): 0,
|
||||
(2, 1, 1, 1, 1, 1): 1,
|
||||
(2, 2, 1, 1, 1, 1): 2,
|
||||
}
|
||||
|
||||
|
||||
def convert_dict_qtables(qtables):
|
||||
qtables = [qtables[key] for key in range(len(qtables)) if key in qtables]
|
||||
for idx, table in enumerate(qtables):
|
||||
qtables[idx] = [table[i] for i in zigzag_index]
|
||||
return qtables
|
||||
|
||||
|
||||
def get_sampling(im):
|
||||
sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
|
||||
return samplings.get(sampling, -1)
|
||||
|
||||
|
||||
def _save(im, fp, filename):
|
||||
|
||||
try:
|
||||
rawmode = RAWMODE[im.mode]
|
||||
except KeyError:
|
||||
raise IOError("cannot write mode %s as JPEG" % im.mode)
|
||||
|
||||
info = im.encoderinfo
|
||||
|
||||
dpi = info.get("dpi", (0, 0))
|
||||
|
||||
quality = info.get("quality", 0)
|
||||
subsampling = info.get("subsampling", -1)
|
||||
qtables = info.get("qtables")
|
||||
|
||||
if quality == "keep":
|
||||
quality = 0
|
||||
subsampling = "keep"
|
||||
qtables = "keep"
|
||||
elif quality in presets:
|
||||
preset = presets[quality]
|
||||
quality = 0
|
||||
subsampling = preset.get('subsampling', -1)
|
||||
qtables = preset.get('quantization')
|
||||
elif not isinstance(quality, int):
|
||||
raise ValueError("Invalid quality setting")
|
||||
else:
|
||||
if subsampling in presets:
|
||||
subsampling = presets[subsampling].get('subsampling', -1)
|
||||
if isStringType(qtables) and qtables in presets:
|
||||
qtables = presets[qtables].get('quantization')
|
||||
|
||||
if subsampling == "4:4:4":
|
||||
subsampling = 0
|
||||
elif subsampling == "4:2:2":
|
||||
subsampling = 1
|
||||
elif subsampling == "4:1:1":
|
||||
subsampling = 2
|
||||
elif subsampling == "keep":
|
||||
if im.format != "JPEG":
|
||||
raise ValueError("Cannot use 'keep' when original image is not a JPEG")
|
||||
subsampling = get_sampling(im)
|
||||
|
||||
def validate_qtables(qtables):
|
||||
if qtables is None:
|
||||
return qtables
|
||||
if isStringType(qtables):
|
||||
try:
|
||||
lines = [int(num) for line in qtables.splitlines()
|
||||
for num in line.split('#', 1)[0].split()]
|
||||
except ValueError:
|
||||
raise ValueError("Invalid quantization table")
|
||||
else:
|
||||
qtables = [lines[s:s+64] for s in range(0, len(lines), 64)]
|
||||
if isinstance(qtables, (tuple, list, dict)):
|
||||
if isinstance(qtables, dict):
|
||||
qtables = convert_dict_qtables(qtables)
|
||||
elif isinstance(qtables, tuple):
|
||||
qtables = list(qtables)
|
||||
if not (0 < len(qtables) < 5):
|
||||
raise ValueError("None or too many quantization tables")
|
||||
for idx, table in enumerate(qtables):
|
||||
try:
|
||||
if len(table) != 64:
|
||||
raise TypeError("Invalid quantization table size")
|
||||
table = array.array('b', table)
|
||||
except TypeError:
|
||||
raise ValueError("Invalid quantization table")
|
||||
else:
|
||||
qtables[idx] = list(table)
|
||||
return qtables
|
||||
|
||||
if qtables == "keep":
|
||||
if im.format != "JPEG":
|
||||
raise ValueError("Cannot use 'keep' when original image is not a JPEG")
|
||||
qtables = getattr(im, "quantization", None)
|
||||
qtables = validate_qtables(qtables)
|
||||
|
||||
extra = b""
|
||||
|
||||
icc_profile = info.get("icc_profile")
|
||||
if icc_profile:
|
||||
ICC_OVERHEAD_LEN = 14
|
||||
MAX_BYTES_IN_MARKER = 65533
|
||||
MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
|
||||
markers = []
|
||||
while icc_profile:
|
||||
markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
|
||||
icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
|
||||
i = 1
|
||||
for marker in markers:
|
||||
size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker))
|
||||
extra += b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) + o8(len(markers)) + marker
|
||||
i += 1
|
||||
|
||||
# get keyword arguments
|
||||
im.encoderconfig = (
|
||||
quality,
|
||||
# "progressive" is the official name, but older documentation
|
||||
# says "progression"
|
||||
# FIXME: issue a warning if the wrong form is used (post-1.1.7)
|
||||
"progressive" in info or "progression" in info,
|
||||
info.get("smooth", 0),
|
||||
"optimize" in info,
|
||||
info.get("streamtype", 0),
|
||||
dpi[0], dpi[1],
|
||||
subsampling,
|
||||
qtables,
|
||||
extra,
|
||||
info.get("exif", b"")
|
||||
)
|
||||
|
||||
# if we optimize, libjpeg needs a buffer big enough to hold the whole image
|
||||
# in one shot. Guessing on the size, at im.size bytes: raw pixel size is
|
||||
# channels * size; this is a value that's been used in a django patch.
|
||||
# https://github.com/jdriscoll/django-imagekit/issues/50
|
||||
bufsize = 0
|
||||
if "optimize" in info or "progressive" in info or "progression" in info:
|
||||
if quality >= 95:
|
||||
bufsize = 2 * im.size[0] * im.size[1]
|
||||
else:
|
||||
bufsize = im.size[0] * im.size[1]
|
||||
|
||||
# The exif info needs to be written as one block, + APP1, + one spare byte.
|
||||
# Ensure that our buffer is big enough
|
||||
bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif", b"")) + 5)
|
||||
|
||||
ImageFile._save(im, fp, [("jpeg", (0, 0)+im.size, 0, rawmode)], bufsize)
|
||||
|
||||
|
||||
def _save_cjpeg(im, fp, filename):
|
||||
# ALTERNATIVE: handle JPEGs via the IJG command line utilities.
|
||||
import os
|
||||
import subprocess
|
||||
tempfile = im._dump()
|
||||
subprocess.check_call(["cjpeg", "-outfile", filename, tempfile])
|
||||
try:
|
||||
os.unlink(tempfile)
|
||||
except:
|
||||
pass
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Registry stuff
|
||||
|
||||
Image.register_open("JPEG", JpegImageFile, _accept)
|
||||
Image.register_save("JPEG", _save)
|
||||
|
||||
Image.register_extension("JPEG", ".jfif")
|
||||
Image.register_extension("JPEG", ".jpe")
|
||||
Image.register_extension("JPEG", ".jpg")
|
||||
Image.register_extension("JPEG", ".jpeg")
|
||||
|
||||
Image.register_mime("JPEG", "image/jpeg")
|
|
@ -1,241 +0,0 @@
|
|||
"""
|
||||
JPEG quality settings equivalent to the Photoshop settings.
|
||||
|
||||
More presets can be added to the presets dict if needed.
|
||||
|
||||
Can be used when saving a JPEG file.
|
||||
|
||||
To apply the preset, specify::
|
||||
|
||||
quality="preset_name"
|
||||
|
||||
To apply only the quantization table::
|
||||
|
||||
qtables="preset_name"
|
||||
|
||||
To apply only the subsampling setting::
|
||||
|
||||
subsampling="preset_name"
|
||||
|
||||
Example::
|
||||
|
||||
im.save("image_name.jpg", quality="web_high")
|
||||
|
||||
|
||||
Subsampling
|
||||
-----------
|
||||
|
||||
Subsampling is the practice of encoding images by implementing less resolution
|
||||
for chroma information than for luma information.
|
||||
(ref.: http://en.wikipedia.org/wiki/Chroma_subsampling)
|
||||
|
||||
Possible subsampling values are 0, 1 and 2 that correspond to 4:4:4, 4:2:2 and
|
||||
4:1:1 (in libjpeg terms; the sampling factors used are 2x2, i.e. what is
usually called 4:2:0).
|
||||
|
||||
You can get the subsampling of a JPEG with the
|
||||
`JpegImagePlugin.get_sampling(im)` function.
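
For instance (a rough sketch; "photo.jpg" is a placeholder and the image is
assumed to be a colour JPEG, since the helper reads the first three layers)::

    from PIL import Image, JpegImagePlugin

    im = Image.open("photo.jpg")
    print(JpegImagePlugin.get_sampling(im))  # 0, 1, 2, or -1 if unknown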
|
||||
|
||||
|
||||
Quantization tables
|
||||
-------------------
|
||||
|
||||
These are the values used by the DCT (discrete cosine transform) to remove
|
||||
*unnecessary* information from the image (the lossy part of the compression).
|
||||
(ref.: http://en.wikipedia.org/wiki/Quantization_matrix#Quantization_matrices,
|
||||
http://en.wikipedia.org/wiki/JPEG#Quantization)
|
||||
|
||||
You can get the quantization tables of a JPEG with::
|
||||
|
||||
im.quantization
|
||||
|
||||
This will return a dict with a number of arrays. You can pass this dict directly
|
||||
as the qtables argument when saving a JPEG.
|
||||
|
||||
The table format used by im.quantization and the format used in the presets differ in
|
||||
3 ways:
|
||||
|
||||
1. The base container of the preset is a list with sublists instead of dict.
|
||||
dict[0] -> list[0], dict[1] -> list[1], ...
|
||||
2. Each table in a preset is a list instead of an array.
|
||||
3. The zigzag order is removed in the presets (as required by libjpeg >= 6a).
|
||||
|
||||
You can convert the dict format to the preset format with the
|
||||
`JpegImagePlugin.convert_dict_qtables(dict_qtables)` function.
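
A minimal sketch of that conversion (the file names are placeholders and the
source image is assumed to be an existing JPEG)::

    from PIL import Image, JpegImagePlugin

    im = Image.open("original.jpg")
    preset_style = JpegImagePlugin.convert_dict_qtables(im.quantization)
    im.save("copy.jpg", qtables=preset_style, subsampling=0)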
|
||||
|
||||
Libjpeg ref.: http://www.jpegcameras.com/libjpeg/libjpeg-3.html
|
||||
|
||||
"""
|
||||
|
||||
presets = {
|
||||
'web_low': {'subsampling': 2, # "4:1:1"
|
||||
'quantization': [
|
||||
[20, 16, 25, 39, 50, 46, 62, 68,
|
||||
16, 18, 23, 38, 38, 53, 65, 68,
|
||||
25, 23, 31, 38, 53, 65, 68, 68,
|
||||
39, 38, 38, 53, 65, 68, 68, 68,
|
||||
50, 38, 53, 65, 68, 68, 68, 68,
|
||||
46, 53, 65, 68, 68, 68, 68, 68,
|
||||
62, 65, 68, 68, 68, 68, 68, 68,
|
||||
68, 68, 68, 68, 68, 68, 68, 68],
|
||||
[21, 25, 32, 38, 54, 68, 68, 68,
|
||||
25, 28, 24, 38, 54, 68, 68, 68,
|
||||
32, 24, 32, 43, 66, 68, 68, 68,
|
||||
38, 38, 43, 53, 68, 68, 68, 68,
|
||||
54, 54, 66, 68, 68, 68, 68, 68,
|
||||
68, 68, 68, 68, 68, 68, 68, 68,
|
||||
68, 68, 68, 68, 68, 68, 68, 68,
|
||||
68, 68, 68, 68, 68, 68, 68, 68]
|
||||
]},
|
||||
'web_medium': {'subsampling': 2, # "4:1:1"
|
||||
'quantization': [
|
||||
[16, 11, 11, 16, 23, 27, 31, 30,
|
||||
11, 12, 12, 15, 20, 23, 23, 30,
|
||||
11, 12, 13, 16, 23, 26, 35, 47,
|
||||
16, 15, 16, 23, 26, 37, 47, 64,
|
||||
23, 20, 23, 26, 39, 51, 64, 64,
|
||||
27, 23, 26, 37, 51, 64, 64, 64,
|
||||
31, 23, 35, 47, 64, 64, 64, 64,
|
||||
30, 30, 47, 64, 64, 64, 64, 64],
|
||||
[17, 15, 17, 21, 20, 26, 38, 48,
|
||||
15, 19, 18, 17, 20, 26, 35, 43,
|
||||
17, 18, 20, 22, 26, 30, 46, 53,
|
||||
21, 17, 22, 28, 30, 39, 53, 64,
|
||||
20, 20, 26, 30, 39, 48, 64, 64,
|
||||
26, 26, 30, 39, 48, 63, 64, 64,
|
||||
38, 35, 46, 53, 64, 64, 64, 64,
|
||||
48, 43, 53, 64, 64, 64, 64, 64]
|
||||
]},
|
||||
'web_high': {'subsampling': 0, # "4:4:4"
|
||||
'quantization': [
|
||||
[ 6, 4, 4, 6, 9, 11, 12, 16,
|
||||
4, 5, 5, 6, 8, 10, 12, 12,
|
||||
4, 5, 5, 6, 10, 12, 14, 19,
|
||||
6, 6, 6, 11, 12, 15, 19, 28,
|
||||
9, 8, 10, 12, 16, 20, 27, 31,
|
||||
11, 10, 12, 15, 20, 27, 31, 31,
|
||||
12, 12, 14, 19, 27, 31, 31, 31,
|
||||
16, 12, 19, 28, 31, 31, 31, 31],
|
||||
[ 7, 7, 13, 24, 26, 31, 31, 31,
|
||||
7, 12, 16, 21, 31, 31, 31, 31,
|
||||
13, 16, 17, 31, 31, 31, 31, 31,
|
||||
24, 21, 31, 31, 31, 31, 31, 31,
|
||||
26, 31, 31, 31, 31, 31, 31, 31,
|
||||
31, 31, 31, 31, 31, 31, 31, 31,
|
||||
31, 31, 31, 31, 31, 31, 31, 31,
|
||||
31, 31, 31, 31, 31, 31, 31, 31]
|
||||
]},
|
||||
'web_very_high': {'subsampling': 0, # "4:4:4"
|
||||
'quantization': [
|
||||
[ 2, 2, 2, 2, 3, 4, 5, 6,
|
||||
2, 2, 2, 2, 3, 4, 5, 6,
|
||||
2, 2, 2, 2, 4, 5, 7, 9,
|
||||
2, 2, 2, 4, 5, 7, 9, 12,
|
||||
3, 3, 4, 5, 8, 10, 12, 12,
|
||||
4, 4, 5, 7, 10, 12, 12, 12,
|
||||
5, 5, 7, 9, 12, 12, 12, 12,
|
||||
6, 6, 9, 12, 12, 12, 12, 12],
|
||||
[ 3, 3, 5, 9, 13, 15, 15, 15,
|
||||
3, 4, 6, 11, 14, 12, 12, 12,
|
||||
5, 6, 9, 14, 12, 12, 12, 12,
|
||||
9, 11, 14, 12, 12, 12, 12, 12,
|
||||
13, 14, 12, 12, 12, 12, 12, 12,
|
||||
15, 12, 12, 12, 12, 12, 12, 12,
|
||||
15, 12, 12, 12, 12, 12, 12, 12,
|
||||
15, 12, 12, 12, 12, 12, 12, 12]
|
||||
]},
|
||||
'web_maximum': {'subsampling': 0, # "4:4:4"
|
||||
'quantization': [
|
||||
[ 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
1, 1, 1, 1, 1, 1, 1, 1,
|
||||
1, 1, 1, 1, 1, 1, 1, 2,
|
||||
1, 1, 1, 1, 1, 1, 2, 2,
|
||||
1, 1, 1, 1, 1, 2, 2, 3,
|
||||
1, 1, 1, 1, 2, 2, 3, 3,
|
||||
1, 1, 1, 2, 2, 3, 3, 3,
|
||||
1, 1, 2, 2, 3, 3, 3, 3],
|
||||
[ 1, 1, 1, 2, 2, 3, 3, 3,
|
||||
1, 1, 1, 2, 3, 3, 3, 3,
|
||||
1, 1, 1, 3, 3, 3, 3, 3,
|
||||
2, 2, 3, 3, 3, 3, 3, 3,
|
||||
2, 3, 3, 3, 3, 3, 3, 3,
|
||||
3, 3, 3, 3, 3, 3, 3, 3,
|
||||
3, 3, 3, 3, 3, 3, 3, 3,
|
||||
3, 3, 3, 3, 3, 3, 3, 3]
|
||||
]},
|
||||
'low': {'subsampling': 2, # "4:1:1"
|
||||
'quantization': [
|
||||
[18, 14, 14, 21, 30, 35, 34, 17,
|
||||
14, 16, 16, 19, 26, 23, 12, 12,
|
||||
14, 16, 17, 21, 23, 12, 12, 12,
|
||||
21, 19, 21, 23, 12, 12, 12, 12,
|
||||
30, 26, 23, 12, 12, 12, 12, 12,
|
||||
35, 23, 12, 12, 12, 12, 12, 12,
|
||||
34, 12, 12, 12, 12, 12, 12, 12,
|
||||
17, 12, 12, 12, 12, 12, 12, 12],
|
||||
[20, 19, 22, 27, 20, 20, 17, 17,
|
||||
19, 25, 23, 14, 14, 12, 12, 12,
|
||||
22, 23, 14, 14, 12, 12, 12, 12,
|
||||
27, 14, 14, 12, 12, 12, 12, 12,
|
||||
20, 14, 12, 12, 12, 12, 12, 12,
|
||||
20, 12, 12, 12, 12, 12, 12, 12,
|
||||
17, 12, 12, 12, 12, 12, 12, 12,
|
||||
17, 12, 12, 12, 12, 12, 12, 12]
|
||||
]},
|
||||
'medium': {'subsampling': 2, # "4:1:1"
|
||||
'quantization': [
|
||||
[12, 8, 8, 12, 17, 21, 24, 17,
|
||||
8, 9, 9, 11, 15, 19, 12, 12,
|
||||
8, 9, 10, 12, 19, 12, 12, 12,
|
||||
12, 11, 12, 21, 12, 12, 12, 12,
|
||||
17, 15, 19, 12, 12, 12, 12, 12,
|
||||
21, 19, 12, 12, 12, 12, 12, 12,
|
||||
24, 12, 12, 12, 12, 12, 12, 12,
|
||||
17, 12, 12, 12, 12, 12, 12, 12],
|
||||
[13, 11, 13, 16, 20, 20, 17, 17,
|
||||
11, 14, 14, 14, 14, 12, 12, 12,
|
||||
13, 14, 14, 14, 12, 12, 12, 12,
|
||||
16, 14, 14, 12, 12, 12, 12, 12,
|
||||
20, 14, 12, 12, 12, 12, 12, 12,
|
||||
20, 12, 12, 12, 12, 12, 12, 12,
|
||||
17, 12, 12, 12, 12, 12, 12, 12,
|
||||
17, 12, 12, 12, 12, 12, 12, 12]
|
||||
]},
|
||||
'high': {'subsampling': 0, # "4:4:4"
|
||||
'quantization': [
|
||||
[ 6, 4, 4, 6, 9, 11, 12, 16,
|
||||
4, 5, 5, 6, 8, 10, 12, 12,
|
||||
4, 5, 5, 6, 10, 12, 12, 12,
|
||||
6, 6, 6, 11, 12, 12, 12, 12,
|
||||
9, 8, 10, 12, 12, 12, 12, 12,
|
||||
11, 10, 12, 12, 12, 12, 12, 12,
|
||||
12, 12, 12, 12, 12, 12, 12, 12,
|
||||
16, 12, 12, 12, 12, 12, 12, 12],
|
||||
[ 7, 7, 13, 24, 20, 20, 17, 17,
|
||||
7, 12, 16, 14, 14, 12, 12, 12,
|
||||
13, 16, 14, 14, 12, 12, 12, 12,
|
||||
24, 14, 14, 12, 12, 12, 12, 12,
|
||||
20, 14, 12, 12, 12, 12, 12, 12,
|
||||
20, 12, 12, 12, 12, 12, 12, 12,
|
||||
17, 12, 12, 12, 12, 12, 12, 12,
|
||||
17, 12, 12, 12, 12, 12, 12, 12]
|
||||
]},
|
||||
'maximum': {'subsampling': 0, # "4:4:4"
|
||||
'quantization': [
|
||||
[ 2, 2, 2, 2, 3, 4, 5, 6,
|
||||
2, 2, 2, 2, 3, 4, 5, 6,
|
||||
2, 2, 2, 2, 4, 5, 7, 9,
|
||||
2, 2, 2, 4, 5, 7, 9, 12,
|
||||
3, 3, 4, 5, 8, 10, 12, 12,
|
||||
4, 4, 5, 7, 10, 12, 12, 12,
|
||||
5, 5, 7, 9, 12, 12, 12, 12,
|
||||
6, 6, 9, 12, 12, 12, 12, 12],
|
||||
[ 3, 3, 5, 9, 13, 15, 15, 15,
|
||||
3, 4, 6, 10, 14, 12, 12, 12,
|
||||
5, 6, 9, 14, 12, 12, 12, 12,
|
||||
9, 10, 14, 12, 12, 12, 12, 12,
|
||||
13, 14, 12, 12, 12, 12, 12, 12,
|
||||
15, 12, 12, 12, 12, 12, 12, 12,
|
||||
15, 12, 12, 12, 12, 12, 12, 12,
|
||||
15, 12, 12, 12, 12, 12, 12, 12]
|
||||
]},
|
||||
}
|
|
@ -1,70 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# Basic McIdas support for PIL
|
||||
#
|
||||
# History:
|
||||
# 1997-05-05 fl Created (8-bit images only)
|
||||
# 2009-03-08 fl Added 16/32-bit support.
|
||||
#
|
||||
# Thanks to Richard Jones and Craig Swank for specs and samples.
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1997.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
__version__ = "0.2"
|
||||
|
||||
import struct
|
||||
from PIL import Image, ImageFile
|
||||
|
||||
def _accept(s):
|
||||
return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04"
|
||||
|
||||
##
|
||||
# Image plugin for McIdas area images.
|
||||
|
||||
class McIdasImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "MCIDAS"
|
||||
format_description = "McIdas area file"
|
||||
|
||||
def _open(self):
|
||||
|
||||
# parse area file directory
|
||||
s = self.fp.read(256)
|
||||
if not _accept(s) or len(s) != 256:
|
||||
raise SyntaxError("not an McIdas area file")
|
||||
|
||||
self.area_descriptor_raw = s
|
||||
self.area_descriptor = w = [0] + list(struct.unpack("!64i", s))
|
||||
|
||||
# get mode
|
||||
if w[11] == 1:
|
||||
mode = rawmode = "L"
|
||||
elif w[11] == 2:
|
||||
# FIXME: add memory map support
|
||||
mode = "I"; rawmode = "I;16B"
|
||||
elif w[11] == 4:
|
||||
# FIXME: add memory map support
|
||||
mode = "I"; rawmode = "I;32B"
|
||||
else:
|
||||
raise SyntaxError("unsupported McIdas format")
|
||||
|
||||
self.mode = mode
|
||||
self.size = w[10], w[9]
|
||||
|
||||
offset = w[34] + w[15]
|
||||
stride = w[15] + w[10]*w[11]*w[14]
|
||||
|
||||
self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))]
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# registry
|
||||
|
||||
Image.register_open("MCIDAS", McIdasImageFile, _accept)
|
||||
|
||||
# no default extension
|
|
@ -1,95 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# Microsoft Image Composer support for PIL
|
||||
#
|
||||
# Notes:
|
||||
# uses TiffImagePlugin.py to read the actual image streams
|
||||
#
|
||||
# History:
|
||||
# 97-01-20 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1997.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
__version__ = "0.1"
|
||||
|
||||
|
||||
from PIL import Image, TiffImagePlugin
|
||||
from PIL.OleFileIO import *
|
||||
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:8] == MAGIC
|
||||
|
||||
##
|
||||
# Image plugin for Microsoft's Image Composer file format.
|
||||
|
||||
class MicImageFile(TiffImagePlugin.TiffImageFile):
|
||||
|
||||
format = "MIC"
|
||||
format_description = "Microsoft Image Composer"
|
||||
|
||||
def _open(self):
|
||||
|
||||
# read the OLE directory and see if this is a likely
|
||||
# to be a Microsoft Image Composer file
|
||||
|
||||
try:
|
||||
self.ole = OleFileIO(self.fp)
|
||||
except IOError:
|
||||
raise SyntaxError("not an MIC file; invalid OLE file")
|
||||
|
||||
# find ACI subfiles with Image members (maybe not the
|
||||
# best way to identify MIC files, but what the... ;-)
|
||||
|
||||
self.images = []
|
||||
for file in self.ole.listdir():
|
||||
if file[1:] and file[0][-4:] == ".ACI" and file[1] == "Image":
|
||||
self.images.append(file)
|
||||
|
||||
# if we didn't find any images, this is probably not
|
||||
# an MIC file.
|
||||
if not self.images:
|
||||
raise SyntaxError("not an MIC file; no image entries")
|
||||
|
||||
self.__fp = self.fp
|
||||
self.frame = 0
|
||||
|
||||
if len(self.images) > 1:
|
||||
self.category = Image.CONTAINER
|
||||
|
||||
self.seek(0)
|
||||
|
||||
def seek(self, frame):
|
||||
|
||||
try:
|
||||
filename = self.images[frame]
|
||||
except IndexError:
|
||||
raise EOFError("no such frame")
|
||||
|
||||
self.fp = self.ole.openstream(filename)
|
||||
|
||||
TiffImagePlugin.TiffImageFile._open(self)
|
||||
|
||||
self.frame = frame
|
||||
|
||||
def tell(self):
|
||||
|
||||
return self.frame
|
||||
|
||||
#
|
||||
# --------------------------------------------------------------------
|
||||
|
||||
Image.register_open("MIC", MicImageFile, _accept)
|
||||
|
||||
Image.register_extension("MIC", ".mic")
|
|
@ -1,83 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# MPEG file handling
|
||||
#
|
||||
# History:
|
||||
# 95-09-09 fl Created
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1995.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
__version__ = "0.1"
|
||||
|
||||
from PIL import Image, ImageFile
|
||||
from PIL._binary import i8
|
||||
|
||||
#
|
||||
# Bitstream parser
|
||||
|
||||
class BitStream:
|
||||
|
||||
def __init__(self, fp):
|
||||
self.fp = fp
|
||||
self.bits = 0
|
||||
self.bitbuffer = 0
|
||||
|
||||
def next(self):
|
||||
return i8(self.fp.read(1))
|
||||
|
||||
def peek(self, bits):
|
||||
while self.bits < bits:
|
||||
c = self.next()
|
||||
if c < 0:
|
||||
self.bits = 0
|
||||
continue
|
||||
self.bitbuffer = (self.bitbuffer << 8) + c
|
||||
self.bits += 8
|
||||
return self.bitbuffer >> (self.bits - bits) & (1 << bits) - 1
|
||||
|
||||
def skip(self, bits):
|
||||
while self.bits < bits:
|
||||
self.bitbuffer = (self.bitbuffer << 8) + i8(self.fp.read(1))
|
||||
self.bits += 8
|
||||
self.bits = self.bits - bits
|
||||
|
||||
def read(self, bits):
|
||||
v = self.peek(bits)
|
||||
self.bits = self.bits - bits
|
||||
return v
|
||||
|
||||
##
|
||||
# Image plugin for MPEG streams. This plugin can identify a stream,
|
||||
# but it cannot read it.
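#
# A minimal identification sketch ("movie.mpg" is only a placeholder name):
#
#     from PIL import Image
#
#     im = Image.open("movie.mpg")
#     print(im.format, im.size)  # e.g. "MPEG" (720, 576)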
|
||||
|
||||
class MpegImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "MPEG"
|
||||
format_description = "MPEG"
|
||||
|
||||
def _open(self):
|
||||
|
||||
s = BitStream(self.fp)
|
||||
|
||||
if s.read(32) != 0x1B3:
|
||||
raise SyntaxError("not an MPEG file")
|
||||
|
||||
self.mode = "RGB"
|
||||
self.size = s.read(12), s.read(12)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Registry stuff
|
||||
|
||||
Image.register_open("MPEG", MpegImageFile)
|
||||
|
||||
Image.register_extension("MPEG", ".mpg")
|
||||
Image.register_extension("MPEG", ".mpeg")
|
||||
|
||||
Image.register_mime("MPEG", "video/mpeg")
|
|
@ -1,101 +0,0 @@
|
|||
#
|
||||
# The Python Imaging Library.
|
||||
# $Id$
|
||||
#
|
||||
# MSP file handling
|
||||
#
|
||||
# This is the format used by the Paint program in Windows 1 and 2.
|
||||
#
|
||||
# History:
|
||||
# 95-09-05 fl Created
|
||||
# 97-01-03 fl Read/write MSP images
|
||||
#
|
||||
# Copyright (c) Secret Labs AB 1997.
|
||||
# Copyright (c) Fredrik Lundh 1995-97.
|
||||
#
|
||||
# See the README file for information on usage and redistribution.
|
||||
#
|
||||
|
||||
|
||||
__version__ = "0.1"
|
||||
|
||||
from PIL import Image, ImageFile, _binary
|
||||
|
||||
|
||||
#
|
||||
# read MSP files
|
||||
|
||||
i16 = _binary.i16le
|
||||
|
||||
def _accept(prefix):
|
||||
return prefix[:4] in [b"DanM", b"LinS"]
|
||||
|
||||
##
|
||||
# Image plugin for Windows MSP images. This plugin supports both
|
||||
# uncompressed (Windows 1.0) and compressed (Windows 2.0) variants.
|
||||
|
||||
class MspImageFile(ImageFile.ImageFile):
|
||||
|
||||
format = "MSP"
|
||||
format_description = "Windows Paint"
|
||||
|
||||
def _open(self):
|
||||
|
||||
# Header
|
||||
s = self.fp.read(32)
|
||||
if s[:4] not in [b"DanM", b"LinS"]:
|
||||
raise SyntaxError("not an MSP file")
|
||||
|
||||
# Header checksum
|
||||
sum = 0
|
||||
for i in range(0, 32, 2):
|
||||
sum = sum ^ i16(s[i:i+2])
|
||||
if sum != 0:
|
||||
raise SyntaxError("bad MSP checksum")
|
||||
|
||||
self.mode = "1"
|
||||
self.size = i16(s[4:]), i16(s[6:])
|
||||
|
||||
if s[:4] == b"DanM":
|
||||
self.tile = [("raw", (0,0)+self.size, 32, ("1", 0, 1))]
|
||||
else:
|
||||
self.tile = [("msp", (0,0)+self.size, 32+2*self.size[1], None)]
|
||||
|
||||
#
|
||||
# write MSP files (uncompressed only)
|
||||
|
||||
o16 = _binary.o16le
|
||||
|
||||
def _save(im, fp, filename):
|
||||
|
||||
if im.mode != "1":
|
||||
raise IOError("cannot write mode %s as MSP" % im.mode)
|
||||
|
||||
# create MSP header
|
||||
header = [0] * 16
|
||||
|
||||
header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1
|
||||
header[2], header[3] = im.size
|
||||
header[4], header[5] = 1, 1
|
||||
header[6], header[7] = 1, 1
|
||||
header[8], header[9] = im.size
|
||||
|
||||
sum = 0
|
||||
for h in header:
|
||||
sum = sum ^ h
|
||||
header[12] = sum # FIXME: is this the right field?
|
||||
|
||||
# header
|
||||
for h in header:
|
||||
fp.write(o16(h))
|
||||
|
||||
# image body
|
||||
ImageFile._save(im, fp, [("raw", (0,0)+im.size, 32, ("1", 0, 1))])
|
||||
|
||||
#
|
||||
# registry
|
||||
|
||||
Image.register_open("MSP", MspImageFile, _accept)
|
||||
Image.register_save("MSP", _save)
|
||||
|
||||
Image.register_extension("MSP", ".msp")
|
|
@ -1,351 +0,0 @@
|
|||
OleFileIO_PL
|
||||
============
|
||||
|
||||
[OleFileIO_PL](http://www.decalage.info/python/olefileio) is a Python module to parse and read [Microsoft OLE2 files (also called Structured Storage, Compound File Binary Format or Compound Document File Format)](http://en.wikipedia.org/wiki/Compound_File_Binary_Format), such as Microsoft Office documents, Image Composer and FlashPix files, Outlook messages, StickyNotes, several Microscopy file formats ...
|
||||
|
||||
This is an improved version of the OleFileIO module from [PIL](http://www.pythonware.com/products/pil/index.htm), the excellent Python Imaging Library, created and maintained by Fredrik Lundh. The API is still compatible with PIL, but since 2005 I have improved the internal implementation significantly, with new features, bugfixes and a more robust design.
|
||||
|
||||
As far as I know, this module is now the most complete and robust Python implementation to read MS OLE2 files, portable on several operating systems. (please tell me if you know other similar Python modules)
|
||||
|
||||
OleFileIO_PL can be used as an independent module or with PIL. The goal is to have it integrated into [Pillow](http://python-pillow.github.io/), the friendly fork of PIL.
|
||||
|
||||
OleFileIO\_PL is mostly meant for developers. If you are looking for tools to analyze OLE files or to extract data, then please also check [python-oletools](http://www.decalage.info/python/oletools), which are built upon OleFileIO_PL.
|
||||
|
||||
News
|
||||
----
|
||||
|
||||
Follow all updates and news on Twitter: <https://twitter.com/decalage2>
|
||||
|
||||
- **2014-02-04 v0.30**: now compatible with Python 3.x, thanks to Martin Panter who did most of the hard work.
|
||||
- 2013-07-24 v0.26: added methods to parse stream/storage timestamps, improved listdir to include storages, fixed parsing of direntry timestamps
|
||||
- 2013-05-27 v0.25: improved metadata extraction, properties parsing and exception handling, fixed [issue #12](https://bitbucket.org/decalage/olefileio_pl/issue/12/error-when-converting-timestamps-in-ole)
|
||||
- 2013-05-07 v0.24: new features to extract metadata (get\_metadata method and OleMetadata class), improved getproperties to convert timestamps to Python datetime
|
||||
- 2012-10-09: published [python-oletools](http://www.decalage.info/python/oletools), a package of analysis tools based on OleFileIO_PL
|
||||
- 2012-09-11 v0.23: added support for file-like objects, fixed [issue #8](https://bitbucket.org/decalage/olefileio_pl/issue/8/bug-with-file-object)
|
||||
- 2012-02-17 v0.22: fixed issues #7 (bug in getproperties) and #2 (added close method)
|
||||
- 2011-10-20: code hosted on bitbucket to ease contributions and bug tracking
|
||||
- 2010-01-24 v0.21: fixed support for big-endian CPUs, such as PowerPC Macs.
|
||||
- 2009-12-11 v0.20: small bugfix in OleFileIO.open when filename is not plain str.
|
||||
- 2009-12-10 v0.19: fixed support for 64 bits platforms (thanks to Ben G. and Martijn for reporting the bug)
|
||||
- see changelog in source code for more info.
|
||||
|
||||
Download
|
||||
--------
|
||||
|
||||
The archive is available on [the project page](https://bitbucket.org/decalage/olefileio_pl/downloads).
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
- Parse and read any OLE file such as Microsoft Office 97-2003 legacy document formats (Word .doc, Excel .xls, PowerPoint .ppt, Visio .vsd, Project .mpp), Image Composer and FlashPix files, Outlook messages, StickyNotes, Zeiss AxioVision ZVI files, Olympus FluoView OIB files, ...
|
||||
- List all the streams and storages contained in an OLE file
|
||||
- Open streams as files
|
||||
- Parse and read property streams, containing metadata of the file
|
||||
- Portable, pure Python module, no dependency
|
||||
|
||||
|
||||
Main improvements over the original version of OleFileIO in PIL:
|
||||
----------------------------------------------------------------
|
||||
|
||||
- Compatible with Python 3.x and 2.6+
|
||||
- Many bug fixes
|
||||
- Support for files larger than 6.8MB
|
||||
- Support for 64 bits platforms and big-endian CPUs
|
||||
- Robust: many checks to detect malformed files
|
||||
- Runtime option to choose if malformed files should be parsed or raise exceptions
|
||||
- Improved API
|
||||
- Metadata extraction, stream/storage timestamps (e.g. for document forensics)
|
||||
- Can open file-like objects
|
||||
- Added setup.py and install.bat to ease installation
|
||||
- More convenient slash-based syntax for stream paths
|
||||
|
||||
|
||||
|
||||
How to use this module
|
||||
----------------------
|
||||
|
||||
OleFileIO_PL can be used as an independent module or with PIL. The main functions and methods are explained below.
|
||||
|
||||
For more information, see also the file **OleFileIO_PL.html**, sample code at the end of the module itself, and docstrings within the code.
|
||||
|
||||
### About the structure of OLE files ###
|
||||
|
||||
An OLE file can be seen as a mini file system or a Zip archive: It contains **streams** of data that look like files embedded within the OLE file. Each stream has a name. For example, the main stream of a MS Word document containing its text is named "WordDocument".
|
||||
|
||||
An OLE file can also contain **storages**. A storage is a folder that contains streams or other storages. For example, a MS Word document with VBA macros has a storage called "Macros".
|
||||
|
||||
Special streams can contain **properties**. A property is a specific value that can be used to store information such as the metadata of a document (title, author, creation date, etc). Property stream names usually start with the character '\x05'.
|
||||
|
||||
For example, a typical MS Word document may look like this:
|
||||
|
||||
\x05DocumentSummaryInformation (stream)
|
||||
\x05SummaryInformation (stream)
|
||||
WordDocument (stream)
|
||||
Macros (storage)
|
||||
PROJECT (stream)
|
||||
PROJECTwm (stream)
|
||||
VBA (storage)
|
||||
Module1 (stream)
|
||||
ThisDocument (stream)
|
||||
_VBA_PROJECT (stream)
|
||||
dir (stream)
|
||||
ObjectPool (storage)
|
||||
|
||||
|
||||
|
||||
### Import OleFileIO_PL ###
|
||||
|
||||
:::python
|
||||
import OleFileIO_PL
|
||||
|
||||
As of version 0.30, the code has been changed to be compatible with Python 3.x. As a consequence, compatibility with Python 2.5 or older is not provided anymore. However, a copy of v0.26 is available as OleFileIO_PL2.py. If your application needs to be compatible with Python 2.5 or older, you may use the following code to load the old version when needed:
|
||||
|
||||
:::python
|
||||
try:
|
||||
import OleFileIO_PL
|
||||
except:
|
||||
import OleFileIO_PL2 as OleFileIO_PL
|
||||
|
||||
If you think OleFileIO_PL should stay compatible with Python 2.5 or older, please [contact me](http://decalage.info/contact).
|
||||
|
||||
|
||||
### Test if a file is an OLE container ###
|
||||
|
||||
Use isOleFile to check if the first bytes of the file contain the Magic for OLE files, before opening it. isOleFile returns True if it is an OLE file, False otherwise (new in v0.16).
|
||||
|
||||
:::python
|
||||
assert OleFileIO_PL.isOleFile('myfile.doc')
|
||||
|
||||
|
||||
### Open an OLE file from disk ###
|
||||
|
||||
Create an OleFileIO object with the file path as parameter:
|
||||
|
||||
:::python
|
||||
ole = OleFileIO_PL.OleFileIO('myfile.doc')
|
||||
|
||||
### Open an OLE file from a file-like object ###
|
||||
|
||||
This is useful if the file is not on disk, e.g. already stored in a string or as a file-like object.
|
||||
|
||||
:::python
|
||||
ole = OleFileIO_PL.OleFileIO(f)
|
||||
|
||||
For example the code below reads a file into a string, then uses BytesIO to turn it into a file-like object.
|
||||
|
||||
:::python
|
||||
data = open('myfile.doc', 'rb').read()
|
||||
f = io.BytesIO(data) # or StringIO.StringIO for Python 2.x
|
||||
ole = OleFileIO_PL.OleFileIO(f)
|
||||
|
||||
### How to handle malformed OLE files ###
|
||||
|
||||
By default, the parser is configured to be as robust and permissive as possible, so that most malformed OLE files can still be parsed. Only fatal errors will raise an exception. It is possible to tell the parser to be stricter, so that it raises exceptions for files that do not fully conform to the OLE specification, using the raise_defects option (new in v0.14):
|
||||
|
||||
:::python
|
||||
ole = OleFileIO_PL.OleFileIO('myfile.doc', raise_defects=DEFECT_INCORRECT)
|
||||
|
||||
When the parsing is done, the list of non-fatal issues detected is available as a list in the parsing_issues attribute of the OleFileIO object (new in 0.25):
|
||||
|
||||
:::python
|
||||
print('Non-fatal issues raised during parsing:')
|
||||
if ole.parsing_issues:
|
||||
for exctype, msg in ole.parsing_issues:
|
||||
print('- %s: %s' % (exctype.__name__, msg))
|
||||
else:
|
||||
print('None')
|
||||
|
||||
|
||||
### Syntax for stream and storage path ###
|
||||
|
||||
Two different syntaxes are allowed for methods that need or return the path of streams and storages:
|
||||
|
||||
1) Either a **list of strings** including all the storages from the root up to the stream/storage name. For example a stream called "WordDocument" at the root will have ['WordDocument'] as full path. A stream called "ThisDocument" located in the storage "Macros/VBA" will be ['Macros', 'VBA', 'ThisDocument']. This is the original syntax from PIL. While hard to read and not very convenient, this syntax works in all cases.
|
||||
|
||||
2) Or a **single string with slashes** to separate storage and stream names (similar to the Unix path syntax). The previous examples would be 'WordDocument' and 'Macros/VBA/ThisDocument'. This syntax is easier, but may fail if a stream or storage name contains a slash. (new in v0.15)
|
||||
|
||||
Both are case-insensitive.
|
||||
|
||||
Switching between the two is easy:
|
||||
|
||||
:::python
|
||||
slash_path = '/'.join(list_path)
|
||||
list_path = slash_path.split('/')
|
||||
|
||||
|
||||
### Get the list of streams ###

listdir() returns a list of all the streams contained in the OLE file, including those stored in storages. Each stream is itself listed as a list, as described above.

    :::python
    print(ole.listdir())

Sample result:

    :::python
    [['\x01CompObj'], ['\x05DocumentSummaryInformation'], ['\x05SummaryInformation'],
     ['1Table'], ['Macros', 'PROJECT'], ['Macros', 'PROJECTwm'],
     ['Macros', 'VBA', 'Module1'], ['Macros', 'VBA', 'ThisDocument'],
     ['Macros', 'VBA', '_VBA_PROJECT'], ['Macros', 'VBA', 'dir'],
     ['ObjectPool'], ['WordDocument']]

As an option it is possible to choose whether storages should also be listed, with or without streams (new in v0.26):

    :::python
    ole.listdir(streams=False, storages=True)

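For example, here is a small sketch that prints each stream path in the slash-separated form described in the previous section:

    :::python
    for entry in ole.listdir():
        print('/'.join(entry))
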
### Test if known streams/storages exist ###

exists(path) checks if a given stream or storage exists in the OLE file (new in v0.16).

    :::python
    if ole.exists('worddocument'):
        print("This is a Word document.")
        if ole.exists('macros/vba'):
            print("This document seems to contain VBA macros.")

### Read data from a stream ###

openstream(path) opens a stream as a file-like object.

The following example extracts the "Pictures" stream from a PPT file:

    :::python
    pics = ole.openstream('Pictures')
    data = pics.read()

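Combined with exists() described above, a stream can be read only when it is actually present (a minimal sketch):

    :::python
    if ole.exists('WordDocument'):
        data = ole.openstream('WordDocument').read()
        print('WordDocument is %d bytes long' % len(data))
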
### Get information about a stream/storage ###

Several methods can provide the size, type and timestamps of a given stream/storage:

get_size(path) returns the size of a stream in bytes (new in v0.16):

    :::python
    s = ole.get_size('WordDocument')

get_type(path) returns the type of a stream/storage, as one of the following constants: STGTY\_STREAM for a stream, STGTY\_STORAGE for a storage, STGTY\_ROOT for the root entry, and False for a non-existing path (new in v0.15).

    :::python
    t = ole.get_type('WordDocument')

get\_ctime(path) and get\_mtime(path) return the creation and modification timestamps of a stream/storage, as a Python datetime object with UTC timezone. Please note that these timestamps are only present if the application that created the OLE file explicitly stored them, which is rarely the case. When they are not present, these methods return None (new in v0.26).

    :::python
    c = ole.get_ctime('WordDocument')
    m = ole.get_mtime('WordDocument')

The root storage is a special case: you can get its creation and modification timestamps using the OleFileIO.root attribute (new in v0.26):

    :::python
    c = ole.root.getctime()
    m = ole.root.getmtime()

### Extract metadata ###

get_metadata() will check if standard property streams exist, parse all the properties they contain, and return an OleMetadata object with the found properties as attributes (new in v0.24).

    :::python
    meta = ole.get_metadata()
    print('Author:', meta.author)
    print('Title:', meta.title)
    print('Creation date:', meta.create_time)
    # print all metadata:
    meta.dump()

Available attributes include:

    codepage, title, subject, author, keywords, comments, template,
    last_saved_by, revision_number, total_edit_time, last_printed, create_time,
    last_saved_time, num_pages, num_words, num_chars, thumbnail,
    creating_application, security, codepage_doc, category, presentation_target,
    bytes, lines, paragraphs, slides, notes, hidden_slides, mm_clips,
    scale_crop, heading_pairs, titles_of_parts, manager, company, links_dirty,
    chars_with_spaces, unused, shared_doc, link_base, hlinks, hlinks_changed,
    version, dig_sig, content_type, content_status, language, doc_version

See the source code of the OleMetadata class for more information.

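For example, a few of the attributes listed above can be printed in a loop (a small sketch; attributes that were not found in the file are typically None):

    :::python
    for attrib in ('author', 'title', 'create_time', 'num_pages'):
        print('%s: %r' % (attrib, getattr(meta, attrib, None)))
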
### Parse a property stream ###

getproperties(path) can be used to parse any property stream that is not handled by get\_metadata. It returns a dictionary indexed by integers. Each integer is the index of the property, pointing to its value. For example in the standard property stream '\x05SummaryInformation', the document title is property #2, and the subject is #3.

    :::python
    p = ole.getproperties('specialprops')

By default, as in the original PIL version, timestamp properties are converted into a number of seconds since Jan 1, 1601. With the option convert\_time, you can obtain more convenient Python datetime objects (UTC timezone). If some time properties should not be converted (such as the total editing time in '\x05SummaryInformation'), the list of their indexes can be passed as no_conversion (new in v0.25):

    :::python
    p = ole.getproperties('specialprops', convert_time=True, no_conversion=[10])

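Since the result is a plain dictionary, its content can be inspected with a simple loop (a sketch based on the description above):

    :::python
    for prop_id, value in sorted(p.items()):
        print('property #%d = %r' % (prop_id, value))
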
### Close the OLE file ###

Unless your application is a simple script that terminates after processing an OLE file, do not forget to close each OleFileIO object after parsing, in order to release the underlying file on disk. (new in v0.22)

    :::python
    ole.close()

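A common pattern is to make sure the file is always closed, even if an error occurs halfway through the processing (a sketch using try/finally):

    :::python
    ole = OleFileIO_PL.OleFileIO('myfile.doc')
    try:
        print(ole.listdir())
    finally:
        ole.close()
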
### Use OleFileIO_PL as a script ###

OleFileIO_PL can also be used as a script from the command line to display the structure of an OLE file and its metadata, for example:

    OleFileIO_PL.py myfile.doc

You can use the option -c to check that all streams can be read fully, and -d to generate very verbose debugging information.

## Real-life examples ##

A real-life example: [using OleFileIO_PL for malware analysis and forensics](http://blog.gregback.net/2011/03/using-remnux-for-forensic-puzzle-6/).

See also [this paper](https://computer-forensics.sans.org/community/papers/gcfa/grow-forensic-tools-taxonomy-python-libraries-helpful-forensic-analysis_6879) about Python tools for forensics, which features OleFileIO_PL.

About Python 2 and 3
--------------------

OleFileIO\_PL used to support only Python 2.x. As of version 0.30, the code has been changed to be compatible with Python 3.x. As a consequence, compatibility with Python 2.5 or older is no longer provided. However, a copy of v0.26 is available as OleFileIO_PL2.py; see the "import" section above for a workaround.

If you think OleFileIO_PL should stay compatible with Python 2.5 or older, please [contact me](http://decalage.info/contact).

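One possible form of such a workaround is sketched below (an illustration only, not necessarily the exact snippet from the "import" section mentioned above):

    :::python
    import sys
    if sys.version_info[0:2] >= (2, 6):
        import OleFileIO_PL
    else:
        # fall back to the old v0.26 module for Python 2.5 and older:
        import OleFileIO_PL2 as OleFileIO_PL
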
How to contribute
-----------------

The code is available in [a Mercurial repository on Bitbucket](https://bitbucket.org/decalage/olefileio_pl). You may use it to submit enhancements or to report any issue.

If you would like to help us improve this module, or simply provide feedback, please [contact me](http://decalage.info/contact). You can help in many ways:

- test this module on different platforms / Python versions
- find and report bugs
- improve the documentation, code samples and docstrings
- write unit test cases
- provide tricky malformed files

How to report bugs
------------------

To report a bug, for example a normal file which is not parsed correctly, please use the [issue reporting page](https://bitbucket.org/decalage/olefileio_pl/issues?status=new&status=open), or if you prefer to do it privately, use this [contact form](http://decalage.info/contact). Please provide all the information about the context and how to reproduce the bug.

If possible, please attach the debugging output of OleFileIO_PL. To produce it, run the following command:

    OleFileIO_PL.py -d -c file >debug.txt

License
-------

OleFileIO_PL is open-source.

OleFileIO_PL changes are Copyright (c) 2005-2014 by Philippe Lagadec.

The Python Imaging Library (PIL) is

- Copyright (c) 1997-2005 by Secret Labs AB
- Copyright (c) 1995-2005 by Fredrik Lundh

By obtaining, using, and/or copying this software and/or its associated documentation, you agree that you have read, understood, and will comply with the following terms and conditions:

Permission to use, copy, modify, and distribute this software and its associated documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies, and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Secret Labs AB or the author not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission.

SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
File diff suppressed because it is too large
@ -1,236 +0,0 @@
#
# The Python Imaging Library
# $Id$
#
# simple postscript graphics interface
#
# History:
# 1996-04-20 fl Created
# 1999-01-10 fl Added gsave/grestore to image method
# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge)
#
# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved.
# Copyright (c) 1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#

from __future__ import print_function

from PIL import EpsImagePlugin

##
# Simple Postscript graphics interface.

class PSDraw:
    """
    Sets up printing to the given file. If **fp** is omitted,
    :py:attr:`sys.stdout` is assumed.
    """

    def __init__(self, fp=None):
        if not fp:
            import sys
            fp = sys.stdout
        self.fp = fp

    def begin_document(self, id=None):
        """Set up printing of a document. (Write Postscript DSC header.)"""
        # FIXME: incomplete
        self.fp.write("%!PS-Adobe-3.0\n"
                      "save\n"
                      "/showpage { } def\n"
                      "%%EndComments\n"
                      "%%BeginDocument\n")
        # self.fp.write(ERROR_PS)  # debugging!
        self.fp.write(EDROFF_PS)
        self.fp.write(VDI_PS)
        self.fp.write("%%EndProlog\n")
        self.isofont = {}

    def end_document(self):
        """Ends printing. (Write Postscript DSC footer.)"""
        self.fp.write("%%EndDocument\n"
                      "restore showpage\n"
                      "%%End\n")
        if hasattr(self.fp, "flush"):
            self.fp.flush()

    def setfont(self, font, size):
        """
        Selects which font to use.

        :param font: A Postscript font name
        :param size: Size in points.
        """
        if font not in self.isofont:
            # reencode font
            self.fp.write("/PSDraw-%s ISOLatin1Encoding /%s E\n" % (font, font))
            self.isofont[font] = 1
        # rough
        self.fp.write("/F0 %d /PSDraw-%s F\n" % (size, font))

    def setink(self, ink):
        """
        .. warning::

            This has been in the PIL API for ages but was never implemented.
        """
        print("*** NOT YET IMPLEMENTED ***")

    def line(self, xy0, xy1):
        """
        Draws a line between the two points. Coordinates are given in
        Postscript point coordinates (72 points per inch, (0, 0) is the lower
        left corner of the page).
        """
        xy = xy0 + xy1
        self.fp.write("%d %d %d %d Vl\n" % xy)

    def rectangle(self, box):
        """
        Draws a rectangle.

        :param box: A 4-tuple of integers whose order and function is currently
                    undocumented.

        Hint: the tuple is passed into this format string:

        .. code-block:: python

            %d %d M %d %d 0 Vr\n
        """
        self.fp.write("%d %d M %d %d 0 Vr\n" % box)

    def text(self, xy, text):
        """
        Draws text at the given position. You must use
        :py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method.
        """
        text = "\\(".join(text.split("("))
        text = "\\)".join(text.split(")"))
        xy = xy + (text,)
        self.fp.write("%d %d M (%s) S\n" % xy)

    def image(self, box, im, dpi=None):
        """Draw a PIL image, centered in the given box."""
        # default resolution depends on mode
        if not dpi:
            if im.mode == "1":
                dpi = 200  # fax
            else:
                dpi = 100  # greyscale
        # image size (on paper)
        x = float(im.size[0] * 72) / dpi
        y = float(im.size[1] * 72) / dpi
        # max allowed size
        xmax = float(box[2] - box[0])
        ymax = float(box[3] - box[1])
        if x > xmax:
            y = y * xmax / x
            x = xmax
        if y > ymax:
            x = x * ymax / y
            y = ymax
        dx = (xmax - x) / 2 + box[0]
        dy = (ymax - y) / 2 + box[1]
        self.fp.write("gsave\n%f %f translate\n" % (dx, dy))
        if (x, y) != im.size:
            # EpsImagePlugin._save prints the image at (0,0,xsize,ysize)
            sx = x / im.size[0]
            sy = y / im.size[1]
            self.fp.write("%f %f scale\n" % (sx, sy))
        EpsImagePlugin._save(im, self.fp, None, 0)
        self.fp.write("\ngrestore\n")

# --------------------------------------------------------------------
# Postscript driver

#
# EDROFF.PS -- Postscript driver for Edroff 2
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#

EDROFF_PS = """\
/S { show } bind def
/P { moveto show } bind def
/M { moveto } bind def
/X { 0 rmoveto } bind def
/Y { 0 exch rmoveto } bind def
/E { findfont
dup maxlength dict begin
{
1 index /FID ne { def } { pop pop } ifelse
} forall
/Encoding exch def
dup /FontName exch def
currentdict end definefont pop
} bind def
/F { findfont exch scalefont dup setfont
[ exch /setfont cvx ] cvx bind def
} bind def
"""

#
# VDI.PS -- Postscript driver for VDI meta commands
#
# History:
# 94-01-25 fl: created (edroff 2.04)
#
# Copyright (c) Fredrik Lundh 1994.
#

VDI_PS = """\
/Vm { moveto } bind def
/Va { newpath arcn stroke } bind def
/Vl { moveto lineto stroke } bind def
/Vc { newpath 0 360 arc closepath } bind def
/Vr { exch dup 0 rlineto
exch dup neg 0 exch rlineto
exch neg 0 rlineto
0 exch rlineto
100 div setgray fill 0 setgray } bind def
/Tm matrix def
/Ve { Tm currentmatrix pop
translate scale newpath 0 0 .5 0 360 arc closepath
Tm setmatrix
} bind def
/Vf { currentgray exch setgray fill setgray } bind def
"""

#
# ERROR.PS -- Error handler
#
# History:
# 89-11-21 fl: created (pslist 1.10)
#

ERROR_PS = """\
/landscape false def
/errorBUF 200 string def
/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def
errordict begin /handleerror {
initmatrix /Courier findfont 10 scalefont setfont
newpath 72 720 moveto $error begin /newerror false def
(PostScript Error) show errorNL errorNL
(Error: ) show
/errorname load errorBUF cvs show errorNL errorNL
(Command: ) show
/command load dup type /stringtype ne { errorBUF cvs } if show
errorNL errorNL
(VMstatus: ) show
vmstatus errorBUF cvs show ( bytes available, ) show
errorBUF cvs show ( bytes used at level ) show
errorBUF cvs show errorNL errorNL
(Operand stack: ) show errorNL /ostack load {
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall errorNL
(Execution stack: ) show errorNL /estack load {
dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL
} forall
end showpage
} def end
"""
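

# A minimal usage sketch (not part of the original module): write a one-page
# PostScript document to a hypothetical "psdraw_demo.ps" file, using only the
# PSDraw class and the driver strings defined above.
if __name__ == "__main__":
    with open("psdraw_demo.ps", "w") as demo_fp:
        ps = PSDraw(demo_fp)
        ps.begin_document()
        ps.setfont("Helvetica", 12)
        ps.text((72, 720), "PSDraw demo")
        ps.rectangle((72, 72, 288, 216))
        ps.end_document()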
@ -1,55 +0,0 @@
#
# Python Imaging Library
# $Id$
#
# stuff to read simple, teragon-style palette files
#
# History:
# 97-08-23 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#

from PIL._binary import o8

##
# File handler for Teragon-style palette files.

class PaletteFile:

    rawmode = "RGB"

    def __init__(self, fp):

        # default palette: greyscale ramp, stored as byte strings so that the
        # b"".join() below also works for entries not overridden by the file
        # (a list of (i, i, i) tuples would break that join)
        self.palette = [o8(i) * 3 for i in range(256)]

        while True:

            s = fp.readline()

            if not s:
                break
            if s[0:1] == b"#":
                continue
            if len(s) > 100:
                raise SyntaxError("bad palette file")

            v = [int(x) for x in s.split()]
            try:
                [i, r, g, b] = v
            except ValueError:
                [i, r] = v
                g = b = r

            if 0 <= i <= 255:
                self.palette[i] = o8(r) + o8(g) + o8(b)

        self.palette = b"".join(self.palette)

    def getpalette(self):

        return self.palette, self.rawmode
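

# A minimal usage sketch (not part of the original module): parse a hypothetical
# "palette.txt" file in the simple "index r g b" text format handled above.
if __name__ == "__main__":
    with open("palette.txt", "rb") as palette_fp:
        data, rawmode = PaletteFile(palette_fp).getpalette()
    print("%s palette, %d bytes" % (rawmode, len(data)))
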
Some files were not shown because too many files have changed in this diff.