Projects
openEuler:Mainline
python-cachetools
Sign Up
Log In
Username
Password
We truncated the diff of some files because they were too big. If you want to see the full diff for every file,
click here
.
Overview
Repositories
Revisions
Requests
Users
Attributes
Meta
Expand all
Collapse all
Changes of Revision 2
View file
_service:tar_scm:python-cachetools.spec
Changed
@@ -1,6 +1,6 @@ %global _empty_manifest_terminate_build 0 Name: python-cachetools -Version: 5.2.0 +Version: 5.3.0 Release: 1 Summary: Extensible memoizing collections and decorators License: MIT @@ -35,7 +35,7 @@ function decorator. %prep -%autosetup -n cachetools-5.2.0 +%autosetup -n cachetools-%{version} %build %py3_build @@ -80,6 +80,9 @@ %{_docdir}/* %changelog +* Fri Apr 07 2023 xu_ping <707078654@qq.com> - 5.3.0-1 +- Upgrade to version 5.3.0 + * Thu Jun 23 2022 SimpleUpdate Robot <tc@openeuler.org> - 5.2.0-1 - Upgrade to version 5.2.0
View file
_service
Changed
@@ -2,7 +2,7 @@ <service name="tar_scm"> <param name="scm">git</param> <param name="url">git@gitee.com:src-openeuler/python-cachetools.git</param> - <param name="revision">2ba2d428c6ccf73dcb3629e251d434e8d6ca57d1</param> + <param name="revision">master</param> <param name="exclude">*</param> <param name="extract">*</param> </service>
View file
_service:tar_scm:v5.2.0.tar.gz/.github/workflows/ci.yml -> _service:tar_scm:v5.3.0.tar.gz/.github/workflows/ci.yml
Changed
@@ -9,14 +9,14 @@ strategy: fail-fast: false matrix: - python: ["3.7", "3.8", "3.9", "3.10", "pypy-3.7", "pypy-3.8"] + python: ["3.7", "3.8", "3.9", "3.10", "3.11", "pypy-3.7", "pypy-3.8"] steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} - run: python -m pip install coverage tox - run: python -m tox - - uses: codecov/codecov-action@v1 + - uses: codecov/codecov-action@v3 with: name: ${{ matrix.python }}
View file
_service:tar_scm:v5.2.0.tar.gz/CHANGELOG.rst -> _service:tar_scm:v5.3.0.tar.gz/CHANGELOG.rst
Changed
@@ -1,3 +1,19 @@ +v5.3.0 (2023-01-22) +=================== + +- Add ``cache_info()`` function to ``@cached`` decorator. + + +v5.2.1 (2023-01-08) +=================== + +- Add support for Python 3.11. + +- Correct version information in RTD documentation. + +- ``badges/shields``: Change to GitHub workflow badge routes. + + v5.2.0 (2022-05-29) ===================
View file
_service:tar_scm:v5.2.0.tar.gz/README.rst -> _service:tar_scm:v5.3.0.tar.gz/README.rst
Changed
@@ -5,7 +5,7 @@ :target: https://pypi.org/project/cachetools/ :alt: Latest PyPI version -.. image:: https://img.shields.io/github/workflow/status/tkem/cachetools/CI +.. image:: https://img.shields.io/github/actions/workflow/status/tkem/cachetools/ci.yml :target: https://github.com/tkem/cachetools/actions/workflows/ci.yml :alt: CI build status @@ -99,7 +99,7 @@ License ------------------------------------------------------------------------ -Copyright (c) 2014-2022 Thomas Kemmer. +Copyright (c) 2014-2023 Thomas Kemmer. Licensed under the `MIT License`_.
View file
_service:tar_scm:v5.2.0.tar.gz/docs/conf.py -> _service:tar_scm:v5.3.0.tar.gz/docs/conf.py
Changed
@@ -5,19 +5,10 @@ sys.path.insert(0, str((basedir / "src").resolve())) - -def get_version(): - import configparser - - cp = configparser.ConfigParser() - cp.read(basedir / "setup.cfg") - return cp["metadata"]["version"] - - project = "cachetools" -copyright = "2014-2022 Thomas Kemmer" -version = get_version() -release = version +copyright = "2014-2023 Thomas Kemmer" +version = "5.3" +release = "5.3.0" extensions = ["sphinx.ext.autodoc",
View file
_service:tar_scm:v5.2.0.tar.gz/docs/index.rst -> _service:tar_scm:v5.3.0.tar.gz/docs/index.rst
Changed
@@ -256,7 +256,7 @@ >>> fib(42) 267914296 -.. decorator:: cached(cache, key=cachetools.keys.hashkey, lock=None) +.. decorator:: cached(cache, key=cachetools.keys.hashkey, lock=None, info=False) Decorator to wrap a function with a memoizing callable that saves results in a cache. @@ -321,6 +321,31 @@ # no need for get_pep.cache_lock here get_pep.cache_clear() + If `info` is set to :const:`True`, the wrapped function is + instrumented with a :func:`cache_info()` function that returns a + named tuple showing `hits`, `misses`, `maxsize` and `currsize`, to + help measure the effectiveness of the cache. + + .. note:: + + Note that this will inflict a - probably minor - performance + penalty, so it has to be explicitly enabled. + + .. doctest:: + :pyversion: >= 3 + + >>> @cached(cache=LRUCache(maxsize=32), info=True) + ... def get_pep(num): + ... url = 'http://www.python.org/dev/peps/pep-%04d/' % num + ... with urllib.request.urlopen(url) as s: + ... return s.read() + + >>> for n in 8, 290, 308, 320, 8, 218, 320, 279, 289, 320, 9991: + ... pep = get_pep(n) + + >>> get_pep.cache_info() + CacheInfo(hits=3, misses=8, maxsize=32, currsize=8) + The original underlying function is accessible through the :attr:`__wrapped__` attribute. This can be used for introspection or for bypassing the cache.
View file
_service:tar_scm:v5.2.0.tar.gz/setup.cfg -> _service:tar_scm:v5.3.0.tar.gz/setup.cfg
Changed
@@ -5,7 +5,7 @@ author = Thomas Kemmer author_email = tkemmer@computer.org license = MIT -license_file = LICENSE +license_files = LICENSE description = Extensible memoizing collections and decorators long_description = file: README.rst classifiers = @@ -20,6 +20,7 @@ Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 Topic :: Software Development :: Libraries :: Python Modules options
View file
_service:tar_scm:v5.2.0.tar.gz/src/cachetools/__init__.py -> _service:tar_scm:v5.3.0.tar.gz/src/cachetools/__init__.py
Changed
@@ -13,7 +13,7 @@ "cachedmethod", ) -__version__ = "5.2.0" +__version__ = "5.3.0" import collections import collections.abc @@ -615,64 +615,163 @@ return value -def cached(cache, key=keys.hashkey, lock=None): +_CacheInfo = collections.namedtuple( + "CacheInfo", "hits", "misses", "maxsize", "currsize" +) + + +def cached(cache, key=keys.hashkey, lock=None, info=False): """Decorator to wrap a function with a memoizing callable that saves results in a cache. """ def decorator(func): - if cache is None: + if info: + hits = misses = 0 - def wrapper(*args, **kwargs): - return func(*args, **kwargs) + if isinstance(cache, Cache): - def clear(): - pass + def getinfo(): + nonlocal hits, misses + return _CacheInfo(hits, misses, cache.maxsize, cache.currsize) - elif lock is None: + elif isinstance(cache, collections.abc.Mapping): - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: - return cachek - except KeyError: - pass # key not found - v = func(*args, **kwargs) - try: - cachek = v - except ValueError: - pass # value too large - return v + def getinfo(): + nonlocal hits, misses + return _CacheInfo(hits, misses, None, len(cache)) - def clear(): - cache.clear() + else: - else: + def getinfo(): + nonlocal hits, misses + return _CacheInfo(hits, misses, 0, 0) + + if cache is None: + + def wrapper(*args, **kwargs): + nonlocal misses + misses += 1 + return func(*args, **kwargs) + + def cache_clear(): + nonlocal hits, misses + hits = misses = 0 + + cache_info = getinfo + + elif lock is None: + + def wrapper(*args, **kwargs): + nonlocal hits, misses + k = key(*args, **kwargs) + try: + result = cachek + hits += 1 + return result + except KeyError: + misses += 1 + v = func(*args, **kwargs) + try: + cachek = v + except ValueError: + pass # value too large + return v + + def cache_clear(): + nonlocal hits, misses + cache.clear() + hits = misses = 0 - def wrapper(*args, **kwargs): - k = key(*args, **kwargs) - try: + cache_info = getinfo + + else: + + def wrapper(*args, 
**kwargs): + nonlocal hits, misses + k = key(*args, **kwargs) + try: + with lock: + result = cachek + hits += 1 + return result + except KeyError: + with lock: + misses += 1 + v = func(*args, **kwargs) + # in case of a race, prefer the item already in the cache + try: + with lock: + return cache.setdefault(k, v) + except ValueError: + return v # value too large + + def cache_clear(): + nonlocal hits, misses with lock: - return cachek - except KeyError: - pass # key not found - v = func(*args, **kwargs) - # in case of a race, prefer the item already in the cache - try: + cache.clear() + hits = misses = 0 + + def cache_info(): with lock: - return cache.setdefault(k, v) - except ValueError: - return v # value too large + return getinfo() + + else: + if cache is None: + + def wrapper(*args, **kwargs): + return func(*args, **kwargs) - def clear(): - with lock: + def cache_clear(): + pass + + elif lock is None: + + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + return cachek + except KeyError: + pass # key not found + v = func(*args, **kwargs) + try: + cachek = v + except ValueError: + pass # value too large + return v + + def cache_clear(): cache.clear() + else: + + def wrapper(*args, **kwargs): + k = key(*args, **kwargs) + try: + with lock: + return cachek + except KeyError: + pass # key not found + v = func(*args, **kwargs) + # in case of a race, prefer the item already in the cache + try: + with lock: + return cache.setdefault(k, v) + except ValueError: + return v # value too large + + def cache_clear(): + with lock: + cache.clear() + + cache_info = None
View file
_service:tar_scm:v5.2.0.tar.gz/src/cachetools/func.py -> _service:tar_scm:v5.3.0.tar.gz/src/cachetools/func.py
Changed
@@ -2,8 +2,6 @@ __all__ = ("fifo_cache", "lfu_cache", "lru_cache", "mru_cache", "rr_cache", "ttl_cache") -import collections -import functools import math import random import time @@ -14,24 +12,10 @@ from dummy_threading import RLock from . import FIFOCache, LFUCache, LRUCache, MRUCache, RRCache, TTLCache +from . import cached from . import keys -_CacheInfo = collections.namedtuple( - "CacheInfo", "hits", "misses", "maxsize", "currsize" -) - - -class _UnboundCache(dict): - @property - def maxsize(self): - return None - - @property - def currsize(self): - return len(self) - - class _UnboundTTLCache(TTLCache): def __init__(self, ttl, timer): TTLCache.__init__(self, math.inf, ttl, timer) @@ -41,50 +25,11 @@ return None -def _cache(cache, typed): - maxsize = cache.maxsize - +def _cache(cache, maxsize, typed): def decorator(func): key = keys.typedkey if typed else keys.hashkey - hits = misses = 0 - lock = RLock() - - def wrapper(*args, **kwargs): - nonlocal hits, misses - k = key(*args, **kwargs) - with lock: - try: - v = cachek - hits += 1 - return v - except KeyError: - misses += 1 - v = func(*args, **kwargs) - # in case of a race, prefer the item already in the cache - try: - with lock: - return cache.setdefault(k, v) - except ValueError: - return v # value too large - - def cache_info(): - with lock: - maxsize = cache.maxsize - currsize = cache.currsize - return _CacheInfo(hits, misses, maxsize, currsize) - - def cache_clear(): - nonlocal hits, misses - with lock: - try: - cache.clear() - finally: - hits = misses = 0 - - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear + wrapper = cached(cache=cache, key=key, lock=RLock(), info=True)(func) wrapper.cache_parameters = lambda: {"maxsize": maxsize, "typed": typed} - functools.update_wrapper(wrapper, func) return wrapper return decorator @@ -97,11 +42,11 @@ """ if maxsize is None: - return _cache(_UnboundCache(), typed) + return _cache({}, None, typed) elif callable(maxsize): - return 
_cache(FIFOCache(128), typed)(maxsize) + return _cache(FIFOCache(128), 128, typed)(maxsize) else: - return _cache(FIFOCache(maxsize), typed) + return _cache(FIFOCache(maxsize), maxsize, typed) def lfu_cache(maxsize=128, typed=False): @@ -111,11 +56,11 @@ """ if maxsize is None: - return _cache(_UnboundCache(), typed) + return _cache({}, None, typed) elif callable(maxsize): - return _cache(LFUCache(128), typed)(maxsize) + return _cache(LFUCache(128), 128, typed)(maxsize) else: - return _cache(LFUCache(maxsize), typed) + return _cache(LFUCache(maxsize), maxsize, typed) def lru_cache(maxsize=128, typed=False): @@ -125,11 +70,11 @@ """ if maxsize is None: - return _cache(_UnboundCache(), typed) + return _cache({}, None, typed) elif callable(maxsize): - return _cache(LRUCache(128), typed)(maxsize) + return _cache(LRUCache(128), 128, typed)(maxsize) else: - return _cache(LRUCache(maxsize), typed) + return _cache(LRUCache(maxsize), maxsize, typed) def mru_cache(maxsize=128, typed=False): @@ -138,11 +83,11 @@ algorithm. """ if maxsize is None: - return _cache(_UnboundCache(), typed) + return _cache({}, None, typed) elif callable(maxsize): - return _cache(MRUCache(128), typed)(maxsize) + return _cache(MRUCache(128), 128, typed)(maxsize) else: - return _cache(MRUCache(maxsize), typed) + return _cache(MRUCache(maxsize), maxsize, typed) def rr_cache(maxsize=128, choice=random.choice, typed=False): @@ -152,11 +97,11 @@ """ if maxsize is None: - return _cache(_UnboundCache(), typed) + return _cache({}, None, typed) elif callable(maxsize): - return _cache(RRCache(128, choice), typed)(maxsize) + return _cache(RRCache(128, choice), 128, typed)(maxsize) else: - return _cache(RRCache(maxsize, choice), typed) + return _cache(RRCache(maxsize, choice), maxsize, typed) def ttl_cache(maxsize=128, ttl=600, timer=time.monotonic, typed=False): @@ -165,8 +110,8 @@ algorithm with a per-item time-to-live (TTL) value. 
""" if maxsize is None: - return _cache(_UnboundTTLCache(ttl, timer), typed) + return _cache(_UnboundTTLCache(ttl, timer), None, typed) elif callable(maxsize): - return _cache(TTLCache(128, ttl, timer), typed)(maxsize) + return _cache(TTLCache(128, ttl, timer), 128, typed)(maxsize) else: - return _cache(TTLCache(maxsize, ttl, timer), typed) + return _cache(TTLCache(maxsize, ttl, timer), maxsize, typed)
View file
_service:tar_scm:v5.2.0.tar.gz/tests/test_cached.py -> _service:tar_scm:v5.3.0.tar.gz/tests/test_cached.py
Changed
@@ -145,16 +145,25 @@ self.assertEqual(len(cache), 0) self.assertEqual(lock.count, 3) - def test_decorator_clear_none(self): - cache = None - wrapper = cachetools.cached(cache)(self.func) - wrapper.cache_clear() # no-op - class CacheWrapperTest(unittest.TestCase, DecoratorTestMixin): def cache(self, minsize): return cachetools.Cache(maxsize=minsize) + def test_decorator_info(self): + cache = self.cache(2) + wrapper = cachetools.cached(cache, info=True)(self.func) + self.assertEqual(wrapper.cache_info(), (0, 0, 2, 0)) + self.assertEqual(wrapper(0), 0) + self.assertEqual(wrapper.cache_info(), (0, 1, 2, 1)) + self.assertEqual(wrapper(1), 1) + self.assertEqual(wrapper.cache_info(), (0, 2, 2, 2)) + self.assertEqual(wrapper(0), 0) + self.assertEqual(wrapper.cache_info(), (1, 2, 2, 2)) + wrapper.cache_clear() + self.assertEqual(len(cache), 0) + self.assertEqual(wrapper.cache_info(), (0, 0, 2, 0)) + def test_zero_size_cache_decorator(self): cache = self.cache(0) wrapper = cachetools.cached(cache)(self.func) @@ -173,11 +182,33 @@ self.assertEqual(len(cache), 0) self.assertEqual(lock.count, 2) + def test_zero_size_cache_decorator_info(self): + cache = self.cache(0) + wrapper = cachetools.cached(cache, info=True)(self.func) + + self.assertEqual(wrapper.cache_info(), (0, 0, 0, 0)) + self.assertEqual(wrapper(0), 0) + self.assertEqual(wrapper.cache_info(), (0, 1, 0, 0)) + class DictWrapperTest(unittest.TestCase, DecoratorTestMixin): def cache(self, minsize): return dict() + def test_decorator_info(self): + cache = self.cache(2) + wrapper = cachetools.cached(cache, info=True)(self.func) + self.assertEqual(wrapper.cache_info(), (0, 0, None, 0)) + self.assertEqual(wrapper(0), 0) + self.assertEqual(wrapper.cache_info(), (0, 1, None, 1)) + self.assertEqual(wrapper(1), 1) + self.assertEqual(wrapper.cache_info(), (0, 2, None, 2)) + self.assertEqual(wrapper(0), 0) + self.assertEqual(wrapper.cache_info(), (1, 2, None, 2)) + wrapper.cache_clear() + self.assertEqual(len(cache), 0) + 
self.assertEqual(wrapper.cache_info(), (0, 0, None, 0)) + class NoneWrapperTest(unittest.TestCase): def func(self, *args, **kwargs): @@ -189,3 +220,26 @@ self.assertEqual(wrapper(0), (0,)) self.assertEqual(wrapper(1), (1,)) self.assertEqual(wrapper(1, foo="bar"), (1, ("foo", "bar"))) + + def test_decorator_attributes(self): + wrapper = cachetools.cached(None)(self.func) + + self.assertIs(wrapper.cache, None) + self.assertIs(wrapper.cache_key, cachetools.keys.hashkey) + self.assertIs(wrapper.cache_lock, None) + + def test_decorator_clear(self): + wrapper = cachetools.cached(None)(self.func) + + wrapper.cache_clear() # no-op + + def test_decorator_info(self): + wrapper = cachetools.cached(None, info=True)(self.func) + + self.assertEqual(wrapper.cache_info(), (0, 0, 0, 0)) + self.assertEqual(wrapper(0), (0,)) + self.assertEqual(wrapper.cache_info(), (0, 1, 0, 0)) + self.assertEqual(wrapper(1), (1,)) + self.assertEqual(wrapper.cache_info(), (0, 2, 0, 0)) + wrapper.cache_clear() + self.assertEqual(wrapper.cache_info(), (0, 0, 0, 0))
Locations
Projects
Search
Status Monitor
Help
Open Build Service
OBS Manuals
API Documentation
OBS Portal
Reporting a Bug
Contact
Mailing List
Forums
Chat (IRC)
Twitter
Open Build Service (OBS)
is an
openSUSE project
.
浙ICP备2022010568号-2