use locking to limit cache file access to one process at a time

faush01
2020-07-03 15:38:37 +10:00
parent 14514fc7b5
commit f25c309167
6 changed files with 286 additions and 49 deletions
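The change replaces the old HomeWindow-property handshake (the wait_for_save loop) with a FileLock taken around every read, write and delete of the cached pickle data. Roughly, the guarded pattern looks like the sketch below, assuming the new filelock module is importable; the paths and payload are placeholders rather than the add-on's real ones, while the ".locked" suffix, the 5 second timeout and the dump/load calls mirror the diff (the add-on itself uses cPickle):

    # Minimal sketch of the locking pattern this commit applies around the cache files.
    import os
    import pickle
    import tempfile

    from filelock import FileLock  # the new resources/lib/filelock.py module below

    # hypothetical path; the real cache files live in the add-on's data directory
    cache_file = os.path.join(tempfile.gettempdir(), "embycon_cache.pickle")

    # writer: serialise under the lock so readers never see a half-written file
    with FileLock(cache_file + ".locked", timeout=5):
        with open(cache_file, "wb") as handle:
            pickle.dump({"item_list": [], "total_records": 0}, handle,
                        protocol=pickle.HIGHEST_PROTOCOL)

    # reader: take the same lock before unpickling
    with FileLock(cache_file + ".locked", timeout=5):
        with open(cache_file, "rb") as handle:
            cache_item = pickle.load(handle)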

View File

@@ -687,7 +687,7 @@ msgid "Number of images removed from cache : %s"
msgstr ""
msgctxt "#30345"
msgid "Cache server requests"
msgid "Cache Emby server data requests"
msgstr ""
msgctxt "#30346"
@@ -1073,3 +1073,7 @@ msgstr ""
msgctxt "#30440"
msgid "Play next"
msgstr ""
msgctxt "#30441"
msgid "Use cached widget data"
msgstr ""

View File

@@ -14,6 +14,7 @@ from .item_functions import extract_item_info
from .kodi_utils import HomeWindow
from .translation import string_load
from .tracking import timer
from .filelock import FileLock
import xbmc
import xbmcaddon
@@ -93,15 +94,16 @@ class DataManager:
if os.path.isfile(cache_file) and use_cache:
log.debug("Loading url data from cached pickle data")
with open(cache_file, 'rb') as handle:
try:
cache_item = cPickle.load(handle)
cache_thread.cached_item = cache_item
item_list = cache_item.item_list
total_records = cache_item.total_records
except Exception as err:
log.error("Pickle Data Load Failed : {0}", err)
item_list = None
with FileLock(cache_file + ".locked", timeout=5):
with open(cache_file, 'rb') as handle:
try:
cache_item = cPickle.load(handle)
cache_thread.cached_item = cache_item
item_list = cache_item.item_list
total_records = cache_item.total_records
except Exception as err:
log.error("Pickle Data Load Failed : {0}", err)
item_list = None
# we need to load the list item data from the server
if item_list is None or len(item_list) == 0:
@@ -141,10 +143,10 @@ class DataManager:
cache_thread.cached_item = cache_item
# copy.deepcopy(item_list)
if use_cache:
cache_thread.start()
if not use_cache:
cache_thread = None
return cache_file, item_list, total_records
return cache_file, item_list, total_records, cache_thread
class CacheManagerThread(threading.Thread):
@@ -172,16 +174,6 @@ class CacheManagerThread(threading.Thread):
return m.hexdigest()
@staticmethod
def wait_for_save(home_window, file_name):
loops = 0
wait_refresh = home_window.get_property(file_name)
while wait_refresh and loops < 200 and not xbmc.Monitor().abortRequested():
xbmc.sleep(100)
loops = loops + 1
wait_refresh = home_window.get_property(file_name)
return loops
def run(self):
log.debug("CacheManagerThread : Started")
@@ -205,14 +197,11 @@ class CacheManagerThread(threading.Thread):
self.cached_item.date_saved = time.time()
self.cached_item.date_last_used = time.time()
loops = self.wait_for_save(home_window, self.cached_item.file_path)
log.debug("CacheManagerThread : Saving New Data loops")
log.debug("CacheManagerThread : Saving New Data loops({0})", loops)
with open(self.cached_item.file_path, 'wb') as handle:
cPickle.dump(self.cached_item, handle, protocol=cPickle.HIGHEST_PROTOCOL)
home_window.clear_property(self.cached_item.file_path)
with FileLock(self.cached_item.file_path + ".locked", timeout=5):
with open(self.cached_item.file_path, 'wb') as handle:
cPickle.dump(self.cached_item, handle, protocol=cPickle.HIGHEST_PROTOCOL)
else:
log.debug("CacheManagerThread : Reloading to recheck data hashes")
@@ -256,23 +245,19 @@ class CacheManagerThread(threading.Thread):
self.cached_item.date_last_used = time.time()
self.cached_item.total_records = total_records
# we need to refresh but will wait until the main function has finished
loops = self.wait_for_save(home_window, self.cached_item.file_path)
with FileLock(self.cached_item.file_path + ".locked", timeout=5):
with open(self.cached_item.file_path, 'wb') as handle:
cPickle.dump(self.cached_item, handle, protocol=cPickle.HIGHEST_PROTOCOL)
with open(self.cached_item.file_path, 'wb') as handle:
cPickle.dump(self.cached_item, handle, protocol=cPickle.HIGHEST_PROTOCOL)
home_window.clear_property(self.cached_item.file_path)
log.debug("CacheManagerThread : Sending container refresh ({0})", loops)
log.debug("CacheManagerThread : Sending container refresh")
xbmc.executebuiltin("Container.Refresh")
else:
self.cached_item.date_last_used = time.time()
loops = self.wait_for_save(home_window, self.cached_item.file_path)
with open(self.cached_item.file_path, 'wb') as handle:
cPickle.dump(self.cached_item, handle, protocol=cPickle.HIGHEST_PROTOCOL)
log.debug("CacheManagerThread : Updating last used date for cache data ({0})", loops)
home_window.clear_property(self.cached_item.file_path)
with FileLock(self.cached_item.file_path + ".locked", timeout=5):
with open(self.cached_item.file_path, 'wb') as handle:
cPickle.dump(self.cached_item, handle, protocol=cPickle.HIGHEST_PROTOCOL)
log.debug("CacheManagerThread : Updating last used date for cache data")
log.debug("CacheManagerThread : Exited")
@@ -308,8 +293,10 @@ def clear_old_cache_data():
cache_item = None
for x in range(0, 5):
try:
with open(os.path.join(addon_dir, filename), 'rb') as handle:
cache_item = cPickle.load(handle)
data_file = os.path.join(addon_dir, filename)
with FileLock(data_file + ".locked", timeout=5):
with open(data_file, 'rb') as handle:
cache_item = cPickle.load(handle)
break
except Exception as error:
log.debug("clear_old_cache_data() : Pickle load error : {0}", error)
@@ -324,10 +311,14 @@ def clear_old_cache_data():
log.debug("clear_old_cache_data() : Cache item last used : {0} sec ago", item_last_used)
if item_last_used == -1 or item_last_used > (3600 * 24 * 7):
log.debug("clear_old_cache_data() : Deleting cache item age : {0}", item_last_used)
xbmcvfs.delete(os.path.join(addon_dir, filename))
data_file = os.path.join(addon_dir, filename)
with FileLock(data_file + ".locked", timeout=5):
xbmcvfs.delete(data_file)
del_count += 1
else:
log.debug("clear_old_cache_data() : Deleting unloadable cache item")
xbmcvfs.delete(os.path.join(addon_dir, filename))
data_file = os.path.join(addon_dir, filename)
with FileLock(data_file + ".locked", timeout=5):
xbmcvfs.delete(data_file)
log.debug("clear_old_cache_data() : Cache items deleted : {0}", del_count)

View File

@@ -252,7 +252,7 @@ def process_directory(url, progress, params, use_cache_data=False):
gui_options["name_format_type"] = name_format_type
use_cache = settings.getSetting("use_cache") == "true" and use_cache_data
cache_file, item_list, total_records = data_manager.get_items(url, gui_options, use_cache)
cache_file, item_list, total_records, cache_thread = data_manager.get_items(url, gui_options, use_cache)
# flatten single season
# if there is only one result and it is a season and you have flatten single season turned on then
@@ -415,6 +415,7 @@ def process_directory(url, progress, params, use_cache_data=False):
if gui_item:
dir_items.append(gui_item)
HomeWindow().clear_property(cache_file)
if cache_thread is not None:
cache_thread.start()
return dir_items, detected_type, total_records

resources/lib/filelock.py (new file, 239 lines)
View File

@@ -0,0 +1,239 @@
# https://github.com/ilastik/lazyflow/blob/master/lazyflow/utility/fileLock.py
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
"""
Implementation of a simple cross-platform file locking mechanism.
This is a modified version of code retrieved on 2013-01-01 from
http://www.evanfosmark.com/2009/01/cross-platform-file-locking-support-in-python.
(The original code was released under the BSD License. See below for details.)
Modifications in this version:
- Tweak docstrings for sphinx.
- Accept an absolute path for the protected file (instead of a file name relative to cwd).
- Allow timeout to be None.
- Fixed a bug that caused the original code to be NON-threadsafe when the same FileLock instance was shared by multiple threads in one process.
(The original was safe for multiple processes, but not multiple threads in a single process. This version is safe for both cases.)
- Added ``purge()`` function.
- Added ``available()`` function.
- Expanded API to mimic ``threading.Lock`` interface:
- ``__enter__`` always calls ``acquire()``, and therefore blocks if ``acquire()`` was called previously.
- ``__exit__`` always calls ``release()``. It is therefore a bug to call ``release()`` from within a context manager.
- Added ``locked()`` function.
- Added blocking parameter to ``acquire()`` method
WARNINGS:
- The locking mechanism used here may need to be changed to support old NFS filesystems:
http://lwn.net/Articles/251004
(Newer versions of NFS should be okay, e.g. NFSv3 with Linux kernel 2.6. Check the open(2) man page for details about O_EXCL.)
- This code has not been thoroughly tested on Windows, and there has been one report of incorrect results on Windows XP and Windows 7.
The locking mechanism used in this class should (in theory) be cross-platform, but use at your own risk.
ORIGINAL LICENSE:
The original code did not properly include license text.
(It merely said "License: BSD".)
Therefore, we'll attach the following generic BSD License terms to this file.
Those who extract this file from the lazyflow code base (LGPL) for their own use
are therefore bound by the terms of both the Simplified BSD License below AND the LGPL.
Copyright (c) 2013, Evan Fosmark and others.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
"""
# from builtins import object
import os
import sys
import time
import errno
# from .simple_logging import SimpleLogging
# log = SimpleLogging(__name__)
class FileLock(object):
""" A file locking mechanism that has context-manager support so
you can use it in a ``with`` statement. This should be relatively cross
compatible as it doesn't rely on ``msvcrt`` or ``fcntl`` for the locking.
"""
class FileLockException(Exception):
pass
def __init__(self, protected_file_path, timeout=None, delay=1, lock_file_contents=None):
""" Prepare the file locker. Specify the file to lock and optionally
the maximum timeout and the delay between each attempt to lock.
"""
self.is_locked = False
self.lockfile = protected_file_path + ".lock"
self.timeout = timeout
self.delay = delay
self._lock_file_contents = lock_file_contents
if self._lock_file_contents is None:
self._lock_file_contents = "Owning process args:\n"
for arg in sys.argv:
self._lock_file_contents += arg + "\n"
def locked(self):
"""
Returns True iff the file is owned by THIS FileLock instance.
(Even if this returns false, the file could be owned by another FileLock instance, possibly in a different thread or process).
"""
return self.is_locked
def available(self):
"""
Returns True iff the file is currently available to be locked.
"""
return not os.path.exists(self.lockfile)
def acquire(self, blocking=True):
""" Acquire the lock, if possible. If the lock is in use, and `blocking` is False, return False.
Otherwise, check again every `self.delay` seconds until it either gets the lock or
exceeds `timeout` number of seconds, in which case it raises an exception.
"""
start_time = time.time()
while True:
try:
# Attempt to create the lockfile.
# These flags cause os.open to raise an OSError if the file already exists.
fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
with os.fdopen(fd, "a") as f:
# Print some info about the current process as debug info for anyone who bothers to look.
f.write(self._lock_file_contents)
break
except OSError as e:
if e.errno != errno.EEXIST:
raise
if self.timeout is not None and (time.time() - start_time) >= self.timeout:
raise FileLock.FileLockException("Timeout occurred.")
if not blocking:
return False
time.sleep(self.delay)
self.is_locked = True
return True
def release(self):
""" Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end.
"""
self.is_locked = False
os.unlink(self.lockfile)
def __enter__(self):
""" Activated when used in the with statement.
Should automatically acquire a lock to be used in the with block.
"""
self.acquire()
return self
def __exit__(self, type, value, traceback):
""" Activated at the end of the with statement.
It automatically releases the lock.
"""
self.release()
def __del__(self):
""" Make sure this ``FileLock`` instance doesn't leave a .lock file
lying around.
"""
if self.is_locked:
self.release()
def purge(self):
"""
For debug purposes only. Removes the lock file from the hard disk.
"""
if os.path.exists(self.lockfile):
self.release()
return True
return False
"""
if __name__ == "__main__":
import sys
import functools
import threading
import tempfile
from builtins import range
temp_dir = tempfile.mkdtemp()
protected_filepath = os.path.join(temp_dir, "somefile.txt")
print("Protecting file: {}".format(protected_filepath))
fl = FileLock(protected_filepath)
def writeLines(line, repeat=10):
with fl:
for _ in range(repeat):
with open(protected_filepath, "a") as f:
f.write(line + "\n")
f.flush()
th1 = threading.Thread(target=functools.partial(writeLines, "1111111111111111111111111111111"))
th2 = threading.Thread(target=functools.partial(writeLines, "2222222222222222222222222222222"))
th3 = threading.Thread(target=functools.partial(writeLines, "3333333333333333333333333333333"))
th4 = threading.Thread(target=functools.partial(writeLines, "4444444444444444444444444444444"))
th1.start()
th2.start()
th3.start()
th4.start()
th1.join()
th2.join()
th3.join()
th4.join()
assert not os.path.exists(fl.lockfile), "The lock file wasn't cleaned up!"
# Print the contents of the file.
# Please manually inspect the output. Does it look like the operations were atomic?
with open(protected_filepath, "r") as f:
sys.stdout.write(f.read())
"""

View File

@@ -264,6 +264,7 @@ def get_widget_content(handle, params):
settings = xbmcaddon.Addon()
hide_watched = settings.getSetting("hide_watched") == "true"
use_cached_widget_data = settings.getSetting("use_cached_widget_data") == "true"
widget_type = params.get("type")
if widget_type is None:
@@ -389,7 +390,7 @@ def get_widget_content(handle, params):
items_url = get_emby_url(url_verb, url_params)
list_items, detected_type, total_records = process_directory(items_url, None, params, False)
list_items, detected_type, total_records = process_directory(items_url, None, params, use_cached_widget_data)
# remove resumable items from next up
if widget_type == "nextup_episodes":

View File

@@ -129,6 +129,7 @@
<setting id="log_debug" type="bool" label="30027" default="false" visible="true" enable="true" />
<setting id="log_timing" type="bool" label="30015" default="false" visible="true" enable="true" />
<setting id="use_cache" type="bool" label="30345" default="true" visible="true" enable="true" />
<setting id="use_cached_widget_data" type="bool" label="30441" default="false" visible="true" enable="true" />
<setting id="showLoadProgress" type="bool" label="30120" default="false" visible="true" enable="true" />
<setting id="suppressErrors" type="bool" label="30315" default="false" visible="true" enable="true" />
<setting id="speed_test_data_size" type="slider" label="30436" default="15" range="5,1,100" option="int" visible="true"/>