hydrus/include/ClientCaches.py

2609 lines
83 KiB
Python
Raw Normal View History

2019-05-22 22:35:06 +00:00
from . import ClientFiles
2019-02-27 23:03:30 +00:00
from . import ClientImageHandling
2019-01-09 22:59:03 +00:00
from . import ClientParsing
from . import ClientPaths
from . import ClientRendering
from . import ClientSearch
from . import ClientServices
from . import ClientThreading
from . import HydrusConstants as HC
from . import HydrusExceptions
from . import HydrusFileHandling
2019-02-27 23:03:30 +00:00
from . import HydrusImageHandling
2019-01-09 22:59:03 +00:00
from . import HydrusPaths
from . import HydrusSerialisable
from . import HydrusThreading
2017-01-18 22:52:39 +00:00
import json
2015-03-18 21:46:29 +00:00
import os
import random
import threading
import time
2019-01-09 22:59:03 +00:00
from . import HydrusData
from . import ClientData
from . import ClientConstants as CC
from . import HydrusGlobals as HG
2015-08-05 18:42:35 +00:00
import collections
2019-01-09 22:59:03 +00:00
from . import HydrusTags
2016-06-08 20:27:22 +00:00
import traceback
2018-12-05 22:35:30 +00:00
import weakref
2019-11-14 03:56:30 +00:00
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from . import QtPorting as QP
2015-03-18 21:46:29 +00:00
2019-06-19 22:08:48 +00:00
# now let's fill out grandparents
def BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents ):
    """Expand per-service child->parents mappings so each child also lists its grandparents.
    
    Takes { service_key : { child : set( parents ) } } and returns
    { service_key : { child : [ parents and grandparents... ] } }.
    """
    
    # recursion preserves the parent-grandparent interleaving in list order
    def add_ancestors( simple_children_to_parents, this_childs_parents, parents ):
        
        for parent in parents:
            
            if parent in this_childs_parents:
                
                continue
                
            
            this_childs_parents.append( parent )
            
            # the parent may itself have parents; the child inherits those too
            if parent in simple_children_to_parents:
                
                add_ancestors( simple_children_to_parents, this_childs_parents, simple_children_to_parents[ parent ] )
                
            
        
    
    service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
    
    for ( service_key, simple_children_to_parents ) in service_keys_to_simple_children_to_parents.items():
        
        children_to_parents = service_keys_to_children_to_parents[ service_key ]
        
        for ( child, parents ) in list( simple_children_to_parents.items() ):
            
            add_ancestors( simple_children_to_parents, children_to_parents[ child ], parents )
            
        
    
    return service_keys_to_children_to_parents
    
def BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat ):
    """Convert { service_key : ( child, parent ) pairs } into { service_key : { child : set( parents ) } }, excluding loops."""
    
    result = collections.defaultdict( HydrusData.default_dict_set )
    
    for ( service_key, pairs ) in service_keys_to_pairs_flat.items():
        
        result[ service_key ] = BuildSimpleChildrenToParents( pairs )
        
    
    return result
    
2019-06-19 22:08:48 +00:00
# take pairs, make dict of child -> parents while excluding loops
# no grandparents here
def BuildSimpleChildrenToParents( pairs ):
    """Build child -> set( parents ) from ( child, parent ) pairs, skipping self-pairs and pairs that would close a loop."""
    
    simple_children_to_parents = HydrusData.default_dict_set()
    
    for ( child, parent ) in pairs:
        
        # a tag cannot be its own parent
        if child == parent:
            
            continue
            
        
        # only possible to create a loop if the parent already has parents of its own
        would_loop = parent in simple_children_to_parents and LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent )
        
        if would_loop:
            
            continue
            
        
        simple_children_to_parents[ child ].add( parent )
        
    
    return simple_children_to_parents
    
2017-04-05 21:16:40 +00:00
def CollapseTagSiblingPairs( groups_of_pairs ):
    """Collapse groups of ( bad, good ) sibling pairs into a flat bad -> best mapping.
    
    Groups are given in descending order of precedence, which lets local tags
    take precedence over remote ones.
    
    A pair is discarded when:
    - it is a self-pair ( a->a )
    - the 'bad' side already has a relationship from an earlier pair/group ( a->b beats a->c )
    - following the chain from 'good' arrives back at 'bad' ( a->b, b->c, c->a )
    """
    
    valid_chains = {}
    
    for pairs in groups_of_pairs:
        
        # sort for deterministic precedence within a group
        for ( bad, good ) in sorted( pairs ):
            
            if bad == good:
                
                # a->a is a loop!
                continue
                
            
            if bad in valid_chains:
                
                # bad already has a relationship, first one wins
                continue
                
            
            # walk the existing chain from good; bad is not in valid_chains,
            # so if the walk ends at bad, this pair would close a loop
            endpoint = good
            
            while endpoint in valid_chains:
                
                endpoint = valid_chains[ endpoint ]
                
            
            if endpoint != bad:
                
                valid_chains[ bad ] = good
                
            
        
    
    # now we collapse the chains, turning:
    # a->b, b->c ... e->f
    # into
    # a->f, b->f ... e->f
    
    siblings = {}
    
    for ( bad, good ) in list( valid_chains.items() ):
        
        if good in siblings:
            
            # endpoint already calculated for this chain
            best = siblings[ good ]
            
        else:
            
            # pursue the chain to its endpoint f
            best = good
            
            while best in valid_chains:
                
                best = valid_chains[ best ]
                
            
        
        siblings[ bad ] = best
        
    
    return siblings
    
2015-11-25 22:00:57 +00:00
2019-07-03 22:49:27 +00:00
def DeLoopTagSiblingPairs( groups_of_pairs ):
    # TODO: unimplemented stub -- currently a no-op that returns None.
    # presumably intended to strip loop-forming pairs from groups_of_pairs; confirm intent before implementing
    pass
2015-11-25 22:00:57 +00:00
def LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ):
    """Return True if adding child->parent to the mapping would create a loop.
    
    Walks breadth-first up the ancestry from parent; a loop exists exactly when
    child is reachable. Assumes the existing mapping is already loop-free.
    """
    
    frontier = { parent }
    
    while True:
        
        next_frontier = set()
        
        for node in frontier:
            
            if node in simple_children_to_parents:
                
                next_frontier.update( simple_children_to_parents[ node ] )
                
            
        
        frontier = next_frontier
        
        if child in frontier:
            
            return True
            
        elif len( frontier ) == 0:
            
            return False
            
        
    
2015-11-25 22:00:57 +00:00
2018-11-14 23:10:55 +00:00
class BitmapManager( object ):
    """Creates Qt images and pixmaps for the client, including zero-copy wraps of raw pixel buffers."""
    
    MAX_MEMORY_ALLOWANCE = 512 * 1024 * 1024
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        # cached media viewer background image, keyed by its configured path
        self._media_background_pixmap_path = None
        self._media_background_pixmap = None
        
    
    def _GetQtImageFormat( self, depth ):
        
        # NOTE(review): depths other than 24/32 fall through and return None -- confirm callers only pass 24 or 32
        if depth == 24:
            
            return QG.QImage.Format_RGB888
            
        elif depth == 32:
            
            return QG.QImage.Format_RGBA8888
            
        
    
    def GetQtImage( self, width, height, depth = 24 ):
        
        # negative dimensions are nonsense, so swap in a small default
        if width < 0:
            
            width = 20
            
        
        if height < 0:
            
            height = 20
            
        
        qt_image_format = self._GetQtImageFormat( depth )
        
        return QG.QImage( width, height, qt_image_format )
        
    
    def GetQtPixmap( self, width, height ):
        
        if width < 0:
            
            width = 20
            
        
        if height < 0:
            
            height = 20
            
        
        return QG.QPixmap( width, height )
        
    
    def GetQtImageFromBuffer( self, width, height, depth, data ):
        
        qt_image_format = self._GetQtImageFormat( depth )
        
        # fixed: 'depth / 8' produced a float under Python 3, but QImage's bytes-per-line must be an int
        bytes_per_line = ( depth // 8 ) * width
        
        # no copy here
        qt_image = QG.QImage( data, width, height, bytes_per_line, qt_image_format )
        
        # cheeky solution here
        # the QImage init does not take python ownership of the data, so if it gets garbage collected, we crash
        # so, add a beardy python ref to it, no problem :^)
        # other answer here is to do a .copy, but this can be a _little_ expensive and eats memory
        qt_image.python_data_reference = data
        
        return qt_image
        
    
    def GetQtPixmapFromBuffer( self, width, height, depth, data ):
        
        qt_image_format = self._GetQtImageFormat( depth )
        
        # fixed: integer division, as above
        bytes_per_line = ( depth // 8 ) * width
        
        # no copy, no new data allocated
        qt_image = QG.QImage( data, width, height, bytes_per_line, qt_image_format )
        
        # _should_ be a safe copy of the hot data
        pixmap = QG.QPixmap.fromImage( qt_image )
        
        return pixmap
        
    
    def GetMediaBackgroundPixmap( self ):
        
        pixmap_path = self._controller.new_options.GetNoneableString( 'media_background_bmp_path' )
        
        # reload only when the configured path changes
        if pixmap_path != self._media_background_pixmap_path:
            
            self._media_background_pixmap_path = pixmap_path
            
            try:
                
                self._media_background_pixmap = QG.QPixmap( self._media_background_pixmap_path )
                
            except Exception as e:
                
                self._media_background_pixmap = None
                
                HydrusData.ShowText( 'Loading a bmp caused an error!' )
                
                HydrusData.ShowException( e )
                
                return None
                
            
        
        return self._media_background_pixmap
        
    
2019-04-17 21:51:50 +00:00
2015-03-18 21:46:29 +00:00
class DataCache( object ):
    """An LRU cache with a total estimated-memory budget and a last-access timeout. Thread-safe."""
    
    def __init__( self, controller, cache_size, timeout = 1200 ):
        
        self._controller = controller
        
        # maximum total estimated memory footprint of stored data
        self._cache_size = cache_size
        
        # seconds since last access after which an item may be expired by MaintainCache
        self._timeout = timeout
        
        self._keys_to_data = {}
        
        # key -> last access time, least recently used first
        self._keys_fifo = collections.OrderedDict()
        
        self._total_estimated_memory_footprint = 0
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'MaintainCache', 'memory_maintenance_pulse' )
        
    
    def _Delete( self, key ):
        
        if key not in self._keys_to_data:
            
            return
            
        
        del self._keys_to_data[ key ]
        
        # fixed: also drop the access-order entry, so an explicit DeleteData cannot leave a
        # stale key in the fifo that eviction/expiry would later pop and uselessly re-delete
        if key in self._keys_fifo:
            
            del self._keys_fifo[ key ]
            
        
        self._RecalcMemoryUsage()
        
    
    def _DeleteItem( self ):
        
        # pop the least recently used key and delete its data
        ( deletee_key, last_access_time ) = self._keys_fifo.popitem( last = False )
        
        self._Delete( deletee_key )
        
    
    def _RecalcMemoryUsage( self ):
        
        self._total_estimated_memory_footprint = sum( ( data.GetEstimatedMemoryFootprint() for data in self._keys_to_data.values() ) )
        
    
    def _TouchKey( self, key ):
        
        # have to delete first, rather than overwriting, so the ordereddict updates its internal order
        if key in self._keys_fifo:
            
            del self._keys_fifo[ key ]
            
        
        self._keys_fifo[ key ] = HydrusData.GetNow()
        
    
    def Clear( self ):
        
        with self._lock:
            
            self._keys_to_data = {}
            
            self._keys_fifo = collections.OrderedDict()
            
            self._total_estimated_memory_footprint = 0
            
        
    
    def AddData( self, key, data ):
        
        with self._lock:
            
            if key not in self._keys_to_data:
                
                # evict least recently used items until we are back under budget
                while self._total_estimated_memory_footprint > self._cache_size:
                    
                    self._DeleteItem()
                    
                
                self._keys_to_data[ key ] = data
                
                self._TouchKey( key )
                
                self._RecalcMemoryUsage()
                
            
        
    
    def DeleteData( self, key ):
        
        with self._lock:
            
            self._Delete( key )
            
        
    
    def GetData( self, key ):
        
        with self._lock:
            
            if key not in self._keys_to_data:
                
                raise Exception( 'Cache error! Looking for ' + str( key ) + ', but it was missing.' )
                
            
            self._TouchKey( key )
            
            return self._keys_to_data[ key ]
            
        
    
    def GetIfHasData( self, key ):
        
        # returns the data, or None if the key is not cached
        with self._lock:
            
            if key in self._keys_to_data:
                
                self._TouchKey( key )
                
                return self._keys_to_data[ key ]
                
            else:
                
                return None
                
            
        
    
    def HasData( self, key ):
        
        with self._lock:
            
            return key in self._keys_to_data
            
        
    
    def MaintainCache( self ):
        
        # called on the memory maintenance pulse: expire items untouched for longer than the timeout
        with self._lock:
            
            while True:
                
                if len( self._keys_fifo ) == 0:
                    
                    break
                    
                else:
                    
                    # peek at the least recently used item
                    ( key, last_access_time ) = next( iter( self._keys_fifo.items() ) )
                    
                    if HydrusData.TimeHasPassed( last_access_time + self._timeout ):
                        
                        self._DeleteItem()
                        
                    else:
                        
                        break
                        
                    
                
            
        
    
2015-03-18 21:46:29 +00:00
2018-12-05 22:35:30 +00:00
class FileViewingStatsManager( object ):
    """Accumulates preview/media view counts and viewtimes in memory and periodically flushes them to the database."""
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._lock = threading.Lock()
        
        # hash -> ( preview_views, preview_viewtime, media_views, media_viewtime ) deltas awaiting flush
        self._pending_updates = {}
        
        self._last_update = HydrusData.GetNow()
        
        self._my_flush_job = self._controller.CallRepeating( 5, 60, self.REPEATINGFlush )
        
    
    def _GenerateViewsRow( self, viewtype, viewtime_delta ):
        
        # convert one finished viewing into count/viewtime deltas, honouring the user's min/max time options
        
        new_options = HG.client_controller.new_options
        
        preview_views_delta = 0
        preview_viewtime_delta = 0
        media_views_delta = 0
        media_viewtime_delta = 0
        
        if viewtype == 'preview':
            
            preview_min = new_options.GetNoneableInteger( 'file_viewing_statistics_preview_min_time' )
            preview_max = new_options.GetNoneableInteger( 'file_viewing_statistics_preview_max_time' )
            
            if preview_max is not None:
                
                # clamp very long views down to the configured maximum
                viewtime_delta = min( viewtime_delta, preview_max )
                
            
            if preview_min is None or viewtime_delta >= preview_min:
                
                preview_views_delta = 1
                preview_viewtime_delta = viewtime_delta
                
            
        elif viewtype in ( 'media', 'media_duplicates_filter' ):
            
            do_it = True
            
            # fixed: this previously compared viewtime_delta (a number) to the string
            # 'media_duplicates_filter', which could never match, so the 'no stats from
            # the dupe filter' option was silently ignored
            if viewtype == 'media_duplicates_filter' and not new_options.GetBoolean( 'file_viewing_statistics_active_on_dupe_filter' ):
                
                do_it = False
                
            
            if do_it:
                
                media_min = new_options.GetNoneableInteger( 'file_viewing_statistics_media_min_time' )
                media_max = new_options.GetNoneableInteger( 'file_viewing_statistics_media_max_time' )
                
                if media_max is not None:
                    
                    viewtime_delta = min( viewtime_delta, media_max )
                    
                
                if media_min is None or viewtime_delta >= media_min:
                    
                    media_views_delta = 1
                    media_viewtime_delta = viewtime_delta
                    
                
            
        
        return ( preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta )
        
    
    def _PubSubRow( self, hash, row ):
        
        # broadcast immediately so open UI can update without waiting for the db flush
        
        ( preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta ) = row
        
        pubsub_row = ( hash, preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta )
        
        content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILE_VIEWING_STATS, HC.CONTENT_UPDATE_ADD, pubsub_row )
        
        service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ content_update ] }
        
        HG.client_controller.pub( 'content_updates_data', service_keys_to_content_updates )
        HG.client_controller.pub( 'content_updates_gui', service_keys_to_content_updates )
        
    
    def REPEATINGFlush( self ):
        
        self.Flush()
        
    
    def Flush( self ):
        
        # write all pending view deltas to the db in one batch
        
        with self._lock:
            
            if len( self._pending_updates ) > 0:
                
                content_updates = []
                
                for ( hash, ( preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta ) ) in self._pending_updates.items():
                    
                    row = ( hash, preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta )
                    
                    content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILE_VIEWING_STATS, HC.CONTENT_UPDATE_ADD, row )
                    
                    content_updates.append( content_update )
                    
                
                service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : content_updates }
                
                # non-synchronous write; the ui was already notified via _PubSubRow
                self._controller.Write( 'content_updates', service_keys_to_content_updates, do_pubsubs = False )
                
                self._pending_updates = {}
                
            
        
    
    def FinishViewing( self, viewtype, hash, viewtime_delta ):
        
        if not HG.client_controller.new_options.GetBoolean( 'file_viewing_statistics_active' ):
            
            return
            
        
        with self._lock:
            
            row = self._GenerateViewsRow( viewtype, viewtime_delta )
            
            if hash not in self._pending_updates:
                
                self._pending_updates[ hash ] = row
                
            else:
                
                # merge with any deltas already pending for this hash
                ( preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta ) = row
                
                ( existing_preview_views_delta, existing_preview_viewtime_delta, existing_media_views_delta, existing_media_viewtime_delta ) = self._pending_updates[ hash ]
                
                self._pending_updates[ hash ] = ( existing_preview_views_delta + preview_views_delta, existing_preview_viewtime_delta + preview_viewtime_delta, existing_media_views_delta + media_views_delta, existing_media_viewtime_delta + media_viewtime_delta )
                
            
            self._PubSubRow( hash, row )
            
        
    
2015-03-18 21:46:29 +00:00
class LocalBooruCache( object ):
    """Caches local booru share info and media results, and performs share/file authorisation checks."""
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._lock = threading.Lock()
        
        self._RefreshShares()
        
        self._controller.sub( self, 'RefreshShares', 'refresh_local_booru_shares' )
        self._controller.sub( self, 'RefreshShares', 'restart_client_server_service' )
        
    
    def _CheckDataUsage( self ):
        
        # deny access once the booru service's monthly bandwidth is exhausted
        if not self._local_booru_service.BandwidthOK():
            
            raise HydrusExceptions.InsufficientCredentialsException( 'This booru has used all its monthly data. Please try again next month.' )
            
        
    
    def _CheckFileAuthorised( self, share_key, hash ):
        
        self._CheckShareAuthorised( share_key )
        
        info = self._GetInfo( share_key )
        
        if hash not in info[ 'hashes_set' ]:
            
            raise HydrusExceptions.NotFoundException( 'That file was not found in that share.' )
            
        
    
    def _CheckShareAuthorised( self, share_key ):
        
        self._CheckDataUsage()
        
        info = self._GetInfo( share_key )
        
        timeout = info[ 'timeout' ]
        
        if timeout is not None and HydrusData.TimeHasPassed( timeout ):
            
            raise HydrusExceptions.InsufficientCredentialsException( 'This share has expired.' )
            
        
    
    def _GetInfo( self, share_key ):
        
        # fixed: was a bare 'except:', which reported any unrelated error as 'not found'
        try:
            
            info = self._keys_to_infos[ share_key ]
            
        except KeyError:
            
            raise HydrusExceptions.NotFoundException( 'Did not find that share on this booru.' )
            
        
        if info is None:
            
            # lazily load share info and its media results on first access
            info = self._controller.Read( 'local_booru_share', share_key )
            
            hashes = info[ 'hashes' ]
            
            info[ 'hashes_set' ] = set( hashes )
            
            media_results = self._controller.Read( 'media_results', hashes )
            
            info[ 'media_results' ] = media_results
            
            hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
            
            info[ 'hashes_to_media_results' ] = hashes_to_media_results
            
            self._keys_to_infos[ share_key ] = info
            
        
        return info
        
    
    def _RefreshShares( self ):
        
        self._local_booru_service = self._controller.services_manager.GetService( CC.LOCAL_BOORU_SERVICE_KEY )
        
        # share_key -> info dict; None means 'not yet loaded'
        self._keys_to_infos = {}
        
        share_keys = self._controller.Read( 'local_booru_share_keys' )
        
        for share_key in share_keys:
            
            self._keys_to_infos[ share_key ] = None
            
        
    
    def CheckShareAuthorised( self, share_key ):
        
        with self._lock:
            
            self._CheckShareAuthorised( share_key )
            
        
    
    def CheckFileAuthorised( self, share_key, hash ):
        
        with self._lock:
            
            self._CheckFileAuthorised( share_key, hash )
            
        
    
    def GetGalleryInfo( self, share_key ):
        
        with self._lock:
            
            self._CheckShareAuthorised( share_key )
            
            info = self._GetInfo( share_key )
            
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_results = info[ 'media_results' ]
            
            return ( name, text, timeout, media_results )
            
        
    
    def GetMediaResult( self, share_key, hash ):
        
        with self._lock:
            
            info = self._GetInfo( share_key )
            
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            
            return media_result
            
        
    
    def GetPageInfo( self, share_key, hash ):
        
        with self._lock:
            
            self._CheckFileAuthorised( share_key, hash )
            
            info = self._GetInfo( share_key )
            
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            
            return ( name, text, timeout, media_result )
            
        
    
    def RefreshShares( self, *args, **kwargs ):
        
        with self._lock:
            
            self._RefreshShares()
            
        
    
2018-12-05 22:35:30 +00:00
class MediaResultCache( object ):
    # Weak-value registry of the MediaResult objects currently alive in the client,
    # so db-side content/service updates can be pushed to whatever the gui is showing.
    # WeakValueDictionary means entries vanish automatically once nothing else holds the media result.
    
    def __init__( self ):
        
        self._lock = threading.Lock()
        
        # db hash_id -> MediaResult
        self._hash_ids_to_media_results = weakref.WeakValueDictionary()
        # file hash -> MediaResult
        self._hashes_to_media_results = weakref.WeakValueDictionary()
        
        HG.client_controller.sub( self, 'ProcessContentUpdates', 'content_updates_data' )
        HG.client_controller.sub( self, 'ProcessServiceUpdates', 'service_updates_data' )
        HG.client_controller.sub( self, 'NewForceRefreshTags', 'notify_new_force_refresh_tags_data' )
        HG.client_controller.sub( self, 'NewTagDisplayRules', 'notify_new_tag_display_rules' )
        
    
    def AddMediaResults( self, media_results ):
        
        # register each media result under both its db id and its hash
        with self._lock:
            
            for media_result in media_results:
                
                hash_id = media_result.GetHashId()
                hash = media_result.GetHash()
                
                self._hash_ids_to_media_results[ hash_id ] = media_result
                self._hashes_to_media_results[ hash ] = media_result
                
            
        
    
    def DropMediaResult( self, hash_id, hash ):
        
        with self._lock:
            
            if hash_id in self._hash_ids_to_media_results:
                
                del self._hash_ids_to_media_results[ hash_id ]
                
            
            if hash in self._hashes_to_media_results:
                
                del self._hashes_to_media_results[ hash ]
                
            
        
    
    def GetMediaResultsAndMissing( self, hash_ids ):
        
        # returns ( cached media results, hash_ids the caller must fetch from the db )
        with self._lock:
            
            media_results = []
            missing_hash_ids = []
            
            for hash_id in hash_ids:
                
                if hash_id in self._hash_ids_to_media_results:
                    
                    media_results.append( self._hash_ids_to_media_results[ hash_id ] )
                    
                else:
                    
                    missing_hash_ids.append( hash_id )
                    
                
            
            return ( media_results, missing_hash_ids )
            
        
    
    def NewForceRefreshTags( self ):
        
        # repo sync or tag migration occurred, so we need complete refresh
        
        def do_it( hash_ids ):
            
            # chunked, on a background thread, so a big session does not hold the lock for long
            for group_of_hash_ids in HydrusData.SplitListIntoChunks( hash_ids, 256 ):
                
                if HydrusThreading.IsThreadShuttingDown():
                    
                    return
                    
                
                hash_ids_to_tags_managers = HG.client_controller.Read( 'force_refresh_tags_managers', group_of_hash_ids )
                
                with self._lock:
                    
                    for ( hash_id, tags_manager ) in hash_ids_to_tags_managers.items():
                        
                        # only refresh entries still alive in the weak dict
                        if hash_id in self._hash_ids_to_media_results:
                            
                            self._hash_ids_to_media_results[ hash_id ].SetTagsManager( tags_manager )
                            
                        
                    
                
            
            HG.client_controller.pub( 'refresh_all_tag_presentation_gui' )
            
        
        # snapshot the keys under the lock; do_it re-checks membership as it goes
        with self._lock:
            
            hash_ids = list( self._hash_ids_to_media_results.keys() )
            
        
        HG.client_controller.CallToThread( do_it, hash_ids )
        
    
    def NewTagDisplayRules( self ):
        
        with self._lock:
            
            for media_result in self._hash_ids_to_media_results.values():
                
                media_result.GetTagsManager().NewTagDisplayRules()
                
            
        
        HG.client_controller.pub( 'refresh_all_tag_presentation_gui' )
        
    
    def ProcessContentUpdates( self, service_keys_to_content_updates ):
        
        # forward each content update to any affected live media result
        with self._lock:
            
            for ( service_key, content_updates ) in service_keys_to_content_updates.items():
                
                for content_update in content_updates:
                    
                    hashes = content_update.GetHashes()
                    
                    for hash in hashes:
                        
                        if hash in self._hashes_to_media_results:
                            
                            self._hashes_to_media_results[ hash ].ProcessContentUpdate( service_key, content_update )
                            
                        
                    
                
            
        
    
    def ProcessServiceUpdates( self, service_keys_to_service_updates ):
        
        with self._lock:
            
            for ( service_key, service_updates ) in service_keys_to_service_updates.items():
                
                for service_update in service_updates:
                    
                    ( action, row ) = service_update.ToTuple()
                    
                    # only these actions affect cached media state; both apply to every live media result
                    if action in ( HC.SERVICE_UPDATE_DELETE_PENDING, HC.SERVICE_UPDATE_RESET ):
                        
                        for media_result in self._hash_ids_to_media_results.values():
                            
                            if action == HC.SERVICE_UPDATE_DELETE_PENDING:
                                
                                media_result.DeletePending( service_key )
                                
                            elif action == HC.SERVICE_UPDATE_RESET:
                                
                                media_result.ResetService( service_key )
                                
                            
                        
                    
                
            
        
    
2018-04-25 22:07:52 +00:00
class ParsingCache( object ):
    """Short-lived cache of parsed html soups and json objects, keyed by their source text. Thread-safe."""
    
    def __init__( self ):
        
        self._next_clean_cache_time = HydrusData.GetNow()
        
        self._html_to_soups = {}
        self._json_to_jsons = {}
        
        self._lock = threading.Lock()
        
    
    def _CleanCache( self ):
        
        # drop anything not accessed in the last ten seconds, at most once every five seconds
        if HydrusData.TimeHasPassed( self._next_clean_cache_time ):
            
            for cache in ( self._html_to_soups, self._json_to_jsons ):
                
                dead_datas = { data for ( data, ( last_accessed, parsed_object ) ) in cache.items() if HydrusData.TimeHasPassed( last_accessed + 10 ) }
                
                for dead_data in dead_datas:
                    
                    del cache[ dead_data ]
                    
                
            
            self._next_clean_cache_time = HydrusData.GetNow() + 5
            
        
    
    def CleanCache( self ):
        
        with self._lock:
            
            self._CleanCache()
            
        
    
    def _GetFromCache( self, cache, key, make_object ):
        
        # fetch the parsed object, parsing on first sight and refreshing the last-accessed time
        
        now = HydrusData.GetNow()
        
        if key not in cache:
            
            cache[ key ] = ( now, make_object( key ) )
            
        
        ( last_accessed, parsed_object ) = cache[ key ]
        
        if last_accessed != now:
            
            cache[ key ] = ( now, parsed_object )
            
        
        if len( cache ) > 10:
            
            self._CleanCache()
            
        
        return parsed_object
        
    
    def GetJSON( self, json_text ):
        
        with self._lock:
            
            return self._GetFromCache( self._json_to_jsons, json_text, json.loads )
            
        
    
    def GetSoup( self, html ):
        
        with self._lock:
            
            return self._GetFromCache( self._html_to_soups, html, ClientParsing.GetSoup )
            
        
    
2015-03-18 21:46:29 +00:00
class RenderedImageCache( object ):
    """Caches ImageRenderers for the media viewer, backed by a size- and time-limited DataCache."""
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        cache_size = self._controller.options[ 'fullscreen_cache_size' ]
        cache_timeout = self._controller.new_options.GetInteger( 'image_cache_timeout' )
        
        self._data_cache = DataCache( self._controller, cache_size, timeout = cache_timeout )
        
    
    def Clear( self ):
        
        self._data_cache.Clear()
        
    
    def GetImageRenderer( self, media ):
        
        # the file hash is the cache key
        key = media.GetHash()
        
        image_renderer = self._data_cache.GetIfHasData( key )
        
        if image_renderer is None:
            
            image_renderer = ClientRendering.ImageRenderer( media )
            
            self._data_cache.AddData( key, image_renderer )
            
        
        return image_renderer
        
    
    def HasImageRenderer( self, hash ):
        
        return self._data_cache.HasData( hash )
        
    
2015-03-18 21:46:29 +00:00
2019-03-20 21:22:10 +00:00
class ServicesManager( object ):
    # Thread-safe registry of the client's services. Refreshed via the
    # 'notify_new_services_data' pubsub; public methods take self._lock.
    
    def __init__( self, controller ):
        
        # controller: the client controller, used for db reads and pubsub subscription
        self._controller = controller
        
        self._lock = threading.Lock()
        
        # service_key -> service object; populated by _SetServices
        self._keys_to_services = {}
        # services sorted by name; populated by _SetServices
        self._services_sorted = []
        
        # initial load from the db (RefreshServices is defined further down the class)
        self.RefreshServices()
        
        self._controller.sub( self, 'RefreshServices', 'notify_new_services_data' )
        
    
2019-03-20 21:22:10 +00:00
def _GetService( self, service_key ):
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
try:
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
return self._keys_to_services[ service_key ]
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
except KeyError:
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
raise HydrusExceptions.DataMissing( 'That service was not found!' )
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
def _SetServices( self, services ):
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
self._keys_to_services = { service.GetServiceKey() : service for service in services }
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
self._keys_to_services[ CC.TEST_SERVICE_KEY ] = ClientServices.GenerateService( CC.TEST_SERVICE_KEY, HC.TEST_SERVICE, 'test service' )
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
key = lambda s: s.GetName()
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
self._services_sorted = list( services )
self._services_sorted.sort( key = key )
def Filter( self, service_keys, desired_types ):
with self._lock:
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
def func( service_key ):
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
return self._keys_to_services[ service_key ].GetServiceType() in desired_types
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
filtered_service_keys = list(filter( func, service_keys ))
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
return filtered_service_keys
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
def FilterValidServiceKeys( self, service_keys ):
with self._lock:
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
def func( service_key ):
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
return service_key in self._keys_to_services
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
filtered_service_keys = list(filter( func, service_keys ))
return filtered_service_keys
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
def GetName( self, service_key ):
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
with self._lock:
service = self._GetService( service_key )
return service.GetName()
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
def GetService( self, service_key ):
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
with self._lock:
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
return self._GetService( service_key )
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
def GetServiceType( self, service_key ):
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
with self._lock:
return self._GetService( service_key ).GetServiceType()
2019-02-27 23:03:30 +00:00
2019-03-20 21:22:10 +00:00
def GetServiceKeyFromName( self, allowed_types, service_name ):
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
with self._lock:
2016-06-08 20:27:22 +00:00
2019-03-20 21:22:10 +00:00
for service in self._services_sorted:
if service.GetServiceType() in allowed_types and service.GetName() == service_name:
return service.GetServiceKey()
2016-06-08 20:27:22 +00:00
2019-03-20 21:22:10 +00:00
raise HydrusExceptions.DataMissing()
2016-06-08 20:27:22 +00:00
2019-03-20 21:22:10 +00:00
def GetServiceKeys( self, desired_types = HC.ALL_SERVICES ):
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
with self._lock:
2019-09-05 00:05:32 +00:00
filtered_service_keys = [ service_key for ( service_key, service ) in self._keys_to_services.items() if service.GetServiceType() in desired_types ]
2019-03-20 21:22:10 +00:00
return filtered_service_keys
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
def GetServices( self, desired_types = HC.ALL_SERVICES, randomised = True ):
with self._lock:
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
def func( service ):
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
return service.GetServiceType() in desired_types
2016-06-08 20:27:22 +00:00
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
services = list(filter( func, self._services_sorted ))
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
if randomised:
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
random.shuffle( services )
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
return services
2017-09-13 20:50:41 +00:00
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
def RefreshServices( self ):
with self._lock:
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
services = self._controller.Read( 'services' )
2015-12-23 22:51:04 +00:00
2019-03-20 21:22:10 +00:00
self._SetServices( services )
2016-06-08 20:27:22 +00:00
2019-03-20 21:22:10 +00:00
def ServiceExists( self, service_key ):
with self._lock:
return service_key in self._keys_to_services
class TagParentsManager( object ):
    
    # Caches the child tag -> parent tags mapping per service and expands
    # tags/predicates with their parents. Rebuilds lazily after
    # 'notify_new_parents', with an 8s debounce.
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._dirty = False
        self._refresh_job = None
        
        self._service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
        
        self._RefreshParents()
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'NotifyNewParents', 'notify_new_parents' )
        
    
    def _RefreshParents( self ):
        
        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_parents' )
        
        # step one: collapse siblings on every pair
        
        siblings_manager = self._controller.tag_siblings_manager
        
        collapsed_service_keys_to_statuses_to_pairs = collections.defaultdict( HydrusData.default_dict_set )
        
        for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
            
            # the combined service is regenerated from scratch below
            
            if service_key == CC.COMBINED_TAG_SERVICE_KEY:
                
                continue
                
            
            for ( status, pairs ) in statuses_to_pairs.items():
                
                collapsed_service_keys_to_statuses_to_pairs[ service_key ][ status ] = siblings_manager.CollapsePairs( service_key, pairs )
                
            
        
        # step two: merge current and pending into one flat set per service
        
        service_keys_to_pairs_flat = HydrusData.default_dict_set()
        
        for ( service_key, statuses_to_pairs ) in collapsed_service_keys_to_statuses_to_pairs.items():
            
            service_keys_to_pairs_flat[ service_key ] = statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] )
            
        
        # step three: the combined tag service gets the union of everything
        
        combined_pairs_flat = set()
        
        for pairs_flat in service_keys_to_pairs_flat.values():
            
            combined_pairs_flat.update( pairs_flat )
            
        
        service_keys_to_pairs_flat[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_pairs_flat
        
        #
        
        service_keys_to_simple_children_to_parents = BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat )
        
        self._service_keys_to_children_to_parents = BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents )
        
    
    def ExpandPredicates( self, service_key, predicates, service_strict = False ):
        
        # each tag predicate is followed immediately by its parent predicates
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            expanded_predicates = []
            
            for predicate in predicates:
                
                expanded_predicates.append( predicate )
                
                if predicate.GetType() != HC.PREDICATE_TYPE_TAG:
                    
                    continue
                    
                
                tag = predicate.GetValue()
                
                for parent in self._service_keys_to_children_to_parents[ service_key ][ tag ]:
                    
                    expanded_predicates.append( ClientSearch.Predicate( HC.PREDICATE_TYPE_PARENT, parent ) )
                    
                
            
            return expanded_predicates
            
        
    
    def ExpandTags( self, service_key, tags, service_strict = False ):
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            children_to_parents = self._service_keys_to_children_to_parents[ service_key ]
            
            expanded_tags = set( tags )
            
            for tag in tags:
                
                expanded_tags.update( children_to_parents[ tag ] )
                
            
            return expanded_tags
            
        
    
    def GetParents( self, service_key, tag, service_strict = False ):
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            return self._service_keys_to_children_to_parents[ service_key ][ tag ]
            
        
    
    def NotifyNewParents( self ):
        
        # mark dirty and (re)schedule a debounced refresh
        
        with self._lock:
            
            self._dirty = True
            
            if self._refresh_job is not None:
                
                self._refresh_job.Cancel()
                
            
            self._refresh_job = self._controller.CallLater( 8.0, self.RefreshParentsIfDirty )
            
        
    
    def RefreshParentsIfDirty( self ):
        
        with self._lock:
            
            if not self._dirty:
                
                return
                
            
            self._RefreshParents()
            
            self._dirty = False
            
        
    
class TagSiblingsManager( object ):
    
    # Caches the 'bad' tag -> 'good' tag sibling mapping per service, plus a
    # reverse lookup, and collapses tags/pairs/predicates through it. Rebuilds
    # lazily after 'notify_new_siblings_data', with an 8s debounce.
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._dirty = False
        self._refresh_job = None
        
        self._service_keys_to_siblings = collections.defaultdict( dict )
        self._service_keys_to_reverse_lookup = collections.defaultdict( dict )
        
        self._RefreshSiblings()
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'NotifyNewSiblings', 'notify_new_siblings_data' )
        
    
    def _CollapseTags( self, service_key, tags ):
        
        # callers hold the lock; maps each tag to its 'good' sibling, if any
        
        siblings = self._service_keys_to_siblings[ service_key ]
        
        return { siblings[ tag ] if tag in siblings else tag for tag in tags }
        
    
    def _RefreshSiblings( self ):
        
        self._service_keys_to_siblings = collections.defaultdict( dict )
        self._service_keys_to_reverse_lookup = collections.defaultdict( dict )
        
        local_tags_pairs = set()
        
        tag_repo_pairs = set()
        
        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_siblings' )
        
        for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
            
            all_pairs = statuses_to_pairs[ HC.CONTENT_STATUS_CURRENT ].union( statuses_to_pairs[ HC.CONTENT_STATUS_PENDING ] )
            
            service = self._controller.services_manager.GetService( service_key )
            
            if service.GetServiceType() == HC.LOCAL_TAG:
                
                # was 'local_tags_pairs = set( all_pairs )', which silently kept
                # only the last local tag service's pairs--accumulate instead
                local_tags_pairs.update( all_pairs )
                
            else:
                
                tag_repo_pairs.update( all_pairs )
                
            
            siblings = CollapseTagSiblingPairs( [ all_pairs ] )
            
            self._service_keys_to_siblings[ service_key ] = siblings
            
            reverse_lookup = collections.defaultdict( list )
            
            for ( bad, good ) in siblings.items():
                
                reverse_lookup[ good ].append( bad )
                
            
            self._service_keys_to_reverse_lookup[ service_key ] = reverse_lookup
            
        
        # the combined service collapses across all services
        # NOTE(review): local pairs are passed in the first group here--confirm
        # CollapseTagSiblingPairs treats earlier groups as higher priority
        
        combined_siblings = CollapseTagSiblingPairs( [ local_tags_pairs, tag_repo_pairs ] )
        
        self._service_keys_to_siblings[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_siblings
        
        combined_reverse_lookup = collections.defaultdict( list )
        
        for ( bad, good ) in combined_siblings.items():
            
            combined_reverse_lookup[ good ].append( bad )
            
        
        self._service_keys_to_reverse_lookup[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_reverse_lookup
        
    
    def CollapsePredicates( self, service_key, predicates, service_strict = False ):
        
        # merges tag predicates that collapse to the same 'good' tag, summing
        # their counts; non-tag predicates pass through untouched
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            results = [ predicate for predicate in predicates if predicate.GetType() != HC.PREDICATE_TYPE_TAG ]
            
            # ( removed an unused 'tag_predicates' list that duplicated this dict )
            
            tags_to_predicates = { predicate.GetValue() : predicate for predicate in predicates if predicate.GetType() == HC.PREDICATE_TYPE_TAG }
            
            tags = list( tags_to_predicates.keys() )
            
            tags_to_include_in_results = set()
            
            for tag in tags:
                
                if tag in siblings:
                    
                    old_tag = tag
                    old_predicate = tags_to_predicates[ old_tag ]
                    
                    new_tag = siblings[ old_tag ]
                    
                    if new_tag not in tags_to_predicates:
                        
                        ( old_pred_type, old_value, old_inclusive ) = old_predicate.GetInfo()
                        
                        new_predicate = ClientSearch.Predicate( old_pred_type, new_tag, old_inclusive )
                        
                        tags_to_predicates[ new_tag ] = new_predicate
                        
                        tags_to_include_in_results.add( new_tag )
                        
                    
                    new_predicate = tags_to_predicates[ new_tag ]
                    
                    # fold the old predicate's counts into the collapsed one
                    
                    new_predicate.AddCounts( old_predicate )
                    
                else:
                    
                    tags_to_include_in_results.add( tag )
                    
                
            
            results.extend( [ tags_to_predicates[ tag ] for tag in tags_to_include_in_results ] )
            
            return results
            
        
    
    def CollapsePairs( self, service_key, pairs, service_strict = False ):
        
        # collapses both halves of every pair through the sibling map
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            result = set()
            
            for ( a, b ) in pairs:
                
                if a in siblings:
                    
                    a = siblings[ a ]
                    
                
                if b in siblings:
                    
                    b = siblings[ b ]
                    
                
                result.add( ( a, b ) )
                
            
            return result
            
        
    
    def CollapseStatusesToTags( self, service_key, statuses_to_tags, service_strict = False ):
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            new_statuses_to_tags = HydrusData.default_dict_set()
            
            for ( status, tags ) in statuses_to_tags.items():
                
                new_statuses_to_tags[ status ] = self._CollapseTags( service_key, tags )
                
            
            return new_statuses_to_tags
            
        
    
    def CollapseTag( self, service_key, tag, service_strict = False ):
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            if tag in siblings:
                
                return siblings[ tag ]
                
            else:
                
                return tag
                
            
        
    
    def CollapseTags( self, service_key, tags, service_strict = False ):
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            return self._CollapseTags( service_key, tags )
            
        
    
    def CollapseTagsToCount( self, service_key, tags_to_count, service_strict = False ):
        
        # sums counts of tags that collapse to the same 'good' tag
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            results = collections.Counter()
            
            for ( tag, count ) in tags_to_count.items():
                
                if tag in siblings:
                    
                    tag = siblings[ tag ]
                    
                
                results[ tag ] += count
                
            
            return results
            
        
    
    def GetSibling( self, service_key, tag, service_strict = False ):
        
        # returns the 'good' tag for a 'bad' one, or None if tag is not 'bad'
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            
            if tag in siblings:
                
                return siblings[ tag ]
                
            else:
                
                return None
                
            
        
    
    def GetAllSiblings( self, service_key, tag, service_strict = False ):
        
        # returns the whole sibling group ( all 'bad' tags plus the 'good' one ),
        # or just [ tag ] if it has no siblings
        
        if not service_strict and self._controller.new_options.GetBoolean( 'apply_all_siblings_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
        
        with self._lock:
            
            siblings = self._service_keys_to_siblings[ service_key ]
            reverse_lookup = self._service_keys_to_reverse_lookup[ service_key ]
            
            if tag in siblings:
                
                best_tag = siblings[ tag ]
                
            elif tag in reverse_lookup:
                
                best_tag = tag
                
            else:
                
                return [ tag ]
                
            
            all_siblings = list( reverse_lookup[ best_tag ] )
            
            all_siblings.append( best_tag )
            
            return all_siblings
            
        
    
    def NotifyNewSiblings( self ):
        
        # mark dirty and (re)schedule a debounced refresh
        
        with self._lock:
            
            self._dirty = True
            
            if self._refresh_job is not None:
                
                self._refresh_job.Cancel()
                
            
            self._refresh_job = self._controller.CallLater( 8.0, self.RefreshSiblingsIfDirty )
            
        
    
    def RefreshSiblingsIfDirty( self ):
        
        with self._lock:
            
            if self._dirty:
                
                self._RefreshSiblings()
                
                self._dirty = False
                
                self._controller.pub( 'notify_new_tag_display_rules' )
                
            
        
    
2015-11-11 21:20:41 +00:00
2015-08-05 18:42:35 +00:00
2019-03-20 21:22:10 +00:00
class ThumbnailCache( object ):
2015-08-05 18:42:35 +00:00
2015-11-25 22:00:57 +00:00
def __init__( self, controller ):
    
    self._controller = controller
    
    # cache capacity and item timeout both come from the user's options
    cache_size = self._controller.options[ 'thumbnail_cache_size' ]
    cache_timeout = self._controller.new_options.GetInteger( 'thumbnail_cache_timeout' )
    
    self._data_cache = DataCache( self._controller, cache_size, timeout = cache_timeout )
    
    # mime -> 'ease of regeneration' score, used to order waterfall rendering
    self._magic_mime_thumbnail_ease_score_lookup = {}
    
    self._InitialiseMagicMimeScores()
    
    self._lock = threading.Lock()
    
    # only the first thumbnail error in a session gets the full popup message
    self._thumbnail_error_occurred = False
    
    # the '_quick' sets mirror the ordered lists for fast membership tests
    self._waterfall_queue_quick = set()
    self._waterfall_queue = []
    
    self._delayed_regeneration_queue_quick = set()
    self._delayed_regeneration_queue = []
    
    self._waterfall_event = threading.Event()
    
    # stand-in thumbs; the 'hydrus' entry is the error fallback
    # ( presumably populated by Clear() -- confirm, Clear is not in view here )
    self._special_thumbs = {}
    
    self.Clear()
    
    # background worker that renders queued thumbnails
    self._controller.CallToThreadLongRunning( self.DAEMONWaterfall )
    
    self._controller.sub( self, 'Clear', 'reset_thumbnail_cache' )
    self._controller.sub( self, 'ClearThumbnails', 'clear_thumbnails' )
    
2019-03-27 22:01:02 +00:00
def _GetThumbnailHydrusBitmap( self, display_media ):
    
    # Loads the thumbnail for display_media from disk, repairing/resizing it if
    # it does not match the expected dimensions, and returns a hydrus bitmap.
    # On any unrecoverable error, reports once and returns the 'hydrus' fallback.
    
    bounding_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
    
    hash = display_media.GetHash()
    mime = display_media.GetMime()
    
    locations_manager = display_media.GetLocationsManager()
    
    try:
        
        path = self._controller.client_files_manager.GetThumbnailPath( display_media )
        
    except HydrusExceptions.FileMissingException as e:
        
        # a missing thumb for a local file is a real problem; for a non-local
        # file it is expected, so no report
        
        if locations_manager.IsLocal():
            
            summary = 'Unable to get thumbnail for file {}.'.format( hash.hex() )
            
            self._HandleThumbnailException( e, summary )
            
        
        return self._special_thumbs[ 'hydrus' ]
        
    
    try:
        
        numpy_image = ClientImageHandling.GenerateNumPyImage( path, mime )
        
    except Exception as e:
        
        try:
            
            # file is malformed, let's force a regen
            
            self._controller.files_maintenance_manager.RunJobImmediately( [ display_media ], ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL, pub_job_key = False )
            
        except Exception as e:
            
            summary = 'The thumbnail for file ' + hash.hex() + ' was not loadable. An attempt to regenerate it failed.'
            
            self._HandleThumbnailException( e, summary )
            
            return self._special_thumbs[ 'hydrus' ]
            
        
        try:
            
            numpy_image = ClientImageHandling.GenerateNumPyImage( path, mime )
            
        except Exception as e:
            
            summary = 'The thumbnail for file ' + hash.hex() + ' was not loadable. It was regenerated, but that file would not render either. Your image libraries or hard drive connection are unreliable. Please inform the hydrus developer what has happened.'
            
            self._HandleThumbnailException( e, summary )
            
            return self._special_thumbs[ 'hydrus' ]
            
        
    
    ( current_width, current_height ) = HydrusImageHandling.GetResolutionNumPy( numpy_image )
    
    ( media_width, media_height ) = display_media.GetResolution()
    
    ( expected_width, expected_height ) = HydrusImageHandling.GetThumbnailResolution( ( media_width, media_height ), bounding_dimensions )
    
    exactly_as_expected = current_width == expected_width and current_height == expected_height
    
    # a swapped width/height is tolerated here (rotation)
    rotation_exception = current_width == expected_height and current_height == expected_width
    
    correct_size = exactly_as_expected or rotation_exception
    
    if not correct_size:
        
        it_is_definitely_too_big = current_width >= expected_width and current_height >= expected_height
        
        if it_is_definitely_too_big:
            
            if HG.file_report_mode:
                
                HydrusData.ShowText( 'Thumbnail {} too big.'.format( hash.hex() ) )
                
            
            # the thumb we have is larger than desired. we can use it to generate what we actually want without losing significant data
            
            # this is _resize_, not _thumbnail_, because we already know the dimensions we want
            # and in some edge cases, doing getthumbresolution on existing thumb dimensions results in float/int conversion imprecision and you get 90px/91px regen cycles that never get fixed
            
            numpy_image = HydrusImageHandling.ResizeNumPyImage( numpy_image, ( expected_width, expected_height ) )
            
            if locations_manager.IsLocal():
                
                # we have the master file, so it is safe to save our resized thumb back to disk since we can regen from source if needed
                
                if HG.file_report_mode:
                    
                    HydrusData.ShowText( 'Thumbnail {} too big, saving back to disk.'.format( hash.hex() ) )
                    
                
                try:
                    
                    try:
                        
                        thumbnail_bytes = HydrusImageHandling.GenerateThumbnailBytesNumPy( numpy_image, mime )
                        
                    except HydrusExceptions.CantRenderWithCVException:
                        
                        thumbnail_bytes = HydrusImageHandling.GenerateThumbnailBytesFromStaticImagePath( path, ( expected_width, expected_height ), mime )
                        
                    
                except Exception as e:
                    
                    # FIX: this was a bare 'except:' that then referenced 'e',
                    # which is unbound here in py3 -- it would raise NameError
                    
                    summary = 'The thumbnail for file {} was too large, but an attempt to shrink it failed.'.format( hash.hex() )
                    
                    self._HandleThumbnailException( e, summary )
                    
                    return self._special_thumbs[ 'hydrus' ]
                    
                
                try:
                    
                    self._controller.client_files_manager.AddThumbnailFromBytes( hash, thumbnail_bytes, silent = True )
                    
                    self._controller.files_maintenance_manager.ClearJobs( { hash }, ClientFiles.REGENERATE_FILE_DATA_JOB_REFIT_THUMBNAIL )
                    
                except Exception as e:
                    
                    # FIX: same bare 'except:'/unbound 'e' problem as above
                    
                    summary = 'The thumbnail for file {} was too large, but an attempt to save back the shrunk file failed.'.format( hash.hex() )
                    
                    self._HandleThumbnailException( e, summary )
                    
                    return self._special_thumbs[ 'hydrus' ]
                    
                
            
        else:
            
            # the thumb we have is either too small or completely messed up due to a previous ratio misparse
            
            media_is_same_size_as_current_thumb = current_width == media_width and current_height == media_height
            
            if media_is_same_size_as_current_thumb:
                
                # the thumb is smaller than expected, but this is a 32x32 pixilart image or whatever, so no need to scale
                
                if HG.file_report_mode:
                    
                    HydrusData.ShowText( 'Thumbnail {} too small due to small source file.'.format( hash.hex() ) )
                    
                
                pass
                
            else:
                
                numpy_image = HydrusImageHandling.ResizeNumPyImage( numpy_image, ( expected_width, expected_height ) )
                
                if locations_manager.IsLocal():
                    
                    # we have the master file, so we should regen the thumb from source
                    
                    if HG.file_report_mode:
                        
                        HydrusData.ShowText( 'Thumbnail {} too small, scheduling regeneration from source.'.format( hash.hex() ) )
                        
                    
                    delayed_item = display_media.GetMediaResult()
                    
                    with self._lock:
                        
                        if delayed_item not in self._delayed_regeneration_queue_quick:
                            
                            self._delayed_regeneration_queue_quick.add( delayed_item )
                            
                            self._delayed_regeneration_queue.append( delayed_item )
                            
                        
                    
                else:
                    
                    # we do not have the master file, so we have to scale up from what we have
                    
                    if HG.file_report_mode:
                        
                        HydrusData.ShowText( 'Thumbnail {} was too small, only scaling up due to no local source.'.format( hash.hex() ) )
                        
                    
                
            
        
    
    hydrus_bitmap = ClientRendering.GenerateHydrusBitmapFromNumPyImage( numpy_image )
    
    return hydrus_bitmap
    
def _HandleThumbnailException( self, e, summary ):
    
    # after the first full warning of the session, later errors only go to the log
    
    if self._thumbnail_error_occurred:
        
        HydrusData.Print( summary )
        
        return
        
    
    self._thumbnail_error_occurred = True
    
    divider = os.linesep * 2
    
    message = divider.join( ( 'A thumbnail error has occurred. The problem thumbnail will appear with the default \'hydrus\' symbol. You may need to take hard drive recovery actions, and if the error is not obviously fixable, you can contact hydrus dev for additional help. Specific information for this first error follows. Subsequent thumbnail errors in this session will be silently printed to the log.', str( e ), summary ) )
    
    HydrusData.ShowText( message )
    
2019-04-03 22:45:57 +00:00
def _InitialiseMagicMimeScores( self ):
    """Populate the mime -> thumbnail-regeneration-ease score lookup.
    
    Lower scores are cheaper to render. The waterfall queues sort on this
    score so we rush what we can to screen as fast as possible and leave
    big vids until the end.
    """
    
    scores = self._magic_mime_thumbnail_ease_score_lookup
    
    # start everything at a high default, then override by category
    scores.update( { mime : 5 for mime in HC.ALLOWED_MIMES } )
    
    # default filetype thumbs are easiest
    scores[ None ] = 0
    scores[ HC.APPLICATION_UNKNOWN ] = 0
    
    scores.update( { mime : 0 for mime in HC.APPLICATIONS } )
    scores.update( { mime : 0 for mime in HC.AUDIO } )
    
    # images a little trickier
    scores.update( { mime : 1 for mime in HC.IMAGES } )
    
    # override because these are a bit more work (frame extraction)
    scores[ HC.IMAGE_APNG ] = 2
    scores[ HC.IMAGE_GIF ] = 2
    
    # ffmpeg hellzone
    scores.update( { mime : 3 for mime in HC.VIDEO } )
2019-04-10 22:50:53 +00:00
def _RecalcQueues( self ):
2019-03-20 21:22:10 +00:00
# here we sort by the hash since this is both breddy random and more likely to access faster on a well defragged hard drive!
2019-04-10 22:50:53 +00:00
# and now with the magic mime order
2019-03-20 21:22:10 +00:00
2019-04-10 22:50:53 +00:00
def sort_waterfall( item ):
2015-08-05 18:42:35 +00:00
2019-03-20 21:22:10 +00:00
( page_key, media ) = item
2015-08-05 18:42:35 +00:00
2019-04-03 22:45:57 +00:00
display_media = media.GetDisplayMedia()
magic_score = self._magic_mime_thumbnail_ease_score_lookup[ display_media.GetMime() ]
hash = display_media.GetHash()
return ( magic_score, hash )
2015-08-05 18:42:35 +00:00
2019-04-10 22:50:53 +00:00
self._waterfall_queue = list( self._waterfall_queue_quick )
2019-03-20 21:22:10 +00:00
2019-04-03 22:45:57 +00:00
# we pop off the end, so reverse
2019-04-10 22:50:53 +00:00
self._waterfall_queue.sort( key = sort_waterfall, reverse = True )
def sort_regen( item ):
2019-04-24 22:18:50 +00:00
media_result = item
hash = media_result.GetHash()
mime = media_result.GetMime()
2019-04-10 22:50:53 +00:00
magic_score = self._magic_mime_thumbnail_ease_score_lookup[ mime ]
return ( magic_score, hash )
self._delayed_regeneration_queue = list( self._delayed_regeneration_queue_quick )
# we pop off the end, so reverse
self._delayed_regeneration_queue.sort( key = sort_regen, reverse = True )
2019-03-20 21:22:10 +00:00
2015-08-05 18:42:35 +00:00
2019-03-20 21:22:10 +00:00
def CancelWaterfall( self, page_key, medias ):
    """Remove the given medias (for the given page) from the waterfall queue.
    
    Any of their media results still pending an immediate thumbnail
    regeneration are handed back to the files maintenance manager as a
    normal scheduled job, since we no longer need them urgently.
    """
    
    with self._lock:
        
        cancelled_pairs = [ ( page_key, media ) for media in medias ]
        
        self._waterfall_queue_quick.difference_update( cancelled_pairs )
        
        cancelled_media_results = set()
        
        for media in medias:
            
            cancelled_media_results.add( media.GetDisplayMedia().GetMediaResult() )
            
        
        outstanding_delayed_hashes = { media_result.GetHash() for media_result in cancelled_media_results if media_result in self._delayed_regeneration_queue_quick }
        
        if len( outstanding_delayed_hashes ) > 0:
            
            # we won't do these ourselves any more--let the maintenance daemon get to them in its own time
            self._controller.files_maintenance_manager.ScheduleJob( outstanding_delayed_hashes, ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL )
            
        
        self._delayed_regeneration_queue_quick.difference_update( cancelled_media_results )
        
        self._RecalcQueues()
2019-03-20 21:22:10 +00:00
def Clear( self ):
    """Dump every cached thumbnail and rebuild the special placeholder thumbs.
    
    The named placeholder bitmaps (hydrus, pdf, psd, audio, video, zip) are
    re-rendered from the static png files at the current thumbnail
    dimensions, both pending-work queues are emptied, and listeners are
    told the reset is complete.
    """
    
    with self._lock:
        
        self._data_cache.Clear()
        
        bounding_dimensions = self._controller.options[ 'thumbnail_dimensions' ]
        
        self._special_thumbs = {}
        
        for name in ( 'hydrus', 'pdf', 'psd', 'audio', 'video', 'zip' ):
            
            path = os.path.join( HC.STATIC_DIR, name + '.png' )
            
            numpy_image = ClientImageHandling.GenerateNumPyImage( path, HC.IMAGE_PNG )
            
            numpy_image_resolution = HydrusImageHandling.GetResolutionNumPy( numpy_image )
            
            target_resolution = HydrusImageHandling.GetThumbnailResolution( numpy_image_resolution, bounding_dimensions )
            
            numpy_image = HydrusImageHandling.ResizeNumPyImage( numpy_image, target_resolution )
            
            self._special_thumbs[ name ] = ClientRendering.GenerateHydrusBitmapFromNumPyImage( numpy_image )
            
        
        self._controller.pub( 'notify_complete_thumbnail_reset' )
        
        self._waterfall_queue_quick = set()
        self._delayed_regeneration_queue_quick = set()
        
        self._RecalcQueues()
2015-08-05 18:42:35 +00:00
2019-03-20 21:22:10 +00:00
def ClearThumbnails( self, hashes ):
    """Drop any cached thumbnail bitmaps for the given file hashes."""
    
    with self._lock:
        
        for thumbnail_hash in hashes:
            
            self._data_cache.DeleteData( thumbnail_hash )
2015-11-11 21:20:41 +00:00
2019-03-20 21:22:10 +00:00
def DoingWork( self ):
    """Return True if the waterfall queue still has thumbnails to render."""
    
    with self._lock:
        
        waterfall_is_empty = len( self._waterfall_queue ) == 0
        
        return not waterfall_is_empty
2016-09-14 18:03:59 +00:00
2019-03-20 21:22:10 +00:00
def GetThumbnail( self, media ):
    """Return a hydrus bitmap thumbnail for the given media, loading and caching it as needed.
    
    Falls back to one of the special placeholder thumbs (hydrus, audio,
    video, pdf, psd, zip) for filetypes without real thumbnails, and to the
    'hydrus' placeholder when the media cannot be resolved or its thumbnail
    cannot be generated.
    """
    
    try:
        
        display_media = media.GetDisplayMedia()
        
    except Exception:
        
        # was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt
        # sometimes media can get switched around during a collect event, and if this happens during waterfall, we have a problem here
        # just return for now, we'll see how it goes
        
        return self._special_thumbs[ 'hydrus' ]
        
    
    locations_manager = display_media.GetLocationsManager()
    
    if not locations_manager.ShouldIdeallyHaveThumbnail():
        
        return self._special_thumbs[ 'hydrus' ]
        
    
    mime = display_media.GetMime()
    
    if mime in HC.MIMES_WITH_THUMBNAILS:
        
        hash = display_media.GetHash()
        
        result = self._data_cache.GetIfHasData( hash )
        
        if result is not None:
            
            return result
            
        
        try:
            
            hydrus_bitmap = self._GetThumbnailHydrusBitmap( display_media )
            
        except Exception:
            
            # generation failed--fall back to the placeholder so the grid can still draw
            hydrus_bitmap = self._special_thumbs[ 'hydrus' ]
            
        
        self._data_cache.AddData( hash, hydrus_bitmap )
        
        return hydrus_bitmap
        
    elif mime in HC.AUDIO:
        
        return self._special_thumbs[ 'audio' ]
        
    elif mime in HC.VIDEO:
        
        return self._special_thumbs[ 'video' ]
        
    elif mime == HC.APPLICATION_PDF:
        
        return self._special_thumbs[ 'pdf' ]
        
    elif mime == HC.APPLICATION_PSD:
        
        return self._special_thumbs[ 'psd' ]
        
    elif mime in HC.ARCHIVES:
        
        return self._special_thumbs[ 'zip' ]
        
    else:
        
        return self._special_thumbs[ 'hydrus' ]
2015-08-05 18:42:35 +00:00
2015-10-07 21:56:22 +00:00
2019-03-20 21:22:10 +00:00
def HasThumbnailCached( self, media ):
    """Return True if this media's thumbnail is already in memory (or it needs no real thumbnail at all)."""
    
    display_media = media.GetDisplayMedia()
    
    mime = display_media.GetMime()
    
    if mime not in HC.MIMES_WITH_THUMBNAILS:
        
        # filetypes without thumbnails use an instantly-available placeholder
        return True
        
    
    return self._data_cache.HasData( display_media.GetHash() )
2018-03-28 21:55:58 +00:00
2019-03-20 21:22:10 +00:00
def Waterfall( self, page_key, medias ):
    """Queue the given medias' thumbnails for rendering on the given page and wake the daemon."""
    
    with self._lock:
        
        new_items = [ ( page_key, media ) for media in medias ]
        
        self._waterfall_queue_quick.update( new_items )
        
        self._RecalcQueues()
        
    
    self._waterfall_event.set()
def DAEMONWaterfall( self ):
    """Background worker: renders queued waterfall thumbnails and runs delayed regenerations.
    
    Runs until the thread is told to shut down. Each cycle it renders
    thumbnails for up to ~5ms (about a typical frame), publishes the
    rendered medias per page, then--only if there is no more urgent
    waterfall work--performs one delayed thumbnail regeneration from
    source.
    
    NOTE(review): lock scopes reconstructed from flattened source--the
    event wait and the slow regeneration call are performed outside
    self._lock so other threads can keep queueing work; confirm against
    upstream.
    """
    
    last_paused = HydrusData.GetNowPrecise()
    
    while not HydrusThreading.IsThreadShuttingDown():
        
        time.sleep( 0.00001 )
        
        with self._lock:
            
            do_wait = len( self._waterfall_queue ) == 0 and len( self._delayed_regeneration_queue ) == 0
            
        
        if do_wait:
            
            # nothing to do--doze until someone calls Waterfall() or a second passes
            self._waterfall_event.wait( 1 )
            
            self._waterfall_event.clear()
            
            last_paused = HydrusData.GetNowPrecise()
            
        
        start_time = HydrusData.GetNowPrecise()
        stop_time = start_time + 0.005 # a bit of a typical frame
        
        page_keys_to_rendered_medias = collections.defaultdict( list )
        
        while not HydrusData.TimeHasPassedPrecise( stop_time ):
            
            with self._lock:
                
                if len( self._waterfall_queue ) == 0:
                    
                    break
                    
                
                result = self._waterfall_queue.pop()
                
                self._waterfall_queue_quick.discard( result )
                
            
            ( page_key, media ) = result
            
            # this renders and caches the thumb as a side effect
            self.GetThumbnail( media )
            
            page_keys_to_rendered_medias[ page_key ].append( media )
            
        
        if len( page_keys_to_rendered_medias ) > 0:
            
            for ( page_key, rendered_medias ) in page_keys_to_rendered_medias.items():
                
                self._controller.pub( 'waterfall_thumbnails', page_key, rendered_medias )
                
            
            time.sleep( 0.00001 )
            
        
        # now we will do regen if appropriate
        
        with self._lock:
            
            # got more important work or no work to do
            if len( self._waterfall_queue ) > 0 or len( self._delayed_regeneration_queue ) == 0 or HG.client_controller.CurrentlyPubSubbing():
                
                continue
                
            
            media_result = self._delayed_regeneration_queue.pop()
            
            self._delayed_regeneration_queue_quick.discard( media_result )
            
        
        if HG.file_report_mode:
            
            hash = media_result.GetHash()
            
            HydrusData.ShowText( 'Thumbnail {} now regenerating from source.'.format( hash.hex() ) )
            
        
        try:
            
            self._controller.files_maintenance_manager.RunJobImmediately( [ media_result ], ClientFiles.REGENERATE_FILE_DATA_JOB_FORCE_THUMBNAIL, pub_job_key = False )
            
        except HydrusExceptions.FileMissingException:
            
            pass
            
        except Exception as e:
            
            hash = media_result.GetHash()
            
            summary = 'The thumbnail for file {} was incorrect, but a later attempt to regenerate it or load the new file back failed.'.format( hash.hex() )
            
            self._HandleThumbnailException( e, summary )
2018-03-28 21:55:58 +00:00
2015-11-25 22:00:57 +00:00
class UndoManager( object ):
    """Tracks recent undoable content-update commands and applies undo/redo on request.
    
    Commands and their pre-computed inversions are stored in parallel lists;
    _current_index points just past the last applied command.
    """
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._commands = []
        self._inverted_commands = []
        self._current_index = 0
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'Undo', 'undo' )
        self._controller.sub( self, 'Redo', 'redo' )
        
    
    def _FilterServiceKeysToContentUpdates( self, service_keys_to_content_updates ):
        
        # keep only the content updates we know how to invert later
        
        filtered_service_keys_to_content_updates = {}
        
        for ( service_key, content_updates ) in service_keys_to_content_updates.items():
            
            filtered_content_updates = []
            
            for content_update in content_updates:
                
                ( data_type, action, row ) = content_update.ToTuple()
                
                if data_type == HC.CONTENT_TYPE_FILES:
                    
                    if action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_UNDELETE, HC.CONTENT_UPDATE_RESCIND_PETITION, HC.CONTENT_UPDATE_ADVANCED ):
                        
                        continue
                        
                    
                elif data_type == HC.CONTENT_TYPE_MAPPINGS:
                    
                    if action in ( HC.CONTENT_UPDATE_RESCIND_PETITION, HC.CONTENT_UPDATE_ADVANCED ):
                        
                        continue
                        
                    
                else:
                    
                    # other data types are not undoable at all
                    continue
                    
                
                filtered_content_updates.append( HydrusData.ContentUpdate( data_type, action, row ) )
                
            
            if len( filtered_content_updates ) > 0:
                
                filtered_service_keys_to_content_updates[ service_key ] = filtered_content_updates
                
            
        
        return filtered_service_keys_to_content_updates
        
    
    def _InvertServiceKeysToContentUpdates( self, service_keys_to_content_updates ):
        
        # NOTE(review): assumes the updates were pre-filtered to invertible
        # actions by _FilterServiceKeysToContentUpdates; an unrecognised
        # action would raise NameError here, as in the original
        
        inverted_service_keys_to_content_updates = {}
        
        for ( service_key, content_updates ) in service_keys_to_content_updates.items():
            
            inverted_content_updates = []
            
            for content_update in content_updates:
                
                ( data_type, action, row ) = content_update.ToTuple()
                
                inverted_row = row
                
                if data_type == HC.CONTENT_TYPE_FILES:
                    
                    if action == HC.CONTENT_UPDATE_ARCHIVE: inverted_action = HC.CONTENT_UPDATE_INBOX
                    elif action == HC.CONTENT_UPDATE_INBOX: inverted_action = HC.CONTENT_UPDATE_ARCHIVE
                    elif action == HC.CONTENT_UPDATE_PEND: inverted_action = HC.CONTENT_UPDATE_RESCIND_PEND
                    elif action == HC.CONTENT_UPDATE_RESCIND_PEND: inverted_action = HC.CONTENT_UPDATE_PEND
                    elif action == HC.CONTENT_UPDATE_PETITION: inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION
                    
                elif data_type == HC.CONTENT_TYPE_MAPPINGS:
                    
                    if action == HC.CONTENT_UPDATE_ADD: inverted_action = HC.CONTENT_UPDATE_DELETE
                    elif action == HC.CONTENT_UPDATE_DELETE: inverted_action = HC.CONTENT_UPDATE_ADD
                    elif action == HC.CONTENT_UPDATE_PEND: inverted_action = HC.CONTENT_UPDATE_RESCIND_PEND
                    elif action == HC.CONTENT_UPDATE_RESCIND_PEND: inverted_action = HC.CONTENT_UPDATE_PEND
                    elif action == HC.CONTENT_UPDATE_PETITION: inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION
                    
                
                inverted_content_updates.append( HydrusData.ContentUpdate( data_type, inverted_action, inverted_row ) )
                
            
            inverted_service_keys_to_content_updates[ service_key ] = inverted_content_updates
            
        
        return inverted_service_keys_to_content_updates
        
    
    def AddCommand( self, action, *args, **kwargs ):
        """Record an undoable command. Only 'content_updates' commands are supported."""
        
        with self._lock:
            
            if action != 'content_updates':
                
                return
                
            
            ( service_keys_to_content_updates, ) = args
            
            filtered_updates = self._FilterServiceKeysToContentUpdates( service_keys_to_content_updates )
            
            if len( filtered_updates ) == 0:
                
                return
                
            
            inverted_updates = self._InvertServiceKeysToContentUpdates( filtered_updates )
            
            if len( inverted_updates ) == 0:
                
                return
                
            
            inverted_action = action
            inverted_args = ( inverted_updates, )
            inverted_kwargs = kwargs
            
            # a new command truncates any redo-able future
            self._commands = self._commands[ : self._current_index ]
            self._inverted_commands = self._inverted_commands[ : self._current_index ]
            
            # note we store the original (unfiltered) args, as the original code did
            self._commands.append( ( action, args, kwargs ) )
            self._inverted_commands.append( ( inverted_action, inverted_args, inverted_kwargs ) )
            
            self._current_index += 1
            
            self._controller.pub( 'notify_new_undo' )
            
        
    
    def GetUndoRedoStrings( self ):
        """Return ( undo_string, redo_string ), either of which may be None."""
        
        with self._lock:
            
            undo_string = None
            redo_string = None
            
            if self._current_index > 0:
                
                ( action, args, kwargs ) = self._commands[ self._current_index - 1 ]
                
                if action == 'content_updates':
                    
                    ( service_keys_to_content_updates, ) = args
                    
                    undo_string = 'undo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )
                    
                
            
            if len( self._commands ) > 0 and self._current_index < len( self._commands ):
                
                ( action, args, kwargs ) = self._commands[ self._current_index ]
                
                if action == 'content_updates':
                    
                    ( service_keys_to_content_updates, ) = args
                    
                    redo_string = 'redo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )
                    
                
            
            return ( undo_string, redo_string )
            
        
    
    def Undo( self ):
        
        action = None
        
        with self._lock:
            
            if self._current_index > 0:
                
                self._current_index -= 1
                
                ( action, args, kwargs ) = self._inverted_commands[ self._current_index ]
                
            
        
        # the db write happens outside the lock
        if action is not None:
            
            self._controller.WriteSynchronous( action, *args, **kwargs )
            
            self._controller.pub( 'notify_new_undo' )
            
        
    
    def Redo( self ):
        
        action = None
        
        with self._lock:
            
            if len( self._commands ) > 0 and self._current_index < len( self._commands ):
                
                ( action, args, kwargs ) = self._commands[ self._current_index ]
                
                self._current_index += 1
                
            
        
        # the db write happens outside the lock
        if action is not None:
            
            self._controller.WriteSynchronous( action, *args, **kwargs )
            
            self._controller.pub( 'notify_new_undo' )
            
        
    