# hydrus/include/ClientCaches.py
import ClientDefaults
2015-03-18 21:46:29 +00:00
import ClientFiles
2015-10-21 21:53:10 +00:00
import ClientNetworking
2015-08-05 18:42:35 +00:00
import ClientRendering
2015-03-18 21:46:29 +00:00
import HydrusConstants as HC
import HydrusExceptions
import HydrusFileHandling
import HydrusImageHandling
2015-11-04 22:30:28 +00:00
import HydrusPaths
2015-11-18 22:44:07 +00:00
import HydrusSessions
2015-11-25 22:00:57 +00:00
import itertools
2015-03-18 21:46:29 +00:00
import os
import random
import Queue
2015-12-02 22:32:18 +00:00
import shutil
2015-03-18 21:46:29 +00:00
import threading
import time
2015-10-07 21:56:22 +00:00
import urllib
2015-03-18 21:46:29 +00:00
import wx
2015-03-25 22:04:19 +00:00
import HydrusData
import ClientData
2015-06-03 21:05:13 +00:00
import ClientConstants as CC
2015-03-25 22:04:19 +00:00
import HydrusGlobals
2015-08-05 18:42:35 +00:00
import collections
import HydrusTags
import itertools
import ClientSearch
2015-03-18 21:46:29 +00:00
2015-11-25 22:00:57 +00:00
def BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents ):
    """For each service, expand the one-step child->parents mapping into the
    full transitive child->ancestors mapping.
    
    The expansion is depth-first (and hence recursive) on purpose: each
    ancestor list keeps parent-before-grandparent interleaving.
    """
    
    def _add_ancestry( simple_children_to_parents, children_to_parents, child, parents ):
        
        # immediately chase each newly-added parent's own parents so the
        # interleaved ordering is preserved
        for parent in parents:
            
            if parent in children_to_parents[ child ]:
                
                continue
            
            children_to_parents[ child ].append( parent )
            
            if parent in simple_children_to_parents:
                
                _add_ancestry( simple_children_to_parents, children_to_parents, child, simple_children_to_parents[ parent ] )
            
        
    
    service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
    
    for ( service_key, simple_children_to_parents ) in service_keys_to_simple_children_to_parents.items():
        
        children_to_parents = service_keys_to_children_to_parents[ service_key ]
        
        for ( child, parents ) in simple_children_to_parents.items():
            
            _add_ancestry( simple_children_to_parents, children_to_parents, child, parents )
        
    
    return service_keys_to_children_to_parents
def BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat ):
    """Convert each service's flat set of ( child, parent ) pairs into a
    loop-free one-step child->parents mapping."""
    
    result = collections.defaultdict( HydrusData.default_dict_set )
    
    for ( service_key, pairs ) in service_keys_to_pairs_flat.items():
        
        result[ service_key ] = BuildSimpleChildrenToParents( pairs )
    
    return result
def BuildSimpleChildrenToParents( pairs ):
    """Build a one-step child->parents mapping from ( child, parent ) pairs.
    
    Self-pairs and any pair that would introduce a cycle are skipped.
    """
    
    simple_children_to_parents = HydrusData.default_dict_set()
    
    for ( child, parent ) in pairs:
        
        if child == parent:
            
            continue
        
        if LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ):
            
            continue
        
        simple_children_to_parents[ child ].add( parent )
    
    return simple_children_to_parents
def CollapseTagSiblingChains( processed_siblings ):
    """Collapse sibling chains so every tag maps straight to its endpoint.
    
    A -> B and B -> C becomes A -> C and B -> C. Returns the collapsed
    mapping together with a reverse lookup of endpoint -> [ old tags ].
    """
    
    siblings = {}
    
    for ( old_tag, new_tag ) in processed_siblings.items():
        
        if new_tag in siblings:
            
            # new_tag's endpoint is already known, reuse it
            siblings[ old_tag ] = siblings[ new_tag ]
            
        else:
            
            # walk the chain until we fall off the end
            endpoint = new_tag
            
            while endpoint in processed_siblings:
                
                endpoint = processed_siblings[ endpoint ]
            
            siblings[ old_tag ] = endpoint
        
    
    reverse_lookup = collections.defaultdict( list )
    
    for ( old_tag, new_tag ) in siblings.items():
        
        reverse_lookup[ new_tag ].append( old_tag )
    
    return ( siblings, reverse_lookup )
def CombineTagSiblingPairs( service_keys_to_statuses_to_pairs ):
    """Combine current + pending sibling pairs from every service into one
    old_tag -> new_tag dict.
    
    First mapping for an old_tag wins (later services do not overwrite), and
    a pair is dropped when following new_tag's existing chain would loop back
    to old_tag.
    """
    
    # fix: removed the unused 'current_deleted_pairs' local from the original
    
    processed_siblings = {}
    
    for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
        
        pairs = statuses_to_pairs[ HC.CURRENT ].union( statuses_to_pairs[ HC.PENDING ] )
        
        for ( old, new ) in pairs:
            
            if old == new: continue
            
            if old not in processed_siblings:
                
                # walk new's chain; if it reaches old, adding this pair would
                # create a loop, so skip it
                next_new = new
                
                we_have_a_loop = False
                
                while next_new in processed_siblings:
                    
                    next_new = processed_siblings[ next_new ]
                    
                    if next_new == old:
                        
                        we_have_a_loop = True
                        
                        break
                    
                
                if not we_have_a_loop: processed_siblings[ old ] = new
            
        
    
    return processed_siblings
def LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ):
    """Return True if adding child -> parent would create a cycle.
    
    Walks upwards from parent through the existing mapping; if the walk ever
    reaches child, the new edge would close a loop.
    """
    
    frontier = { parent }
    
    expandable = frontier.intersection( simple_children_to_parents.keys() )
    
    while len( expandable ) > 0:
        
        next_frontier = set()
        
        for node in expandable:
            
            next_frontier.update( simple_children_to_parents[ node ] )
        
        if child in next_frontier:
            
            return True
        
        frontier = next_frontier
        
        expandable = frontier.intersection( simple_children_to_parents.keys() )
    
    return False
class ClientFilesManager( object ):
    # Maps the 256 two-hex-digit file prefixes to the directories that hold
    # them, and moves prefix folders between storage locations to keep the
    # layout matching the user's configured weights.
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._lock = threading.Lock()
        
        self._prefixes_to_locations = {}
        
        self._Reinit()
    
    def _GetLocation( self, hash ):
        
        # files are bucketed by the first two hex characters of their hash
        hash_encoded = hash.encode( 'hex' )
        
        prefix = hash_encoded[:2]
        
        location = self._prefixes_to_locations[ prefix ]
        
        return location
    
    def _GetRecoverTuple( self ):
        
        # find a prefix directory sitting under a location that is not its
        # correct home; returns ( prefix, wrong_path, correct_path ) or None
        
        paths = { path for path in self._prefixes_to_locations.values() }
        
        for path in paths:
            
            for prefix in HydrusData.IterateHexPrefixes():
                
                correct_path = self._prefixes_to_locations[ prefix ]
                
                if path != correct_path and os.path.exists( os.path.join( path, prefix ) ):
                    
                    return ( prefix, path, correct_path )
                
            
        
        return None
    
    def _GetRebalanceTuple( self ):
        
        # returns ( prefix, overweight_path, underweight_path ) for one prefix
        # folder that should move, or None if everything is balanced
        
        paths_to_ideal_weights = self._controller.GetNewOptions().GetClientFilesLocationsToIdealWeights()
        
        total_weight = sum( paths_to_ideal_weights.values() )
        
        # fix: float() guards against py2 integer truncation when the
        # configured weights are ints, which would zero out every ratio
        paths_to_normalised_ideal_weights = { path : weight / float( total_weight ) for ( path, weight ) in paths_to_ideal_weights.items() }
        
        current_paths_to_normalised_weights = collections.defaultdict( lambda: 0 )
        
        # each prefix we hold contributes 1/256 of the total
        for ( prefix, path ) in self._prefixes_to_locations.items():
            
            current_paths_to_normalised_weights[ path ] += 1.0 / 256
        
        # locations we still hold files in but that are no longer configured
        # get an ideal weight of zero so they drain
        for path in current_paths_to_normalised_weights.keys():
            
            if path not in paths_to_normalised_ideal_weights:
                
                paths_to_normalised_ideal_weights[ path ] = 0.0
            
        
        #
        
        overweight_paths = []
        underweight_paths = []
        
        for ( path, ideal_weight ) in paths_to_normalised_ideal_weights.items():
            
            if path in current_paths_to_normalised_weights:
                
                current_weight = current_paths_to_normalised_weights[ path ]
                
                if current_weight < ideal_weight:
                    
                    underweight_paths.append( path )
                    
                elif current_weight >= ideal_weight + 1.0 / 256: # more than one whole prefix over
                    
                    overweight_paths.append( path )
                    
                
            else:
                
                underweight_paths.append( path )
            
        
        #
        
        if len( underweight_paths ) == 0 or len( overweight_paths ) == 0:
            
            return None
            
        else:
            
            overweight_path = overweight_paths.pop( 0 )
            underweight_path = underweight_paths.pop( 0 )
            
            # pick a random prefix from the overweight location to move
            prefixes_and_paths = self._prefixes_to_locations.items()
            
            random.shuffle( prefixes_and_paths )
            
            for ( prefix, path ) in prefixes_and_paths:
                
                if path == overweight_path:
                    
                    return ( prefix, overweight_path, underweight_path )
                
            
        
    
    def _IterateAllFilePaths( self ):
        
        # caller must hold self._lock
        for ( prefix, location ) in self._prefixes_to_locations.items():
            
            dir = os.path.join( location, prefix )
            
            next_filenames = os.listdir( dir )
            
            for filename in next_filenames:
                
                yield os.path.join( dir, filename )
            
        
    
    def _Reinit( self ):
        
        self._prefixes_to_locations = self._controller.Read( 'client_files_locations' )
    
    def GetExpectedFilePath( self, hash, mime ):
        
        with self._lock:
            
            location = self._GetLocation( hash )
            
            return ClientFiles.GetExpectedFilePath( location, hash, mime )
        
    
    def GetFilePath( self, hash, mime = None ):
        
        with self._lock:
            
            location = self._GetLocation( hash )
            
            return ClientFiles.GetFilePath( location, hash, mime )
        
    
    def IterateAllFileHashes( self ):
        
        with self._lock:
            
            for path in self._IterateAllFilePaths():
                
                ( base, filename ) = os.path.split( path )
                
                result = filename.split( '.', 1 )
                
                if len( result ) != 2: continue
                
                ( hash_encoded, ext ) = result
                
                # skip filenames that are not valid hex
                try: hash = hash_encoded.decode( 'hex' )
                except TypeError: continue
                
                yield hash
            
        
    
    def IterateAllFilePaths( self ):
        
        with self._lock:
            
            for path in self._IterateAllFilePaths():
                
                yield path
            
        
    
    def Rebalance( self, partial = True ):
        
        # move prefix folders until the layout matches the ideal weights, then
        # recover any folders found under the wrong location; with
        # partial = True, do at most one step of each
        
        with self._lock:
            
            rebalance_tuple = self._GetRebalanceTuple()
            
            while rebalance_tuple is not None:
                
                ( prefix, overweight_path, underweight_path ) = rebalance_tuple
                
                text = 'Moving \'' + prefix + '\' files from ' + overweight_path + ' to ' + underweight_path
                
                if partial:
                    
                    HydrusData.Print( text )
                    
                else:
                    
                    HydrusData.ShowText( text )
                
                self._controller.Write( 'relocate_client_files', prefix, overweight_path, underweight_path )
                
                self._Reinit()
                
                if partial:
                    
                    break
                
                rebalance_tuple = self._GetRebalanceTuple()
            
            recover_tuple = self._GetRecoverTuple()
            
            while recover_tuple is not None:
                
                ( prefix, incorrect_path, correct_path ) = recover_tuple
                
                text = 'Recovering \'' + prefix + '\' files from ' + incorrect_path + ' to ' + correct_path
                
                if partial:
                    
                    HydrusData.Print( text )
                    
                else:
                    
                    HydrusData.ShowText( text )
                
                full_incorrect_path = os.path.join( incorrect_path, prefix )
                full_correct_path = os.path.join( correct_path, prefix )
                
                HydrusPaths.CopyAndMergeTree( full_incorrect_path, full_correct_path )
                
                # best-effort cleanup; bail out of recovery if we cannot remove
                # the duplicate folder rather than looping on it forever
                try: HydrusPaths.RecyclePath( full_incorrect_path )
                except:
                    
                    HydrusData.ShowText( 'After recovering some files, attempting to remove ' + full_incorrect_path + ' failed.' )
                    
                    return
                
                if partial:
                    
                    break
                
                recover_tuple = self._GetRecoverTuple()
            
            if not partial:
                
                HydrusData.ShowText( 'All folders balanced!' )
            
        
    
2015-03-18 21:46:29 +00:00
class DataCache( object ):
    # Size-bounded cache of data objects that report their own memory cost via
    # GetEstimatedMemoryFootprint(). Eviction is least-recently-used (entries
    # are re-appended to the fifo on access) plus a periodic sweep of entries
    # untouched for twenty minutes.
    
    def __init__( self, controller, cache_size_key ):
        
        self._controller = controller
        
        # key into the client options dict holding this cache's byte budget
        self._cache_size_key = cache_size_key
        
        self._keys_to_data = {}
        # list of ( key, last_access_time ), oldest first
        self._keys_fifo = []
        
        self._total_estimated_memory_footprint = 0
        
        self._lock = threading.Lock()
        
        # schedule the first maintenance sweep for a minute from now
        wx.CallLater( 60 * 1000, self.MaintainCache )
    
    def _DeleteItem( self, index = 0 ):
        
        # caller must hold self._lock; index 0 is the oldest entry
        ( deletee_key, last_access_time ) = self._keys_fifo.pop( index )
        
        deletee_data = self._keys_to_data[ deletee_key ]
        
        self._total_estimated_memory_footprint -= deletee_data.GetEstimatedMemoryFootprint()
        
        del self._keys_to_data[ deletee_key ]
    
    def Clear( self ):
        
        with self._lock:
            
            self._keys_to_data = {}
            self._keys_fifo = []
            
            self._total_estimated_memory_footprint = 0
        
    
    def AddData( self, key, data ):
        
        with self._lock:
            
            if key not in self._keys_to_data:
                
                options = self._controller.GetOptions()
                
                # evict oldest entries until we are under budget
                while self._total_estimated_memory_footprint > options[ self._cache_size_key ]:
                    
                    self._DeleteItem()
                
                self._keys_to_data[ key ] = data
                
                self._keys_fifo.append( ( key, HydrusData.GetNow() ) )
                
                self._total_estimated_memory_footprint += data.GetEstimatedMemoryFootprint()
            
        
    
    def GetData( self, key ):
        
        with self._lock:
            
            if key not in self._keys_to_data: raise Exception( 'Cache error! Looking for ' + HydrusData.ToUnicode( key ) + ', but it was missing.' )
            
            # move the key to the back of the fifo to mark it recently used
            for ( i, ( fifo_key, last_access_time ) ) in enumerate( self._keys_fifo ):
                
                if fifo_key == key:
                    
                    del self._keys_fifo[ i ]
                    
                    break
                
            
            self._keys_fifo.append( ( key, HydrusData.GetNow() ) )
            
            return self._keys_to_data[ key ]
        
    
    def HasData( self, key ):
        
        with self._lock: return key in self._keys_to_data
    
    def MaintainCache( self ):
        
        with self._lock:
            
            while True:
                
                if len( self._keys_fifo ) == 0: break
                else:
                    
                    oldest_index = 0
                    
                    ( key, last_access_time ) = self._keys_fifo[ oldest_index ]
                    
                    # drop anything untouched for twenty minutes (1200s)
                    if HydrusData.TimeHasPassed( last_access_time + 1200 ):
                        
                        self._DeleteItem( oldest_index )
                        
                    else: break
                
            
        
        # reschedule the sweep
        wx.CallLater( 60 * 1000, self.MaintainCache )
    
class LocalBooruCache( object ):
    # Caches share info for the local booru service. Share info is loaded
    # lazily: _RefreshShares records every share key with a None value and
    # _GetInfo fills it in on first access.
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._lock = threading.Lock()
        
        self._RefreshShares()
        
        self._controller.sub( self, 'RefreshShares', 'refresh_local_booru_shares' )
        self._controller.sub( self, 'RefreshShares', 'restart_booru' )
    
    def _CheckDataUsage( self ):
        
        # raise if the booru has exhausted its monthly bandwidth allowance
        info = self._local_booru_service.GetInfo()
        
        max_monthly_data = info[ 'max_monthly_data' ]
        used_monthly_data = info[ 'used_monthly_data' ]
        
        if max_monthly_data is not None and used_monthly_data > max_monthly_data: raise HydrusExceptions.ForbiddenException( 'This booru has used all its monthly data. Please try again next month.' )
    
    def _CheckFileAuthorised( self, share_key, hash ):
        
        self._CheckShareAuthorised( share_key )
        
        info = self._GetInfo( share_key )
        
        if hash not in info[ 'hashes_set' ]: raise HydrusExceptions.NotFoundException( 'That file was not found in that share.' )
    
    def _CheckShareAuthorised( self, share_key ):
        
        self._CheckDataUsage()
        
        info = self._GetInfo( share_key )
        
        timeout = info[ 'timeout' ]
        
        if timeout is not None and HydrusData.TimeHasPassed( timeout ): raise HydrusExceptions.ForbiddenException( 'This share has expired.' )
    
    def _GetInfo( self, share_key ):
        
        try: info = self._keys_to_infos[ share_key ]
        except: raise HydrusExceptions.NotFoundException( 'Did not find that share on this booru.' )
        
        if info is None:
            
            # first access: load the full share info and derived lookups
            info = self._controller.Read( 'local_booru_share', share_key )
            
            hashes = info[ 'hashes' ]
            
            info[ 'hashes_set' ] = set( hashes )
            
            media_results = self._controller.Read( 'media_results', CC.LOCAL_FILE_SERVICE_KEY, hashes )
            
            info[ 'media_results' ] = media_results
            
            hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
            
            info[ 'hashes_to_media_results' ] = hashes_to_media_results
            
            self._keys_to_infos[ share_key ] = info
        
        return info
    
    def _RefreshShares( self ):
        
        self._local_booru_service = self._controller.GetServicesManager().GetService( CC.LOCAL_BOORU_SERVICE_KEY )
        
        self._keys_to_infos = {}
        
        share_keys = self._controller.Read( 'local_booru_share_keys' )
        
        # None marks 'known but not yet loaded'; _GetInfo fills it in lazily
        for share_key in share_keys: self._keys_to_infos[ share_key ] = None
    
    def CheckShareAuthorised( self, share_key ):
        
        with self._lock: self._CheckShareAuthorised( share_key )
    
    def CheckFileAuthorised( self, share_key, hash ):
        
        with self._lock: self._CheckFileAuthorised( share_key, hash )
    
    def GetGalleryInfo( self, share_key ):
        
        with self._lock:
            
            self._CheckShareAuthorised( share_key )
            
            info = self._GetInfo( share_key )
            
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_results = info[ 'media_results' ]
            
            return ( name, text, timeout, media_results )
        
    
    def GetMediaResult( self, share_key, hash ):
        
        with self._lock:
            
            info = self._GetInfo( share_key )
            
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            
            return media_result
        
    
    def GetPageInfo( self, share_key, hash ):
        
        with self._lock:
            
            self._CheckFileAuthorised( share_key, hash )
            
            info = self._GetInfo( share_key )
            
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            
            return ( name, text, timeout, media_result )
        
    
    def RefreshShares( self ):
        
        with self._lock:
            
            self._RefreshShares()
        
    
class HydrusSessionManager( object ):
    # Caches session keys for hydrus services, persisting them through the
    # controller and fetching fresh ones from the service when needed.
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        existing_sessions = self._controller.Read( 'hydrus_sessions' )
        
        # service_key -> ( session_key, expires timestamp )
        self._service_keys_to_sessions = { service_key : ( session_key, expires ) for ( service_key, session_key, expires ) in existing_sessions }
        
        self._lock = threading.Lock()
    
    def DeleteSessionKey( self, service_key ):
        
        with self._lock:
            
            self._controller.Write( 'delete_hydrus_session_key', service_key )
            
            if service_key in self._service_keys_to_sessions: del self._service_keys_to_sessions[ service_key ]
        
    
    def GetSessionKey( self, service_key ):
        
        now = HydrusData.GetNow()
        
        with self._lock:
            
            if service_key in self._service_keys_to_sessions:
                
                ( session_key, expires ) = self._service_keys_to_sessions[ service_key ]
                
                # treat keys expiring within ten minutes as already expired
                if now + 600 > expires: del self._service_keys_to_sessions[ service_key ]
                else: return session_key
            
            # session key expired or not found
            
            service = self._controller.GetServicesManager().GetService( service_key )
            
            ( response_gumpf, cookies ) = service.Request( HC.GET, 'session_key', return_cookies = True )
            
            try: session_key = cookies[ 'session_key' ].decode( 'hex' )
            except: raise Exception( 'Service did not return a session key!' )
            
            expires = now + HydrusSessions.HYDRUS_SESSION_LIFETIME
            
            self._service_keys_to_sessions[ service_key ] = ( session_key, expires )
            
            # persist so the session survives a client restart
            self._controller.Write( 'hydrus_session', service_key, session_key, expires )
            
            return session_key
        
    
2015-03-18 21:46:29 +00:00
class MenuEventIdToActionCache( object ):
    # Maps wx menu event ids <-> ( command, data ) actions so menu events can
    # be routed back to their actions. Ids for short-lived menus are marked
    # temporary and recycled once one of them is consumed.
    
    def __init__( self ):
        
        self._ids_to_actions = {}
        self._actions_to_ids = {}
        
        # all ids ever allocated as temporary, and the subset currently unused
        self._temporary_ids = set()
        self._free_temporary_ids = set()
    
    def _ClearTemporaries( self ):
        
        # drop the actions bound to every in-use temporary id and return all
        # temporary ids to the free pool
        for temporary_id in self._temporary_ids.difference( self._free_temporary_ids ):
            
            temporary_action = self._ids_to_actions[ temporary_id ]
            
            del self._ids_to_actions[ temporary_id ]
            del self._actions_to_ids[ temporary_action ]
        
        self._free_temporary_ids = set( self._temporary_ids )
    
    def _GetNewId( self, temporary ):
        
        if temporary:
            
            # grow the recycled pool if it is exhausted
            if len( self._free_temporary_ids ) == 0:
                
                new_id = wx.NewId()
                
                self._temporary_ids.add( new_id )
                self._free_temporary_ids.add( new_id )
            
            return self._free_temporary_ids.pop()
            
        else:
            
            return wx.NewId()
    
    def GetAction( self, event_id ):
        
        action = None
        
        if event_id in self._ids_to_actions:
            
            action = self._ids_to_actions[ event_id ]
            
            # a temporary id was consumed, so its menu is done; recycle them
            if event_id in self._temporary_ids:
                
                self._ClearTemporaries()
            
        
        return action
    
    def GetId( self, command, data = None, temporary = False ):
        
        action = ( command, data )
        
        if action not in self._actions_to_ids:
            
            event_id = self._GetNewId( temporary )
            
            self._ids_to_actions[ event_id ] = action
            self._actions_to_ids[ action ] = event_id
        
        return self._actions_to_ids[ action ]
    
    def GetPermanentId( self, command, data = None ):
        
        return self.GetId( command, data, False )
    
    def GetTemporaryId( self, command, data = None ):
        
        temporary = True
        
        # data-less commands are stored permanently; they are common and
        # cheap to keep around
        if data is None:
            
            temporary = False
        
        return self.GetId( command, data, temporary )
    
# module-level singleton shared across the gui for menu id <-> action routing
MENU_EVENT_ID_TO_ACTION_CACHE = MenuEventIdToActionCache()
class RenderedImageCache( object ):
    # Caches rendered images keyed by ( hash, target_resolution ), tracking
    # in-flight renders separately and promoting them to the data cache when
    # the 'finished_rendering' event arrives.
    
    def __init__( self, controller, cache_type ):
        
        self._controller = controller
        
        # 'fullscreen' or 'preview'; selects which options key bounds the cache
        self._type = cache_type
        
        if self._type == 'fullscreen': self._data_cache = DataCache( self._controller, 'fullscreen_cache_size' )
        elif self._type == 'preview': self._data_cache = DataCache( self._controller, 'preview_cache_size' )
        
        self._total_estimated_memory_footprint = 0
        
        # key -> image container currently being rendered
        self._keys_being_rendered = {}
        
        self._controller.sub( self, 'FinishedRendering', 'finished_rendering' )
    
    def Clear( self ): self._data_cache.Clear()
    
    def GetImage( self, media, target_resolution = None ):
        
        hash = media.GetHash()
        
        if target_resolution is None: target_resolution = media.GetResolution()
        
        key = ( hash, target_resolution )
        
        # cached, already rendering, or start a new render
        if self._data_cache.HasData( key ): return self._data_cache.GetData( key )
        elif key in self._keys_being_rendered: return self._keys_being_rendered[ key ]
        else:
            
            image_container = ClientRendering.RasterContainerImage( media, target_resolution )
            
            self._keys_being_rendered[ key ] = image_container
            
            return image_container
        
    
    def HasImage( self, hash, target_resolution ):
        
        key = ( hash, target_resolution )
        
        return self._data_cache.HasData( key ) or key in self._keys_being_rendered
    
    def FinishedRendering( self, key ):
        
        # promote a completed render from in-flight tracking to the cache
        if key in self._keys_being_rendered:
            
            image_container = self._keys_being_rendered[ key ]
            
            del self._keys_being_rendered[ key ]
            
            self._data_cache.AddData( key, image_container )
        
    
class ThumbnailCache( object ):
    # Caches thumbnail bitmaps and 'waterfalls' them into thumbnail pages on a
    # background daemon thread, loading in random order and publishing each
    # loaded thumbnail back to the gui.
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._data_cache = DataCache( self._controller, 'thumbnail_cache_size' )
        
        self._lock = threading.Lock()
        
        # the quick set is for O(1) membership/cancel; the random list is the
        # actual work order the daemon pops from
        self._waterfall_queue_quick = set()
        self._waterfall_queue_random = []
        
        self._waterfall_event = threading.Event()
        
        # stock thumbnails for filetypes without their own (pdf, flash, etc.)
        self._special_thumbs = {}
        
        self.Clear()
        
        threading.Thread( target = self.DAEMONWaterfall, name = 'Waterfall Daemon' ).start()
        
        self._controller.sub( self, 'Clear', 'thumbnail_resize' )
    
    def _RecalcWaterfallQueueRandom( self ):
        
        # caller must hold self._lock
        self._waterfall_queue_random = list( self._waterfall_queue_quick )
        
        random.shuffle( self._waterfall_queue_random )
    
    def CancelWaterfall( self, page_key, medias ):
        
        with self._lock:
            
            self._waterfall_queue_quick.difference_update( ( ( page_key, media ) for media in medias ) )
            
            self._RecalcWaterfallQueueRandom()
        
    
    def Clear( self ):
        
        # flush the cache and regenerate the special filetype thumbnails at
        # the current thumbnail dimensions
        
        self._data_cache.Clear()
        
        self._special_thumbs = {}
        
        names = [ 'hydrus', 'flash', 'pdf', 'audio', 'video' ]
        
        ( os_file_handle, temp_path ) = HydrusPaths.GetTempPath()
        
        try:
            
            for name in names:
                
                path = os.path.join( HC.STATIC_DIR, name + '.png' )
                
                options = self._controller.GetOptions()
                
                thumbnail = HydrusFileHandling.GenerateThumbnail( path, options[ 'thumbnail_dimensions' ] )
                
                with open( temp_path, 'wb' ) as f: f.write( thumbnail )
                
                hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( temp_path )
                
                self._special_thumbs[ name ] = hydrus_bitmap
            
        finally:
            
            HydrusPaths.CleanUpTempPath( os_file_handle, temp_path )
        
    
    def GetThumbnail( self, media ):
        
        display_media = media.GetDisplayMedia()
        
        mime = display_media.GetMime()
        
        if mime in HC.MIMES_WITH_THUMBNAILS:
            
            hash = display_media.GetHash()
            
            if not self._data_cache.HasData( hash ):
                
                path = None
                
                try:
                    
                    path = ClientFiles.GetThumbnailPath( hash, False )
                    
                    hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path )
                    
                except HydrusExceptions.NotFoundException:
                    
                    # fall back to the generic hydrus thumb if the file's
                    # thumbnail is missing from disk
                    HydrusData.Print( 'Could not find the thumbnail for ' + hash.encode( 'hex' ) + '!' )
                    
                    return self._special_thumbs[ 'hydrus' ]
                
                self._data_cache.AddData( hash, hydrus_bitmap )
            
            return self._data_cache.GetData( hash )
            
        elif mime in HC.AUDIO: return self._special_thumbs[ 'audio' ]
        elif mime in HC.VIDEO: return self._special_thumbs[ 'video' ]
        elif mime == HC.APPLICATION_FLASH: return self._special_thumbs[ 'flash' ]
        elif mime == HC.APPLICATION_PDF: return self._special_thumbs[ 'pdf' ]
        else: return self._special_thumbs[ 'hydrus' ]
    
    def HasThumbnailCached( self, media ):
        
        display_media = media.GetDisplayMedia()
        
        mime = display_media.GetMime()
        
        if mime in HC.MIMES_WITH_THUMBNAILS:
            
            hash = display_media.GetHash()
            
            return self._data_cache.HasData( hash )
            
        else:
            
            # special thumbs are always available
            return True
        
    
    def Waterfall( self, page_key, medias ):
        
        with self._lock:
            
            self._waterfall_queue_quick.update( ( ( page_key, media ) for media in medias ) )
            
            self._RecalcWaterfallQueueRandom()
            
            # wake the daemon
            self._waterfall_event.set()
        
    
    def DAEMONWaterfall( self ):
        
        last_paused = HydrusData.GetNowPrecise()
        
        while not HydrusGlobals.view_shutdown:
            
            with self._lock: do_wait = len( self._waterfall_queue_random ) == 0
            
            if do_wait:
                
                # sleep until new work arrives (or poll every second so
                # shutdown is noticed)
                self._waterfall_event.wait( 1 )
                
                self._waterfall_event.clear()
                
                last_paused = HydrusData.GetNowPrecise()
            
            with self._lock:
                
                if len( self._waterfall_queue_random ) == 0:
                    
                    continue
                    
                else:
                    
                    result = self._waterfall_queue_random.pop( 0 )
                    
                    self._waterfall_queue_quick.discard( result )
                    
                    ( page_key, media ) = result
                
            
            try:
                
                self.GetThumbnail( media ) # to load it
                
                self._controller.pub( 'waterfall_thumbnail', page_key, media )
                
                # yield the cpu briefly every 5ms of work so the gui stays
                # responsive
                if HydrusData.GetNowPrecise() - last_paused > 0.005:
                    
                    time.sleep( 0.00001 )
                    
                    last_paused = HydrusData.GetNowPrecise()
                
            except Exception as e:
                
                HydrusData.ShowException( e )
            
        
    
2015-11-25 22:00:57 +00:00
class ServicesManager( object ):
    # Thread-safe, in-memory view of the client's services, kept fresh via the
    # 'notify_new_services_data' event.
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._lock = threading.Lock()
        
        self._keys_to_services = {}
        self._services_sorted = []
        
        self.RefreshServices()
        
        self._controller.sub( self, 'RefreshServices', 'notify_new_services_data' )
    
    def GetService( self, service_key ):
        
        with self._lock:
            
            try: return self._keys_to_services[ service_key ]
            except KeyError: raise HydrusExceptions.NotFoundException( 'That service was not found!' )
        
    
    def GetServices( self, types = HC.ALL_SERVICES, randomised = True ):
        
        with self._lock:
            
            services = [ service for service in self._services_sorted if service.GetServiceType() in types ]
            
            if randomised:
                
                random.shuffle( services )
            
            return services
        
    
    def RefreshServices( self ):
        
        with self._lock:
            
            services = self._controller.Read( 'services' )
            
            self._keys_to_services = { service.GetServiceKey() : service for service in services }
            
            # key-based sort gives the same name ordering as the old
            # cmp-based list.sort but avoids the deprecated cmp parameter
            self._services_sorted = sorted( services, key = lambda service: service.GetName() )
        
    
2015-08-05 18:42:35 +00:00
class TagCensorshipManager( object ):
    # Holds per-service tag censorship rules and builds tag predicates that
    # filter tags for display.
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self.RefreshData()
        
        self._controller.sub( self, 'RefreshData', 'notify_new_tag_censorship' )
    
    def GetInfo( self, service_key ):
        
        if service_key in self._service_keys_to_info: return self._service_keys_to_info[ service_key ]
        else: return ( True, set() )
    
    def RefreshData( self ):
        
        info = self._controller.Read( 'tag_censorship' )
        
        self._service_keys_to_info = {}
        self._service_keys_to_predicates = {}
        
        for ( service_key, blacklist, censorships ) in info:
            
            self._service_keys_to_info[ service_key ] = ( blacklist, censorships )
            
            # fix: bind 'censorships' as a default argument. lambdas close over
            # loop variables late, so without this every service's predicate
            # would test against the final iteration's censorship list.
            tag_matches = lambda tag, censorships = censorships: True in ( HydrusTags.CensorshipMatch( tag, censorship ) for censorship in censorships )
            
            if blacklist:
                
                # same late-binding fix for tag_matches itself
                predicate = lambda tag, tag_matches = tag_matches: not tag_matches( tag )
                
            else:
                
                predicate = tag_matches
            
            self._service_keys_to_predicates[ service_key ] = predicate
        
    
    def FilterServiceKeysToStatusesToTags( self, service_keys_to_statuses_to_tags ):
        
        filtered_service_keys_to_statuses_to_tags = collections.defaultdict( HydrusData.default_dict_set )
        
        for ( service_key, statuses_to_tags ) in service_keys_to_statuses_to_tags.items():
            
            # the combined-service filter applies everywhere, then the
            # service-specific one
            for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
                
                if service_key_lookup in self._service_keys_to_predicates:
                    
                    combined_predicate = self._service_keys_to_predicates[ service_key_lookup ]
                    
                    new_statuses_to_tags = HydrusData.default_dict_set()
                    
                    for ( status, tags ) in statuses_to_tags.items():
                        
                        new_statuses_to_tags[ status ] = { tag for tag in tags if combined_predicate( tag ) }
                    
                    statuses_to_tags = new_statuses_to_tags
                
            
            filtered_service_keys_to_statuses_to_tags[ service_key ] = statuses_to_tags
        
        return filtered_service_keys_to_statuses_to_tags
    
    def FilterTags( self, service_key, tags ):
        
        # apply the combined filter first, then the service-specific one
        for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            
            if service_key_lookup in self._service_keys_to_predicates:
                
                predicate = self._service_keys_to_predicates[ service_key_lookup ]
                
                tags = { tag for tag in tags if predicate( tag ) }
            
        
        return tags
    
class TagParentsManager( object ):
    # Maintains the transitive child -> parents lookup per service, built from
    # the raw tag_parents pairs after sibling collapse, and expands tags and
    # search predicates with their parents.
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
        
        self._RefreshParents()
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'RefreshParents', 'notify_new_parents' )
    
    def _RefreshParents( self ):
        
        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_parents' )
        
        # first collapse siblings
        
        sibling_manager = self._controller.GetManager( 'tag_siblings' )
        
        collapsed_service_keys_to_statuses_to_pairs = collections.defaultdict( HydrusData.default_dict_set )
        
        for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
            
            # the combined service is rebuilt from scratch below
            if service_key == CC.COMBINED_TAG_SERVICE_KEY: continue
            
            for ( status, pairs ) in statuses_to_pairs.items():
                
                pairs = sibling_manager.CollapsePairs( pairs )
                
                collapsed_service_keys_to_statuses_to_pairs[ service_key ][ status ] = pairs
            
        
        # now collapse current and pending
        
        service_keys_to_pairs_flat = HydrusData.default_dict_set()
        
        for ( service_key, statuses_to_pairs ) in collapsed_service_keys_to_statuses_to_pairs.items():
            
            pairs_flat = statuses_to_pairs[ HC.CURRENT ].union( statuses_to_pairs[ HC.PENDING ] )
            
            service_keys_to_pairs_flat[ service_key ] = pairs_flat
        
        # now create the combined tag service
        
        combined_pairs_flat = set()
        
        for pairs_flat in service_keys_to_pairs_flat.values():
            
            combined_pairs_flat.update( pairs_flat )
        
        service_keys_to_pairs_flat[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_pairs_flat
        
        #
        
        service_keys_to_simple_children_to_parents = BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat )
        
        self._service_keys_to_children_to_parents = BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents )
    
    def ExpandPredicates( self, service_key, predicates ):
        
        new_options = self._controller.GetNewOptions()
        
        if new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        
        results = []
        
        with self._lock:
            
            # after each tag predicate, append a parent predicate per parent
            for predicate in predicates:
                
                results.append( predicate )
                
                if predicate.GetType() == HC.PREDICATE_TYPE_TAG:
                    
                    tag = predicate.GetValue()
                    
                    parents = self._service_keys_to_children_to_parents[ service_key ][ tag ]
                    
                    for parent in parents:
                        
                        parent_predicate = ClientSearch.Predicate( HC.PREDICATE_TYPE_PARENT, parent )
                        
                        results.append( parent_predicate )
                    
                
            
            return results
        
    
    def ExpandTags( self, service_key, tags ):
        
        new_options = self._controller.GetNewOptions()
        
        if new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        
        with self._lock:
            
            tags_results = set( tags )
            
            for tag in tags:
                
                tags_results.update( self._service_keys_to_children_to_parents[ service_key ][ tag ] )
            
            return tags_results
        
    
    def GetParents( self, service_key, tag ):
        
        new_options = self._controller.GetNewOptions()
        
        if new_options.GetBoolean( 'apply_all_parents_to_all_services' ):
            
            service_key = CC.COMBINED_TAG_SERVICE_KEY
        
        with self._lock:
            
            return self._service_keys_to_children_to_parents[ service_key ][ tag ]
        
    
    def RefreshParents( self ):
        
        with self._lock:
            
            self._RefreshParents()
        
    
class TagSiblingsManager( object ):
    """Caches the collapsed tag sibling network.
    
    self._siblings maps a tag to its preferred sibling; self._reverse_lookup
    maps a preferred sibling back to the tags that collapse into it. Rebuilt
    from the db on 'notify_new_siblings'.
    """
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._RefreshSiblings()
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'RefreshSiblings', 'notify_new_siblings' )
        
    
    def _CollapseTags( self, tags ):
        
        # swap every tag for its preferred sibling, if it has one
        
        return { self._siblings.get( tag, tag ) for tag in tags }
        
    
    def _RefreshSiblings( self ):
        
        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_siblings' )
        
        processed_siblings = CombineTagSiblingPairs( service_keys_to_statuses_to_pairs )
        
        ( self._siblings, self._reverse_lookup ) = CollapseTagSiblingChains( processed_siblings )
        
        self._controller.pub( 'new_siblings_gui' )
        
    
    def GetAutocompleteSiblings( self, half_complete_tag ):
        """Return every tag in any sibling network that the search entry matches."""
        
        with self._lock:
            
            # preferred siblings whose key or value matches the search
            
            matching_values = { value for ( key, value ) in self._siblings.items() if ClientSearch.SearchEntryMatchesTag( half_complete_tag, key, search_siblings = False ) }
            
            matching_values.update( value for value in self._siblings.values() if ClientSearch.SearchEntryMatchesTag( half_complete_tag, value, search_siblings = False ) )
            
            # every matching value has a matching sibling somewhere in its network, so pull in the whole networks
            
            matches = set( matching_values )
            
            for value in matching_values:
                
                matches.update( self._reverse_lookup[ value ] )
                
            
            return matches
            
        
    
    def GetSibling( self, tag ):
        """Return the preferred sibling for the tag, or None if it has none."""
        
        with self._lock:
            
            return self._siblings.get( tag, None )
            
        
    
    def GetAllSiblings( self, tag ):
        """Return the tag's whole sibling network (the tag alone if it has none)."""
        
        with self._lock:
            
            if tag in self._siblings:
                
                collapsed_tag = self._siblings[ tag ]
                
            elif tag in self._reverse_lookup:
                
                collapsed_tag = tag
                
            else:
                
                return [ tag ]
                
            
            all_siblings = list( self._reverse_lookup[ collapsed_tag ] )
            
            all_siblings.append( collapsed_tag )
            
            return all_siblings
            
        
    
    def RefreshSiblings( self ):
        
        with self._lock:
            
            self._RefreshSiblings()
            
        
    
    def CollapseNamespacedTags( self, namespace, tags ):
        """Collapse subtags within the given namespace, returning subtags (namespace stripped)."""
        
        with self._lock:
            
            collapsed_subtags = set()
            
            for tag in tags:
                
                full_tag = namespace + ':' + tag
                
                if full_tag in self._siblings:
                    
                    sibling = self._siblings[ full_tag ]
                    
                    if ':' in sibling:
                        
                        sibling = sibling.split( ':', 1 )[1]
                        
                    
                    collapsed_subtags.add( sibling )
                    
                else:
                    
                    collapsed_subtags.add( tag )
                    
                
            
            return collapsed_subtags
            
        
    
    def CollapsePredicates( self, predicates ):
        """Collapse tag predicates through siblings, merging counts into the preferred tag's predicate."""
        
        with self._lock:
            
            collapsed_predicates = [ predicate for predicate in predicates if predicate.GetType() != HC.PREDICATE_TYPE_TAG ]
            
            tags_to_predicates = { predicate.GetValue() : predicate for predicate in predicates if predicate.GetType() == HC.PREDICATE_TYPE_TAG }
            
            tags_to_include_in_results = set()
            
            for tag in list( tags_to_predicates.keys() ):
                
                if tag in self._siblings:
                    
                    old_predicate = tags_to_predicates[ tag ]
                    
                    new_tag = self._siblings[ tag ]
                    
                    if new_tag not in tags_to_predicates:
                        
                        ( old_pred_type, old_value, old_inclusive ) = old_predicate.GetInfo()
                        
                        tags_to_predicates[ new_tag ] = ClientSearch.Predicate( old_pred_type, new_tag, inclusive = old_inclusive )
                        
                        tags_to_include_in_results.add( new_tag )
                        
                    
                    # fold the old predicate's counts into the preferred tag's predicate
                    
                    new_predicate = tags_to_predicates[ new_tag ]
                    
                    new_predicate.AddToCount( HC.CURRENT, old_predicate.GetCount( HC.CURRENT ) )
                    new_predicate.AddToCount( HC.PENDING, old_predicate.GetCount( HC.PENDING ) )
                    
                else:
                    
                    tags_to_include_in_results.add( tag )
                    
                
            
            collapsed_predicates.extend( tags_to_predicates[ tag ] for tag in tags_to_include_in_results )
            
            return collapsed_predicates
            
        
    
    def CollapsePairs( self, pairs ):
        """Collapse both members of every pair through siblings, returning a set of pairs."""
        
        with self._lock:
            
            return { ( self._siblings.get( a, a ), self._siblings.get( b, b ) ) for ( a, b ) in pairs }
            
        
    
    def CollapseStatusesToTags( self, statuses_to_tags ):
        """Collapse each status's tags through siblings, in place, and return the mapping."""
        
        with self._lock:
            
            for status in list( statuses_to_tags.keys() ):
                
                statuses_to_tags[ status ] = self._CollapseTags( statuses_to_tags[ status ] )
                
            
            return statuses_to_tags
            
        
    
    def CollapseTags( self, tags ):
        
        with self._lock:
            
            return self._CollapseTags( tags )
            
        
    
    def CollapseTagsToCount( self, tags_to_count ):
        """Collapse the keys through siblings, summing counts that land on the same tag."""
        
        with self._lock:
            
            collapsed_counts = collections.Counter()
            
            for ( tag, count ) in tags_to_count.items():
                
                collapsed_counts[ self._siblings.get( tag, tag ) ] += count
                
            
            return collapsed_counts
            
        
    
2015-10-07 21:56:22 +00:00
2015-11-25 22:00:57 +00:00
class UndoManager( object ):
    """Keeps a linear undo/redo history of invertable content update commands."""
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        self._commands = []
        self._inverted_commands = []
        self._current_index = 0
        
        self._lock = threading.Lock()
        
        self._controller.sub( self, 'Undo', 'undo' )
        self._controller.sub( self, 'Redo', 'redo' )
        
    
    def _FilterServiceKeysToContentUpdates( self, service_keys_to_content_updates ):
        
        # keep only the content updates we know how to invert
        
        filtered_service_keys_to_content_updates = {}
        
        for ( service_key, content_updates ) in service_keys_to_content_updates.items():
            
            filtered_content_updates = []
            
            for content_update in content_updates:
                
                ( data_type, action, row ) = content_update.ToTuple()
                
                if data_type == HC.CONTENT_TYPE_FILES:
                    
                    uninvertable_actions = ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_UNDELETE, HC.CONTENT_UPDATE_RESCIND_PETITION )
                    
                elif data_type == HC.CONTENT_TYPE_MAPPINGS:
                    
                    uninvertable_actions = ( HC.CONTENT_UPDATE_RESCIND_PETITION, HC.CONTENT_UPDATE_ADVANCED )
                    
                else:
                    
                    continue # only file and mapping updates are undoable
                    
                
                if action in uninvertable_actions:
                    
                    continue
                    
                
                filtered_content_updates.append( HydrusData.ContentUpdate( data_type, action, row ) )
                
            
            if len( filtered_content_updates ) > 0:
                
                filtered_service_keys_to_content_updates[ service_key ] = filtered_content_updates
                
            
        
        return filtered_service_keys_to_content_updates
        
    
    def _InvertServiceKeysToContentUpdates( self, service_keys_to_content_updates ):
        
        # build the mirror-image updates that will undo the given (already filtered) ones
        
        inverted_service_keys_to_content_updates = {}
        
        for ( service_key, content_updates ) in service_keys_to_content_updates.items():
            
            inverted_content_updates = []
            
            for content_update in content_updates:
                
                ( data_type, action, row ) = content_update.ToTuple()
                
                inverted_row = row
                
                if data_type == HC.CONTENT_TYPE_FILES:
                    
                    simple_inversions = { HC.CONTENT_UPDATE_ARCHIVE : HC.CONTENT_UPDATE_INBOX, HC.CONTENT_UPDATE_INBOX : HC.CONTENT_UPDATE_ARCHIVE, HC.CONTENT_UPDATE_PEND : HC.CONTENT_UPDATE_RESCIND_PEND, HC.CONTENT_UPDATE_RESCIND_PEND : HC.CONTENT_UPDATE_PEND }
                    
                    if action in simple_inversions:
                        
                        inverted_action = simple_inversions[ action ]
                        
                    elif action == HC.CONTENT_UPDATE_PETITION:
                        
                        inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION
                        
                        ( hashes, reason ) = row # rescinding needs no reason
                        
                        inverted_row = hashes
                        
                    
                elif data_type == HC.CONTENT_TYPE_MAPPINGS:
                    
                    simple_inversions = { HC.CONTENT_UPDATE_ADD : HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_DELETE : HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_PEND : HC.CONTENT_UPDATE_RESCIND_PEND, HC.CONTENT_UPDATE_RESCIND_PEND : HC.CONTENT_UPDATE_PEND }
                    
                    if action in simple_inversions:
                        
                        inverted_action = simple_inversions[ action ]
                        
                    elif action == HC.CONTENT_UPDATE_PETITION:
                        
                        inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION
                        
                        ( tag, hashes, reason ) = row # rescinding needs no reason
                        
                        inverted_row = ( tag, hashes )
                        
                    
                
                inverted_content_updates.append( HydrusData.ContentUpdate( data_type, inverted_action, inverted_row ) )
                
            
            inverted_service_keys_to_content_updates[ service_key ] = inverted_content_updates
            
        
        return inverted_service_keys_to_content_updates
        
    
    def AddCommand( self, action, *args, **kwargs ):
        """Record a command and its inverse, discarding any redo-able future."""
        
        with self._lock:
            
            inverted_action = action
            inverted_args = args
            inverted_kwargs = kwargs
            
            if action != 'content_updates':
                
                return # only content updates are undoable at the moment
                
            
            ( service_keys_to_content_updates, ) = args
            
            service_keys_to_content_updates = self._FilterServiceKeysToContentUpdates( service_keys_to_content_updates )
            
            if len( service_keys_to_content_updates ) == 0:
                
                return
                
            
            inverted_service_keys_to_content_updates = self._InvertServiceKeysToContentUpdates( service_keys_to_content_updates )
            
            if len( inverted_service_keys_to_content_updates ) == 0:
                
                return
                
            
            inverted_args = ( inverted_service_keys_to_content_updates, )
            
            # adding a new command truncates anything beyond the current position
            
            self._commands = self._commands[ : self._current_index ]
            self._inverted_commands = self._inverted_commands[ : self._current_index ]
            
            self._commands.append( ( action, args, kwargs ) )
            self._inverted_commands.append( ( inverted_action, inverted_args, inverted_kwargs ) )
            
            self._current_index += 1
            
            self._controller.pub( 'notify_new_undo' )
            
        
    
    def GetUndoRedoStrings( self ):
        """Return ( undo_string, redo_string ) menu labels; either may be None."""
        
        with self._lock:
            
            undo_string = None
            redo_string = None
            
            if self._current_index > 0:
                
                ( action, args, kwargs ) = self._commands[ self._current_index - 1 ]
                
                if action == 'content_updates':
                    
                    ( service_keys_to_content_updates, ) = args
                    
                    undo_string = 'undo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )
                    
                
            
            if len( self._commands ) > 0 and self._current_index < len( self._commands ):
                
                ( action, args, kwargs ) = self._commands[ self._current_index ]
                
                if action == 'content_updates':
                    
                    ( service_keys_to_content_updates, ) = args
                    
                    redo_string = 'redo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )
                    
                
            
            return ( undo_string, redo_string )
            
        
    
    def Undo( self ):
        
        command_to_execute = None
        
        with self._lock:
            
            if self._current_index > 0:
                
                self._current_index -= 1
                
                command_to_execute = self._inverted_commands[ self._current_index ]
                
            
        
        # execute outside the lock
        
        if command_to_execute is not None:
            
            ( action, args, kwargs ) = command_to_execute
            
            self._controller.WriteSynchronous( action, *args, **kwargs )
            
            self._controller.pub( 'notify_new_undo' )
            
        
    
    def Redo( self ):
        
        command_to_execute = None
        
        with self._lock:
            
            if len( self._commands ) > 0 and self._current_index < len( self._commands ):
                
                command_to_execute = self._commands[ self._current_index ]
                
                self._current_index += 1
                
            
        
        # execute outside the lock
        
        if command_to_execute is not None:
            
            ( action, args, kwargs ) = command_to_execute
            
            self._controller.WriteSynchronous( action, *args, **kwargs )
            
            self._controller.pub( 'notify_new_undo' )
            
        
    
2015-10-07 21:56:22 +00:00
class WebSessionManagerClient( object ):
    """Caches login cookies for the websites the client downloads from.
    
    Sessions are read from the db on startup and written back whenever a new
    one is established. A session is refreshed on demand when it is missing or
    within five minutes of expiring.
    """
    
    def __init__( self, controller ):
        
        self._controller = controller
        
        existing_sessions = self._controller.Read( 'web_sessions' )
        
        self._names_to_sessions = { name : ( cookies, expires ) for ( name, cookies, expires ) in existing_sessions }
        
        self._lock = threading.Lock()
        
    
    def GetCookies( self, name ):
        """Return the cookie dict for the named session, logging in afresh if needed.
        
        Raises if the name is not recognised or the login is not accepted.
        """
        
        now = HydrusData.GetNow()
        
        with self._lock:
            
            if name in self._names_to_sessions:
                
                ( cookies, expires ) = self._names_to_sessions[ name ]
                
                if HydrusData.TimeHasPassed( expires - 300 ):
                    
                    # expired or nearly so--drop it and establish a fresh session below
                    
                    del self._names_to_sessions[ name ]
                    
                else:
                    
                    return cookies
                    
                
            
            # name not found, or expired
            
            if name == 'deviant art':
                
                ( response_gumpf, cookies ) = self._controller.DoHTTP( HC.GET, 'http://www.deviantart.com/', return_cookies = True )
                
                expires = now + 30 * 86400
                
            elif name == 'hentai foundry':
                
                ( response_gumpf, cookies ) = self._controller.DoHTTP( HC.GET, 'http://www.hentai-foundry.com/?enterAgree=1', return_cookies = True )
                
                raw_csrf = cookies[ 'YII_CSRF_TOKEN' ] # 19b05b536885ec60b8b37650a32f8deb11c08cd1s%3A40%3A%222917dcfbfbf2eda2c1fbe43f4d4c4ec4b6902b32%22%3B
                
                processed_csrf = urllib.unquote( raw_csrf ) # 19b05b536885ec60b8b37650a32f8deb11c08cd1s:40:"2917dcfbfbf2eda2c1fbe43f4d4c4ec4b6902b32";
                
                csrf_token = processed_csrf.split( '"' )[1] # the 2917... bit
                
                hentai_foundry_form_info = ClientDefaults.GetDefaultHentaiFoundryInfo()
                
                hentai_foundry_form_info[ 'YII_CSRF_TOKEN' ] = csrf_token
                
                body = urllib.urlencode( hentai_foundry_form_info )
                
                request_headers = {}
                
                ClientNetworking.AddCookiesToHeaders( cookies, request_headers )
                
                request_headers[ 'Content-Type' ] = 'application/x-www-form-urlencoded'
                
                self._controller.DoHTTP( HC.POST, 'http://www.hentai-foundry.com/site/filters', request_headers = request_headers, body = body )
                
                expires = now + 60 * 60
                
            elif name == 'pixiv':
                
                ( id, password ) = self._controller.Read( 'pixiv_account' )
                
                if id == '' and password == '':
                    
                    raise Exception( 'You need to set up your pixiv credentials in services->manage pixiv account.' )
                    
                
                form_fields = {}
                
                form_fields[ 'mode' ] = 'login'
                form_fields[ 'pixiv_id' ] = id
                form_fields[ 'pass' ] = password
                form_fields[ 'skip' ] = '1'
                
                body = urllib.urlencode( form_fields )
                
                headers = {}
                
                headers[ 'Content-Type' ] = 'application/x-www-form-urlencoded'
                
                ( response_gumpf, cookies ) = self._controller.DoHTTP( HC.POST, 'http://www.pixiv.net/login.php', request_headers = headers, body = body, return_cookies = True )
                
                # _ only given to logged in php sessions
                if 'PHPSESSID' not in cookies or '_' not in cookies[ 'PHPSESSID' ]:
                    
                    raise Exception( 'Pixiv login credentials not accepted!' )
                    
                
                expires = now + 30 * 86400
                
            else:
                
                # previously an unknown name fell through with cookies/expires unbound,
                # producing a confusing NameError--fail loudly and clearly instead
                
                raise Exception( 'Unknown web session name: ' + name )
                
            
            self._names_to_sessions[ name ] = ( cookies, expires )
            
            self._controller.Write( 'web_session', name, cookies, expires )
            
            return cookies
            
        
    
2015-03-18 21:46:29 +00:00