import ClientConstants as CC
import ClientData
import ClientDefaults
import ClientFiles
import ClientNetworking
import ClientRendering
import ClientSearch
import collections
import HydrusConstants as HC
import HydrusData
import HydrusExceptions
import HydrusFileHandling
import HydrusGlobals
import HydrusImageHandling
import HydrusPaths
import HydrusTags
import itertools
import os
import Queue
import random
import threading
import time
import urllib
import wx
class DataCache( object ):
    
    def __init__( self, cache_size_key ):
        
        self._cache_size_key = cache_size_key
        self._keys_to_data = {}
        self._keys_fifo = []
        self._total_estimated_memory_footprint = 0
        
        self._lock = threading.Lock()
        
        # check for stale entries once a minute
        wx.CallLater( 60 * 1000, self.MaintainCache )
        
    
    def _DeleteItem( self, index = 0 ):
        
        ( deletee_key, last_access_time ) = self._keys_fifo.pop( index )
        
        deletee_data = self._keys_to_data[ deletee_key ]
        
        self._total_estimated_memory_footprint -= deletee_data.GetEstimatedMemoryFootprint()
        
        del self._keys_to_data[ deletee_key ]
        
    
    def Clear( self ):
        
        with self._lock:
            
            self._keys_to_data = {}
            self._keys_fifo = []
            self._total_estimated_memory_footprint = 0
            
        
    
    def AddData( self, key, data ):
        
        with self._lock:
            
            if key not in self._keys_to_data:
                
                options = HydrusGlobals.client_controller.GetOptions()
                
                # evict the oldest entries until we are under the user-set cache limit
                while self._total_estimated_memory_footprint > options[ self._cache_size_key ]:
                    
                    self._DeleteItem()
                    
                
                self._keys_to_data[ key ] = data
                
                self._keys_fifo.append( ( key, HydrusData.GetNow() ) )
                
                self._total_estimated_memory_footprint += data.GetEstimatedMemoryFootprint()
                
            
        
    
    def GetData( self, key ):
        
        with self._lock:
            
            if key not in self._keys_to_data: raise Exception( 'Cache error! Looking for ' + HydrusData.ToUnicode( key ) + ', but it was missing.' )
            
            # bump the key to the back of the fifo with a fresh access time
            for ( i, ( fifo_key, last_access_time ) ) in enumerate( self._keys_fifo ):
                
                if fifo_key == key:
                    
                    del self._keys_fifo[ i ]
                    
                    break
                    
                
            
            self._keys_fifo.append( ( key, HydrusData.GetNow() ) )
            
            return self._keys_to_data[ key ]
            
        
    
    def HasData( self, key ):
        
        with self._lock: return key in self._keys_to_data
        
    
    def MaintainCache( self ):
        
        with self._lock:
            
            while True:
                
                if len( self._keys_fifo ) == 0: break
                else:
                    
                    oldest_index = 0
                    
                    ( key, last_access_time ) = self._keys_fifo[ oldest_index ]
                    
                    # drop anything not accessed in the last twenty minutes
                    if HydrusData.TimeHasPassed( last_access_time + 1200 ):
                        
                        self._DeleteItem( oldest_index )
                        
                    else: break
                    
                
            
        
        wx.CallLater( 60 * 1000, self.MaintainCache )
        
    
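# A minimal usage sketch (hypothetical; the real instantiations live below, e.g.
# in ThumbnailCache, and cached objects must expose GetEstimatedMemoryFootprint()):
#
#   cache = DataCache( 'thumbnail_cache_size' )
#   cache.AddData( key, some_bitmap )
#   if cache.HasData( key ): some_bitmap = cache.GetData( key )
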
class LocalBooruCache( object ):
    
    def __init__( self ):
        
        self._lock = threading.Lock()
        
        self._RefreshShares()
        
        HydrusGlobals.client_controller.sub( self, 'RefreshShares', 'refresh_local_booru_shares' )
        HydrusGlobals.client_controller.sub( self, 'RefreshShares', 'restart_booru' )
        
    
    def _CheckDataUsage( self ):
        
        info = self._local_booru_service.GetInfo()
        
        max_monthly_data = info[ 'max_monthly_data' ]
        used_monthly_data = info[ 'used_monthly_data' ]
        
        if max_monthly_data is not None and used_monthly_data > max_monthly_data: raise HydrusExceptions.ForbiddenException( 'This booru has used all its monthly data. Please try again next month.' )
        
    
    def _CheckFileAuthorised( self, share_key, hash ):
        
        self._CheckShareAuthorised( share_key )
        
        info = self._GetInfo( share_key )
        
        if hash not in info[ 'hashes_set' ]: raise HydrusExceptions.NotFoundException( 'That file was not found in that share.' )
        
    
    def _CheckShareAuthorised( self, share_key ):
        
        self._CheckDataUsage()
        
        info = self._GetInfo( share_key )
        
        timeout = info[ 'timeout' ]
        
        if timeout is not None and HydrusData.TimeHasPassed( timeout ): raise HydrusExceptions.ForbiddenException( 'This share has expired.' )
        
    
    def _GetInfo( self, share_key ):
        
        try: info = self._keys_to_infos[ share_key ]
        except KeyError: raise HydrusExceptions.NotFoundException( 'Did not find that share on this booru.' )
        
        if info is None:
            
            # shares are lazily loaded: _RefreshShares records every share key with a
            # None value, and the full info is only read from the db when first needed
            info = HydrusGlobals.client_controller.Read( 'local_booru_share', share_key )
            
            hashes = info[ 'hashes' ]
            
            info[ 'hashes_set' ] = set( hashes )
            
            media_results = HydrusGlobals.client_controller.Read( 'media_results', CC.LOCAL_FILE_SERVICE_KEY, hashes )
            
            info[ 'media_results' ] = media_results
            
            hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
            
            info[ 'hashes_to_media_results' ] = hashes_to_media_results
            
            self._keys_to_infos[ share_key ] = info
            
        
        return info
        
    
    def _RefreshShares( self ):
        
        self._local_booru_service = HydrusGlobals.client_controller.GetServicesManager().GetService( CC.LOCAL_BOORU_SERVICE_KEY )
        
        self._keys_to_infos = {}
        
        share_keys = HydrusGlobals.client_controller.Read( 'local_booru_share_keys' )
        
        for share_key in share_keys: self._keys_to_infos[ share_key ] = None
        
    
    def CheckShareAuthorised( self, share_key ):
        
        with self._lock: self._CheckShareAuthorised( share_key )
        
    
    def CheckFileAuthorised( self, share_key, hash ):
        
        with self._lock: self._CheckFileAuthorised( share_key, hash )
        
    
    def GetGalleryInfo( self, share_key ):
        
        with self._lock:
            
            self._CheckShareAuthorised( share_key )
            
            info = self._GetInfo( share_key )
            
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_results = info[ 'media_results' ]
            
            return ( name, text, timeout, media_results )
            
        
    
    def GetMediaResult( self, share_key, hash ):
        
        with self._lock:
            
            info = self._GetInfo( share_key )
            
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            
            return media_result
            
        
    
    def GetPageInfo( self, share_key, hash ):
        
        with self._lock:
            
            self._CheckFileAuthorised( share_key, hash )
            
            info = self._GetInfo( share_key )
            
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            
            return ( name, text, timeout, media_result )
            
        
    
    def RefreshShares( self ):
        
        with self._lock: self._RefreshShares()
        
    
class MenuEventIdToActionCache( object ):
    
    def __init__( self ):
        
        self._ids_to_actions = {}
        self._actions_to_ids = {}
        
        self._temporary_ids = set()
        self._free_temporary_ids = set()
        
    
    def _ClearTemporaries( self ):
        
        for temporary_id in self._temporary_ids.difference( self._free_temporary_ids ):
            
            temporary_action = self._ids_to_actions[ temporary_id ]
            
            del self._ids_to_actions[ temporary_id ]
            del self._actions_to_ids[ temporary_action ]
            
        
        self._free_temporary_ids = set( self._temporary_ids )
        
    
    def _GetNewId( self, temporary ):
        
        if temporary:
            
            # reuse a previously allocated temporary id if one is free
            if len( self._free_temporary_ids ) == 0:
                
                new_id = wx.NewId()
                
                self._temporary_ids.add( new_id )
                self._free_temporary_ids.add( new_id )
                
            
            return self._free_temporary_ids.pop()
            
        else:
            
            return wx.NewId()
            
        
    
    def GetAction( self, event_id ):
        
        action = None
        
        if event_id in self._ids_to_actions:
            
            action = self._ids_to_actions[ event_id ]
            
            if event_id in self._temporary_ids:
                
                self._ClearTemporaries()
                
            
        
        return action
        
    
    def GetId( self, command, data = None, temporary = False ):
        
        action = ( command, data )
        
        if action not in self._actions_to_ids:
            
            event_id = self._GetNewId( temporary )
            
            self._ids_to_actions[ event_id ] = action
            self._actions_to_ids[ action ] = event_id
            
        
        return self._actions_to_ids[ action ]
        
    
    def GetPermanentId( self, command, data = None ):
        
        return self.GetId( command, data, False )
        
    
    def GetTemporaryId( self, command, data = None ):
        
        temporary = True
        
        # a data-less action is never treated as temporary
        if data is None:
            
            temporary = False
            
        
        return self.GetId( command, data, temporary )
        
    
MENU_EVENT_ID_TO_ACTION_CACHE = MenuEventIdToActionCache()
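
# A short usage sketch (hypothetical command strings): permanent ids suit fixed
# menu entries, while temporary ids suit menus rebuilt per-click; temporary ids
# are recycled the next time GetAction resolves one of them.
#
#   event_id = MENU_EVENT_ID_TO_ACTION_CACHE.GetPermanentId( 'manage_tags' )
#   temp_id = MENU_EVENT_ID_TO_ACTION_CACHE.GetTemporaryId( 'copy_tag', tag )
#   ( command, data ) = MENU_EVENT_ID_TO_ACTION_CACHE.GetAction( event_id )
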
class RenderedImageCache( object ):
    
    def __init__( self, cache_type ):
        
        self._type = cache_type
        
        if self._type == 'fullscreen': self._data_cache = DataCache( 'fullscreen_cache_size' )
        elif self._type == 'preview': self._data_cache = DataCache( 'preview_cache_size' )
        
        self._total_estimated_memory_footprint = 0
        
        self._keys_being_rendered = {}
        
        HydrusGlobals.client_controller.sub( self, 'FinishedRendering', 'finished_rendering' )
        
    
    def Clear( self ): self._data_cache.Clear()
    
    def GetImage( self, media, target_resolution = None ):
        
        hash = media.GetHash()
        
        if target_resolution is None: target_resolution = media.GetResolution()
        
        key = ( hash, target_resolution )
        
        if self._data_cache.HasData( key ): return self._data_cache.GetData( key )
        elif key in self._keys_being_rendered: return self._keys_being_rendered[ key ]
        else:
            
            image_container = ClientRendering.RasterContainerImage( media, target_resolution )
            
            self._keys_being_rendered[ key ] = image_container
            
            return image_container
            
        
    
    def HasImage( self, hash, target_resolution ):
        
        key = ( hash, target_resolution )
        
        return self._data_cache.HasData( key ) or key in self._keys_being_rendered
        
    
    def FinishedRendering( self, key ):
        
        if key in self._keys_being_rendered:
            
            image_container = self._keys_being_rendered[ key ]
            
            del self._keys_being_rendered[ key ]
            
            self._data_cache.AddData( key, image_container )
            
        
    
class ThumbnailCache( object ):
    
    def __init__( self ):
        
        self._data_cache = DataCache( 'thumbnail_cache_size' )
        
        self._lock = threading.Lock()
        
        self._waterfall_queue_quick = set()
        self._waterfall_queue_random = []
        
        self._waterfall_event = threading.Event()
        
        self._special_thumbs = {}
        
        self.Clear()
        
        threading.Thread( target = self.DAEMONWaterfall, name = 'Waterfall Daemon' ).start()
        
        HydrusGlobals.client_controller.sub( self, 'Clear', 'thumbnail_resize' )
        
    
    def _RecalcWaterfallQueueRandom( self ):
        
        self._waterfall_queue_random = list( self._waterfall_queue_quick )
        
        random.shuffle( self._waterfall_queue_random )
        
    
    def CancelWaterfall( self, page_key, medias ):
        
        with self._lock:
            
            self._waterfall_queue_quick.difference_update( ( ( page_key, media ) for media in medias ) )
            
            self._RecalcWaterfallQueueRandom()
            
        
    
    def Clear( self ):
        
        self._data_cache.Clear()
        
        self._special_thumbs = {}
        
        names = [ 'hydrus', 'flash', 'pdf', 'audio', 'video' ]
        
        ( os_file_handle, temp_path ) = HydrusPaths.GetTempPath()
        
        try:
            
            # regenerate the stand-in thumbnails for filetypes with no thumbnail of their own
            for name in names:
                
                path = os.path.join( HC.STATIC_DIR, name + '.png' )
                
                options = HydrusGlobals.client_controller.GetOptions()
                
                thumbnail = HydrusFileHandling.GenerateThumbnail( path, options[ 'thumbnail_dimensions' ] )
                
                with open( temp_path, 'wb' ) as f: f.write( thumbnail )
                
                hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( temp_path )
                
                self._special_thumbs[ name ] = hydrus_bitmap
                
            
        finally:
            
            HydrusPaths.CleanUpTempPath( os_file_handle, temp_path )
            
        
    
    def GetThumbnail( self, media ):
        
        mime = media.GetDisplayMedia().GetMime()
        
        if mime in HC.MIMES_WITH_THUMBNAILS:
            
            hash = media.GetDisplayMedia().GetHash()
            
            if not self._data_cache.HasData( hash ):
                
                path = None
                
                try:
                    
                    path = ClientFiles.GetThumbnailPath( hash, False )
                    
                    hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path )
                    
                except HydrusExceptions.NotFoundException:
                    
                    print( 'Could not find the thumbnail for ' + hash.encode( 'hex' ) + '!' )
                    
                    return self._special_thumbs[ 'hydrus' ]
                    
                
                self._data_cache.AddData( hash, hydrus_bitmap )
                
            
            return self._data_cache.GetData( hash )
            
        elif mime in HC.AUDIO: return self._special_thumbs[ 'audio' ]
        elif mime in HC.VIDEO: return self._special_thumbs[ 'video' ]
        elif mime == HC.APPLICATION_FLASH: return self._special_thumbs[ 'flash' ]
        elif mime == HC.APPLICATION_PDF: return self._special_thumbs[ 'pdf' ]
        else: return self._special_thumbs[ 'hydrus' ]
        
    
    def Waterfall( self, page_key, medias ):
        
        with self._lock:
            
            self._waterfall_queue_quick.update( ( ( page_key, media ) for media in medias ) )
            
            self._RecalcWaterfallQueueRandom()
            
            self._waterfall_event.set()
            
        
    
    def DAEMONWaterfall( self ):
        
        last_paused = HydrusData.GetNowPrecise()
        
        while not HydrusGlobals.view_shutdown:
            
            with self._lock:
                
                do_wait = len( self._waterfall_queue_random ) == 0
                
            
            if do_wait:
                
                self._waterfall_event.wait( 1 )
                
                self._waterfall_event.clear()
                
                last_paused = HydrusData.GetNowPrecise()
                
            
            with self._lock:
                
                if len( self._waterfall_queue_random ) == 0:
                    
                    continue
                    
                else:
                    
                    result = self._waterfall_queue_random.pop( 0 )
                    
                    self._waterfall_queue_quick.discard( result )
                    
                
            
            ( page_key, media ) = result
            
            try:
                
                thumbnail = self.GetThumbnail( media )
                
                HydrusGlobals.client_controller.pub( 'waterfall_thumbnail', page_key, media, thumbnail )
                
                # yield periodically so the gui thread stays responsive
                if HydrusData.GetNowPrecise() - last_paused > 0.005:
                    
                    time.sleep( 0.00001 )
                    
                    last_paused = HydrusData.GetNowPrecise()
                    
                
            except Exception as e:
                
                HydrusData.ShowException( e )
                
            
        
    
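# A rough usage sketch (hypothetical caller): a thumbnail grid hands its visible
# media to Waterfall and then listens for 'waterfall_thumbnail' publishes, so
# thumbnails trickle in off the gui thread rather than blocking the page load.
#
#   thumbnail_cache.Waterfall( page_key, medias )
#   ...
#   thumbnail_cache.CancelWaterfall( page_key, medias_now_offscreen )
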
def LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ):
    
    potential_loop_paths = { parent }
    
    while len( potential_loop_paths.intersection( simple_children_to_parents.keys() ) ) > 0:
        
        new_potential_loop_paths = set()
        
        for potential_loop_path in potential_loop_paths.intersection( simple_children_to_parents.keys() ):
            
            new_potential_loop_paths.update( simple_children_to_parents[ potential_loop_path ] )
            
        
        potential_loop_paths = new_potential_loop_paths
        
        if child in potential_loop_paths: return True
        
    
    return False
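
# Worked example: with simple_children_to_parents = { 'a' : { 'b' }, 'b' : { 'c' } },
# proposing the new pair ( child = 'c', parent = 'a' ) walks 'a' up to 'b' and then
# to 'c' itself, so this returns True and the caller (BuildSimpleChildrenToParents,
# below) skips the pair rather than create an a->b->c->a cycle.
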
def BuildSimpleChildrenToParents( pairs ):
    
    simple_children_to_parents = HydrusData.default_dict_set()
    
    for ( child, parent ) in pairs:
        
        if child == parent: continue
        
        if LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ): continue
        
        simple_children_to_parents[ child ].add( parent )
        
    
    return simple_children_to_parents
def CollapseTagSiblingChains( processed_siblings ):
    
    # now to collapse chains
    # A -> B and B -> C goes to A -> C and B -> C
    
    siblings = {}
    
    for ( old_tag, new_tag ) in processed_siblings.items():
        
        # adding A -> B
        
        if new_tag in siblings:
            
            # B -> F already calculated and added, so add A -> F
            siblings[ old_tag ] = siblings[ new_tag ]
            
        else:
            
            while new_tag in processed_siblings: new_tag = processed_siblings[ new_tag ] # pursue endpoint F
            
            siblings[ old_tag ] = new_tag
            
        
    
    reverse_lookup = collections.defaultdict( list )
    
    for ( old_tag, new_tag ) in siblings.items(): reverse_lookup[ new_tag ].append( old_tag )
    
    return ( siblings, reverse_lookup )
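
# Worked example: processed_siblings = { 'A' : 'B', 'B' : 'C' } collapses to
# siblings = { 'A' : 'C', 'B' : 'C' }, with reverse_lookup = { 'C' : [ 'A', 'B' ] },
# so every tag in a chain maps straight to its final endpoint.
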
def CombineTagSiblingPairs( service_keys_to_statuses_to_pairs ):
    
    # first combine the services
    # if an A -> B mapping already exists, don't overwrite it
    # if A -> B would form a loop, don't write it
    
    processed_siblings = {}
    
    for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
        
        pairs = statuses_to_pairs[ HC.CURRENT ].union( statuses_to_pairs[ HC.PENDING ] )
        
        for ( old, new ) in pairs:
            
            if old == new: continue
            
            if old not in processed_siblings:
                
                # follow the chain from new; if it ever arrives back at old,
                # adding this pair would close a loop, so skip it
                next_new = new
                
                we_have_a_loop = False
                
                while next_new in processed_siblings:
                    
                    next_new = processed_siblings[ next_new ]
                    
                    if next_new == old:
                        
                        we_have_a_loop = True
                        
                        break
                        
                    
                
                if not we_have_a_loop: processed_siblings[ old ] = new
                
            
        
    
    return processed_siblings
# the important thing here, and the reason why it is recursive, is that we want
# to preserve the parent-grandparent interleaving
def BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents ):
    
    def AddParents( simple_children_to_parents, children_to_parents, child, parents ):
        
        for parent in parents:
            
            children_to_parents[ child ].append( parent )
            
            if parent in simple_children_to_parents:
                
                grandparents = simple_children_to_parents[ parent ]
                
                AddParents( simple_children_to_parents, children_to_parents, child, grandparents )
                
            
        
    
    service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
    
    for ( service_key, simple_children_to_parents ) in service_keys_to_simple_children_to_parents.items():
        
        children_to_parents = service_keys_to_children_to_parents[ service_key ]
        
        for ( child, parents ) in simple_children_to_parents.items(): AddParents( simple_children_to_parents, children_to_parents, child, parents )
        
    
    return service_keys_to_children_to_parents
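
# Worked example of the interleaving (up to set iteration order): with
# { 'child' : { 'parent_a', 'parent_b' }, 'parent_a' : { 'grandparent_a' } },
# the child's expanded list comes out as [ parent_a, grandparent_a, parent_b ],
# each parent immediately followed by its own ancestors, rather than a flat
# breadth-first [ parent_a, parent_b, grandparent_a ].
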
def BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat ):
    
    service_keys_to_simple_children_to_parents = collections.defaultdict( HydrusData.default_dict_set )
    
    for ( service_key, pairs ) in service_keys_to_pairs_flat.items():
        
        service_keys_to_simple_children_to_parents[ service_key ] = BuildSimpleChildrenToParents( pairs )
        
    
    return service_keys_to_simple_children_to_parents
class TagCensorshipManager( object ):
    
    def __init__( self ):
        
        self.RefreshData()
        
        HydrusGlobals.client_controller.sub( self, 'RefreshData', 'notify_new_tag_censorship' )
        
    
    def GetInfo( self, service_key ):
        
        if service_key in self._service_keys_to_info: return self._service_keys_to_info[ service_key ]
        else: return ( True, set() )
        
    
    def RefreshData( self ):
        
        info = HydrusGlobals.client_controller.Read( 'tag_censorship' )
        
        self._service_keys_to_info = {}
        self._service_keys_to_predicates = {}
        
        for ( service_key, blacklist, censorships ) in info:
            
            self._service_keys_to_info[ service_key ] = ( blacklist, censorships )
            
            # bind censorships and tag_matches as default arguments so each service's
            # predicate keeps its own rules; a plain closure here would late-bind and
            # leave every service using the last row's censorships
            tag_matches = lambda tag, censorships = censorships: True in ( HydrusTags.CensorshipMatch( tag, censorship ) for censorship in censorships )
            
            if blacklist: predicate = lambda tag, tag_matches = tag_matches: not tag_matches( tag )
            else: predicate = tag_matches
            
            self._service_keys_to_predicates[ service_key ] = predicate
            
        
    
    def FilterServiceKeysToStatusesToTags( self, service_keys_to_statuses_to_tags ):
        
        filtered_service_keys_to_statuses_to_tags = collections.defaultdict( HydrusData.default_dict_set )
        
        for ( service_key, statuses_to_tags ) in service_keys_to_statuses_to_tags.items():
            
            # apply the combined service's rules first, then the specific service's
            for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
                
                if service_key_lookup in self._service_keys_to_predicates:
                    
                    combined_predicate = self._service_keys_to_predicates[ service_key_lookup ]
                    
                    new_statuses_to_tags = HydrusData.default_dict_set()
                    
                    for ( status, tags ) in statuses_to_tags.items():
                        
                        new_statuses_to_tags[ status ] = { tag for tag in tags if combined_predicate( tag ) }
                        
                    
                    statuses_to_tags = new_statuses_to_tags
                    
                
            
            filtered_service_keys_to_statuses_to_tags[ service_key ] = statuses_to_tags
            
        
        return filtered_service_keys_to_statuses_to_tags
        
    
    def FilterTags( self, service_key, tags ):
        
        for service_key in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            
            if service_key in self._service_keys_to_predicates:
                
                predicate = self._service_keys_to_predicates[ service_key ]
                
                tags = { tag for tag in tags if predicate( tag ) }
                
            
        
        return tags
        
    
class TagParentsManager( object ):
    
    def __init__( self ):
        
        self._RefreshParents()
        
        self._lock = threading.Lock()
        
        HydrusGlobals.client_controller.sub( self, 'RefreshParents', 'notify_new_parents' )
        
    
    def _RefreshParents( self ):
        
        service_keys_to_statuses_to_pairs = HydrusGlobals.client_controller.Read( 'tag_parents' )
        
        # first collapse siblings
        
        sibling_manager = HydrusGlobals.client_controller.GetManager( 'tag_siblings' )
        
        collapsed_service_keys_to_statuses_to_pairs = collections.defaultdict( HydrusData.default_dict_set )
        
        for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
            
            if service_key == CC.COMBINED_TAG_SERVICE_KEY: continue
            
            for ( status, pairs ) in statuses_to_pairs.items():
                
                pairs = sibling_manager.CollapsePairs( pairs )
                
                collapsed_service_keys_to_statuses_to_pairs[ service_key ][ status ] = pairs
                
            
        
        # now collapse current and pending
        
        service_keys_to_pairs_flat = HydrusData.default_dict_set()
        
        for ( service_key, statuses_to_pairs ) in collapsed_service_keys_to_statuses_to_pairs.items():
            
            pairs_flat = statuses_to_pairs[ HC.CURRENT ].union( statuses_to_pairs[ HC.PENDING ] )
            
            service_keys_to_pairs_flat[ service_key ] = pairs_flat
            
        
        # now create the combined tag service
        
        combined_pairs_flat = set()
        
        for pairs_flat in service_keys_to_pairs_flat.values():
            
            combined_pairs_flat.update( pairs_flat )
            
        
        service_keys_to_pairs_flat[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_pairs_flat
        
        #
        
        service_keys_to_simple_children_to_parents = BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat )
        
        self._service_keys_to_children_to_parents = BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents )
        
    
    def ExpandPredicates( self, service_key, predicates ):
        
        # for now -- we will make an option, later
        service_key = CC.COMBINED_TAG_SERVICE_KEY
        
        results = []
        
        with self._lock:
            
            for predicate in predicates:
                
                results.append( predicate )
                
                if predicate.GetType() == HC.PREDICATE_TYPE_TAG:
                    
                    tag = predicate.GetValue()
                    
                    parents = self._service_keys_to_children_to_parents[ service_key ][ tag ]
                    
                    for parent in parents:
                        
                        parent_predicate = ClientData.Predicate( HC.PREDICATE_TYPE_PARENT, parent )
                        
                        results.append( parent_predicate )
                        
                    
                
            
            return results
            
        
    
    def ExpandTags( self, service_key, tags ):
        
        with self._lock:
            
            # for now -- we will make an option, later
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
            tags_results = set( tags )
            
            for tag in tags: tags_results.update( self._service_keys_to_children_to_parents[ service_key ][ tag ] )
            
            return tags_results
            
        
    
    def GetParents( self, service_key, tag ):
        
        with self._lock:
            
            # for now -- we will make an option, later
            service_key = CC.COMBINED_TAG_SERVICE_KEY
            
            return self._service_keys_to_children_to_parents[ service_key ][ tag ]
            
        
    
    def RefreshParents( self ):
        
        with self._lock: self._RefreshParents()
        
    
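# A small sketch of the expansion behaviour (hypothetical tags): if 'ship' has
# parent 'vehicle', ExpandTags( service_key, { 'ship' } ) returns
# { 'ship', 'vehicle' }, and ExpandPredicates appends a parent predicate after
# each matching tag predicate in the list it returns.
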
class TagSiblingsManager( object ):
    
    def __init__( self ):
        
        self._RefreshSiblings()
        
        self._lock = threading.Lock()
        
        HydrusGlobals.client_controller.sub( self, 'RefreshSiblings', 'notify_new_siblings' )
        
    
    def _RefreshSiblings( self ):
        
        service_keys_to_statuses_to_pairs = HydrusGlobals.client_controller.Read( 'tag_siblings' )
        
        processed_siblings = CombineTagSiblingPairs( service_keys_to_statuses_to_pairs )
        
        ( self._siblings, self._reverse_lookup ) = CollapseTagSiblingChains( processed_siblings )
        
        HydrusGlobals.client_controller.pub( 'new_siblings_gui' )
        
    
    def GetAutocompleteSiblings( self, half_complete_tag ):
        
        with self._lock:
            
            key_based_matching_values = { self._siblings[ key ] for key in self._siblings.keys() if ClientSearch.SearchEntryMatchesTag( half_complete_tag, key, search_siblings = False ) }
            
            value_based_matching_values = { value for value in self._siblings.values() if ClientSearch.SearchEntryMatchesTag( half_complete_tag, value, search_siblings = False ) }
            
            matching_values = key_based_matching_values.union( value_based_matching_values )
            
            # all the matching values have a matching sibling somewhere in their network
            # so now fetch the networks
            
            lists_of_matching_keys = [ self._reverse_lookup[ value ] for value in matching_values ]
            
            matching_keys = itertools.chain.from_iterable( lists_of_matching_keys )
            
            matches = matching_values.union( matching_keys )
            
            return matches
            
        
    
    def GetSibling( self, tag ):
        
        with self._lock:
            
            if tag in self._siblings: return self._siblings[ tag ]
            else: return None
            
        
    
    def GetAllSiblings( self, tag ):
        
        with self._lock:
            
            if tag in self._siblings: new_tag = self._siblings[ tag ]
            elif tag in self._reverse_lookup: new_tag = tag
            else: return [ tag ]
            
            all_siblings = list( self._reverse_lookup[ new_tag ] )
            
            all_siblings.append( new_tag )
            
            return all_siblings
            
        
    
    def RefreshSiblings( self ):
        
        with self._lock: self._RefreshSiblings()
        
    
    def CollapseNamespacedTags( self, namespace, tags ):
        
        with self._lock:
            
            results = set()
            
            for tag in tags:
                
                full_tag = namespace + ':' + tag
                
                if full_tag in self._siblings:
                    
                    sibling = self._siblings[ full_tag ]
                    
                    # return just the subtag, to match the unnamespaced input
                    if ':' in sibling: sibling = sibling.split( ':', 1 )[1]
                    
                    results.add( sibling )
                    
                else: results.add( tag )
                
            
            return results
            
        
    
    def CollapsePredicates( self, predicates ):
        
        with self._lock:
            
            results = [ predicate for predicate in predicates if predicate.GetType() != HC.PREDICATE_TYPE_TAG ]
            
            tag_predicates = [ predicate for predicate in predicates if predicate.GetType() == HC.PREDICATE_TYPE_TAG ]
            
            tags_to_predicates = { predicate.GetValue() : predicate for predicate in tag_predicates }
            
            tags = tags_to_predicates.keys()
            
            tags_to_include_in_results = set()
            
            for tag in tags:
                
                if tag in self._siblings:
                    
                    old_tag = tag
                    old_predicate = tags_to_predicates[ old_tag ]
                    
                    new_tag = self._siblings[ old_tag ]
                    
                    if new_tag not in tags_to_predicates:
                        
                        ( old_pred_type, old_value, old_inclusive ) = old_predicate.GetInfo()
                        
                        new_predicate = ClientData.Predicate( old_pred_type, new_tag, inclusive = old_inclusive )
                        
                        tags_to_predicates[ new_tag ] = new_predicate
                        
                        tags_to_include_in_results.add( new_tag )
                        
                    
                    # merge the old predicate's counts into the collapsed one
                    new_predicate = tags_to_predicates[ new_tag ]
                    
                    current_count = old_predicate.GetCount( HC.CURRENT )
                    pending_count = old_predicate.GetCount( HC.PENDING )
                    
                    new_predicate.AddToCount( HC.CURRENT, current_count )
                    new_predicate.AddToCount( HC.PENDING, pending_count )
                    
                else: tags_to_include_in_results.add( tag )
                
            
            results.extend( [ tags_to_predicates[ tag ] for tag in tags_to_include_in_results ] )
            
            return results
            
        
    
    def CollapsePairs( self, pairs ):
        
        with self._lock:
            
            result = set()
            
            for ( a, b ) in pairs:
                
                if a in self._siblings: a = self._siblings[ a ]
                if b in self._siblings: b = self._siblings[ b ]
                
                result.add( ( a, b ) )
                
            
            return result
            
        
    
    def CollapseTags( self, tags ):
        
        with self._lock: return { self._siblings[ tag ] if tag in self._siblings else tag for tag in tags }
        
    
    def CollapseTagsToCount( self, tags_to_count ):
        
        with self._lock:
            
            results = collections.Counter()
            
            for ( tag, count ) in tags_to_count.items():
                
                if tag in self._siblings: tag = self._siblings[ tag ]
                
                results[ tag ] += count
                
            
            return results
            
        
    
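# Worked example (hypothetical siblings): with self._siblings = { 'big' : 'large' },
# CollapseTagsToCount( { 'big' : 3, 'large' : 2, 'small' : 1 } ) returns
# Counter( { 'large' : 5, 'small' : 1 } ) -- counts for collapsed tags are merged.
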
class WebSessionManagerClient( object ):
    
    def __init__( self ):
        
        existing_sessions = HydrusGlobals.client_controller.Read( 'web_sessions' )
        
        self._names_to_sessions = { name : ( cookies, expires ) for ( name, cookies, expires ) in existing_sessions }
        
        self._lock = threading.Lock()
        
    
    def GetCookies( self, name ):
        
        now = HydrusData.GetNow()
        
        with self._lock:
            
            if name in self._names_to_sessions:
                
                ( cookies, expires ) = self._names_to_sessions[ name ]
                
                # refresh the session a little before it actually expires
                if HydrusData.TimeHasPassed( expires - 300 ): del self._names_to_sessions[ name ]
                else: return cookies
                
            
            # name not found, or expired
            
            if name == 'deviant art':
                
                ( response_gumpf, cookies ) = HydrusGlobals.client_controller.DoHTTP( HC.GET, 'http://www.deviantart.com/', return_cookies = True )
                
                expires = now + 30 * 86400
                
            elif name == 'hentai foundry':
                
                ( response_gumpf, cookies ) = HydrusGlobals.client_controller.DoHTTP( HC.GET, 'http://www.hentai-foundry.com/?enterAgree=1', return_cookies = True )
                
                raw_csrf = cookies[ 'YII_CSRF_TOKEN' ] # 19b05b536885ec60b8b37650a32f8deb11c08cd1s%3A40%3A%222917dcfbfbf2eda2c1fbe43f4d4c4ec4b6902b32%22%3B
                
                processed_csrf = urllib.unquote( raw_csrf ) # 19b05b536885ec60b8b37650a32f8deb11c08cd1s:40:"2917dcfbfbf2eda2c1fbe43f4d4c4ec4b6902b32";
                
                csrf_token = processed_csrf.split( '"' )[1] # the 2917... bit
                
                hentai_foundry_form_info = ClientDefaults.GetDefaultHentaiFoundryInfo()
                
                hentai_foundry_form_info[ 'YII_CSRF_TOKEN' ] = csrf_token
                
                body = urllib.urlencode( hentai_foundry_form_info )
                
                request_headers = {}
                
                ClientNetworking.AddCookiesToHeaders( cookies, request_headers )
                
                request_headers[ 'Content-Type' ] = 'application/x-www-form-urlencoded'
                
                HydrusGlobals.client_controller.DoHTTP( HC.POST, 'http://www.hentai-foundry.com/site/filters', request_headers = request_headers, body = body )
                
                expires = now + 60 * 60
                
            elif name == 'pixiv':
                
                ( id, password ) = HydrusGlobals.client_controller.Read( 'pixiv_account' )
                
                if id == '' and password == '':
                    
                    raise Exception( 'You need to set up your pixiv credentials in services->manage pixiv account.' )
                    
                
                form_fields = {}
                
                form_fields[ 'mode' ] = 'login'
                form_fields[ 'pixiv_id' ] = id
                form_fields[ 'pass' ] = password
                form_fields[ 'skip' ] = '1'
                
                body = urllib.urlencode( form_fields )
                
                headers = {}
                
                headers[ 'Content-Type' ] = 'application/x-www-form-urlencoded'
                
                ( response_gumpf, cookies ) = HydrusGlobals.client_controller.DoHTTP( HC.POST, 'http://www.pixiv.net/login.php', request_headers = headers, body = body, return_cookies = True )
                
                # a PHPSESSID containing '_' is only given to logged-in php sessions
                if 'PHPSESSID' not in cookies or '_' not in cookies[ 'PHPSESSID' ]: raise Exception( 'Pixiv login credentials not accepted!' )
                
                expires = now + 30 * 86400
                
            
            self._names_to_sessions[ name ] = ( cookies, expires )
            
            HydrusGlobals.client_controller.Write( 'web_session', name, cookies, expires )
            
            return cookies
            
        
    