# hydrus/include/ClientCaches.py


import ClientDefaults
import ClientFiles
import ClientNetworking
import ClientRendering
import ClientSearch
import ClientThreading
import HydrusConstants as HC
import HydrusExceptions
import HydrusFileHandling
import HydrusImageHandling
import HydrusPaths
import HydrusSessions
import itertools
import os
import random
import Queue
import shutil
import threading
import time
import urllib
import wx
import HydrusData
import ClientData
import ClientConstants as CC
import HydrusGlobals
import collections
import HydrusTags
import traceback

# important thing here, and reason why it is recursive, is because we want to preserve the parent-grandparent interleaving
def BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents ):
    
    def AddParents( simple_children_to_parents, children_to_parents, child, parents ):
        for parent in parents:
            if parent not in children_to_parents[ child ]:
                children_to_parents[ child ].append( parent )
                if parent in simple_children_to_parents:
                    grandparents = simple_children_to_parents[ parent ]
                    AddParents( simple_children_to_parents, children_to_parents, child, grandparents )
    
    service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
    for ( service_key, simple_children_to_parents ) in service_keys_to_simple_children_to_parents.items():
        children_to_parents = service_keys_to_children_to_parents[ service_key ]
        for ( child, parents ) in simple_children_to_parents.items():
            AddParents( simple_children_to_parents, children_to_parents, child, parents )
    return service_keys_to_children_to_parents

def BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat ):
    service_keys_to_simple_children_to_parents = collections.defaultdict( HydrusData.default_dict_set )
    for ( service_key, pairs ) in service_keys_to_pairs_flat.items():
        service_keys_to_simple_children_to_parents[ service_key ] = BuildSimpleChildrenToParents( pairs )
    return service_keys_to_simple_children_to_parents

def BuildSimpleChildrenToParents( pairs ):
    simple_children_to_parents = HydrusData.default_dict_set()
    for ( child, parent ) in pairs:
        if child == parent: continue
        if LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ): continue
        simple_children_to_parents[ child ].add( parent )
    return simple_children_to_parents

def CollapseTagSiblingChains( processed_siblings ):
    # now to collapse chains
    # A -> B and B -> C goes to A -> C and B -> C
    siblings = {}
    for ( old_tag, new_tag ) in processed_siblings.items():
        # adding A -> B
        if new_tag in siblings:
            # B -> F already calculated and added, so add A -> F
            siblings[ old_tag ] = siblings[ new_tag ]
        else:
            while new_tag in processed_siblings: new_tag = processed_siblings[ new_tag ] # pursue endpoint F
            siblings[ old_tag ] = new_tag
    reverse_lookup = collections.defaultdict( list )
    for ( old_tag, new_tag ) in siblings.items():
        reverse_lookup[ new_tag ].append( old_tag )
    return ( siblings, reverse_lookup )

def CombineTagSiblingPairs( service_keys_to_statuses_to_pairs ):
    # first combine the services
    # if A map already exists, don't overwrite
    # if A -> B forms a loop, don't write it
    processed_siblings = {}
    for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():
        pairs = statuses_to_pairs[ HC.CURRENT ].union( statuses_to_pairs[ HC.PENDING ] )
        for ( old, new ) in pairs:
            if old == new: continue
            if old not in processed_siblings:
                next_new = new
                we_have_a_loop = False
                while next_new in processed_siblings:
                    next_new = processed_siblings[ next_new ]
                    if next_new == old:
                        we_have_a_loop = True
                        break
                if not we_have_a_loop: processed_siblings[ old ] = new
    return processed_siblings

def LoopInSimpleChildrenToParents( simple_children_to_parents, child, parent ):
    potential_loop_paths = { parent }
    while len( potential_loop_paths.intersection( simple_children_to_parents.keys() ) ) > 0:
        new_potential_loop_paths = set()
        for potential_loop_path in potential_loop_paths.intersection( simple_children_to_parents.keys() ):
            new_potential_loop_paths.update( simple_children_to_parents[ potential_loop_path ] )
        potential_loop_paths = new_potential_loop_paths
        if child in potential_loop_paths: return True
    return False
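
# A minimal, non-executing sketch (hypothetical tags) of how the helpers above
# behave. BuildSimpleChildrenToParents silently drops any pair that would close
# a loop, and CollapseTagSiblingChains maps every old tag to its chain endpoint:
#
#   pairs = [ ( 'evangelion', 'anime' ), ( 'anime', 'evangelion' ) ]
#   simple = BuildSimpleChildrenToParents( pairs )
#   # only the first pair survives; the second would make a child its own ancestor:
#   # simple == { 'evangelion': set( [ 'anime' ] ) }
#
#   processed = { 'a': 'b', 'b': 'c' }
#   ( siblings, reverse_lookup ) = CollapseTagSiblingChains( processed )
#   # siblings == { 'a': 'c', 'b': 'c' } -- both old tags point at the endpoint
#   # sorted( reverse_lookup[ 'c' ] ) == [ 'a', 'b' ]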
class ClientFilesManager( object ):
    
    def __init__( self, controller ):
        self._controller = controller
        self._lock = threading.Lock()
        self._prefixes_to_locations = {}
        self._bad_error_occured = False
        self._Reinit()
    
    def _GenerateExpectedFilePath( self, hash, mime ):
        hash_encoded = hash.encode( 'hex' )
        prefix = 'f' + hash_encoded[:2]
        location = self._prefixes_to_locations[ prefix ]
        path = os.path.join( location, prefix, hash_encoded + HC.mime_ext_lookup[ mime ] )
        return path
    
    def _GenerateExpectedFullSizeThumbnailPath( self, hash ):
        hash_encoded = hash.encode( 'hex' )
        prefix = 't' + hash_encoded[:2]
        location = self._prefixes_to_locations[ prefix ]
        path = os.path.join( location, prefix, hash_encoded ) + '.thumbnail'
        return path
    
    def _GenerateExpectedResizedThumbnailPath( self, hash ):
        hash_encoded = hash.encode( 'hex' )
        prefix = 'r' + hash_encoded[:2]
        location = self._prefixes_to_locations[ prefix ]
        path = os.path.join( location, prefix, hash_encoded ) + '.thumbnail.resized'
        return path
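    
    # A non-executing sketch of the layout the three generators above produce,
    # for a hypothetical hash whose hex encoding starts 'ab'. Each storage type
    # splits into 256 two-hex-character buckets keyed on the first byte of the
    # hash:
    #
    #   file:                <file location>/fab/ab03...e1.jpg
    #   full-size thumbnail: <thumb location>/tab/ab03...e1.thumbnail
    #   resized thumbnail:   <thumb location>/rab/ab03...e1.thumbnail.resized
    #
    # Each prefix can live in a different location, which is what makes the
    # rebalancing further down possible.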
    def _GenerateFullSizeThumbnail( self, hash ):
        try:
            file_path = self._LookForFilePath( hash )
        except HydrusExceptions.FileMissingException:
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was missing. It could not be regenerated because the original file was also missing. This event could indicate hard drive corruption or an unplugged external drive. Please check everything is ok.' )
        try:
            thumbnail = HydrusFileHandling.GenerateThumbnail( file_path )
        except Exception as e:
            HydrusData.ShowException( e )
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was missing. It could not be regenerated from the original file for the above reason. This event could indicate hard drive corruption. Please check everything is ok.' )
        full_size_path = self._GenerateExpectedFullSizeThumbnailPath( hash )
        try:
            with open( full_size_path, 'wb' ) as f:
                f.write( thumbnail )
        except Exception as e:
            HydrusData.ShowException( e )
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was missing. It was regenerated from the original file, but hydrus could not write it to the location ' + full_size_path + ' for the above reason. This event could indicate hard drive corruption, and it also suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.' )
    
    def _GenerateResizedThumbnail( self, hash ):
        full_size_path = self._GenerateExpectedFullSizeThumbnailPath( hash )
        options = self._controller.GetOptions()
        thumbnail_dimensions = options[ 'thumbnail_dimensions' ]
        try:
            thumbnail_resized = HydrusFileHandling.GenerateThumbnail( full_size_path, thumbnail_dimensions )
        except:
            try:
                HydrusPaths.DeletePath( full_size_path )
            except:
                raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was found, but it would not render. An attempt to delete it was made, but that failed as well. This event could indicate hard drive corruption, and it also suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.' )
            # the full-size thumb was bad, so regenerate it and try the resize again
            self._GenerateFullSizeThumbnail( hash )
            thumbnail_resized = HydrusFileHandling.GenerateThumbnail( full_size_path, thumbnail_dimensions )
        resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
        try:
            with open( resized_path, 'wb' ) as f:
                f.write( thumbnail_resized )
        except Exception as e:
            HydrusData.ShowException( e )
            raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was found, but the resized version would not save to disk. This event suggests that hydrus does not have permission to write to its thumbnail folder. Please check everything is ok.' )
    
    def _GetRecoverTuple( self ):
        all_locations = { location for location in self._prefixes_to_locations.values() }
        all_prefixes = self._prefixes_to_locations.keys()
        for possible_location in all_locations:
            for prefix in all_prefixes:
                correct_location = self._prefixes_to_locations[ prefix ]
                if possible_location != correct_location and os.path.exists( os.path.join( possible_location, prefix ) ):
                    recoverable_location = possible_location
                    return ( prefix, recoverable_location, correct_location )
        return None
    def _GetRebalanceTuple( self ):
        ( locations_to_ideal_weights, resized_thumbnail_override ) = self._controller.GetNewOptions().GetClientFilesLocationsToIdealWeights()
        total_weight = sum( locations_to_ideal_weights.values() )
        # float() so the normalisation is not integer division under python 2
        ideal_locations_to_normalised_weights = { location : weight / float( total_weight ) for ( location, weight ) in locations_to_ideal_weights.items() }
        current_locations_to_normalised_weights = collections.defaultdict( lambda: 0 )
        file_prefixes = [ prefix for prefix in self._prefixes_to_locations if prefix.startswith( 'f' ) ]
        for file_prefix in file_prefixes:
            location = self._prefixes_to_locations[ file_prefix ]
            current_locations_to_normalised_weights[ location ] += 1.0 / 256
        for location in current_locations_to_normalised_weights.keys():
            if location not in ideal_locations_to_normalised_weights:
                ideal_locations_to_normalised_weights[ location ] = 0.0
        #
        overweight_locations = []
        underweight_locations = []
        for ( location, ideal_weight ) in ideal_locations_to_normalised_weights.items():
            if location in current_locations_to_normalised_weights:
                current_weight = current_locations_to_normalised_weights[ location ]
                if current_weight < ideal_weight:
                    underweight_locations.append( location )
                elif current_weight >= ideal_weight + 1.0 / 256:
                    overweight_locations.append( location )
            else:
                underweight_locations.append( location )
        #
        if len( underweight_locations ) > 0 and len( overweight_locations ) > 0:
            overweight_location = overweight_locations.pop( 0 )
            underweight_location = underweight_locations.pop( 0 )
            random.shuffle( file_prefixes )
            for file_prefix in file_prefixes:
                location = self._prefixes_to_locations[ file_prefix ]
                if location == overweight_location:
                    return ( file_prefix, overweight_location, underweight_location )
        else:
            for hex_prefix in HydrusData.IterateHexPrefixes():
                full_size_prefix = 't' + hex_prefix
                file_prefix = 'f' + hex_prefix
                full_size_location = self._prefixes_to_locations[ full_size_prefix ]
                file_location = self._prefixes_to_locations[ file_prefix ]
                if full_size_location != file_location:
                    return ( full_size_prefix, full_size_location, file_location )
            if resized_thumbnail_override is None:
                for hex_prefix in HydrusData.IterateHexPrefixes():
                    resized_prefix = 'r' + hex_prefix
                    file_prefix = 'f' + hex_prefix
                    resized_location = self._prefixes_to_locations[ resized_prefix ]
                    file_location = self._prefixes_to_locations[ file_prefix ]
                    if resized_location != file_location:
                        return ( resized_prefix, resized_location, file_location )
            else:
                for hex_prefix in HydrusData.IterateHexPrefixes():
                    resized_prefix = 'r' + hex_prefix
                    resized_location = self._prefixes_to_locations[ resized_prefix ]
                    if resized_location != resized_thumbnail_override:
                        return ( resized_prefix, resized_location, resized_thumbnail_override )
        return None
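    
    # A worked, non-executing example of the weight arithmetic above, with two
    # hypothetical locations A and B given ideal weights 2 and 1. Each of the
    # 256 'f' prefixes adds 1.0 / 256 to its current location's weight:
    #
    #   normalised ideals: A -> 2 / 3.0 ~= 0.667, B -> 1 / 3.0 ~= 0.333
    #   if A holds 160 prefixes and B holds 96:
    #     current A = 160 / 256.0 = 0.625, which is < 0.667, so A is underweight
    #     current B = 96 / 256.0 = 0.375, which is >= 0.333 + 1.0 / 256, so B is overweight
    #   a random 'f' prefix currently in B is then returned as ( prefix, B, A ).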
    def _IterateAllFilePaths( self ):
        for ( prefix, location ) in self._prefixes_to_locations.items():
            if prefix.startswith( 'f' ):
                dir = os.path.join( location, prefix )
                filenames = os.listdir( dir )
                for filename in filenames:
                    yield os.path.join( dir, filename )
    
    def _IterateAllThumbnailPaths( self ):
        for ( prefix, location ) in self._prefixes_to_locations.items():
            if prefix.startswith( 't' ) or prefix.startswith( 'r' ):
                dir = os.path.join( location, prefix )
                filenames = os.listdir( dir )
                for filename in filenames:
                    yield os.path.join( dir, filename )
    
    def _LookForFilePath( self, hash ):
        for potential_mime in HC.ALLOWED_MIMES:
            potential_path = self._GenerateExpectedFilePath( hash, potential_mime )
            if os.path.exists( potential_path ):
                return potential_path
        raise HydrusExceptions.FileMissingException( 'File for ' + hash.encode( 'hex' ) + ' not found!' )
    
    def _Reinit( self ):
        self._prefixes_to_locations = self._controller.Read( 'client_files_locations' )
        missing = set()
        for ( prefix, location ) in self._prefixes_to_locations.items():
            if os.path.exists( location ):
                dir = os.path.join( location, prefix )
                if not os.path.exists( dir ):
                    missing.add( dir )
                    os.makedirs( dir )
            else:
                missing.add( location )
        if len( missing ) > 0 and not HydrusGlobals.is_first_start:
            self._bad_error_occured = True
            text = 'The external locations:'
            text += os.linesep * 2
            text += ', '.join( missing )
            text += os.linesep * 2
            text += 'did not exist on boot! Please check your external storage options and locations and restart the client.'
            HydrusData.DebugPrint( text )
            wx.MessageBox( text )
    def AddFile( self, hash, mime, source_path ):
        with self._lock:
            dest_path = self._GenerateExpectedFilePath( hash, mime )
            if not os.path.exists( dest_path ):
                shutil.copy2( source_path, dest_path )
            return dest_path
    
    def AddFullSizeThumbnail( self, hash, thumbnail ):
        with self._lock:
            path = self._GenerateExpectedFullSizeThumbnailPath( hash )
            with open( path, 'wb' ) as f:
                f.write( thumbnail )
            self._controller.pub( 'new_thumbnails', { hash } )
    def ClearOrphans( self, move_location = None ):
        job_key = ClientThreading.JobKey( cancellable = True )
        job_key.SetVariable( 'popup_title', 'clearing orphans' )
        job_key.SetVariable( 'popup_text_1', 'preparing' )
        self._controller.pub( 'message', job_key )
        orphan_paths = []
        orphan_thumbnails = []
        for ( i, path ) in enumerate( self._IterateAllFilePaths() ):
            ( i_paused, should_quit ) = job_key.WaitIfNeeded()
            if should_quit:
                return
            if i % 100 == 0:
                status = 'reviewed ' + HydrusData.ConvertIntToPrettyString( i ) + ' files, found ' + HydrusData.ConvertIntToPrettyString( len( orphan_paths ) ) + ' orphans'
                job_key.SetVariable( 'popup_text_1', status )
            try:
                is_an_orphan = False
                ( directory, filename ) = os.path.split( path )
                should_be_a_hex_hash = filename[:64]
                hash = should_be_a_hex_hash.decode( 'hex' )
                is_an_orphan = HydrusGlobals.client_controller.Read( 'is_an_orphan', 'file', hash )
            except:
                is_an_orphan = True
            if is_an_orphan:
                orphan_paths.append( path )
        time.sleep( 2 )
        for ( i, path ) in enumerate( self._IterateAllThumbnailPaths() ):
            ( i_paused, should_quit ) = job_key.WaitIfNeeded()
            if should_quit:
                return
            if i % 100 == 0:
                status = 'reviewed ' + HydrusData.ConvertIntToPrettyString( i ) + ' thumbnails, found ' + HydrusData.ConvertIntToPrettyString( len( orphan_thumbnails ) ) + ' orphans'
                job_key.SetVariable( 'popup_text_1', status )
            try:
                is_an_orphan = False
                ( directory, filename ) = os.path.split( path )
                should_be_a_hex_hash = filename[:64]
                hash = should_be_a_hex_hash.decode( 'hex' )
                is_an_orphan = HydrusGlobals.client_controller.Read( 'is_an_orphan', 'thumbnail', hash )
            except:
                is_an_orphan = True
            if is_an_orphan:
                orphan_thumbnails.append( path )
        time.sleep( 2 )
        if len( orphan_paths ) > 0:
            if move_location is None:
                status = 'found ' + HydrusData.ConvertIntToPrettyString( len( orphan_paths ) ) + ' orphans, now deleting'
                job_key.SetVariable( 'popup_text_1', status )
                time.sleep( 5 )
                for ( i, path ) in enumerate( orphan_paths ):
                    ( i_paused, should_quit ) = job_key.WaitIfNeeded()
                    if should_quit:
                        return
                    HydrusData.Print( 'Deleting the orphan ' + path )
                    status = 'deleting orphan files: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_paths ) )
                    job_key.SetVariable( 'popup_text_1', status )
                    HydrusPaths.DeletePath( path )
            else:
                status = 'found ' + HydrusData.ConvertIntToPrettyString( len( orphan_paths ) ) + ' orphans, now moving to ' + move_location
                job_key.SetVariable( 'popup_text_1', status )
                time.sleep( 5 )
                for ( i, path ) in enumerate( orphan_paths ):
                    ( i_paused, should_quit ) = job_key.WaitIfNeeded()
                    if should_quit:
                        return
                    ( source_dir, filename ) = os.path.split( path )
                    dest = os.path.join( move_location, filename )
                    dest = HydrusPaths.AppendPathUntilNoConflicts( dest )
                    HydrusData.Print( 'Moving the orphan ' + path + ' to ' + dest )
                    status = 'moving orphan files: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_paths ) )
                    job_key.SetVariable( 'popup_text_1', status )
                    shutil.move( path, dest )
        if len( orphan_thumbnails ) > 0:
            status = 'found ' + HydrusData.ConvertIntToPrettyString( len( orphan_thumbnails ) ) + ' orphan thumbnails, now deleting'
            job_key.SetVariable( 'popup_text_1', status )
            time.sleep( 5 )
            for ( i, path ) in enumerate( orphan_thumbnails ):
                ( i_paused, should_quit ) = job_key.WaitIfNeeded()
                if should_quit:
                    return
                status = 'deleting orphan thumbnails: ' + HydrusData.ConvertValueRangeToPrettyString( i + 1, len( orphan_thumbnails ) )
                job_key.SetVariable( 'popup_text_1', status )
                HydrusData.Print( 'Deleting the orphan ' + path )
                HydrusPaths.DeletePath( path )
        if len( orphan_paths ) == 0 and len( orphan_thumbnails ) == 0:
            final_text = 'no orphans found!'
        else:
            final_text = HydrusData.ConvertIntToPrettyString( len( orphan_paths ) ) + ' orphan files and ' + HydrusData.ConvertIntToPrettyString( len( orphan_thumbnails ) ) + ' orphan thumbnails cleared!'
        job_key.SetVariable( 'popup_text_1', final_text )
        HydrusData.Print( job_key.ToString() )
        job_key.Finish()
    def DeleteFiles( self, hashes ):
        with self._lock:
            for hash in hashes:
                try:
                    path = self._LookForFilePath( hash )
                except HydrusExceptions.FileMissingException:
                    continue
                ClientData.DeletePath( path )
    
    def DeleteThumbnails( self, hashes ):
        with self._lock:
            for hash in hashes:
                path = self._GenerateExpectedFullSizeThumbnailPath( hash )
                resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
                HydrusPaths.DeletePath( path )
                HydrusPaths.DeletePath( resized_path )
    
    def GetFilePath( self, hash, mime = None ):
        with self._lock:
            if mime is None:
                path = self._LookForFilePath( hash )
            else:
                path = self._GenerateExpectedFilePath( hash, mime )
            if not os.path.exists( path ):
                raise HydrusExceptions.FileMissingException( 'No file found at path ' + path + '!' )
            return path
    def GetFullSizeThumbnailPath( self, hash ):
        with self._lock:
            path = self._GenerateExpectedFullSizeThumbnailPath( hash )
            if not os.path.exists( path ):
                self._GenerateFullSizeThumbnail( hash )
                if not self._bad_error_occured:
                    self._bad_error_occured = True
                    HydrusData.ShowText( 'A thumbnail for a file, ' + hash.encode( 'hex' ) + ', was missing. It has been regenerated from the original file, but this event could indicate hard drive corruption. Please check everything is ok. This error may be occurring for many files, but this message will only display once per boot. If you are recovering from a fractured database, you may wish to run \'database->maintenance->regenerate thumbnails\'.' )
            return path
    
    def GetResizedThumbnailPath( self, hash ):
        with self._lock:
            path = self._GenerateExpectedResizedThumbnailPath( hash )
            if not os.path.exists( path ):
                self._GenerateResizedThumbnail( hash )
            return path
    
    def HaveFullSizeThumbnail( self, hash ):
        with self._lock:
            path = self._GenerateExpectedFullSizeThumbnailPath( hash )
            return os.path.exists( path )
    def Rebalance( self, partial = True, stop_time = None ):
        if self._bad_error_occured:
            return
        with self._lock:
            rebalance_tuple = self._GetRebalanceTuple()
            while rebalance_tuple is not None:
                ( prefix, overweight_location, underweight_location ) = rebalance_tuple
                text = 'Moving \'' + prefix + '\' from ' + overweight_location + ' to ' + underweight_location
                if partial:
                    HydrusData.Print( text )
                else:
                    self._controller.pub( 'splash_set_status_text', text )
                    HydrusData.ShowText( text )
                # these two lines can cause a deadlock because the db sometimes calls stuff in here.
                self._controller.Write( 'relocate_client_files', prefix, overweight_location, underweight_location )
                self._Reinit()
                if partial:
                    break
                if stop_time is not None and HydrusData.TimeHasPassed( stop_time ):
                    return
                rebalance_tuple = self._GetRebalanceTuple()
            recover_tuple = self._GetRecoverTuple()
            while recover_tuple is not None:
                ( prefix, recoverable_location, correct_location ) = recover_tuple
                text = 'Recovering \'' + prefix + '\' from ' + recoverable_location + ' to ' + correct_location
                if partial:
                    HydrusData.Print( text )
                else:
                    self._controller.pub( 'splash_set_status_text', text )
                    HydrusData.ShowText( text )
                recoverable_path = os.path.join( recoverable_location, prefix )
                correct_path = os.path.join( correct_location, prefix )
                HydrusPaths.MoveAndMergeTree( recoverable_path, correct_path )
                if partial:
                    break
                if stop_time is not None and HydrusData.TimeHasPassed( stop_time ):
                    return
                recover_tuple = self._GetRecoverTuple()
            if not partial:
                HydrusData.ShowText( 'All folders balanced!' )
    def RegenerateResizedThumbnail( self, hash ):
        with self._lock:
            self._GenerateResizedThumbnail( hash )
    
    def RegenerateThumbnails( self, only_do_missing = False ):
        with self._lock:
            job_key = ClientThreading.JobKey( cancellable = True )
            job_key.SetVariable( 'popup_title', 'regenerating thumbnails' )
            job_key.SetVariable( 'popup_text_1', 'creating directories' )
            self._controller.pub( 'message', job_key )
            num_broken = 0
            for ( i, path ) in enumerate( self._IterateAllFilePaths() ):
                try:
                    while job_key.IsPaused() or job_key.IsCancelled():
                        time.sleep( 0.1 )
                        if job_key.IsCancelled():
                            job_key.SetVariable( 'popup_text_1', 'cancelled' )
                            HydrusData.Print( job_key.ToString() )
                            return
                    job_key.SetVariable( 'popup_text_1', HydrusData.ConvertIntToPrettyString( i ) + ' done' )
                    ( base, filename ) = os.path.split( path )
                    ( hash_encoded, ext ) = filename.split( '.', 1 )
                    hash = hash_encoded.decode( 'hex' )
                    full_size_path = self._GenerateExpectedFullSizeThumbnailPath( hash )
                    if only_do_missing and os.path.exists( full_size_path ):
                        continue
                    mime = HydrusFileHandling.GetMime( path )
                    if mime in HC.MIMES_WITH_THUMBNAILS:
                        self._GenerateFullSizeThumbnail( hash )
                        thumbnail_resized_path = self._GenerateExpectedResizedThumbnailPath( hash )
                        if os.path.exists( thumbnail_resized_path ):
                            HydrusPaths.DeletePath( thumbnail_resized_path )
                except:
                    HydrusData.Print( path )
                    HydrusData.Print( traceback.format_exc() )
                    num_broken += 1
            if num_broken > 0:
                job_key.SetVariable( 'popup_text_1', 'done! ' + HydrusData.ConvertIntToPrettyString( num_broken ) + ' files caused errors, which have been written to the log.' )
            else:
                job_key.SetVariable( 'popup_text_1', 'done!' )
            HydrusData.Print( job_key.ToString() )
            job_key.Finish()
class DataCache( object ):
    
    def __init__( self, controller, cache_size_key ):
        self._controller = controller
        self._cache_size_key = cache_size_key
        self._keys_to_data = {}
        self._keys_fifo = []
        self._total_estimated_memory_footprint = 0
        self._lock = threading.Lock()
        wx.CallLater( 60 * 1000, self.MaintainCache )
    
    def _DeleteItem( self ):
        ( deletee_key, last_access_time ) = self._keys_fifo.pop( 0 )
        deletee_data = self._keys_to_data[ deletee_key ]
        del self._keys_to_data[ deletee_key ]
        self._RecalcMemoryUsage()
    
    def _RecalcMemoryUsage( self ):
        self._total_estimated_memory_footprint = sum( ( data.GetEstimatedMemoryFootprint() for data in self._keys_to_data.values() ) )
    
    def Clear( self ):
        with self._lock:
            self._keys_to_data = {}
            self._keys_fifo = []
            self._total_estimated_memory_footprint = 0
    
    def AddData( self, key, data ):
        with self._lock:
            if key not in self._keys_to_data:
                options = self._controller.GetOptions()
                while self._total_estimated_memory_footprint > options[ self._cache_size_key ]:
                    self._DeleteItem()
                self._keys_to_data[ key ] = data
                self._keys_fifo.append( ( key, HydrusData.GetNow() ) )
                self._RecalcMemoryUsage()
    
    def GetData( self, key ):
        with self._lock:
            if key not in self._keys_to_data:
                raise Exception( 'Cache error! Looking for ' + HydrusData.ToUnicode( key ) + ', but it was missing.' )
            # refresh the key's position in the fifo
            for ( i, ( fifo_key, last_access_time ) ) in enumerate( self._keys_fifo ):
                if fifo_key == key:
                    del self._keys_fifo[ i ]
                    break
            self._keys_fifo.append( ( key, HydrusData.GetNow() ) )
            return self._keys_to_data[ key ]
    
    def HasData( self, key ):
        with self._lock:
            return key in self._keys_to_data
    
    def MaintainCache( self ):
        with self._lock:
            while True:
                if len( self._keys_fifo ) == 0:
                    break
                else:
                    ( key, last_access_time ) = self._keys_fifo[ 0 ]
                    if HydrusData.TimeHasPassed( last_access_time + 1200 ):
                        self._DeleteItem()
                    else:
                        break
        wx.CallLater( 60 * 1000, self.MaintainCache )
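
# A minimal, non-executing usage sketch of DataCache (hypothetical controller
# and payload). Anything added must expose GetEstimatedMemoryFootprint(); the
# oldest-accessed entries are evicted once the options-defined byte budget is
# exceeded, and MaintainCache also drops anything untouched for twenty minutes:
#
#   cache = DataCache( controller, 'preview_cache_size' )
#   cache.AddData( key, renderable )
#   if cache.HasData( key ):
#       renderable = cache.GetData( key ) # moves key to the back of the fifo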
class LocalBooruCache( object ):
    
    def __init__( self, controller ):
        self._controller = controller
        self._lock = threading.Lock()
        self._RefreshShares()
        self._controller.sub( self, 'RefreshShares', 'refresh_local_booru_shares' )
        self._controller.sub( self, 'RefreshShares', 'restart_booru' )
    
    def _CheckDataUsage( self ):
        info = self._local_booru_service.GetInfo()
        max_monthly_data = info[ 'max_monthly_data' ]
        used_monthly_data = info[ 'used_monthly_data' ]
        if max_monthly_data is not None and used_monthly_data > max_monthly_data: raise HydrusExceptions.ForbiddenException( 'This booru has used all its monthly data. Please try again next month.' )
    
    def _CheckFileAuthorised( self, share_key, hash ):
        self._CheckShareAuthorised( share_key )
        info = self._GetInfo( share_key )
        if hash not in info[ 'hashes_set' ]: raise HydrusExceptions.NotFoundException( 'That file was not found in that share.' )
    
    def _CheckShareAuthorised( self, share_key ):
        self._CheckDataUsage()
        info = self._GetInfo( share_key )
        timeout = info[ 'timeout' ]
        if timeout is not None and HydrusData.TimeHasPassed( timeout ): raise HydrusExceptions.ForbiddenException( 'This share has expired.' )
    
    def _GetInfo( self, share_key ):
        try: info = self._keys_to_infos[ share_key ]
        except: raise HydrusExceptions.NotFoundException( 'Did not find that share on this booru.' )
        if info is None:
            info = self._controller.Read( 'local_booru_share', share_key )
            hashes = info[ 'hashes' ]
            info[ 'hashes_set' ] = set( hashes )
            media_results = self._controller.Read( 'media_results', hashes )
            info[ 'media_results' ] = media_results
            hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results }
            info[ 'hashes_to_media_results' ] = hashes_to_media_results
            self._keys_to_infos[ share_key ] = info
        return info
    
    def _RefreshShares( self ):
        self._local_booru_service = self._controller.GetServicesManager().GetService( CC.LOCAL_BOORU_SERVICE_KEY )
        self._keys_to_infos = {}
        share_keys = self._controller.Read( 'local_booru_share_keys' )
        for share_key in share_keys: self._keys_to_infos[ share_key ] = None
    
    def CheckShareAuthorised( self, share_key ):
        with self._lock: self._CheckShareAuthorised( share_key )
    
    def CheckFileAuthorised( self, share_key, hash ):
        with self._lock: self._CheckFileAuthorised( share_key, hash )
    
    def GetGalleryInfo( self, share_key ):
        with self._lock:
            self._CheckShareAuthorised( share_key )
            info = self._GetInfo( share_key )
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_results = info[ 'media_results' ]
            return ( name, text, timeout, media_results )
    
    def GetMediaResult( self, share_key, hash ):
        with self._lock:
            info = self._GetInfo( share_key )
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            return media_result
    
    def GetPageInfo( self, share_key, hash ):
        with self._lock:
            self._CheckFileAuthorised( share_key, hash )
            info = self._GetInfo( share_key )
            name = info[ 'name' ]
            text = info[ 'text' ]
            timeout = info[ 'timeout' ]
            media_result = info[ 'hashes_to_media_results' ][ hash ]
            return ( name, text, timeout, media_result )
    
    def RefreshShares( self ):
        with self._lock:
            self._RefreshShares()
class HydrusSessionManager( object ):
    
    def __init__( self, controller ):
        self._controller = controller
        existing_sessions = self._controller.Read( 'hydrus_sessions' )
        self._service_keys_to_sessions = { service_key : ( session_key, expires ) for ( service_key, session_key, expires ) in existing_sessions }
        self._lock = threading.Lock()
    
    def DeleteSessionKey( self, service_key ):
        with self._lock:
            self._controller.Write( 'delete_hydrus_session_key', service_key )
            if service_key in self._service_keys_to_sessions:
                del self._service_keys_to_sessions[ service_key ]
    
    def GetSessionKey( self, service_key ):
        now = HydrusData.GetNow()
        with self._lock:
            if service_key in self._service_keys_to_sessions:
                ( session_key, expires ) = self._service_keys_to_sessions[ service_key ]
                if now + 600 > expires: del self._service_keys_to_sessions[ service_key ]
                else: return session_key
            # session key expired or not found
            service = self._controller.GetServicesManager().GetService( service_key )
            ( response_gumpf, cookies ) = service.Request( HC.GET, 'session_key', return_cookies = True )
            try: session_key = cookies[ 'session_key' ].decode( 'hex' )
            except: raise Exception( 'Service did not return a session key!' )
            expires = now + HydrusSessions.HYDRUS_SESSION_LIFETIME
            self._service_keys_to_sessions[ service_key ] = ( session_key, expires )
            self._controller.Write( 'hydrus_session', service_key, session_key, expires )
            return session_key
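
# Note on the 600s margin above: a cached key is thrown away and re-requested
# once it is within ten minutes of expiry, so callers are never handed a key
# that could lapse mid-request. Non-executing sketch (hypothetical service_key):
#
#   session_key = session_manager.GetSessionKey( service_key )
#   # expires more than 600s away -> cached key returned
#   # expires within 600s -> entry deleted, fresh key fetched and written to the db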
class MenuEventIdToActionCache( object ):
    
    def __init__( self ):
        self._ids_to_actions = {}
        self._actions_to_ids = {}
        self._temporary_ids = set()
        self._free_temporary_ids = set()
    
    def _ClearTemporaries( self ):
        for temporary_id in self._temporary_ids.difference( self._free_temporary_ids ):
            temporary_action = self._ids_to_actions[ temporary_id ]
            del self._ids_to_actions[ temporary_id ]
            del self._actions_to_ids[ temporary_action ]
        self._free_temporary_ids = set( self._temporary_ids )
    
    def _GetNewId( self, temporary ):
        if temporary:
            if len( self._free_temporary_ids ) == 0:
                new_id = wx.NewId()
                self._temporary_ids.add( new_id )
                self._free_temporary_ids.add( new_id )
            return self._free_temporary_ids.pop()
        else:
            return wx.NewId()
    
    def GetAction( self, event_id ):
        action = None
        if event_id in self._ids_to_actions:
            action = self._ids_to_actions[ event_id ]
            if event_id in self._temporary_ids:
                self._ClearTemporaries()
        return action
    
    def GetId( self, command, data = None, temporary = False ):
        action = ( command, data )
        if action not in self._actions_to_ids:
            event_id = self._GetNewId( temporary )
            self._ids_to_actions[ event_id ] = action
            self._actions_to_ids[ action ] = event_id
        return self._actions_to_ids[ action ]
    
    def GetPermanentId( self, command, data = None ):
        return self.GetId( command, data, False )
    
    def GetTemporaryId( self, command, data = None ):
        temporary = True
        if data is None:
            temporary = False
        return self.GetId( command, data, temporary )
    
MENU_EVENT_ID_TO_ACTION_CACHE = MenuEventIdToActionCache()
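
# A minimal, non-executing usage sketch (hypothetical commands). Permanent ids
# are stable for the process lifetime, while temporary ids are recycled after
# the next GetAction call that resolves a temporary id, which suits one-shot
# context menus:
#
#   event_id = MENU_EVENT_ID_TO_ACTION_CACHE.GetPermanentId( 'manage_tags' )
#   temp_id = MENU_EVENT_ID_TO_ACTION_CACHE.GetTemporaryId( 'select_tag', data = tag )
#   action = MENU_EVENT_ID_TO_ACTION_CACHE.GetAction( temp_id ) # ( 'select_tag', tag )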
class RenderedImageCache( object ):
    
    def __init__( self, controller, cache_type ):
        self._controller = controller
        self._type = cache_type
        if self._type == 'fullscreen': self._data_cache = DataCache( self._controller, 'fullscreen_cache_size' )
        elif self._type == 'preview': self._data_cache = DataCache( self._controller, 'preview_cache_size' )
    
    def Clear( self ): self._data_cache.Clear()
    
    def GetImage( self, media, target_resolution = None ):
        hash = media.GetHash()
        if target_resolution is None:
            target_resolution = media.GetResolution()
        ( media_width, media_height ) = media.GetResolution()
        ( target_width, target_height ) = target_resolution
        if target_width > media_width or target_height > media_height:
            target_resolution = media.GetResolution()
        else:
            target_resolution = ( target_width, target_height ) # to convert from wx.Size or list to tuple for the cache key
        key = ( hash, target_resolution )
        if self._data_cache.HasData( key ):
            return self._data_cache.GetData( key )
        else:
            image_container = ClientRendering.RasterContainerImage( media, target_resolution )
            self._data_cache.AddData( key, image_container )
            return image_container
    
    def HasImage( self, hash, target_resolution ):
        key = ( hash, target_resolution )
        return self._data_cache.HasData( key )
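
# A non-executing sketch of the resolution clamp in GetImage above: requested
# sizes larger than the media itself snap back to the native resolution, so the
# cache never stores an upscaled render (numbers hypothetical):
#
#   media is 800 x 600
#   GetImage( media, ( 400, 300 ) )   -> cached under ( hash, ( 400, 300 ) )
#   GetImage( media, ( 1600, 1200 ) ) -> clamped, cached under ( hash, ( 800, 600 ) )
#   GetImage( media )                 -> native size, cached under ( hash, ( 800, 600 ) )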
class ThumbnailCache( object ):
    
    def __init__( self, controller ):
        self._controller = controller
        self._data_cache = DataCache( self._controller, 'thumbnail_cache_size' )
        self._client_files_manager = self._controller.GetClientFilesManager()
        self._lock = threading.Lock()
        self._waterfall_queue_quick = set()
        self._waterfall_queue_random = []
        self._waterfall_event = threading.Event()
        self._special_thumbs = {}
        self.Clear()
        threading.Thread( target = self.DAEMONWaterfall, name = 'Waterfall Daemon' ).start()
        self._controller.sub( self, 'Clear', 'thumbnail_resize' )
    
    def _GetResizedHydrusBitmapFromHardDrive( self, display_media ):
        options = self._controller.GetOptions()
        thumbnail_dimensions = options[ 'thumbnail_dimensions' ]
        if tuple( thumbnail_dimensions ) == HC.UNSCALED_THUMBNAIL_DIMENSIONS:
            full_size = True
        else:
            full_size = False
        hash = display_media.GetHash()
        locations_manager = display_media.GetLocationsManager()
        if locations_manager.HasLocal():
            try:
                if full_size:
                    path = self._client_files_manager.GetFullSizeThumbnailPath( hash )
                else:
                    path = self._client_files_manager.GetResizedThumbnailPath( hash )
            except HydrusExceptions.FileMissingException as e:
                HydrusData.ShowException( e )
                return self._special_thumbs[ 'hydrus' ]
        else:
            try:
                if full_size:
                    path = self._client_files_manager.GetFullSizeThumbnailPath( hash )
                else:
                    path = self._client_files_manager.GetResizedThumbnailPath( hash )
            except HydrusExceptions.FileMissingException:
                return self._special_thumbs[ 'hydrus' ]
        try:
            hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path )
        except Exception as e:
            HydrusData.ShowException( e )
            try:
                self._client_files_manager.RegenerateResizedThumbnail( hash )
                try:
                    hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path )
                except Exception as e:
                    HydrusData.ShowException( e )
                    raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.encode( 'hex' ) + ' was broken. It was regenerated, but the new file would not render for the above reason. Please inform the hydrus developer what has happened.' )
            except Exception as e:
                HydrusData.ShowException( e )
                return self._special_thumbs[ 'hydrus' ]
        options = HydrusGlobals.client_controller.GetOptions()
        ( media_x, media_y ) = display_media.GetResolution()
        ( actual_x, actual_y ) = hydrus_bitmap.GetSize()
        ( desired_x, desired_y ) = options[ 'thumbnail_dimensions' ]
        too_large = actual_x > desired_x or actual_y > desired_y
        small_original_image = actual_x == media_x and actual_y == media_y
        too_small = actual_x < desired_x and actual_y < desired_y
        if too_large or ( too_small and not small_original_image ):
            self._client_files_manager.RegenerateResizedThumbnail( hash )
            hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( path )
        return hydrus_bitmap
    
    def _RecalcWaterfallQueueRandom( self ):
        self._waterfall_queue_random = list( self._waterfall_queue_quick )
        random.shuffle( self._waterfall_queue_random )
    
    def CancelWaterfall( self, page_key, medias ):
        with self._lock:
            self._waterfall_queue_quick.difference_update( ( ( page_key, media ) for media in medias ) )
            self._RecalcWaterfallQueueRandom()
    def Clear( self ):
        with self._lock:
            self._data_cache.Clear()
            self._special_thumbs = {}
            names = [ 'hydrus', 'flash', 'pdf', 'audio', 'video' ]
            ( os_file_handle, temp_path ) = HydrusPaths.GetTempPath()
            try:
                for name in names:
                    path = os.path.join( HC.STATIC_DIR, name + '.png' )
                    options = self._controller.GetOptions()
                    thumbnail = HydrusFileHandling.GenerateThumbnail( path, options[ 'thumbnail_dimensions' ] )
                    with open( temp_path, 'wb' ) as f: f.write( thumbnail )
                    hydrus_bitmap = ClientRendering.GenerateHydrusBitmap( temp_path )
                    self._special_thumbs[ name ] = hydrus_bitmap
            finally:
                HydrusPaths.CleanUpTempPath( os_file_handle, temp_path )
    
    def GetThumbnail( self, media ):
        display_media = media.GetDisplayMedia()
        if display_media.GetLocationsManager().ShouldHaveThumbnail():
            mime = display_media.GetMime()
            if mime in HC.MIMES_WITH_THUMBNAILS:
                hash = display_media.GetHash()
                if not self._data_cache.HasData( hash ):
                    hydrus_bitmap = self._GetResizedHydrusBitmapFromHardDrive( display_media )
                    self._data_cache.AddData( hash, hydrus_bitmap )
                return self._data_cache.GetData( hash )
            elif mime in HC.AUDIO: return self._special_thumbs[ 'audio' ]
            elif mime in HC.VIDEO: return self._special_thumbs[ 'video' ]
            elif mime == HC.APPLICATION_FLASH: return self._special_thumbs[ 'flash' ]
            elif mime == HC.APPLICATION_PDF: return self._special_thumbs[ 'pdf' ]
            else: return self._special_thumbs[ 'hydrus' ]
        else:
            return self._special_thumbs[ 'hydrus' ]
    
    def HasThumbnailCached( self, media ):
        display_media = media.GetDisplayMedia()
        mime = display_media.GetMime()
        if mime in HC.MIMES_WITH_THUMBNAILS:
            hash = display_media.GetHash()
            return self._data_cache.HasData( hash )
        else:
            return True
    
    def Waterfall( self, page_key, medias ):
        with self._lock:
            self._waterfall_queue_quick.update( ( ( page_key, media ) for media in medias ) )
            self._RecalcWaterfallQueueRandom()
            self._waterfall_event.set()
    def DAEMONWaterfall( self ):
        last_paused = HydrusData.GetNowPrecise()
        while not HydrusGlobals.view_shutdown:
            with self._lock:
                do_wait = len( self._waterfall_queue_random ) == 0
            if do_wait:
                self._waterfall_event.wait( 1 )
                self._waterfall_event.clear()
                last_paused = HydrusData.GetNowPrecise()
            with self._lock:
                if len( self._waterfall_queue_random ) == 0:
                    continue
                else:
                    result = self._waterfall_queue_random.pop( 0 )
                    self._waterfall_queue_quick.discard( result )
                    ( page_key, media ) = result
            try:
                self.GetThumbnail( media ) # to load it
                self._controller.pub( 'waterfall_thumbnail', page_key, media )
                if HydrusData.GetNowPrecise() - last_paused > 0.005:
                    time.sleep( 0.00001 )
                    last_paused = HydrusData.GetNowPrecise()
            except Exception as e:
                HydrusData.ShowException( e )
class ServicesManager( object ):
    
    def __init__( self, controller ):
        self._controller = controller
        self._lock = threading.Lock()
        self._keys_to_services = {}
        self._services_sorted = []
        self.RefreshServices()
        self._controller.sub( self, 'RefreshServices', 'notify_new_services_data' )
    
    def FilterValidServiceKeys( self, service_keys ):
        with self._lock:
            filtered_service_keys = [ service_key for service_key in service_keys if service_key in self._keys_to_services ]
            return filtered_service_keys
    
    def GetService( self, service_key ):
        with self._lock:
            try:
                return self._keys_to_services[ service_key ]
            except KeyError:
                raise HydrusExceptions.DataMissing( 'That service was not found!' )
    
    def GetServices( self, types = HC.ALL_SERVICES, randomised = True ):
        with self._lock:
            services = [ service for service in self._services_sorted if service.GetServiceType() in types ]
            if randomised:
                random.shuffle( services )
            return services
    
    def RefreshServices( self ):
        with self._lock:
            services = self._controller.Read( 'services' )
            self._keys_to_services = { service.GetServiceKey() : service for service in services }
            compare_function = lambda a, b: cmp( a.GetName(), b.GetName() )
            self._services_sorted = list( services )
            self._services_sorted.sort( cmp = compare_function )
class TagCensorshipManager( object ):
    
    def __init__( self, controller ):
        self._controller = controller
        self.RefreshData()
        self._controller.sub( self, 'RefreshData', 'notify_new_tag_censorship' )
    
    def _CensorshipMatches( self, tag, blacklist, censorships ):
        if blacklist:
            return not HydrusTags.CensorshipMatch( tag, censorships )
        else:
            return HydrusTags.CensorshipMatch( tag, censorships )
    
    def GetInfo( self, service_key ):
        if service_key in self._service_keys_to_info: return self._service_keys_to_info[ service_key ]
        else: return ( True, set() )
    
    def RefreshData( self ):
        rows = self._controller.Read( 'tag_censorship' )
        self._service_keys_to_info = { service_key : ( blacklist, censorships ) for ( service_key, blacklist, censorships ) in rows }
    
    def FilterStatusesToPairs( self, service_key, statuses_to_pairs ):
        for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            if service_key_lookup in self._service_keys_to_info:
                ( blacklist, censorships ) = self._service_keys_to_info[ service_key_lookup ]
                new_statuses_to_pairs = HydrusData.default_dict_set()
                for ( status, pairs ) in statuses_to_pairs.items():
                    new_statuses_to_pairs[ status ] = { ( one, two ) for ( one, two ) in pairs if self._CensorshipMatches( one, blacklist, censorships ) and self._CensorshipMatches( two, blacklist, censorships ) }
                statuses_to_pairs = new_statuses_to_pairs
        return statuses_to_pairs
    
    def FilterServiceKeysToStatusesToTags( self, service_keys_to_statuses_to_tags ):
        if CC.COMBINED_TAG_SERVICE_KEY in self._service_keys_to_info:
            ( blacklist, censorships ) = self._service_keys_to_info[ CC.COMBINED_TAG_SERVICE_KEY ]
            service_keys = service_keys_to_statuses_to_tags.keys()
            for service_key in service_keys:
                statuses_to_tags = service_keys_to_statuses_to_tags[ service_key ]
                statuses = statuses_to_tags.keys()
                for status in statuses:
                    tags = statuses_to_tags[ status ]
                    statuses_to_tags[ status ] = { tag for tag in tags if self._CensorshipMatches( tag, blacklist, censorships ) }
        for ( service_key, ( blacklist, censorships ) ) in self._service_keys_to_info.items():
            if service_key == CC.COMBINED_TAG_SERVICE_KEY:
                continue
            if service_key in service_keys_to_statuses_to_tags:
                statuses_to_tags = service_keys_to_statuses_to_tags[ service_key ]
                statuses = statuses_to_tags.keys()
                for status in statuses:
                    tags = statuses_to_tags[ status ]
                    statuses_to_tags[ status ] = { tag for tag in tags if self._CensorshipMatches( tag, blacklist, censorships ) }
        return service_keys_to_statuses_to_tags
    
    def FilterTags( self, service_key, tags ):
        for service_key_lookup in ( CC.COMBINED_TAG_SERVICE_KEY, service_key ):
            if service_key_lookup in self._service_keys_to_info:
                ( blacklist, censorships ) = self._service_keys_to_info[ service_key_lookup ]
                tags = { tag for tag in tags if self._CensorshipMatches( tag, blacklist, censorships ) }
        return tags
class TagParentsManager( object ):
2015-11-25 22:00:57 +00:00
def __init__( self, controller ):
self._controller = controller
2015-08-05 18:42:35 +00:00
2015-11-11 21:20:41 +00:00
self._service_keys_to_children_to_parents = collections.defaultdict( HydrusData.default_dict_list )
2015-08-05 18:42:35 +00:00
self._RefreshParents()
self._lock = threading.Lock()
2015-11-25 22:00:57 +00:00
self._controller.sub( self, 'RefreshParents', 'notify_new_parents' )
2015-08-05 18:42:35 +00:00
    def _RefreshParents( self ):

        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_parents' )

        # first collapse siblings

        sibling_manager = self._controller.GetManager( 'tag_siblings' )

        collapsed_service_keys_to_statuses_to_pairs = collections.defaultdict( HydrusData.default_dict_set )

        for ( service_key, statuses_to_pairs ) in service_keys_to_statuses_to_pairs.items():

            if service_key == CC.COMBINED_TAG_SERVICE_KEY: continue

            for ( status, pairs ) in statuses_to_pairs.items():

                pairs = sibling_manager.CollapsePairs( pairs )

                collapsed_service_keys_to_statuses_to_pairs[ service_key ][ status ] = pairs

        # now collapse current and pending

        service_keys_to_pairs_flat = HydrusData.default_dict_set()

        for ( service_key, statuses_to_pairs ) in collapsed_service_keys_to_statuses_to_pairs.items():

            pairs_flat = statuses_to_pairs[ HC.CURRENT ].union( statuses_to_pairs[ HC.PENDING ] )

            service_keys_to_pairs_flat[ service_key ] = pairs_flat

        # now create the combined tag service

        combined_pairs_flat = set()

        for pairs_flat in service_keys_to_pairs_flat.values():

            combined_pairs_flat.update( pairs_flat )

        service_keys_to_pairs_flat[ CC.COMBINED_TAG_SERVICE_KEY ] = combined_pairs_flat

        #

        service_keys_to_simple_children_to_parents = BuildServiceKeysToSimpleChildrenToParents( service_keys_to_pairs_flat )

        self._service_keys_to_children_to_parents = BuildServiceKeysToChildrenToParents( service_keys_to_simple_children_to_parents )

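    # A worked example of the refresh pipeline above, as a hedged sketch; the
    # tag names here are hypothetical:
    #
    #   siblings collapse first, so with 'lotr' -> 'series:lord of the rings'
    #   a stored pair ( 'legolas', 'lotr' ) becomes
    #   ( 'legolas', 'series:lord of the rings' ).
    #
    #   flattening then unions HC.CURRENT and HC.PENDING pairs per service,
    #   and the combined service gets the union across all services.
    #
    #   BuildServiceKeysToChildrenToParents finally walks chains, so pairs
    #   ( 'a', 'b' ) and ( 'b', 'c' ) give child 'a' the parents [ 'b', 'c' ].
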
    def ExpandPredicates( self, service_key, predicates ):

        new_options = self._controller.GetNewOptions()

        if new_options.GetBoolean( 'apply_all_parents_to_all_services' ):

            service_key = CC.COMBINED_TAG_SERVICE_KEY

        results = []

        with self._lock:

            for predicate in predicates:

                results.append( predicate )

                if predicate.GetType() == HC.PREDICATE_TYPE_TAG:

                    tag = predicate.GetValue()

                    parents = self._service_keys_to_children_to_parents[ service_key ][ tag ]

                    for parent in parents:

                        parent_predicate = ClientSearch.Predicate( HC.PREDICATE_TYPE_PARENT, parent )

                        results.append( parent_predicate )

        return results

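    # For example (hypothetical tags): expanding [ 'legolas' ] where 'legolas'
    # has parents [ 'series:lord of the rings' ] appends the parent predicate
    # immediately after its child, preserving the child-then-parents ordering
    # that the children-to-parents build maintains.
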
    def ExpandTags( self, service_key, tags ):

        new_options = self._controller.GetNewOptions()

        if new_options.GetBoolean( 'apply_all_parents_to_all_services' ):

            service_key = CC.COMBINED_TAG_SERVICE_KEY

        with self._lock:

            tags_results = set( tags )

            for tag in tags:

                tags_results.update( self._service_keys_to_children_to_parents[ service_key ][ tag ] )

            return tags_results

    def GetParents( self, service_key, tag ):

        new_options = self._controller.GetNewOptions()

        if new_options.GetBoolean( 'apply_all_parents_to_all_services' ):

            service_key = CC.COMBINED_TAG_SERVICE_KEY

        with self._lock:

            return self._service_keys_to_children_to_parents[ service_key ][ tag ]

    def RefreshParents( self ):

        with self._lock:

            self._RefreshParents()

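# A minimal, self-contained sketch of the ExpandTags logic above, using a
# plain dict in place of the manager's per-service defaultdicts; this helper
# and its tag names are hypothetical and for illustration only.
def _expand_tags_example( children_to_parents, tags ):

    # start with the original tags, then union in every known parent
    expanded = set( tags )

    for tag in tags:

        expanded.update( children_to_parents.get( tag, [] ) )

    return expanded

# _expand_tags_example( { 'legolas' : [ 'series:lord of the rings' ] }, { 'legolas' } )
# == { 'legolas', 'series:lord of the rings' }
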
class TagSiblingsManager( object ):

    def __init__( self, controller ):

        self._controller = controller

        self._RefreshSiblings()

        self._lock = threading.Lock()

        self._controller.sub( self, 'RefreshSiblings', 'notify_new_siblings_data' )

    def _CollapseTags( self, tags ):

        return { self._siblings[ tag ] if tag in self._siblings else tag for tag in tags }

    def _RefreshSiblings( self ):

        service_keys_to_statuses_to_pairs = self._controller.Read( 'tag_siblings' )

        processed_siblings = CombineTagSiblingPairs( service_keys_to_statuses_to_pairs )

        ( self._siblings, self._reverse_lookup ) = CollapseTagSiblingChains( processed_siblings )

        self._controller.pub( 'new_siblings_gui' )

    def GetAutocompleteSiblings( self, search_text, exact_match = False ):

        with self._lock:

            if exact_match:

                if search_text in self._siblings:

                    key_based_matching_values = { self._siblings[ search_text ] }

                else:

                    key_based_matching_values = set()

                value_based_matching_values = { value for value in self._siblings.values() if value == search_text }

            else:

                matching_keys = ClientSearch.FilterTagsBySearchEntry( search_text, self._siblings.keys(), search_siblings = False )

                key_based_matching_values = { self._siblings[ key ] for key in matching_keys }

                value_based_matching_values = ClientSearch.FilterTagsBySearchEntry( search_text, self._siblings.values(), search_siblings = False )

            matching_values = key_based_matching_values.union( value_based_matching_values )

            # all the matching values have a matching sibling somewhere in their network
            # so now fetch the networks

            lists_of_matching_keys = [ self._reverse_lookup[ value ] for value in matching_values ]

            matching_keys = itertools.chain.from_iterable( lists_of_matching_keys )

            matches = matching_values.union( matching_keys )

            return matches

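    # For instance (hypothetical data): with siblings { 'lotr' : 'series:lord of the rings' }
    # and reverse_lookup { 'series:lord of the rings' : [ 'lotr' ] }, searching
    # 'lotr' matches the key, pulls in its canonical value, and the reverse
    # lookup then returns the whole network: { 'lotr', 'series:lord of the rings' }.
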
    def GetSibling( self, tag ):

        with self._lock:

            if tag in self._siblings: return self._siblings[ tag ]
            else: return None

    def GetAllSiblings( self, tag ):

        with self._lock:

            if tag in self._siblings:

                new_tag = self._siblings[ tag ]

            elif tag in self._reverse_lookup:

                new_tag = tag

            else:

                return [ tag ]

            all_siblings = list( self._reverse_lookup[ new_tag ] )

            all_siblings.append( new_tag )

            return all_siblings

    def RefreshSiblings( self ):

        with self._lock:

            self._RefreshSiblings()

    def CollapseNamespacedTags( self, namespace, tags ):

        with self._lock:

            results = set()

            for tag in tags:

                full_tag = namespace + ':' + tag

                if full_tag in self._siblings:

                    sibling = self._siblings[ full_tag ]

                    # the sibling may live in a different namespace, so strip it back down to the subtag
                    if ':' in sibling: sibling = sibling.split( ':', 1 )[1]

                    results.add( sibling )

                else:

                    results.add( tag )

            return results

    def CollapsePredicates( self, predicates ):

        with self._lock:

            results = [ predicate for predicate in predicates if predicate.GetType() != HC.PREDICATE_TYPE_TAG ]

            tags_to_predicates = { predicate.GetValue() : predicate for predicate in predicates if predicate.GetType() == HC.PREDICATE_TYPE_TAG }

            tags = tags_to_predicates.keys()

            tags_to_include_in_results = set()

            for tag in tags:

                if tag in self._siblings:

                    old_tag = tag

                    old_predicate = tags_to_predicates[ old_tag ]

                    new_tag = self._siblings[ old_tag ]

                    if new_tag not in tags_to_predicates:

                        ( old_pred_type, old_value, old_inclusive ) = old_predicate.GetInfo()

                        new_predicate = ClientSearch.Predicate( old_pred_type, new_tag, old_inclusive )

                        tags_to_predicates[ new_tag ] = new_predicate

                        tags_to_include_in_results.add( new_tag )

                    new_predicate = tags_to_predicates[ new_tag ]

                    # fold the old tag's counts into its sibling's predicate

                    current_count = old_predicate.GetCount( HC.CURRENT )
                    pending_count = old_predicate.GetCount( HC.PENDING )

                    new_predicate.AddToCount( HC.CURRENT, current_count )
                    new_predicate.AddToCount( HC.PENDING, pending_count )

                else:

                    tags_to_include_in_results.add( tag )

            results.extend( [ tags_to_predicates[ tag ] for tag in tags_to_include_in_results ] )

            return results

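    # E.g. (hypothetical counts): with sibling 'lotr' -> 'series:lord of the rings',
    # a 'lotr' predicate counting 3 and a 'series:lord of the rings' predicate
    # counting 5 collapse to one 'series:lord of the rings' predicate counting 8.
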
    def CollapsePairs( self, pairs ):

        with self._lock:

            result = set()

            for ( a, b ) in pairs:

                if a in self._siblings: a = self._siblings[ a ]
                if b in self._siblings: b = self._siblings[ b ]

                result.add( ( a, b ) )

            return result

    def CollapseStatusesToTags( self, statuses_to_tags ):

        with self._lock:

            statuses = statuses_to_tags.keys()

            new_statuses_to_tags = HydrusData.default_dict_set()

            for status in statuses:

                new_statuses_to_tags[ status ] = self._CollapseTags( statuses_to_tags[ status ] )

            return new_statuses_to_tags

    def CollapseTags( self, tags ):

        with self._lock:

            return self._CollapseTags( tags )

    def CollapseTagsToCount( self, tags_to_count ):

        with self._lock:

            results = collections.Counter()

            for ( tag, count ) in tags_to_count.items():

                if tag in self._siblings: tag = self._siblings[ tag ]

                results[ tag ] += count

            return results

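# A minimal, illustration-only sketch of CollapseTagsToCount above, with a
# plain dict standing in for the manager's sibling map; the helper and the
# tag names in the usage note are hypothetical. Uses the module's existing
# collections import.
def _collapse_tags_to_count_example( siblings, tags_to_count ):

    results = collections.Counter()

    for ( tag, count ) in tags_to_count.items():

        # a tag and its sibling merge their counts under the canonical tag
        if tag in siblings: tag = siblings[ tag ]

        results[ tag ] += count

    return results

# _collapse_tags_to_count_example( { 'lotr' : 'series:lord of the rings' }, { 'lotr' : 3, 'series:lord of the rings' : 5 } )
# == Counter( { 'series:lord of the rings' : 8 } )
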
class UndoManager( object ):

    def __init__( self, controller ):

        self._controller = controller

        self._commands = []
        self._inverted_commands = []

        self._current_index = 0

        self._lock = threading.Lock()

        self._controller.sub( self, 'Undo', 'undo' )
        self._controller.sub( self, 'Redo', 'redo' )

    def _FilterServiceKeysToContentUpdates( self, service_keys_to_content_updates ):

        filtered_service_keys_to_content_updates = {}

        for ( service_key, content_updates ) in service_keys_to_content_updates.items():

            filtered_content_updates = []

            for content_update in content_updates:

                ( data_type, action, row ) = content_update.ToTuple()

                # only invertible file and mapping updates can be undone

                if data_type == HC.CONTENT_TYPE_FILES:

                    if action in ( HC.CONTENT_UPDATE_ADD, HC.CONTENT_UPDATE_DELETE, HC.CONTENT_UPDATE_UNDELETE, HC.CONTENT_UPDATE_RESCIND_PETITION, HC.CONTENT_UPDATE_ADVANCED ): continue

                elif data_type == HC.CONTENT_TYPE_MAPPINGS:

                    if action in ( HC.CONTENT_UPDATE_RESCIND_PETITION, HC.CONTENT_UPDATE_ADVANCED ): continue

                else: continue

                filtered_content_update = HydrusData.ContentUpdate( data_type, action, row )

                filtered_content_updates.append( filtered_content_update )

            if len( filtered_content_updates ) > 0:

                filtered_service_keys_to_content_updates[ service_key ] = filtered_content_updates

        return filtered_service_keys_to_content_updates

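    # Reading the filter above: for files, the archive/inbox and pend/petition
    # toggles survive, while adds, deletes, undeletes, rescind-petitions and
    # advanced updates are dropped. For mappings, everything except
    # rescind-petition and advanced survives. Anything that survives here must
    # have an inverse in the method below.
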
    def _InvertServiceKeysToContentUpdates( self, service_keys_to_content_updates ):

        inverted_service_keys_to_content_updates = {}

        for ( service_key, content_updates ) in service_keys_to_content_updates.items():

            inverted_content_updates = []

            for content_update in content_updates:

                ( data_type, action, row ) = content_update.ToTuple()

                inverted_row = row

                if data_type == HC.CONTENT_TYPE_FILES:

                    if action == HC.CONTENT_UPDATE_ARCHIVE: inverted_action = HC.CONTENT_UPDATE_INBOX
                    elif action == HC.CONTENT_UPDATE_INBOX: inverted_action = HC.CONTENT_UPDATE_ARCHIVE
                    elif action == HC.CONTENT_UPDATE_PEND: inverted_action = HC.CONTENT_UPDATE_RESCIND_PEND
                    elif action == HC.CONTENT_UPDATE_RESCIND_PEND: inverted_action = HC.CONTENT_UPDATE_PEND
                    elif action == HC.CONTENT_UPDATE_PETITION:

                        inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION

                        ( hashes, reason ) = row

                        inverted_row = hashes

                elif data_type == HC.CONTENT_TYPE_MAPPINGS:

                    if action == HC.CONTENT_UPDATE_ADD: inverted_action = HC.CONTENT_UPDATE_DELETE
                    elif action == HC.CONTENT_UPDATE_DELETE: inverted_action = HC.CONTENT_UPDATE_ADD
                    elif action == HC.CONTENT_UPDATE_PEND: inverted_action = HC.CONTENT_UPDATE_RESCIND_PEND
                    elif action == HC.CONTENT_UPDATE_RESCIND_PEND: inverted_action = HC.CONTENT_UPDATE_PEND
                    elif action == HC.CONTENT_UPDATE_PETITION:

                        inverted_action = HC.CONTENT_UPDATE_RESCIND_PETITION

                        ( tag, hashes, reason ) = row

                        inverted_row = ( tag, hashes )

                inverted_content_update = HydrusData.ContentUpdate( data_type, inverted_action, inverted_row )

                inverted_content_updates.append( inverted_content_update )

            inverted_service_keys_to_content_updates[ service_key ] = inverted_content_updates

        return inverted_service_keys_to_content_updates

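    # The inversion table above, in short:
    #
    #   files:    archive <-> inbox, pend <-> rescind pend, petition -> rescind petition
    #   mappings: add <-> delete, pend <-> rescind pend, petition -> rescind petition
    #
    # petition rows also drop their reason on inversion, since a rescind only
    # needs the hashes (or tag and hashes) to act on.
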
    def AddCommand( self, action, *args, **kwargs ):

        with self._lock:

            inverted_action = action
            inverted_args = args
            inverted_kwargs = kwargs

            if action == 'content_updates':

                ( service_keys_to_content_updates, ) = args

                service_keys_to_content_updates = self._FilterServiceKeysToContentUpdates( service_keys_to_content_updates )

                if len( service_keys_to_content_updates ) == 0: return

                inverted_service_keys_to_content_updates = self._InvertServiceKeysToContentUpdates( service_keys_to_content_updates )

                if len( inverted_service_keys_to_content_updates ) == 0: return

                inverted_args = ( inverted_service_keys_to_content_updates, )

            else: return

            # a new command invalidates the redo tail, so truncate before appending

            self._commands = self._commands[ : self._current_index ]
            self._inverted_commands = self._inverted_commands[ : self._current_index ]

            self._commands.append( ( action, args, kwargs ) )
            self._inverted_commands.append( ( inverted_action, inverted_args, inverted_kwargs ) )

            self._current_index += 1

            self._controller.pub( 'notify_new_undo' )

    def GetUndoRedoStrings( self ):

        with self._lock:

            ( undo_string, redo_string ) = ( None, None )

            if self._current_index > 0:

                undo_index = self._current_index - 1

                ( action, args, kwargs ) = self._commands[ undo_index ]

                if action == 'content_updates':

                    ( service_keys_to_content_updates, ) = args

                    undo_string = 'undo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )

            if len( self._commands ) > 0 and self._current_index < len( self._commands ):

                redo_index = self._current_index

                ( action, args, kwargs ) = self._commands[ redo_index ]

                if action == 'content_updates':

                    ( service_keys_to_content_updates, ) = args

                    redo_string = 'redo ' + ClientData.ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates )

            return ( undo_string, redo_string )

    def Undo( self ):

        action = None

        with self._lock:

            if self._current_index > 0:

                self._current_index -= 1

                ( action, args, kwargs ) = self._inverted_commands[ self._current_index ]

        if action is not None:

            self._controller.WriteSynchronous( action, *args, **kwargs )

            self._controller.pub( 'notify_new_undo' )

    def Redo( self ):

        action = None

        with self._lock:

            if len( self._commands ) > 0 and self._current_index < len( self._commands ):

                ( action, args, kwargs ) = self._commands[ self._current_index ]

                self._current_index += 1

        if action is not None:

            self._controller.WriteSynchronous( action, *args, **kwargs )

            self._controller.pub( 'notify_new_undo' )

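# A tiny, illustration-only model of the undo stack's index arithmetic above:
# commands[ : current_index ] are undoable, commands[ current_index : ] are
# redoable, and adding a new command truncates the redo tail. The helper and
# its string commands are hypothetical.
def _undo_stack_example():

    commands = [ 'a', 'b', 'c' ]
    current_index = 3

    # undo twice: step the index back
    current_index -= 2

    # now 'b' and 'c' are redoable
    redoable = commands[ current_index : ]

    # adding a new command 'd' discards the redo tail
    commands = commands[ : current_index ] + [ 'd' ]

    return ( redoable, commands )

# _undo_stack_example() == ( [ 'b', 'c' ], [ 'a', 'd' ] )
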
class WebSessionManagerClient( object ):

    def __init__( self, controller ):

        self._controller = controller

        existing_sessions = self._controller.Read( 'web_sessions' )

        self._names_to_sessions = { name : ( cookies, expires ) for ( name, cookies, expires ) in existing_sessions }

        self._lock = threading.Lock()

    def GetCookies( self, name ):

        now = HydrusData.GetNow()

        with self._lock:

            if name in self._names_to_sessions:

                ( cookies, expires ) = self._names_to_sessions[ name ]

                # sessions are dropped five minutes early so we never send nearly-stale cookies
                if HydrusData.TimeHasPassed( expires - 300 ): del self._names_to_sessions[ name ]
                else: return cookies

            # name not found, or expired

            if name == 'deviant art':

                ( response_gumpf, cookies ) = self._controller.DoHTTP( HC.GET, 'http://www.deviantart.com/', return_cookies = True )

                expires = now + 30 * 86400

            if name == 'hentai foundry':

                ( response_gumpf, cookies ) = self._controller.DoHTTP( HC.GET, 'http://www.hentai-foundry.com/?enterAgree=1', return_cookies = True )

                raw_csrf = cookies[ 'YII_CSRF_TOKEN' ] # 19b05b536885ec60b8b37650a32f8deb11c08cd1s%3A40%3A%222917dcfbfbf2eda2c1fbe43f4d4c4ec4b6902b32%22%3B

                processed_csrf = urllib.unquote( raw_csrf ) # 19b05b536885ec60b8b37650a32f8deb11c08cd1s:40:"2917dcfbfbf2eda2c1fbe43f4d4c4ec4b6902b32";

                csrf_token = processed_csrf.split( '"' )[1] # the 2917... bit

                hentai_foundry_form_info = ClientDefaults.GetDefaultHentaiFoundryInfo()

                hentai_foundry_form_info[ 'YII_CSRF_TOKEN' ] = csrf_token

                body = urllib.urlencode( hentai_foundry_form_info )

                request_headers = {}

                ClientNetworking.AddCookiesToHeaders( cookies, request_headers )

                request_headers[ 'Content-Type' ] = 'application/x-www-form-urlencoded'

                self._controller.DoHTTP( HC.POST, 'http://www.hentai-foundry.com/site/filters', request_headers = request_headers, body = body )

                expires = now + 60 * 60

            elif name == 'pixiv':

                result = self._controller.Read( 'serialisable_simple', 'pixiv_account' )

                if result is None:

                    raise HydrusExceptions.DataMissing( 'You need to set up your pixiv credentials in services->manage pixiv account.' )

                ( id, password ) = result

                form_fields = {}

                form_fields[ 'mode' ] = 'login'
                form_fields[ 'pixiv_id' ] = id
                form_fields[ 'pass' ] = password
                form_fields[ 'skip' ] = '1'

                body = urllib.urlencode( form_fields )

                headers = {}

                headers[ 'Content-Type' ] = 'application/x-www-form-urlencoded'

                ( response_gumpf, cookies ) = self._controller.DoHTTP( HC.POST, 'http://www.pixiv.net/login.php', request_headers = headers, body = body, return_cookies = True )

                # an underscore appears in PHPSESSID only for logged-in php sessions
                if 'PHPSESSID' not in cookies or '_' not in cookies[ 'PHPSESSID' ]: raise Exception( 'Pixiv login credentials not accepted!' )

                expires = now + 30 * 86400

            self._names_to_sessions[ name ] = ( cookies, expires )

            self._controller.Write( 'web_session', name, cookies, expires )

            return cookies

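# A small, illustration-only sketch of the session expiry check used in
# GetCookies above: a session counts as dead five minutes before its actual
# expiry time, so a fresh login is fetched instead. The helper name and its
# explicit arguments are hypothetical.
def _session_needs_refresh_example( now, expires, buffer = 300 ):

    # mirrors HydrusData.TimeHasPassed( expires - buffer ) with explicit args
    return now >= expires - buffer

# _session_needs_refresh_example( now = 1000, expires = 1200 ) == True   # inside the 300s buffer
# _session_needs_refresh_example( now = 1000, expires = 2000 ) == False  # plenty of time left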