Version 263

Hydrus Network Developer 2017-07-05 16:09:28 -05:00
parent 39fc17268f
commit cb098bd993
30 changed files with 1779 additions and 687 deletions

View File

@ -8,6 +8,42 @@
<div class="content">
<h3>changelog</h3>
<ul>
<li><h3>version 263</h3></li>
<ul>
<li>greatly improved how gui sessions are loaded--now the page tabs are loaded instantly, but the thumbnails are loaded in the background. session loading should be significantly less laggy and buggy</li>
<li>the issue of pages sometimes initially sizing at zero size (which could be caused by minimising the client while a session load was ongoing) should hence be much improved, if not fixed completely!</li>
<li>gui sessions will now load their files in the exact order in which they were saved--behaviour that I think was not previously always reliable</li>
<li>more general network code work and polishing</li>
<li>added and improved unit tests for network code</li>
<li>improved how short-time-delta data bandwidth is reported</li>
<li>improved how short-time-delta data bandwidth is tested</li>
<li>wrote a networkjobcontrol to display and control the new network job object</li>
<li>tumblr parser now produces urls without the 68. subdomain</li>
<li>tumblr parser now produces https urls</li>
<li>cleaned up tumblr parser a little</li>
<li>url caches will strip the 68. subdomain from existing tumblr urls and convert them to the new raw format, clearing out dupes along the way</li>
<li>url caches will convert existing tumblr and pixiv urls to https, clearing out dupes along the way</li>
<li>the pixiv parser now deals with missing creator/title tags without errors</li>
<li>extended the specific file domain tag cache to also store deleted mappings, resulting in much faster request building for clients with large numbers of deleted mappings</li>
<li>improved some downloader page queue text display timing</li>
<li>added support for more types of mp4 file</li>
<li>improved how some memory maintenance calls work</li>
<li>improved how hydrus datacaches track their recently-used-data fifo list</li>
<li>pages now regularly clear out spare thumbnail canvas bmps</li>
<li>pages now regularly clear out cached thumbnail canvas bmps when they are not the currently viewed page</li>
<li>import caches, when asked for url membership, will now test both the http and https versions of the url</li>
<li>maybe improved how 'open in file browser' works in windows</li>
<li>fixed the 'recount video frames' advanced thumbnail menu entry, which wasn't working with the new ffmpeg wrapper</li>
<li>moved some bloaty master hash data out of client.db and into client.master.db</li>
<li>pubsub profile will no longer profile 'message', as it just makes for spam</li>
<li>reduced some serverside pubsub spam</li>
<li>reduced some significant clientside pubsub spam that I think was smashing the gui event loop at inconvenient moments</li>
<li>improved some client shutdown object sync code</li>
<li>fixed an issue where some duplicate maintenance popups would not clear themselves up properly if interrupted mid-job</li>
<li>cleaned up some http-https conversion and comparison code</li>
<li>fixed some status-setting code that meant thumbnail pages were sometimes setting status after they were replaced and scheduled for deletion</li>
<li>misc improvements</li>
</ul>
<li><h3>version 262</h3></li>
<ul>
<li>added apng support!</li>
@ -72,7 +108,7 @@
</ul>
<li><h3>version 260</h3></li>
<ul>
<li>?fixed video parsing when the video metadata includes random non-utf-friendly garbage</li>
<li>fixed video parsing when the video metadata includes random non-utf-friendly garbage</li>
<li>fixed video parsing when ffmpeg reports no fps at all</li>
<li>improved video frame counting accuracy</li>
<li>thumbnail waterfall process now adapts to the current speed of the cache and can hence use significantly less overhead</li>

View File

@ -1238,18 +1238,18 @@ class DataCache( object ):
self._cache_size = cache_size
self._keys_to_data = {}
self._keys_fifo = []
self._keys_fifo = collections.OrderedDict()
self._total_estimated_memory_footprint = 0
self._lock = threading.Lock()
wx.CallLater( 60 * 1000, self.MaintainCache )
self._controller.sub( self, 'MaintainCache', 'memory_maintenance_pulse' )
def _DeleteItem( self ):
( deletee_key, last_access_time ) = self._keys_fifo.pop( 0 )
( deletee_key, last_access_time ) = self._keys_fifo.popitem( last = False )
deletee_data = self._keys_to_data[ deletee_key ]
@ -1264,18 +1264,14 @@ class DataCache( object ):
def _TouchKey( self, key ):
for ( i, ( fifo_key, last_access_time ) ) in enumerate( self._keys_fifo ):
# have to delete first, rather than overwriting, so the ordereddict updates its internal order
if key in self._keys_fifo:
if fifo_key == key:
del self._keys_fifo[ i ]
break
del self._keys_fifo[ key ]
self._keys_fifo.append( ( key, HydrusData.GetNow() ) )
self._keys_fifo[ key ] = HydrusData.GetNow()
def Clear( self ):
@ -1283,7 +1279,7 @@ class DataCache( object ):
with self._lock:
self._keys_to_data = {}
self._keys_fifo = []
self._keys_fifo = collections.OrderedDict()
self._total_estimated_memory_footprint = 0
@ -1304,7 +1300,7 @@ class DataCache( object ):
self._keys_to_data[ key ] = data
self._keys_fifo.append( ( key, HydrusData.GetNow() ) )
self._TouchKey( key )
self._RecalcMemoryUsage()
@ -1363,7 +1359,7 @@ class DataCache( object ):
else:
( key, last_access_time ) = self._keys_fifo[ 0 ]
( key, last_access_time ) = next( self._keys_fifo.iteritems() )
if HydrusData.TimeHasPassed( last_access_time + 1200 ):
@ -1377,8 +1373,6 @@ class DataCache( object ):
wx.CallLater( 60 * 1000, self.MaintainCache )
class LocalBooruCache( object ):
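
For context on the OrderedDict switch above: assigning to an existing key does not change its position, so _TouchKey deletes and reinserts to refresh recency, and popitem( last = False ) evicts the stalest entry. A minimal standalone sketch of that behaviour:

import collections

fifo = collections.OrderedDict()

fifo[ 'a' ] = 1
fifo[ 'b' ] = 2

fifo[ 'a' ] = 3 # plain overwrite: order is still [ 'a', 'b' ]

del fifo[ 'a' ]
fifo[ 'a' ] = 3 # delete-then-reinsert, as _TouchKey does: order is now [ 'b', 'a' ]

print( fifo.popitem( last = False ) ) # ( 'b', 2 ), the least recently touched item

(Python 3.2+ offers move_to_end for this, but the iteritems call above pins this code to Python 2, where delete-and-reinsert is the idiom.)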

View File

@ -247,9 +247,10 @@ new_page_goes_string_lookup[ NEW_PAGE_GOES_FAR_RIGHT ] = 'go far right'
NETWORK_CONTEXT_GLOBAL = 0
NETWORK_CONTEXT_HYDRUS = 1
NETWORK_CONTEXT_DOMAIN = 1
NETWORK_CONTEXT_DOWNLOADER = 1
NETWORK_CONTEXT_SUBSCRIPTION = 1
NETWORK_CONTEXT_DOMAIN = 2
NETWORK_CONTEXT_DOWNLOADER = 3
NETWORK_CONTEXT_DOWNLOADER_QUERY = 4
NETWORK_CONTEXT_SUBSCRIPTION = 5
SHORTCUT_MODIFIER_CTRL = 0
SHORTCUT_MODIFIER_ALT = 1
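
A note on the renumbering above: the old constants gave the domain, downloader, and subscription contexts the same value as NETWORK_CONTEXT_HYDRUS. Assuming NetworkContext equality and hashing key off these type values, the default contexts built from them would have compared equal and clobbered each other's rules; a hypothetical illustration:

# with the old values, these three 'different' contexts all had context type 1
hydrus_context = ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_HYDRUS )
domain_context = ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN )
subscription_context = ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_SUBSCRIPTION )

bandwidth_manager.SetRules( domain_context, rules ) # under the old numbering, this key...
bandwidth_manager.SetRules( subscription_context, rules ) # ...and this one could collide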

View File

@ -210,7 +210,7 @@ class Controller( HydrusController.HydrusController ):
if move_knocked_us_out_of_idle:
self.pub( 'refresh_status' )
self.pubimmediate( 'refresh_status' )
@ -478,7 +478,7 @@ class Controller( HydrusController.HydrusController ):
HG.force_idle_mode = not HG.force_idle_mode
self.pub( 'wake_daemons' )
self.pub( 'refresh_status' )
self.pubimmediate( 'refresh_status' )
def GetApp( self ):
@ -581,6 +581,18 @@ class Controller( HydrusController.HydrusController ):
self.InitClientFilesManager()
#
bandwidth_manager = self.Read( 'serialisable', HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER )
session_manager = self.Read( 'serialisable', HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER )
login_manager = ClientNetworking.NetworkLoginManager()
self.network_engine = ClientNetworking.NetworkEngine( self, bandwidth_manager, session_manager, login_manager )
self.CallToThread( self.network_engine.MainLoop )
#
self._client_session_manager = ClientCaches.HydrusSessionManager( self )
self._shortcuts_manager = ClientCaches.ShortcutsManager( self )
@ -778,9 +790,9 @@ class Controller( HydrusController.HydrusController ):
def MaintainMemory( self ):
def MaintainMemorySlow( self ):
HydrusController.HydrusController.MaintainMemory( self )
HydrusController.HydrusController.MaintainMemorySlow( self )
if HydrusData.TimeHasPassed( self._timestamps[ 'last_page_change' ] + 30 * 60 ):
@ -1031,6 +1043,20 @@ class Controller( HydrusController.HydrusController ):
self.WriteSynchronous( 'dirty_services', dirty_services )
if self.network_engine.bandwidth_manager.IsDirty():
self.WriteSynchronous( 'serialisable', self.network_engine.bandwidth_manager )
self.network_engine.bandwidth_manager.SetClean()
if self.network_engine.session_manager.IsDirty():
self.WriteSynchronous( 'serialisable', self.network_engine.session_manager )
self.network_engine.session_manager.SetClean()
def SetServices( self, services ):
@ -1043,6 +1069,16 @@ class Controller( HydrusController.HydrusController ):
def ShutdownModel( self ):
if not HG.emergency_exit:
self.SaveDirtyObjects()
HydrusController.HydrusController.ShutdownModel( self )
def ShutdownView( self ):
if not HG.emergency_exit:

View File

@ -2,6 +2,7 @@ import ClientData
import ClientDefaults
import ClientImageHandling
import ClientMedia
import ClientNetworking
import ClientRatings
import ClientSearch
import ClientServices
@ -99,11 +100,13 @@ def GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id ):
cache_current_mappings_table_name = 'external_caches.specific_current_mappings_cache_' + suffix
cache_deleted_mappings_table_name = 'external_caches.specific_deleted_mappings_cache_' + suffix
cache_pending_mappings_table_name = 'external_caches.specific_pending_mappings_cache_' + suffix
ac_cache_table_name = 'external_caches.specific_ac_cache_' + suffix
return ( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name )
return ( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name )
def report_content_speed_to_job_key( job_key, rows_done, total_rows, precise_timestamp, num_rows, row_name ):
@ -1419,99 +1422,104 @@ class DB( HydrusDB.HydrusDB ):
pub_job_key = True
( total_num_hash_ids_in_cache, ) = self._c.execute( 'SELECT COUNT( * ) FROM shape_search_cache;' ).fetchone()
hash_ids = [ hash_id for ( hash_id, ) in self._c.execute( 'SELECT hash_id FROM shape_maintenance_phash_regen;' ) ]
client_files_manager = self._controller.client_files_manager
total_done_previously = total_num_hash_ids_in_cache - len( hash_ids )
for ( i, hash_id ) in enumerate( hash_ids ):
try:
job_key.SetVariable( 'popup_title', 'similar files metadata maintenance' )
( total_num_hash_ids_in_cache, ) = self._c.execute( 'SELECT COUNT( * ) FROM shape_search_cache;' ).fetchone()
if pub_job_key and not job_key_pubbed:
self._controller.pub( 'message', job_key )
job_key_pubbed = True
hash_ids = [ hash_id for ( hash_id, ) in self._c.execute( 'SELECT hash_id FROM shape_maintenance_phash_regen;' ) ]
( i_paused, should_quit ) = job_key.WaitIfNeeded()
client_files_manager = self._controller.client_files_manager
should_stop = stop_time is not None and HydrusData.TimeHasPassed( stop_time )
total_done_previously = total_num_hash_ids_in_cache - len( hash_ids )
if should_quit or should_stop:
for ( i, hash_id ) in enumerate( hash_ids ):
return
job_key.SetVariable( 'popup_title', 'similar files metadata maintenance' )
if i % 50 == 0:
gc.collect()
text = 'regenerating similar file metadata - ' + HydrusData.ConvertValueRangeToPrettyString( total_done_previously + i, total_num_hash_ids_in_cache )
HG.client_controller.pub( 'splash_set_status_text', text )
job_key.SetVariable( 'popup_text_1', text )
job_key.SetVariable( 'popup_gauge_1', ( total_done_previously + i, total_num_hash_ids_in_cache ) )
try:
hash = self._GetHash( hash_id )
mime = self._GetMime( hash_id )
if mime in HC.MIMES_WE_CAN_PHASH:
if pub_job_key and not job_key_pubbed:
path = client_files_manager.GetFilePath( hash, mime )
self._controller.pub( 'message', job_key )
if mime in ( HC.IMAGE_JPEG, HC.IMAGE_PNG ):
job_key_pubbed = True
( i_paused, should_quit ) = job_key.WaitIfNeeded()
should_stop = stop_time is not None and HydrusData.TimeHasPassed( stop_time )
if should_quit or should_stop:
return
if i % 50 == 0:
gc.collect()
text = 'regenerating similar file metadata - ' + HydrusData.ConvertValueRangeToPrettyString( total_done_previously + i, total_num_hash_ids_in_cache )
HG.client_controller.pub( 'splash_set_status_text', text )
job_key.SetVariable( 'popup_text_1', text )
job_key.SetVariable( 'popup_gauge_1', ( total_done_previously + i, total_num_hash_ids_in_cache ) )
try:
hash = self._GetHash( hash_id )
mime = self._GetMime( hash_id )
if mime in HC.MIMES_WE_CAN_PHASH:
try:
path = client_files_manager.GetFilePath( hash, mime )
if mime in ( HC.IMAGE_JPEG, HC.IMAGE_PNG ):
phashes = ClientImageHandling.GenerateShapePerceptualHashes( path )
except Exception as e:
HydrusData.Print( 'Could not generate phashes for ' + path )
HydrusData.PrintException( e )
phashes = []
try:
phashes = ClientImageHandling.GenerateShapePerceptualHashes( path )
except Exception as e:
HydrusData.Print( 'Could not generate phashes for ' + path )
HydrusData.PrintException( e )
phashes = []
else:
phashes = []
else:
except HydrusExceptions.FileMissingException:
phashes = []
except HydrusExceptions.FileMissingException:
existing_phash_ids = { phash_id for ( phash_id, ) in self._c.execute( 'SELECT phash_id FROM shape_perceptual_hash_map WHERE hash_id = ?;', ( hash_id, ) ) }
phashes = []
correct_phash_ids = self._CacheSimilarFilesAssociatePHashes( hash_id, phashes )
incorrect_phash_ids = existing_phash_ids.difference( correct_phash_ids )
if len( incorrect_phash_ids ) > 0:
self._CacheSimilarFilesDisassociatePHashes( hash_id, incorrect_phash_ids )
self._c.execute( 'DELETE FROM shape_maintenance_phash_regen WHERE hash_id = ?;', ( hash_id, ) )
existing_phash_ids = { phash_id for ( phash_id, ) in self._c.execute( 'SELECT phash_id FROM shape_perceptual_hash_map WHERE hash_id = ?;', ( hash_id, ) ) }
finally:
correct_phash_ids = self._CacheSimilarFilesAssociatePHashes( hash_id, phashes )
job_key.SetVariable( 'popup_text_1', 'done!' )
job_key.DeleteVariable( 'popup_gauge_1' )
incorrect_phash_ids = existing_phash_ids.difference( correct_phash_ids )
job_key.Finish()
job_key.Delete( 5 )
if len( incorrect_phash_ids ) > 0:
self._CacheSimilarFilesDisassociatePHashes( hash_id, incorrect_phash_ids )
self._c.execute( 'DELETE FROM shape_maintenance_phash_regen WHERE hash_id = ?;', ( hash_id, ) )
job_key.SetVariable( 'popup_text_1', 'done!' )
job_key.DeleteVariable( 'popup_gauge_1' )
job_key.Finish()
job_key.Delete( 30 )
def _CacheSimilarFilesMaintainTree( self, job_key = None, stop_time = None, abandon_if_other_work_to_do = False ):
@ -1536,56 +1544,61 @@ class DB( HydrusDB.HydrusDB ):
pub_job_key = True
job_key.SetVariable( 'popup_title', 'similar files metadata maintenance' )
rebalance_phash_ids = [ phash_id for ( phash_id, ) in self._c.execute( 'SELECT phash_id FROM shape_maintenance_branch_regen;' ) ]
num_to_do = len( rebalance_phash_ids )
while len( rebalance_phash_ids ) > 0:
try:
if pub_job_key and not job_key_pubbed:
self._controller.pub( 'message', job_key )
job_key_pubbed = True
( i_paused, should_quit ) = job_key.WaitIfNeeded()
should_stop = stop_time is not None and HydrusData.TimeHasPassed( stop_time )
if should_quit or should_stop:
return
num_done = num_to_do - len( rebalance_phash_ids )
text = 'rebalancing similar file metadata - ' + HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do )
HG.client_controller.pub( 'splash_set_status_text', text )
job_key.SetVariable( 'popup_text_1', text )
job_key.SetVariable( 'popup_gauge_1', ( num_done, num_to_do ) )
with HydrusDB.TemporaryIntegerTable( self._c, rebalance_phash_ids, 'phash_id' ) as temp_table_name:
# can't turn this into selectfromlist due to the order clause. we need to do this all at once
( biggest_phash_id, ) = self._c.execute( 'SELECT phash_id FROM shape_vptree NATURAL JOIN ' + temp_table_name + ' ORDER BY inner_population + outer_population DESC;' ).fetchone()
self._CacheSimilarFilesRegenerateBranch( job_key, biggest_phash_id )
job_key.SetVariable( 'popup_title', 'similar files metadata maintenance' )
rebalance_phash_ids = [ phash_id for ( phash_id, ) in self._c.execute( 'SELECT phash_id FROM shape_maintenance_branch_regen;' ) ]
job_key.SetVariable( 'popup_text_1', 'done!' )
job_key.DeleteVariable( 'popup_gauge_1' )
job_key.DeleteVariable( 'popup_text_2' ) # used in the regenbranch call
job_key.Finish()
job_key.Delete( 30 )
num_to_do = len( rebalance_phash_ids )
while len( rebalance_phash_ids ) > 0:
if pub_job_key and not job_key_pubbed:
self._controller.pub( 'message', job_key )
job_key_pubbed = True
( i_paused, should_quit ) = job_key.WaitIfNeeded()
should_stop = stop_time is not None and HydrusData.TimeHasPassed( stop_time )
if should_quit or should_stop:
return
num_done = num_to_do - len( rebalance_phash_ids )
text = 'rebalancing similar file metadata - ' + HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do )
HG.client_controller.pub( 'splash_set_status_text', text )
job_key.SetVariable( 'popup_text_1', text )
job_key.SetVariable( 'popup_gauge_1', ( num_done, num_to_do ) )
with HydrusDB.TemporaryIntegerTable( self._c, rebalance_phash_ids, 'phash_id' ) as temp_table_name:
# can't turn this into selectfromlist due to the order clause. we need to do this all at once
( biggest_phash_id, ) = self._c.execute( 'SELECT phash_id FROM shape_vptree NATURAL JOIN ' + temp_table_name + ' ORDER BY inner_population + outer_population DESC;' ).fetchone()
self._CacheSimilarFilesRegenerateBranch( job_key, biggest_phash_id )
rebalance_phash_ids = [ phash_id for ( phash_id, ) in self._c.execute( 'SELECT phash_id FROM shape_maintenance_branch_regen;' ) ]
finally:
job_key.SetVariable( 'popup_text_1', 'done!' )
job_key.DeleteVariable( 'popup_gauge_1' )
job_key.DeleteVariable( 'popup_text_2' ) # used in the regenbranch call
job_key.Finish()
job_key.Delete( 5 )
def _CacheSimilarFilesMaintenanceDue( self ):
@ -2127,7 +2140,7 @@ class DB( HydrusDB.HydrusDB ):
def _CacheSpecificMappingsAddFiles( self, file_service_id, tag_service_id, hash_ids ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self._c.executemany( 'INSERT OR IGNORE INTO ' + cache_files_table_name + ' VALUES ( ? );', ( ( hash_id, ) for hash_id in hash_ids ) )
@ -2143,11 +2156,16 @@ class DB( HydrusDB.HydrusDB ):
current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
deleted_mapping_ids_raw = self._c.execute( 'SELECT tag_id, hash_id FROM ' + deleted_mappings_table_name + ' WHERE hash_id IN ' + splayed_group_of_hash_ids + ';' ).fetchall()
deleted_mapping_ids_dict = HydrusData.BuildKeyToSetDict( deleted_mapping_ids_raw )
pending_mapping_ids_raw = self._c.execute( 'SELECT tag_id, hash_id FROM ' + pending_mappings_table_name + ' WHERE hash_id IN ' + splayed_group_of_hash_ids + ';' ).fetchall()
pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
all_ids_seen = set( current_mapping_ids_dict.keys() )
all_ids_seen.update( deleted_mapping_ids_dict.keys() )
all_ids_seen.update( pending_mapping_ids_dict.keys() )
for tag_id in all_ids_seen:
@ -2161,6 +2179,19 @@ class DB( HydrusDB.HydrusDB ):
self._c.executemany( 'INSERT OR IGNORE INTO ' + cache_current_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in current_hash_ids ) )
#
deleted_hash_ids = deleted_mapping_ids_dict[ tag_id ]
num_deleted = len( deleted_hash_ids )
if num_deleted > 0:
self._c.executemany( 'INSERT OR IGNORE INTO ' + cache_deleted_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in deleted_hash_ids ) )
#
pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
num_pending = len( pending_hash_ids )
@ -2187,7 +2218,7 @@ class DB( HydrusDB.HydrusDB ):
def _CacheSpecificMappingsAddMappings( self, file_service_id, tag_service_id, mappings_ids ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
for ( tag_id, hash_ids ) in mappings_ids:
@ -2216,17 +2247,23 @@ class DB( HydrusDB.HydrusDB ):
self._c.execute( 'UPDATE ' + ac_cache_table_name + ' SET current_count = current_count + ? WHERE tag_id = ?;', ( num_added, tag_id ) )
#
self._c.executemany( 'DELETE FROM ' + cache_deleted_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in hash_ids ) )
def _CacheSpecificMappingsDrop( self, file_service_id, tag_service_id ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self._c.execute( 'DROP TABLE IF EXISTS ' + cache_files_table_name + ';' )
self._c.execute( 'DROP TABLE IF EXISTS ' + cache_current_mappings_table_name + ';' )
self._c.execute( 'DROP TABLE IF EXISTS ' + cache_deleted_mappings_table_name + ';' )
self._c.execute( 'DROP TABLE IF EXISTS ' + cache_pending_mappings_table_name + ';' )
self._c.execute( 'DROP TABLE IF EXISTS ' + ac_cache_table_name + ';' )
@ -2234,7 +2271,7 @@ class DB( HydrusDB.HydrusDB ):
def _CacheSpecificMappingsDeleteFiles( self, file_service_id, tag_service_id, hash_ids ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self._c.executemany( 'DELETE FROM ' + cache_files_table_name + ' WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
@ -2248,11 +2285,16 @@ class DB( HydrusDB.HydrusDB ):
current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
deleted_mapping_ids_raw = self._c.execute( 'SELECT tag_id, hash_id FROM ' + cache_deleted_mappings_table_name + ' WHERE hash_id IN ' + splayed_group_of_hash_ids + ';' ).fetchall()
deleted_mapping_ids_dict = HydrusData.BuildKeyToSetDict( deleted_mapping_ids_raw )
pending_mapping_ids_raw = self._c.execute( 'SELECT tag_id, hash_id FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id IN ' + splayed_group_of_hash_ids + ';' ).fetchall()
pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
all_ids_seen = set( current_mapping_ids_dict.keys() )
all_ids_seen.update( deleted_mapping_ids_dict.keys() )
all_ids_seen.update( pending_mapping_ids_dict.keys() )
for tag_id in all_ids_seen:
@ -2266,6 +2308,19 @@ class DB( HydrusDB.HydrusDB ):
self._c.executemany( 'DELETE FROM ' + cache_current_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in current_hash_ids ) )
#
deleted_hash_ids = deleted_mapping_ids_dict[ tag_id ]
num_deleted = len( deleted_hash_ids )
if num_deleted > 0:
self._c.executemany( 'DELETE FROM ' + cache_deleted_mappings_table_name + ' WHERE tag_id = ? AND hash_id = ?;', ( ( tag_id, hash_id ) for hash_id in deleted_hash_ids ) )
#
pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
num_pending = len( pending_hash_ids )
@ -2289,7 +2344,7 @@ class DB( HydrusDB.HydrusDB ):
def _CacheSpecificMappingsDeleteMappings( self, file_service_id, tag_service_id, mappings_ids ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
for ( tag_id, hash_ids ) in mappings_ids:
@ -2308,12 +2363,16 @@ class DB( HydrusDB.HydrusDB ):
self._c.execute( 'DELETE FROM ' + ac_cache_table_name + ' WHERE tag_id = ? AND current_count = ? AND pending_count = ?;', ( tag_id, 0, 0 ) )
#
self._c.executemany( 'INSERT OR IGNORE INTO ' + cache_deleted_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in hash_ids ) )
def _CacheSpecificMappingsFilterHashIds( self, file_service_id, tag_service_id, hash_ids ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
select_statement = 'SELECT hash_id FROM ' + cache_files_table_name + ' WHERE hash_id IN %s;'
@ -2322,12 +2381,14 @@ class DB( HydrusDB.HydrusDB ):
def _CacheSpecificMappingsGenerate( self, file_service_id, tag_service_id ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self._c.execute( 'CREATE TABLE ' + cache_files_table_name + ' ( hash_id INTEGER PRIMARY KEY );' )
self._c.execute( 'CREATE TABLE ' + cache_current_mappings_table_name + ' ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;' )
self._c.execute( 'CREATE TABLE ' + cache_deleted_mappings_table_name + ' ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;' )
self._c.execute( 'CREATE TABLE ' + cache_pending_mappings_table_name + ' ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;' )
self._c.execute( 'CREATE TABLE ' + ac_cache_table_name + ' ( tag_id INTEGER PRIMARY KEY, current_count INTEGER, pending_count INTEGER );' )
@ -2344,7 +2405,7 @@ class DB( HydrusDB.HydrusDB ):
def _CacheSpecificMappingsGetAutocompleteCounts( self, file_service_id, tag_service_id, tag_ids ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
select_statement = 'SELECT tag_id, current_count, pending_count FROM ' + ac_cache_table_name + ' WHERE tag_id IN %s;'
@ -2353,7 +2414,7 @@ class DB( HydrusDB.HydrusDB ):
def _CacheSpecificMappingsPendMappings( self, file_service_id, tag_service_id, mappings_ids ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
for ( tag_id, hash_ids ) in mappings_ids:
@ -2378,7 +2439,7 @@ class DB( HydrusDB.HydrusDB ):
def _CacheSpecificMappingsRescindPendingMappings( self, file_service_id, tag_service_id, mappings_ids ):
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
for ( tag_id, hash_ids ) in mappings_ids:
@ -2619,11 +2680,6 @@ class DB( HydrusDB.HydrusDB ):
self._c.execute( 'CREATE TABLE json_dumps ( dump_type INTEGER PRIMARY KEY, version INTEGER, dump BLOB_BYTES );' )
self._c.execute( 'CREATE TABLE json_dumps_named ( dump_type INTEGER, dump_name TEXT, version INTEGER, dump BLOB_BYTES, PRIMARY KEY ( dump_type, dump_name ) );' )
self._c.execute( 'CREATE TABLE local_hashes ( hash_id INTEGER PRIMARY KEY, md5 BLOB_BYTES, sha1 BLOB_BYTES, sha512 BLOB_BYTES );' )
self._CreateIndex( 'local_hashes', [ 'md5' ] )
self._CreateIndex( 'local_hashes', [ 'sha1' ] )
self._CreateIndex( 'local_hashes', [ 'sha512' ] )
self._c.execute( 'CREATE TABLE local_ratings ( service_id INTEGER REFERENCES services ON DELETE CASCADE, hash_id INTEGER, rating REAL, PRIMARY KEY ( service_id, hash_id ) );' )
self._CreateIndex( 'local_ratings', [ 'hash_id' ] )
self._CreateIndex( 'local_ratings', [ 'rating' ] )
@ -2693,6 +2749,11 @@ class DB( HydrusDB.HydrusDB ):
self._c.execute( 'CREATE TABLE IF NOT EXISTS external_master.hashes ( hash_id INTEGER PRIMARY KEY, hash BLOB_BYTES UNIQUE );' )
self._c.execute( 'CREATE TABLE external_master.local_hashes ( hash_id INTEGER PRIMARY KEY, md5 BLOB_BYTES, sha1 BLOB_BYTES, sha512 BLOB_BYTES );' )
self._CreateIndex( 'external_master.local_hashes', [ 'md5' ] )
self._CreateIndex( 'external_master.local_hashes', [ 'sha1' ] )
self._CreateIndex( 'external_master.local_hashes', [ 'sha512' ] )
self._c.execute( 'CREATE TABLE IF NOT EXISTS external_master.namespaces ( namespace_id INTEGER PRIMARY KEY, namespace TEXT UNIQUE );' )
self._c.execute( 'CREATE TABLE IF NOT EXISTS external_master.subtags ( subtag_id INTEGER PRIMARY KEY, subtag TEXT UNIQUE );' )
@ -2750,6 +2811,14 @@ class DB( HydrusDB.HydrusDB ):
self._SetJSONDump( shortcuts )
bandwidth_manager = ClientDefaults.GetDefaultBandwidthManager()
self._SetJSONDump( bandwidth_manager )
session_manager = ClientNetworking.NetworkSessionManager()
self._SetJSONDump( session_manager )
self._c.execute( 'INSERT INTO namespaces ( namespace_id, namespace ) VALUES ( ?, ? );', ( 1, '' ) )
self._c.execute( 'INSERT INTO version ( version ) VALUES ( ? );', ( HC.SOFTWARE_VERSION, ) )
@ -3760,7 +3829,7 @@ class DB( HydrusDB.HydrusDB ):
else:
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
current_selects.append( 'SELECT hash_id FROM ' + cache_current_mappings_table_name + ' NATURAL JOIN tags WHERE namespace_id = ' + str( namespace_id ) + ';' )
pending_selects.append( 'SELECT hash_id FROM ' + cache_pending_mappings_table_name + ' NATURAL JOIN tags WHERE namespace_id = ' + str( namespace_id ) + ';' )
@ -3815,7 +3884,7 @@ class DB( HydrusDB.HydrusDB ):
else:
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
current_selects.append( 'SELECT hash_id FROM ' + cache_current_mappings_table_name + ' NATURAL JOIN tags WHERE namespace_id IN ' + HydrusData.SplayListForDB( namespace_ids ) + ' AND subtag_id IN ' + HydrusData.SplayListForDB( subtag_ids ) + ';' )
pending_selects.append( 'SELECT hash_id FROM ' + cache_pending_mappings_table_name + ' NATURAL JOIN tags WHERE namespace_id IN ' + HydrusData.SplayListForDB( namespace_ids ) + ' AND subtag_id IN ' + HydrusData.SplayListForDB( subtag_ids ) + ';' )
@ -4364,7 +4433,7 @@ class DB( HydrusDB.HydrusDB ):
else:
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
current_selects.append( 'SELECT hash_id FROM ' + cache_current_mappings_table_name + ' NATURAL JOIN tags WHERE subtag_id IN ' + HydrusData.SplayListForDB( subtag_ids ) + ';' )
pending_selects.append( 'SELECT hash_id FROM ' + cache_pending_mappings_table_name + ' NATURAL JOIN tags WHERE subtag_id IN ' + HydrusData.SplayListForDB( subtag_ids ) + ';' )
@ -4439,7 +4508,7 @@ class DB( HydrusDB.HydrusDB ):
else:
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
current_selects.append( 'SELECT hash_id FROM ' + cache_current_mappings_table_name + ' NATURAL JOIN tags WHERE namespace_id = ' + str( namespace_id ) + ' AND subtag_id = ' + str( subtag_id ) + ';' )
pending_selects.append( 'SELECT hash_id FROM ' + cache_pending_mappings_table_name + ' NATURAL JOIN tags WHERE namespace_id = ' + str( namespace_id ) + ' AND subtag_id = ' + str( subtag_id ) + ';' )
@ -4466,7 +4535,7 @@ class DB( HydrusDB.HydrusDB ):
else:
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, search_tag_service_id )
current_selects.append( 'SELECT hash_id FROM ' + cache_current_mappings_table_name + ' NATURAL JOIN tags WHERE subtag_id = ' + str( subtag_id ) + ';' )
pending_selects.append( 'SELECT hash_id FROM ' + cache_pending_mappings_table_name + ' NATURAL JOIN tags WHERE subtag_id = ' + str( subtag_id ) + ';' )
@ -4915,17 +4984,18 @@ class DB( HydrusDB.HydrusDB ):
if common_file_service_id is None:
tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_CURRENT, tag_id ) ) for ( hash_id, tag_id ) in self._SelectFromList( 'SELECT hash_id, tag_id FROM ' + current_mappings_table_name + ' WHERE hash_id IN %s;', hash_ids ) )
tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_DELETED, tag_id ) ) for ( hash_id, tag_id ) in self._SelectFromList( 'SELECT hash_id, tag_id FROM ' + deleted_mappings_table_name + ' WHERE hash_id IN %s;', hash_ids ) )
tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_PENDING, tag_id ) ) for ( hash_id, tag_id ) in self._SelectFromList( 'SELECT hash_id, tag_id FROM ' + pending_mappings_table_name + ' WHERE hash_id IN %s;', hash_ids ) )
else:
( cache_files_table_name, cache_current_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( common_file_service_id, tag_service_id )
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( common_file_service_id, tag_service_id )
tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_CURRENT, tag_id ) ) for ( hash_id, tag_id ) in self._SelectFromList( 'SELECT hash_id, tag_id FROM ' + cache_current_mappings_table_name + ' WHERE hash_id IN %s;', hash_ids ) )
tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_DELETED, tag_id ) ) for ( hash_id, tag_id ) in self._SelectFromList( 'SELECT hash_id, tag_id FROM ' + cache_deleted_mappings_table_name + ' WHERE hash_id IN %s;', hash_ids ) )
tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_PENDING, tag_id ) ) for ( hash_id, tag_id ) in self._SelectFromList( 'SELECT hash_id, tag_id FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id IN %s;', hash_ids ) )
tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_DELETED, tag_id ) ) for ( hash_id, tag_id ) in self._SelectFromList( 'SELECT hash_id, tag_id FROM ' + deleted_mappings_table_name + ' WHERE hash_id IN %s;', hash_ids ) )
tag_data.extend( ( hash_id, ( tag_service_id, HC.CONTENT_STATUS_PETITIONED, tag_id ) ) for ( hash_id, tag_id ) in self._SelectFromList( 'SELECT hash_id, tag_id FROM ' + petitioned_mappings_table_name + ' WHERE hash_id IN %s;', hash_ids ) )
@ -6060,16 +6130,7 @@ class DB( HydrusDB.HydrusDB ):
def _GetURLStatus( self, url ):
search_urls = [ url ]
if url.startswith( 'http://' ):
search_urls.append( 'https://' + url[7:] )
elif url.startswith( 'https://' ):
search_urls.append( 'http://' + url[8:] )
search_urls = ClientData.GetSearchURLs( url )
for search_url in search_urls:
@ -9470,6 +9531,105 @@ class DB( HydrusDB.HydrusDB ):
if version == 262:
self._controller.pub( 'splash_set_status_text', 'moving some hash data' )
self._c.execute( 'CREATE TABLE IF NOT EXISTS external_master.hashes ( hash_id INTEGER PRIMARY KEY, hash BLOB_BYTES UNIQUE );' )
self._c.execute( 'CREATE TABLE external_master.local_hashes ( hash_id INTEGER PRIMARY KEY, md5 BLOB_BYTES, sha1 BLOB_BYTES, sha512 BLOB_BYTES );' )
self._CreateIndex( 'external_master.local_hashes', [ 'md5' ] )
self._CreateIndex( 'external_master.local_hashes', [ 'sha1' ] )
self._CreateIndex( 'external_master.local_hashes', [ 'sha512' ] )
self._c.execute( 'INSERT INTO external_master.local_hashes SELECT * FROM main.local_hashes;' )
self._c.execute( 'DROP TABLE main.local_hashes;' )
self._c.execute( 'ANALYZE external_master.local_hashes;' )
self._Commit()
self._CloseDBCursor()
self._controller.pub( 'splash_set_status_text', 'vacuuming main db ' )
db_path = os.path.join( self._db_dir, 'client.db' )
try:
if HydrusDB.CanVacuum( db_path ):
HydrusDB.VacuumDB( db_path )
except Exception as e:
HydrusData.Print( 'Vacuum failed!' )
HydrusData.PrintException( e )
self._InitDBCursor()
self._BeginImmediate()
#
bandwidth_manager = ClientDefaults.GetDefaultBandwidthManager()
self._SetJSONDump( bandwidth_manager )
session_manager = ClientNetworking.NetworkSessionManager()
self._SetJSONDump( session_manager )
#
self._controller.pub( 'splash_set_status_text', 'generating deleted tag cache' )
tag_service_ids = self._GetServiceIds( HC.TAG_SERVICES )
file_service_ids = self._GetServiceIds( HC.AUTOCOMPLETE_CACHE_SPECIFIC_FILE_SERVICES )
for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
( cache_files_table_name, cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name, ac_cache_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self._c.execute( 'CREATE TABLE IF NOT EXISTS ' + cache_deleted_mappings_table_name + ' ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;' )
hash_ids = [ hash_id for ( hash_id, ) in self._c.execute( 'SELECT hash_id FROM current_files WHERE service_id = ?;', ( file_service_id, ) ) ]
if len( hash_ids ) > 0:
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( tag_service_id )
for group_of_hash_ids in HydrusData.SplitListIntoChunks( hash_ids, 100 ):
splayed_group_of_hash_ids = HydrusData.SplayListForDB( group_of_hash_ids )
deleted_mapping_ids_raw = self._c.execute( 'SELECT tag_id, hash_id FROM ' + deleted_mappings_table_name + ' WHERE hash_id IN ' + splayed_group_of_hash_ids + ';' ).fetchall()
deleted_mapping_ids_dict = HydrusData.BuildKeyToSetDict( deleted_mapping_ids_raw )
all_ids_seen = set( deleted_mapping_ids_dict.keys() )
for tag_id in all_ids_seen:
deleted_hash_ids = deleted_mapping_ids_dict[ tag_id ]
num_deleted = len( deleted_hash_ids )
if num_deleted > 0:
self._c.executemany( 'INSERT OR IGNORE INTO ' + cache_deleted_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in deleted_hash_ids ) )
self._c.execute( 'ANALYZE ' + cache_deleted_mappings_table_name + ';' )
self._controller.pub( 'splash_set_title_text', 'updated db to v' + str( version + 1 ) )
self._c.execute( 'UPDATE version SET version = ?;', ( version + 1, ) )
@ -10109,6 +10269,11 @@ class DB( HydrusDB.HydrusDB ):
self.pub_after_commit( 'service_updates_gui', service_keys_to_service_updates )
def publish_status_update( self ):
self._controller.pubimmediate( 'refresh_status' )
def GetInitialMessages( self ):
return self._initial_messages
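
On the new deleted-mappings cache: it mirrors the current/pending caches, a ( hash_id, tag_id ) composite primary key on a WITHOUT ROWID table per file/tag service pair, so 'is this mapping deleted?' becomes an indexed point lookup in a small per-domain table instead of a scan over the service-wide deleted_mappings table. A standalone sketch with toy data:

import sqlite3

c = sqlite3.connect( ':memory:' ).cursor()

# same shape as the specific_deleted_mappings_cache_* tables created above
c.execute( 'CREATE TABLE cache_deleted_mappings ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;' )

c.executemany( 'INSERT OR IGNORE INTO cache_deleted_mappings ( hash_id, tag_id ) VALUES ( ?, ? );', [ ( 1, 10 ), ( 2, 10 ), ( 2, 11 ) ] )

result = c.execute( 'SELECT 1 FROM cache_deleted_mappings WHERE hash_id = ? AND tag_id = ?;', ( 2, 11 ) ).fetchone()

print( result is not None ) # True, resolved from the primary key without a table scan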

View File

@ -114,6 +114,40 @@ def ColourIsGreyish( colour ):
return greyish
def ConvertHTTPSToHTTP( url ):
if url.startswith( 'http://' ):
return url
elif url.startswith( 'https://' ):
http_url = 'http://' + url[8:]
return http_url
else:
raise Exception( 'Given a url that did not have a scheme!' )
def ConvertHTTPToHTTPS( url ):
if url.startswith( 'https://' ):
return url
elif url.startswith( 'http://' ):
https_url = 'https://' + url[7:]
return https_url
else:
raise Exception( 'Given a url that did not have a scheme!' )
def ConvertServiceKeysToContentUpdatesToPrettyString( service_keys_to_content_updates ):
num_files = 0
@ -339,6 +373,21 @@ def GetMediasTagCount( pool, tag_service_key = CC.COMBINED_TAG_SERVICE_KEY, coll
return ( current_tags_to_count, deleted_tags_to_count, pending_tags_to_count, petitioned_tags_to_count )
def GetSearchURLs( url ):
search_urls = [ url ]
if url.startswith( 'http://' ):
search_urls.append( ConvertHTTPToHTTPS( url ) )
elif url.startswith( 'https://' ):
search_urls.append( ConvertHTTPSToHTTP( url ) )
return search_urls
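
GetSearchURLs, then, just pairs a url with its opposite-scheme twin, which is what the import caches and _GetURLStatus above rely on:

GetSearchURLs( 'http://example.com/post/123' )
# -> [ 'http://example.com/post/123', 'https://example.com/post/123' ]

GetSearchURLs( 'https://example.com/post/123' )
# -> [ 'https://example.com/post/123', 'http://example.com/post/123' ]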
def GetSortChoices( add_namespaces_and_ratings = True ):
sort_choices = list( CC.SORT_CHOICES )

View File

@ -1,10 +1,79 @@
import ClientConstants as CC
import ClientData
import ClientNetworking
import HydrusConstants as HC
import HydrusGlobals as HG
import HydrusNetworking
import os
import wx
def GetDefaultBandwidthManager():
KB = 1024
MB = 1024 ** 2
GB = 1024 ** 3
bandwidth_manager = ClientNetworking.NetworkBandwidthManager()
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, 1, 5 ) # stop accidental spam
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, 60, 120 ) # smooth out heavy usage. db prob needs a break
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 86400, 10 * GB ) # check your inbox lad
bandwidth_manager.SetRules( ClientNetworking.GLOBAL_NETWORK_CONTEXT, rules )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, 1, 1 ) # don't ever hammer a domain
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 86400, 2 * GB ) # don't go nuts on a site in a single day
bandwidth_manager.SetRules( ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN ), rules )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 86400, 64 * MB ) # don't sync a giant db in one day
bandwidth_manager.SetRules( ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_HYDRUS ), rules )
#
rules = HydrusNetworking.BandwidthRules()
bandwidth_manager.SetRules( ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_DOWNLOADER ), rules )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, 600, 60 ) # after that first sample of small files, take it easy
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 600, 256 * MB ) # after that first sample of big files, take it easy
bandwidth_manager.SetRules( ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_DOWNLOADER_QUERY ), rules )
#
rules = HydrusNetworking.BandwidthRules()
rules.AddRule( HC.BANDWIDTH_TYPE_REQUESTS, 5, 1 ) # be extremely polite
rules.AddRule( HC.BANDWIDTH_TYPE_DATA, 86400, 256 * MB ) # catch up on a big sub in little chunks every day
bandwidth_manager.SetRules( ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_SUBSCRIPTION ), rules )
#
return bandwidth_manager
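
Each AddRule call reads as ( bandwidth_type, period_in_seconds, max_allowed_in_that_period ). A toy sliding-window sketch of how such rules might gate a request, with entirely hypothetical names (the real logic lives in HydrusNetworking.BandwidthRules):

import time

class ToyBandwidthRules( object ):
    
    def __init__( self ):
        
        self._rules = [] # ( period_seconds, max_requests )
        self._request_times = []
        
    
    def AddRule( self, period, max_requests ):
        
        self._rules.append( ( period, max_requests ) )
        
    
    def CanStartRequest( self ):
        
        now = time.time()
        
        # every rule's window must have headroom
        return all( len( [ t for t in self._request_times if t > now - period ] ) < max_requests for ( period, max_requests ) in self._rules )
        
    
    def ReportRequest( self ):
        
        self._request_times.append( time.time() )
        

rules = ToyBandwidthRules()

rules.AddRule( 1, 1 ) # 'don't ever hammer a domain'
rules.AddRule( 86400, 1000 )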
def GetClientDefaultOptions():
options = {}

View File

@ -1,4 +1,5 @@
import bs4
import ClientData
import ClientNetworking
import HydrusConstants as HC
import HydrusExceptions
@ -1712,13 +1713,22 @@ class GalleryPixiv( Gallery ):
user = soup.find( 'h1', class_ = 'user' )
tags.append( 'creator:' + user.string )
if user is not None:
tags.append( 'creator:' + user.string )
title_parent = soup.find( 'section', class_ = re.compile( 'work-info' ) )
title = title_parent.find( 'h1', class_ = 'title' )
tags.append( 'title:' + title.string )
if title_parent is not None:
title = title_parent.find( 'h1', class_ = 'title' )
if title is not None:
tags.append( 'title:' + title.string )
return ( image_url, tags )
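
The new guards exist because bs4's find returns None when a tag is absent, and the old code dereferenced .string unconditionally, which is the error the changelog mentions. A standalone demonstration:

import bs4

soup = bs4.BeautifulSoup( '<div>no user header here</div>', 'html.parser' )

user = soup.find( 'h1', class_ = 'user' )

print( user ) # None, so user.string would raise AttributeError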
@ -1809,6 +1819,31 @@ class GalleryTumblr( Gallery ):
return raw_url
def Remove68Subdomain( long_url ):
# sometimes the 68 subdomain gives a 404 on the raw url, so:
# convert this:
# http://68.media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg
# to this:
# http://media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg
# I am not sure if it is always 68, but let's not assume
( scheme, rest ) = long_url.split( '://', 1 )
if rest.startswith( 'media.tumblr.com' ):
return long_url
( gumpf, shorter_rest ) = rest.split( '.', 1 )
shorter_url = scheme + '://' + shorter_rest
return shorter_url
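
Using the example url from the comment above:

Remove68Subdomain( 'http://68.media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg' )
# -> 'http://media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg'

Remove68Subdomain( 'http://media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg' )
# -> unchanged, thanks to the early return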
definitely_no_more_pages = False
processed_raw_json = data.split( 'var tumblr_api_read = ' )[1][:-2] # -2 takes the js ';' and a newline off the end
@ -1837,15 +1872,28 @@ class GalleryTumblr( Gallery ):
if len( post[ 'photos' ] ) == 0:
photos = [ post ]
else:
photos = post[ 'photos' ]
for photo in photos:
try:
url = post[ 'photo-url-1280' ]
url = photo[ 'photo-url-1280' ]
if raw_url_available:
url = ConvertRegularToRawURL( url )
url = Remove68Subdomain( url )
url = ClientData.ConvertHTTPToHTTPS( url )
SetExtraURLInfo( url, tags )
urls.append( url )
@ -1855,29 +1903,6 @@ class GalleryTumblr( Gallery ):
pass
else:
for photo in post[ 'photos' ]:
try:
url = photo[ 'photo-url-1280' ]
if raw_url_available:
url = ConvertRegularToRawURL( url )
SetExtraURLInfo( url, tags )
urls.append( url )
except:
pass

View File

@ -81,7 +81,6 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
self._focus_holder = wx.Window( self, size = ( 0, 0 ) )
self._loading_session = False
self._media_status_override = None
self._closed_pages = []
self._deleted_page_keys = set()
@ -145,44 +144,7 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
self._RefreshStatusBar()
default_gui_session = HC.options[ 'default_gui_session' ]
existing_session_names = self._controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION )
cannot_load_from_db = default_gui_session not in existing_session_names
load_a_blank_page = HC.options[ 'default_gui_session' ] == 'just a blank page' or cannot_load_from_db
if not load_a_blank_page:
if self._controller.LastShutdownWasBad():
# this can be upgraded to a nicer checkboxlist dialog to select pages or w/e
message = 'It looks like the last instance of the client did not shut down cleanly.'
message += os.linesep * 2
message += 'Would you like to try loading your default session \'' + default_gui_session + '\', or just a blank page?'
with ClientGUIDialogs.DialogYesNo( self, message, title = 'Previous shutdown was bad', yes_label = 'try to load the default session', no_label = 'just load a blank page' ) as dlg:
if dlg.ShowModal() == wx.ID_NO:
load_a_blank_page = True
if load_a_blank_page:
self._NewPageQuery( CC.LOCAL_FILE_SERVICE_KEY )
else:
self._LoadGUISession( default_gui_session )
wx.CallLater( 5 * 60 * 1000, self.SaveLastSession )
wx.CallAfter( self._InitialiseSession ) # do this in callafter as some pages want to talk to controller.gui, which doesn't exist yet!
def _AboutWindow( self ):
@ -280,74 +242,6 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
def _AppendGUISession( self, name ):
def do_it( session, starting_index ):
try:
if not HC.PLATFORM_LINUX:
# on linux, this stops session pages from accepting keyboard input, wew
wx.CallAfter( self._notebook.Disable )
forced_insertion_index = starting_index
for ( page_name, management_controller, initial_hashes ) in session.IteratePages():
try:
if len( initial_hashes ) > 0:
initial_media_results = []
for group_of_inital_hashes in HydrusData.SplitListIntoChunks( initial_hashes, 256 ):
more_media_results = self._controller.Read( 'media_results', group_of_inital_hashes )
initial_media_results.extend( more_media_results )
self._media_status_override = u'Loading session page \'' + page_name + u'\'\u2026 ' + HydrusData.ConvertValueRangeToPrettyString( len( initial_media_results ), len( initial_hashes ) )
self._controller.pub( 'refresh_status' )
else:
initial_media_results = []
wx.CallAfter( self._NewPage, page_name, management_controller, initial_media_results = initial_media_results, forced_insertion_index = forced_insertion_index )
forced_insertion_index += 1
except Exception as e:
HydrusData.ShowException( e )
finally:
self._loading_session = False
self._media_status_override = None
if not HC.PLATFORM_LINUX:
wx.CallAfter( self._notebook.Enable )
if self._loading_session:
HydrusData.ShowText( 'Sorry, currently loading a session. Please wait.' )
return
self._loading_session = True
try:
session = self._controller.Read( 'serialisable_named', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION, name )
@ -364,7 +258,28 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
starting_index = self._GetDefaultPageInsertionIndex()
self._controller.CallToThread( do_it, session, starting_index )
try:
forced_insertion_index = starting_index
for ( page_name, management_controller, initial_hashes ) in session.IteratePages():
try:
self._NewPage( page_name, management_controller, initial_hashes = initial_hashes, forced_insertion_index = forced_insertion_index )
forced_insertion_index += 1
except Exception as e:
HydrusData.ShowException( e )
finally:
self._media_status_override = None
def _AutoRepoSetup( self ):
@ -1770,14 +1685,49 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
def _LoadGUISession( self, name ):
def _InitialiseSession( self ):
if self._loading_session:
default_gui_session = HC.options[ 'default_gui_session' ]
existing_session_names = self._controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION )
cannot_load_from_db = default_gui_session not in existing_session_names
load_a_blank_page = HC.options[ 'default_gui_session' ] == 'just a blank page' or cannot_load_from_db
if not load_a_blank_page:
HydrusData.ShowText( 'Sorry, currently loading a session. Please wait.' )
if self._controller.LastShutdownWasBad():
# this can be upgraded to a nicer checkboxlist dialog to select pages or w/e
message = 'It looks like the last instance of the client did not shut down cleanly.'
message += os.linesep * 2
message += 'Would you like to try loading your default session \'' + default_gui_session + '\', or just a blank page?'
with ClientGUIDialogs.DialogYesNo( self, message, title = 'Previous shutdown was bad', yes_label = 'try to load the default session', no_label = 'just load a blank page' ) as dlg:
if dlg.ShowModal() == wx.ID_NO:
load_a_blank_page = True
return
if load_a_blank_page:
self._NewPageQuery( CC.LOCAL_FILE_SERVICE_KEY )
else:
self._LoadGUISession( default_gui_session )
wx.CallLater( 5 * 60 * 1000, self.SaveLastSession )
def _LoadGUISession( self, name ):
for page in [ self._notebook.GetPage( i ) for i in range( self._notebook.GetPageCount() ) ]:
@ -1843,7 +1793,7 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
self._controller.pub( 'wake_daemons' )
self._controller.pub( 'refresh_status' )
self._controller.pubimmediate( 'refresh_status' )
def _ManageParsingScripts( self ):
@ -2024,17 +1974,17 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
def _NewPage( self, page_name, management_controller, initial_media_results = None, forced_insertion_index = None ):
def _NewPage( self, page_name, management_controller, initial_hashes = None, forced_insertion_index = None ):
self._controller.ResetIdleTimer()
self._controller.ResetPageChangeTimer()
if initial_media_results is None:
if initial_hashes is None:
initial_media_results = []
initial_hashes = []
page = ClientGUIPages.Page( self._notebook, self._controller, management_controller, initial_media_results )
page = ClientGUIPages.Page( self._notebook, self._controller, management_controller, initial_hashes )
if forced_insertion_index is None:
@ -2122,12 +2072,19 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
self._NewPage( page_name, management_controller )
def _NewPageQuery( self, file_service_key, initial_media_results = None, initial_predicates = None ):
def _NewPageQuery( self, file_service_key, initial_hashes = None, initial_predicates = None ):
if initial_media_results is None: initial_media_results = []
if initial_predicates is None: initial_predicates = []
if initial_hashes is None:
initial_hashes = []
search_enabled = len( initial_media_results ) == 0
if initial_predicates is None:
initial_predicates = []
search_enabled = len( initial_hashes ) == 0
new_options = self._controller.GetNewOptions()
@ -2142,7 +2099,7 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
management_controller = ClientGUIManagement.CreateManagementControllerQuery( file_service_key, file_search_context, search_enabled )
self._NewPage( 'files', management_controller, initial_media_results = initial_media_results )
self._NewPage( 'files', management_controller, initial_hashes = initial_hashes )
def _OpenDBFolder( self ):
@ -2444,13 +2401,6 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
def _SaveGUISession( self, name = None ):
if self._loading_session:
HydrusData.ShowText( 'Sorry, currently loading a session. Please wait.' )
return
if name is None:
while True:
@ -2503,14 +2453,7 @@ class FrameGUI( ClientGUITopLevelWindows.FrameThatResizes ):
management_controller = page.GetManagementController()
# this bit could obviously be 'getmediaresultsobject' or whatever, with sort/collect/selection/view status
media = page.GetMedia()
hashes = set()
for m in media: hashes.update( m.GetHashes() )
hashes = list( hashes )
hashes = list( page.GetHashes() )
session.AddPage( page_name, management_controller, hashes )
@ -3005,7 +2948,7 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
def CurrentlyBusy( self ):
return self._loading_session
return False
def EventCharHook( self, event ):
@ -3221,10 +3164,7 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
try:
if not self._loading_session:
self._SaveGUISession( 'last session' )
self._SaveGUISession( 'last session' )
self._message_manager.CleanBeforeDestroy()
@ -3340,6 +3280,20 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
def IsCurrentPage( self, page_key ):
result = self._notebook.GetCurrentPage()
if result is None:
return False
else:
return page_key == result.GetPageKey()
def NewPageDuplicateFilter( self ):
self._NewPageDuplicateFilter()
@ -3372,12 +3326,19 @@ The password is cleartext here but obscured in the entry dialog. Enter a blank p
def NewPagePetitions( self, service_key ): self._NewPagePetitions( service_key )
def NewPageQuery( self, service_key, initial_media_results = None, initial_predicates = None ):
def NewPageQuery( self, service_key, initial_hashes = None, initial_predicates = None ):
if initial_media_results is None: initial_media_results = []
if initial_predicates is None: initial_predicates = []
if initial_hashes is None:
initial_hashes = []
self._NewPageQuery( service_key, initial_media_results = initial_media_results, initial_predicates = initial_predicates )
if initial_predicates is None:
initial_predicates = []
self._NewPageQuery( service_key, initial_hashes = initial_hashes, initial_predicates = initial_predicates )
def NewSimilarTo( self, file_service_key, hash, hamming_distance ):
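
The upshot of the _NewPage rework is that sessions now hand pages plain hashes and each page resolves its media results off the gui thread. A rough sketch of that pattern, in which the page-side setter name is a hypothetical stand-in:

def THREADLoadInitialMediaResults( controller, page, initial_hashes ):
    
    initial_media_results = []
    
    # chunked reads keep the db responsive and let the page show progress
    for group_of_hashes in HydrusData.SplitListIntoChunks( initial_hashes, 256 ):
        
        more_media_results = controller.Read( 'media_results', group_of_hashes )
        
        initial_media_results.extend( more_media_results )
        
    
    wx.CallAfter( page.SetInitialMediaResults, initial_media_results ) # hypothetical setter; results must land on the gui thread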

View File

@ -1779,9 +1779,7 @@ class PopupMessage( PopupWindow ):
hashes = result
media_results = HG.client_controller.Read( 'media_results', hashes )
HG.client_controller.pub( 'new_page_query', CC.LOCAL_FILE_SERVICE_KEY, initial_media_results = media_results )
HG.client_controller.pub( 'new_page_query', CC.LOCAL_FILE_SERVICE_KEY, initial_hashes = hashes )
@ -3773,7 +3771,10 @@ class TextAndGauge( wx.Panel ):
def SetValue( self, text, value, range ):
self._st.SetLabelText( text )
if text != self._st.GetLabelText():
self._st.SetLabelText( text )
if value is None or range is None:
@ -3781,8 +3782,15 @@ class TextAndGauge( wx.Panel ):
else:
self._gauge.SetRange( range )
self._gauge.SetValue( value )
if range != self._gauge.GetRange():
self._gauge.SetRange( range )
if value != self._gauge.GetValue():
self._gauge.SetValue( value )
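
The guarded SetValue above is a small but real optimisation: a polling timer now drives these widgets, so the label and gauge are only touched when the incoming values differ from what is already displayed, avoiding needless repaints. The same pattern in a minimal self-contained sketch (class and names are illustrative, not the hydrus ones):

    import wx
    
    class GuardedTextAndGauge( wx.Panel ):
        
        def __init__( self, parent ):
            
            wx.Panel.__init__( self, parent )
            
            self._st = wx.StaticText( self )
            self._gauge = wx.Gauge( self, range = 1 )
            
        
        def SetValue( self, text, value, range ):
            
            # only touch a widget when its value actually changed, so a
            # 250ms update timer does not force a repaint every tick
            
            if text != self._st.GetLabelText():
                
                self._st.SetLabelText( text )
                
            
            if range != self._gauge.GetRange():
                
                self._gauge.SetRange( range )
                
            
            if value != self._gauge.GetValue():
                
                self._gauge.SetValue( value )
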

View File

@ -12,6 +12,8 @@ import HydrusNetworking
import os
import wx
ID_TIMER_NETWORK_JOB = wx.NewId()
class BandwidthRulesCtrl( ClientGUICommon.StaticBox ):
def __init__( self, parent, bandwidth_rules ):
@ -345,3 +347,110 @@ class EditStringToStringDictControl( wx.Panel ):
return value_dict
class NetworkJobControl( wx.Panel ):
def __init__( self, parent ):
wx.Panel.__init__( self, parent )
self._network_job = None
self._text_and_gauge = ClientGUICommon.TextAndGauge( self )
self._cancel_button = ClientGUICommon.BetterBitmapButton( self, CC.GlobalBMPs.stop, self.Cancel )
#
self._Update()
#
hbox = wx.BoxSizer( wx.HORIZONTAL )
hbox.AddF( self._text_and_gauge, CC.FLAGS_EXPAND_BOTH_WAYS )
hbox.AddF( self._cancel_button, CC.FLAGS_VCENTER )
self.SetSizer( hbox )
#
self.Bind( wx.EVT_TIMER, self.TIMEREventUpdate, id = ID_TIMER_NETWORK_JOB )
self._move_hide_timer = wx.Timer( self, id = ID_TIMER_NETWORK_JOB )
self._move_hide_timer.Start( 250, wx.TIMER_CONTINUOUS )
def _Update( self ):
if self._network_job is None:
self._text_and_gauge.SetValue( '', 0, 1 )
can_cancel = False
else:
if self._network_job.IsDone():
can_cancel = False
else:
can_cancel = True
( status_text, current_speed, bytes_read, bytes_to_read ) = self._network_job.GetStatus()
if self._network_job.HasError():
text = status_text
else:
text = status_text + ' ' + HydrusData.ConvertIntToBytes( current_speed ) + '/s'
self._text_and_gauge.SetValue( text, bytes_read, bytes_to_read )
if can_cancel:
if not self._cancel_button.IsEnabled():
self._cancel_button.Enable()
else:
if self._cancel_button.IsEnabled():
self._cancel_button.Disable()
def Cancel( self ):
if self._network_job is not None:
self._network_job.Cancel()
def ClearNetworkJob( self ):
self._network_job = None
def SetNetworkJob( self, network_job ):
self._network_job = network_job
def TIMEREventUpdate( self, event ):
if self.IsShown():
self._Update()
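
For context, a caller is expected to drive NetworkJobControl like this--hand it the live job, let its 250ms timer render status and speed, then clear it when the work is finished. A sketch mirroring the URLsImport changes further down; url, temp_path and download_control are stand-ins:

    import time
    
    network_job = ClientNetworking.NetworkJob( 'GET', url, temp_path = temp_path )
    
    download_control.SetNetworkJob( network_job ) # timer now shows status, speed and enables cancel
    
    try:
        
        HG.client_controller.network_engine.AddJob( network_job )
        
        while not network_job.IsDone():
            
            time.sleep( 0.1 )
            
        
    finally:
        
        download_control.ClearNetworkJob() # gauge goes blank on the next tick
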

View File

@ -14,6 +14,7 @@ import ClientGUIACDropdown
import ClientGUICanvas
import ClientGUICollapsible
import ClientGUICommon
import ClientGUIControls
import ClientGUIDialogs
import ClientGUIListBoxes
import ClientGUIMedia
@ -2617,7 +2618,7 @@ class ManagementPanelImporterURLs( ManagementPanelImporter ):
self._overall_status = wx.StaticText( self._url_panel )
self._current_action = wx.StaticText( self._url_panel )
self._file_gauge = ClientGUICommon.Gauge( self._url_panel )
self._file_download_control = ClientGUIControls.NetworkJobControl( self._url_panel )
self._overall_gauge = ClientGUICommon.Gauge( self._url_panel )
self._pause_button = wx.BitmapButton( self._url_panel, bitmap = CC.GlobalBMPs.pause )
@ -2651,7 +2652,7 @@ class ManagementPanelImporterURLs( ManagementPanelImporter ):
self._url_panel.AddF( self._overall_status, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.AddF( self._current_action, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.AddF( self._file_gauge, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.AddF( self._file_download_control, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.AddF( self._overall_gauge, CC.FLAGS_EXPAND_PERPENDICULAR )
self._url_panel.AddF( button_sizer, CC.FLAGS_BUTTON_SIZER )
self._url_panel.AddF( input_hbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
@ -2677,20 +2678,7 @@ class ManagementPanelImporterURLs( ManagementPanelImporter ):
self._urls_import = self._management_controller.GetVariable( 'urls_import' )
def file_download_hook( gauge_range, gauge_value ):
try:
self._file_gauge.SetRange( gauge_range )
self._file_gauge.SetValue( gauge_value )
except wx.PyDeadObjectError:
pass
self._urls_import.SetDownloadHook( file_download_hook )
self._urls_import.SetDownloadControl( self._file_download_control )
import_file_options = self._urls_import.GetOptions()
@ -3209,7 +3197,10 @@ class ManagementPanelPetitions( ManagementPanel ):
file_service_key = self._management_controller.GetKey( 'file_service' )
with wx.BusyCursor(): media_results = self._controller.Read( 'media_results', hashes )
with wx.BusyCursor():
media_results = self._controller.Read( 'media_results', hashes )
panel = ClientGUIMedia.MediaPanelThumbnails( self._page, self._page_key, file_service_key, media_results )

View File

@ -1395,9 +1395,7 @@ class MediaPanel( ClientMedia.ListeningMediaList, wx.ScrolledWindow ):
if hashes is not None and len( hashes ) > 0:
media_results = HG.client_controller.Read( 'media_results', hashes )
HG.client_controller.pub( 'new_page_query', self._file_service_key, initial_media_results = media_results )
HG.client_controller.pub( 'new_page_query', self._file_service_key, initial_hashes = hashes )
@ -1407,9 +1405,7 @@ class MediaPanel( ClientMedia.ListeningMediaList, wx.ScrolledWindow ):
if hashes is not None and len( hashes ) > 0:
media_results = HG.client_controller.Read( 'media_results', hashes )
HG.client_controller.pub( 'new_page_query', self._file_service_key, initial_media_results = media_results )
HG.client_controller.pub( 'new_page_query', self._file_service_key, initial_hashes = hashes )
@ -1497,6 +1493,11 @@ class MediaPanel( ClientMedia.ListeningMediaList, wx.ScrolledWindow ):
if self._focussed_media is not None: self._HitMedia( self._focussed_media, False, False )
def ClearPageKey( self ):
self._page_key = HydrusData.GenerateKey()
def Collect( self, page_key, collect_by = -1 ):
if page_key == self._page_key:
@ -1716,6 +1717,7 @@ class MediaPanelThumbnails( MediaPanel ):
self.RefreshAcceleratorTable()
HG.client_controller.sub( self, 'MaintainPageCache', 'memory_maintenance_pulse' )
HG.client_controller.sub( self, 'NewThumbnails', 'new_thumbnails' )
HG.client_controller.sub( self, 'ThumbnailsResized', 'thumbnail_resize' )
HG.client_controller.sub( self, 'RefreshAcceleratorTable', 'notify_new_options' )
@ -1751,6 +1753,16 @@ class MediaPanelThumbnails( MediaPanel ):
self._dirty_canvas_pages.append( wx.EmptyBitmap( client_width, self._num_rows_per_canvas_page * thumbnail_span_height, 24 ) )
def _DeleteAllDirtyPages( self ):
for bmp in self._dirty_canvas_pages:
bmp.Destroy()
self._dirty_canvas_pages = []
def _DirtyAllPages( self ):
clean_indices = self._clean_canvas_pages.keys()
@ -1760,8 +1772,6 @@ class MediaPanelThumbnails( MediaPanel ):
self._DirtyPage( clean_index )
self.Refresh()
def _DirtyPage( self, clean_index ):
@ -2158,19 +2168,9 @@ class MediaPanelThumbnails( MediaPanel ):
if thumb_layout_changed or width_got_bigger:
clean_indices = self._clean_canvas_pages.keys()
self._DirtyAllPages()
for clean_index in clean_indices:
self._DirtyPage( clean_index )
for bmp in self._dirty_canvas_pages:
bmp.Destroy()
self._dirty_canvas_pages = []
self._DeleteAllDirtyPages()
self.Refresh()
@ -2202,6 +2202,8 @@ class MediaPanelThumbnails( MediaPanel ):
HG.client_controller.pub( 'sorted_media_pulse', self._page_key, self._sorted_media )
self.Refresh()
def _ScrollEnd( self, shift = False ):
@ -2587,7 +2589,10 @@ class MediaPanelThumbnails( MediaPanel ):
thumbnail = self._GetThumbnailUnderMouse( event )
if thumbnail is not None: self._HitMedia( thumbnail, event.CmdDown(), event.ShiftDown() )
if thumbnail is not None:
self._HitMedia( thumbnail, event.CmdDown(), event.ShiftDown() )
all_locations_managers = [ media.GetLocationsManager() for media in self._sorted_media ]
selected_locations_managers = [ media.GetLocationsManager() for media in self._selected_media ]
@ -3420,6 +3425,16 @@ class MediaPanelThumbnails( MediaPanel ):
event.Skip()
def MaintainPageCache( self ):
if not HG.client_controller.GetGUI().IsCurrentPage( self._page_key ):
self._DirtyAllPages()
self._DeleteAllDirtyPages()
def NewThumbnails( self, hashes ):
affected_thumbnails = self._GetMedia( hashes )
@ -3505,6 +3520,8 @@ class MediaPanelThumbnails( MediaPanel ):
self._DirtyAllPages()
self.Refresh()
def ThumbnailsResized( self ):

View File

@ -19,7 +19,7 @@ import HydrusGlobals as HG
class Page( wx.SplitterWindow ):
def __init__( self, parent, controller, management_controller, initial_media_results ):
def __init__( self, parent, controller, management_controller, initial_hashes ):
wx.SplitterWindow.__init__( self, parent )
@ -29,6 +29,8 @@ class Page( wx.SplitterWindow ):
self._management_controller = management_controller
self._initial_hashes = initial_hashes
self._management_controller.SetKey( 'page', self._page_key )
self._pretty_status = ''
@ -51,7 +53,7 @@ class Page( wx.SplitterWindow ):
self._preview_panel = ClientGUICanvas.CanvasPanel( self._search_preview_split, self._page_key )
self._media_panel = ClientGUIMedia.MediaPanelThumbnails( self, self._page_key, file_service_key, initial_media_results )
self._media_panel = ClientGUIMedia.MediaPanelThumbnails( self, self._page_key, file_service_key, [] )
self._search_preview_split.SplitHorizontally( self._management_panel, self._preview_panel, HC.options[ 'vpos' ] )
@ -65,8 +67,45 @@ class Page( wx.SplitterWindow ):
self._controller.sub( self, 'SetPrettyStatus', 'new_page_status' )
self._controller.sub( self, 'SwapMediaPanel', 'swap_media_panel' )
if initial_hashes is not None and len( initial_hashes ) > 0:
self._initialised = False
self._controller.CallToThread( self.THREADLoadInitialMediaResults )
else:
self._initialised = True
def CleanBeforeDestroy( self ): self._management_panel.CleanBeforeDestroy()
def _SetPrettyStatus( self, status ):
self._pretty_status = status
self._controller.pubimmediate( 'refresh_status' )
def _SwapMediaPanel( self, new_panel ):
self._preview_panel.SetMedia( None )
self._media_panel.ClearPageKey()
self.ReplaceWindow( self._media_panel, new_panel )
self._media_panel.Hide()
# If this is a CallAfter, OS X segfaults on refresh jej
wx.CallLater( 500, self._media_panel.Destroy )
self._media_panel = new_panel
def CleanBeforeDestroy( self ):
self._management_panel.CleanBeforeDestroy()
def EventPreviewUnsplit( self, event ):
@ -82,6 +121,27 @@ class Page( wx.SplitterWindow ):
self._controller.pub( 'set_focus', self._page_key, None )
def GetHashes( self ):
if self._initialised:
media = self.GetMedia()
hashes = []
for m in media:
hashes.extend( m.GetHashes() )
return hashes
else:
return self._initial_hashes
def GetManagementController( self ):
return self._management_controller
@ -156,7 +216,10 @@ class Page( wx.SplitterWindow ):
def RefreshQuery( self ):
self._controller.pub( 'refresh_query', self._page_key )
if self._initialised:
self._controller.pub( 'refresh_query', self._page_key )
def ShowHideSplit( self ):
@ -175,15 +238,31 @@ class Page( wx.SplitterWindow ):
def SetMediaFocus( self ): self._media_panel.SetFocus()
def SetMediaFocus( self ):
self._media_panel.SetFocus()
def SetMediaResults( self, media_results ):
file_service_key = self._management_controller.GetKey( 'file_service' )
media_panel = ClientGUIMedia.MediaPanelThumbnails( self, self._page_key, file_service_key, media_results )
self._SwapMediaPanel( media_panel )
self._initialised = True
self._initial_hashes = []
def SetPrettyStatus( self, page_key, status ):
if page_key == self._page_key:
self._pretty_status = status
self._controller.pub( 'refresh_status' )
if self._initialised:
self._SetPrettyStatus( status )
@ -201,16 +280,7 @@ class Page( wx.SplitterWindow ):
if page_key == self._page_key:
self._preview_panel.SetMedia( None )
self.ReplaceWindow( self._media_panel, new_panel )
self._media_panel.Hide()
# If this is a CallAfter, OS X segfaults on refresh jej
wx.CallLater( 500, self._media_panel.Destroy )
self._media_panel = new_panel
self._SwapMediaPanel( new_panel )
@ -219,6 +289,28 @@ class Page( wx.SplitterWindow ):
self._management_panel.TestAbleToClose()
def THREADLoadInitialMediaResults( self ):
initial_media_results = []
for group_of_initial_hashes in HydrusData.SplitListIntoChunks( self._initial_hashes, 256 ):
more_media_results = self._controller.Read( 'media_results', group_of_initial_hashes )
initial_media_results.extend( more_media_results )
status = u'Loading initial files\u2026 ' + HydrusData.ConvertValueRangeToPrettyString( len( initial_media_results ), len( self._initial_hashes ) )
self._SetPrettyStatus( status )
hashes_to_media_results = { media_result.GetHash() : media_result for media_result in initial_media_results }
sorted_initial_media_results = [ hashes_to_media_results[ hash ] for hash in self._initial_hashes ]
wx.CallAfter( self.SetMediaResults, sorted_initial_media_results )
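
THREADLoadInitialMediaResults is the heart of the new instant session load: the page opens with an empty thumbnail panel, reads media results in 256-hash chunks off the gui thread while updating the pretty status, restores the exact saved hash order, and only then swaps the populated panel in via wx.CallAfter. Stripped of the hydrus classes, the pattern looks like this (a sketch; the callback names are illustrative):

    import threading
    
    def load_in_chunks( hashes, read_chunk, publish_status, finish_on_gui_thread, chunk_size = 256 ):
        
        # read_chunk: list of hashes -> list of media results
        # finish_on_gui_thread: a wx.CallAfter-style marshal back to the gui
        
        results = []
        
        for i in range( 0, len( hashes ), chunk_size ):
            
            results.extend( read_chunk( hashes[ i : i + chunk_size ] ) )
            
            publish_status( 'Loading initial files... ' + str( len( results ) ) + '/' + str( len( hashes ) ) )
            
        
        # preserve the exact order in which the session saved the files
        
        hashes_to_results = { result.GetHash() : result for result in results }
        
        finish_on_gui_thread( [ hashes_to_results[ h ] for h in hashes ] )
    
    # threading.Thread( target = load_in_chunks, args = ( ... ) ).start()
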
class GUISession( HydrusSerialisable.SerialisableBaseNamed ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION

View File

@ -188,9 +188,7 @@ class ReviewServicePanel( wx.Panel ):
for ( name, text, timeout, ( num_hashes, hashes, share_key ) ) in self._booru_shares.GetSelectedClientData():
media_results = self._controller.Read( 'media_results', hashes )
self._controller.pub( 'new_page_query', CC.LOCAL_FILE_SERVICE_KEY, initial_media_results = media_results )
self._controller.pub( 'new_page_query', CC.LOCAL_FILE_SERVICE_KEY, initial_hashes = hashes )
@ -1126,9 +1124,7 @@ class ReviewServicePanel( wx.Panel ):
hashes = HG.client_controller.Read( 'service_directory', self._service.GetServiceKey(), multihash )
media_results = HG.client_controller.Read( 'media_results', hashes )
HG.client_controller.pub( 'new_page_query', CC.LOCAL_FILE_SERVICE_KEY, initial_media_results = media_results )
HG.client_controller.pub( 'new_page_query', CC.LOCAL_FILE_SERVICE_KEY, initial_hashes = hashes )
finally:

View File

@ -4,6 +4,7 @@ import ClientData
import ClientDefaults
import ClientDownloading
import ClientFiles
import ClientNetworking
import ClientThreading
import collections
import HydrusConstants as HC
@ -695,6 +696,8 @@ class HDDImport( HydrusSerialisable.SerialisableBase ):
with self._lock:
self._RegenerateSeedCacheStatus( page_key )
if path in self._paths_to_tags:
service_keys_to_tags = self._paths_to_tags[ path ]
@ -1769,7 +1772,7 @@ HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIAL
class SeedCache( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_SEED_CACHE
SERIALISABLE_VERSION = 4
SERIALISABLE_VERSION = 5
def __init__( self ):
@ -1871,6 +1874,98 @@ class SeedCache( HydrusSerialisable.SerialisableBase ):
return ( 4, new_serialisable_info )
if version == 4:
def ConvertRegularToRawURL( regular_url ):
# convert this:
# http://68.media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_500.jpg
# to this:
# http://68.media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg
# the 500 part can be a bunch of stuff, including letters
url_components = regular_url.split( '_' )
last_component = url_components[ -1 ]
( number_gubbins, file_ext ) = last_component.split( '.' )
raw_last_component = 'raw.' + file_ext
url_components[ -1 ] = raw_last_component
raw_url = '_'.join( url_components )
return raw_url
def Remove68Subdomain( long_url ):
# sometimes the 68 subdomain gives a 404 on the raw url, so:
# convert this:
# http://68.media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg
# to this:
# http://media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg
# I am not sure if it is always 68, but let's not assume
( scheme, rest ) = long_url.split( '://', 1 )
if rest.startswith( 'media.tumblr.com' ):
return long_url
( gumpf, shorter_rest ) = rest.split( '.', 1 )
shorter_url = scheme + '://' + shorter_rest
return shorter_url
new_serialisable_info = []
good_seeds = set()
for ( seed, seed_info ) in old_serialisable_info:
try:
parse = urlparse.urlparse( seed )
if 'media.tumblr.com' in parse.netloc:
seed = Remove68Subdomain( seed )
seed = ConvertRegularToRawURL( seed )
seed = ClientData.ConvertHTTPToHTTPS( seed )
if 'pixiv.net' in parse.netloc:
seed = ClientData.ConvertHTTPToHTTPS( seed )
if seed in good_seeds: # we hit a dupe, so skip it
continue
except:
pass
good_seeds.add( seed )
new_serialisable_info.append( ( seed, seed_info ) )
return ( 5, new_serialisable_info )
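
To make the v4->v5 seed cache update concrete, here is the full transformation on the example url from the comments, in the order the update applies it (ConvertHTTPToHTTPS is assumed to simply swap the scheme):

    url = 'http://68.media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_500.jpg'
    
    url = Remove68Subdomain( url )
    # http://media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_500.jpg
    
    url = ConvertRegularToRawURL( url )
    # http://media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg
    
    url = ClientData.ConvertHTTPToHTTPS( url )
    # https://media.tumblr.com/5af0d991f26ef9fdad5a0c743fb1eca2/tumblr_opl012ZBOu1tiyj7vo1_raw.jpg
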
def AddSeeds( self, seeds ):
@ -1883,9 +1978,21 @@ class SeedCache( HydrusSerialisable.SerialisableBase ):
for seed in seeds:
if seed in self._seeds_to_info:
if seed.startswith( 'http' ):
self._seeds_ordered.remove( seed )
search_seeds = ClientData.GetSearchURLs( seed )
else:
search_seeds = [ seed ]
for search_seed in search_seeds:
if search_seed in self._seeds_to_info:
continue
self._seeds_ordered.append( seed )
@ -2085,7 +2192,24 @@ class SeedCache( HydrusSerialisable.SerialisableBase ):
with self._lock:
return seed in self._seeds_to_info
if seed.startswith( 'http' ):
search_seeds = ClientData.GetSearchURLs( seed )
else:
search_seeds = [ seed ]
for search_seed in search_seeds:
if search_seed in self._seeds_to_info:
return True
return False
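
Both this membership test and the AddSeeds dedupe above lean on ClientData.GetSearchURLs, which per the changelog makes url caches test both the http and https versions of a url. A plausible minimal implementation, assuming that is all it does:

    def GetSearchURLs( url ):
        
        # sketch only: yield the url plus its http/https twin, so caches
        # treat the two schemes as the same file
        
        search_urls = [ url ]
        
        if url.startswith( 'http://' ):
            
            search_urls.append( 'https://' + url[ len( 'http://' ) : ] )
            
        elif url.startswith( 'https://' ):
            
            search_urls.append( 'http://' + url[ len( 'https://' ) : ] )
            
        
        return search_urls
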
@ -2811,6 +2935,10 @@ class ThreadWatcherImport( HydrusSerialisable.SerialisableBase ):
try:
with self._lock:
self._RegenerateSeedCacheStatus( page_key )
file_original_filename = self._urls_to_filenames[ file_url ]
downloaded_tags = [ 'filename:' + file_original_filename ]
@ -3225,7 +3353,8 @@ class URLsImport( HydrusSerialisable.SerialisableBase ):
self._paused = False
self._seed_cache_status = ( 'initialising', ( 0, 1 ) )
self._file_download_hook = None
self._download_control_set = None
self._download_control_clear = None
self._lock = threading.Lock()
@ -3260,8 +3389,6 @@ class URLsImport( HydrusSerialisable.SerialisableBase ):
def _WorkOnFiles( self, page_key ):
do_wait = True
file_url = self._urls_cache.GetNextSeed( CC.STATUS_UNKNOWN )
if file_url is None:
@ -3271,8 +3398,15 @@ class URLsImport( HydrusSerialisable.SerialisableBase ):
try:
with self._lock:
self._RegenerateSeedCacheStatus( page_key )
( status, hash ) = HG.client_controller.Read( 'url_status', file_url )
url_not_known_beforehand = status == CC.STATUS_NEW
if status == CC.STATUS_DELETED:
if not self._import_file_options.GetExcludeDeleted():
@ -3287,21 +3421,64 @@ class URLsImport( HydrusSerialisable.SerialisableBase ):
try:
report_hooks = []
network_job = ClientNetworking.NetworkJob( 'GET', file_url, temp_path = temp_path )
with self._lock:
if self._file_download_hook is not None:
if self._download_control_set is not None:
report_hooks.append( self._file_download_hook )
wx.CallAfter( self._download_control_set, network_job )
HG.client_controller.DoHTTP( HC.GET, file_url, report_hooks = report_hooks, temp_path = temp_path )
try:
HG.client_controller.network_engine.AddJob( network_job )
while not network_job.IsDone():
time.sleep( 0.1 )
finally:
if self._download_control_clear is not None:
wx.CallAfter( self._download_control_clear )
client_files_manager = HG.client_controller.client_files_manager
( status, hash ) = client_files_manager.ImportFile( temp_path, import_file_options = self._import_file_options )
if HG.view_shutdown:
raise HydrusExceptions.ShutdownException()
elif network_job.HasError():
status = CC.STATUS_FAILED
self._urls_cache.UpdateSeedStatus( file_url, status, note = network_job.GetErrorText() )
time.sleep( 2 )
elif network_job.IsCancelled():
status = CC.STATUS_SKIPPED
self._urls_cache.UpdateSeedStatus( file_url, status, note = 'cancelled during download!' )
else:
( status, hash ) = HG.client_controller.client_files_manager.ImportFile( temp_path, import_file_options = self._import_file_options )
self._urls_cache.UpdateSeedStatus( file_url, status )
if url_not_known_beforehand and hash is not None:
service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ HydrusData.ContentUpdate( HC.CONTENT_TYPE_URLS, HC.CONTENT_UPDATE_ADD, ( hash, ( file_url, ) ) ) ] }
HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
finally:
@ -3310,15 +3487,9 @@ class URLsImport( HydrusSerialisable.SerialisableBase ):
else:
do_wait = False
self._urls_cache.UpdateSeedStatus( file_url, status )
service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ HydrusData.ContentUpdate( HC.CONTENT_TYPE_URLS, HC.CONTENT_UPDATE_ADD, ( hash, ( file_url, ) ) ) ] }
HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
self._urls_cache.UpdateSeedStatus( file_url, status )
if status in ( CC.STATUS_SUCCESSFUL, CC.STATUS_REDUNDANT ):
( media_result, ) = HG.client_controller.Read( 'media_results', ( hash, ) )
@ -3339,8 +3510,6 @@ class URLsImport( HydrusSerialisable.SerialisableBase ):
self._urls_cache.UpdateSeedStatus( file_url, status, exception = e )
wx.CallAfter( self._file_download_hook, 1, 0 )
with self._lock:
self._RegenerateSeedCacheStatus( page_key )
@ -3348,11 +3517,6 @@ class URLsImport( HydrusSerialisable.SerialisableBase ):
HG.client_controller.pub( 'update_status', page_key )
if do_wait:
ClientData.WaitPolitely( page_key )
return True
@ -3388,6 +3552,10 @@ class URLsImport( HydrusSerialisable.SerialisableBase ):
HG.client_controller.WaitUntilPubSubsEmpty()
except HydrusExceptions.ShutdownException:
return
except Exception as e:
HydrusData.ShowException( e )
@ -3437,11 +3605,12 @@ class URLsImport( HydrusSerialisable.SerialisableBase ):
def SetDownloadHook( self, hook ):
def SetDownloadControl( self, download_control ):
with self._lock:
self._file_download_hook = hook
self._download_control_set = download_control.SetNetworkJob
self._download_control_clear = download_control.ClearNetworkJob

View File

@ -97,6 +97,60 @@ def ConvertDomainIntoAllApplicableDomains( domain ):
return domains
def ConvertStatusCodeAndDataIntoExceptionInfo( status_code, data ):
error_text = data
if len( error_text ) > 1024:
large_chunk = error_text[:4096]
smaller_chunk = large_chunk[:256]
HydrusData.DebugPrint( large_chunk )
error_text = 'The server\'s error text was too long to display. The first part follows, while a larger chunk has been written to the log.'
error_text += os.linesep
error_text += smaller_chunk
if status_code == 304:
eclass = HydrusExceptions.NotModifiedException
elif status_code == 401:
eclass = HydrusExceptions.PermissionException
elif status_code == 403:
eclass = HydrusExceptions.ForbiddenException
elif status_code == 404:
eclass = HydrusExceptions.NotFoundException
elif status_code == 419:
eclass = HydrusExceptions.SessionException
elif status_code == 426:
eclass = HydrusExceptions.NetworkVersionException
elif status_code >= 500:
eclass = HydrusExceptions.ServerException
else:
eclass = HydrusExceptions.NetworkException
e = eclass( error_text )
return ( e, error_text )
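
This helper replaces the placeholder exception info in the network job's error path below. Usage is direct (the status code and body here are illustrative):

    ( e, error_text ) = ConvertStatusCodeAndDataIntoExceptionInfo( 403, 'no api key' )
    
    # e is now a HydrusExceptions.ForbiddenException carrying the error text;
    # the job stores it via _SetError( e, error_text ), and a caller can
    # fetch it back with GetErrorException/GetErrorText
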
def ConvertURLIntoDomain( url ):
parser_result = urlparse.urlparse( url )
@ -1030,7 +1084,7 @@ class HTTPConnection( object ):
class NetworkBandwidthManager( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_BANDWIDTH_MANAGER
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER
SERIALISABLE_VERSION = 1
def __init__( self ):
@ -1039,12 +1093,14 @@ class NetworkBandwidthManager( HydrusSerialisable.SerialisableBase ):
self.engine = None
self._dirty = False
self._lock = threading.Lock()
self._network_contexts_to_bandwidth_trackers = collections.defaultdict( HydrusNetworking.BandwidthTracker )
self._network_contexts_to_bandwidth_rules = {}
for context_type in [ CC.NETWORK_CONTEXT_GLOBAL, CC.NETWORK_CONTEXT_HYDRUS, CC.NETWORK_CONTEXT_DOMAIN, CC.NETWORK_CONTEXT_DOWNLOADER, CC.NETWORK_CONTEXT_SUBSCRIPTION ]:
for context_type in [ CC.NETWORK_CONTEXT_GLOBAL, CC.NETWORK_CONTEXT_HYDRUS, CC.NETWORK_CONTEXT_DOMAIN, CC.NETWORK_CONTEXT_DOWNLOADER, CC.NETWORK_CONTEXT_DOWNLOADER_QUERY, CC.NETWORK_CONTEXT_SUBSCRIPTION ]:
self._network_contexts_to_bandwidth_rules[ NetworkContext( context_type ) ] = HydrusNetworking.BandwidthRules()
@ -1062,31 +1118,36 @@ class NetworkBandwidthManager( HydrusSerialisable.SerialisableBase ):
def _GetSerialisableInfo( self ):
serialisable_global_tracker = self._global_bandwidth_tracker.GetSerialisableTuple()
serialisable_global_rules = self._global_bandwidth_rules.GetSerialisableTuple()
all_serialisable_trackers = [ ( network_context.GetSerialisableTuple(), tracker.GetSerialisableTuple() ) for ( network_context, tracker ) in self._network_contexts_to_bandwidth_trackers.items() ]
all_serialisable_rules = [ ( network_context.GetSerialisableTuple(), rules.GetSerialisableTuple() ) for ( network_context, rules ) in self._network_contexts_to_bandwidth_rules.items() ]
all_serialisable_trackers = [ ( domain, tracker.GetSerialisableTuple() ) for ( domain, tracker ) in self._network_contexts_to_bandwidth_trackers ]
all_serialisable_rules = [ ( domain, rules.GetSerialisableTuple() ) for ( domain, rules ) in self._network_contexts_to_bandwidth_rules ]
return ( serialisable_global_tracker, serialisable_global_rules, all_serialisable_trackers, all_serialisable_rules )
return ( all_serialisable_trackers, all_serialisable_rules )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( serialisable_global_tracker, serialisable_global_rules, all_serialisable_trackers, all_serialisable_rules ) = serialisable_info
( all_serialisable_trackers, all_serialisable_rules ) = serialisable_info
self._global_bandwidth_tracker = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_global_tracker )
self._global_bandwidth_rules = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_global_rules )
for ( domain, serialisable_tracker ) in all_serialisable_trackers:
for ( serialisable_network_context, serialisable_tracker ) in all_serialisable_trackers:
self._network_contexts_to_bandwidth_trackers[ domain ] = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_tracker )
network_context = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_network_context )
tracker = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_tracker )
self._network_contexts_to_bandwidth_trackers[ network_context ] = tracker
for ( domain, serialisable_rules ) in all_serialisable_rules:
for ( serialisable_network_context, serialisable_rules ) in all_serialisable_rules:
self._network_contexts_to_bandwidth_rules[ domain ] = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_rules )
network_context = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_network_context )
rules = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_rules )
self._network_contexts_to_bandwidth_rules[ network_context ] = rules
def _SetDirty( self ):
self._dirty = True
def CanContinue( self, network_contexts ):
@ -1145,6 +1206,8 @@ class NetworkBandwidthManager( HydrusSerialisable.SerialisableBase ):
self._SetDirty()
def GetDomains( self, history_time_delta_threshold = 86400 * 30 ):
@ -1189,6 +1252,14 @@ class NetworkBandwidthManager( HydrusSerialisable.SerialisableBase ):
def IsDirty( self ):
with self._lock:
return self._dirty
def ReportDataUsed( self, network_contexts, num_bytes ):
with self._lock:
@ -1198,6 +1269,8 @@ class NetworkBandwidthManager( HydrusSerialisable.SerialisableBase ):
self._network_contexts_to_bandwidth_trackers[ network_context ].ReportDataUsed( num_bytes )
self._SetDirty()
def ReportRequestUsed( self, network_contexts ):
@ -1209,6 +1282,16 @@ class NetworkBandwidthManager( HydrusSerialisable.SerialisableBase ):
self._network_contexts_to_bandwidth_trackers[ network_context ].ReportRequestUsed()
self._SetDirty()
def SetClean( self ):
with self._lock:
self._dirty = False
def SetRules( self, network_context, bandwidth_rules ):
@ -1219,7 +1302,7 @@ class NetworkBandwidthManager( HydrusSerialisable.SerialisableBase ):
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_BANDWIDTH_MANAGER ] = NetworkBandwidthManager
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER ] = NetworkBandwidthManager
class NetworkContext( HydrusSerialisable.SerialisableBase ):
@ -1253,14 +1336,14 @@ class NetworkContext( HydrusSerialisable.SerialisableBase ):
if self.context_data is None:
serialisable_context_data = self.context_data.encode( 'hex' )
serialisable_context_data = self.context_data
else:
serialisable_context_data = self.context_data
serialisable_context_data = self.context_data.encode( 'hex' )
return ( self.context_type, self.context_data )
return ( self.context_type, serialisable_context_data )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
@ -1360,8 +1443,6 @@ class NetworkEngine( object ):
job.SetStatus( u'waiting on bandwidth\u2026' )
job.Sleep( 5 )
return True
else:
@ -1486,11 +1567,6 @@ class NetworkEngine( object ):
self._is_shutdown = True
def Start( self ):
self.controller.CallToThread( self.MainLoop )
def Shutdown( self ):
self._local_shutdown = True
@ -1519,7 +1595,6 @@ class NetworkJob( object ):
self._stream_io = cStringIO.StringIO()
self._has_error = False
self._error_exception = None
self._error_text = None
@ -1527,11 +1602,11 @@ class NetworkJob( object ):
self._is_cancelled = False
self._bandwidth_manual_override = False
self._status_code = None
self._last_time_ongoing_bandwidth_failed = 0
self._status_text = u'initialising\u2026'
self._num_bytes_read = 0
self._num_bytes_to_read = None
self._num_bytes_to_read = 1
self._network_contexts = self._GenerateNetworkContexts()
@ -1594,57 +1669,65 @@ class NetworkJob( object ):
def _ObeysBandwidth( self ):
return self._bandwidth_manual_override or self._for_login
return not ( self._bandwidth_manual_override or self._for_login )
def _OngoingBandwidthOK( self ):
if self._ObeysBandwidth():
now = HydrusData.GetNow()
if now == self._last_time_ongoing_bandwidth_failed: # it won't have changed, so no point spending any cpu checking
return self.engine.bandwidth_manager.CanContinue( self._network_contexts )
return False
else:
return True
result = self.engine.bandwidth_manager.CanContinue( self._network_contexts )
if not result:
self._last_time_ongoing_bandwidth_failed = now
return result
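
_OngoingBandwidthOK now both fixes the inverted _ObeysBandwidth test and remembers the last second in which the bandwidth manager said no, so a 0.1s download polling loop costs at most one manager hit per second. The caching trick in isolation (assuming integer unix-time seconds, as HydrusData.GetNow provides):

    class OngoingBandwidthSketch( object ):
        
        def __init__( self, can_continue ):
            
            self._can_continue = can_continue # callable that hits the bandwidth manager
            self._last_failed_second = 0
            
        
        def ok( self, now ):
            
            if now == self._last_failed_second:
                
                return False # same second as the last failure--nothing can have changed
                
            
            result = self._can_continue()
            
            if not result:
                
                self._last_failed_second = now
                
            
            return result
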
def _ReadResponse( self, response, stream_dest ):
if 'content-length' in response.headers:
with self._lock:
self._num_bytes_to_read = int( response.headers[ 'content-length' ] )
if 'content-length' in response.headers:
self._num_bytes_to_read = int( response.headers[ 'content-length' ] )
else:
self._num_bytes_to_read = None
try:
for chunk in response.iter_content( chunk_size = 65536 ):
for chunk in response.iter_content( chunk_size = 65536 ):
if self._IsCancelled():
if self._IsCancelled():
return
return
stream_dest.write( chunk )
chunk_length = len( chunk )
stream_dest.write( chunk )
chunk_length = len( chunk )
with self._lock:
self._num_bytes_read += chunk_length
self._ReportDataUsed( chunk_length )
self._WaitOnOngoingBandwidth()
finally:
num_bytes_used = self._num_bytes_read
if self._body is not None:
num_bytes_used += len( self._body )
self._ReportDataUsed( chunk_length )
self._WaitOnOngoingBandwidth()
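
The reworked _ReadResponse seeds the byte total from content-length when the server sends one, then streams 64KB chunks, counting bytes for the gauge and waiting politely on ongoing bandwidth between chunks. The same loop against a plain requests response, as a sketch without the locking and bandwidth calls:

    import requests
    
    def read_response_sketch( response, stream_dest, on_chunk ):
        
        # on_chunk( num_bytes_read, num_bytes_to_read ) feeds the gauge
        
        if 'content-length' in response.headers:
            
            num_bytes_to_read = int( response.headers[ 'content-length' ] )
            
        else:
            
            num_bytes_to_read = None
            
        
        num_bytes_read = 0
        
        for chunk in response.iter_content( chunk_size = 65536 ):
            
            stream_dest.write( chunk )
            
            num_bytes_read += len( chunk )
            
            on_chunk( num_bytes_read, num_bytes_to_read )
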
def _ReportDataUsed( self, num_bytes ):
@ -1669,7 +1752,6 @@ class NetworkJob( object ):
def _SetError( self, e, error ):
self._has_error = True
self._error_exception = e
self._error_text = error
@ -1681,11 +1763,16 @@ class NetworkJob( object ):
self._is_done = True
def _Sleep( self, seconds ):
self._wake_time = HydrusData.GetNow() + seconds
def _WaitOnOngoingBandwidth( self ):
while not self._OngoingBandwidthOK() and not self._IsCancelled():
time.sleep( 0.5 )
time.sleep( 0.1 )
@ -1693,9 +1780,19 @@ class NetworkJob( object ):
with self._lock:
if self._ObeysBandwidth:
if self._ObeysBandwidth():
return self.engine.bandwidth_manager.CanStart( self._network_contexts )
result = self.engine.bandwidth_manager.CanStart( self._network_contexts )
if not result:
self._status_text = u'waiting on bandwidth\u2026' # add the 'waiting ~4 minutes' text stuff here
# if the time to wait > 10s:
# self._Sleep( 10 )
return result
else:
@ -1782,7 +1879,7 @@ class NetworkJob( object ):
with self._lock:
return self._has_error
return self._error_exception is not None
@ -1820,7 +1917,14 @@ class NetworkJob( object ):
else:
return self.engine.login_manager.NeedsLogin( self._network_contexts )
result = self.engine.login_manager.NeedsLogin( self._network_contexts )
if result:
self._status_text = u'waiting on login\u2026'
return result
@ -1831,6 +1935,8 @@ class NetworkJob( object ):
self._bandwidth_manual_override = True
self._wake_time = 0
def SetStatus( self, text ):
@ -1845,7 +1951,7 @@ class NetworkJob( object ):
with self._lock:
self._wake_time = HydrusData.GetNow() + seconds
self._Sleep( seconds )
@ -1882,8 +1988,6 @@ class NetworkJob( object ):
self._ReportDataUsed( len( self._body ) )
self._status_code = response.status_code
if response.ok:
@ -1898,7 +2002,7 @@ class NetworkJob( object ):
else:
with open( self._temp_path, 'rb' ) as f:
with open( self._temp_path, 'wb' ) as f:
self._ReadResponse( response, f )
@ -1913,7 +2017,7 @@ class NetworkJob( object ):
with self._lock:
self._status_text = '404 - Not Found' # ConvertStatusCodeIntoEnglish( response.status_code )
self._status_text = str( response.status_code ) + ' - ' + str( response.reason )
self._ReadResponse( response, self._stream_io )
@ -1924,7 +2028,7 @@ class NetworkJob( object ):
data = self._stream_io.read()
( e, error_text ) = ( HydrusExceptions.NotFoundException( 'wew' ), 'Bunch of html that was returned or whatever.' ) # ConvertStatusCodeAndDataIntoExceptionInfo( response.status_code, data )
( e, error_text ) = ConvertStatusCodeAndDataIntoExceptionInfo( response.status_code, data )
self._SetError( e, error_text )
@ -1968,6 +2072,29 @@ class NetworkJobDownloader( NetworkJob ):
return network_contexts
class NetworkJobDownloaderQuery( NetworkJobDownloader ):
def __init__( self, downloader_page_key, downloader_key, method, url, body = None, temp_path = None ):
self._downloader_page_key = downloader_page_key
NetworkJobDownloader.__init__( self, downloader_key, method, url, body, temp_path = temp_path )
def _GenerateNetworkContexts( self ):
network_contexts = NetworkJob._GenerateNetworkContexts( self )
network_contexts.append( NetworkContext( CC.NETWORK_CONTEXT_DOWNLOADER_QUERY, self._downloader_page_key ) )
return network_contexts
def _GetSessionNetworkContext( self ):
return self._network_contexts[-2] # the downloader one
class NetworkJobSubscription( NetworkJobDownloader ):
def __init__( self, subscription_key, downloader_key, method, url, body = None, temp_path = None ):
@ -1993,11 +2120,11 @@ class NetworkJobSubscription( NetworkJobDownloader ):
class NetworkJobHydrus( NetworkJob ):
def __init__( self, service_key, method, url, body = None, temp_path = None ):
def __init__( self, service_key, method, url, body = None, temp_path = None, for_login = False ):
self._service_key = service_key
NetworkJob.__init__( self, method, url, body, temp_path = temp_path )
NetworkJob.__init__( self, method, url, body, temp_path = temp_path, for_login = for_login )
def _GenerateNetworkContexts( self ):
@ -2022,79 +2149,58 @@ class NetworkLoginManager( HydrusSerialisable.SerialisableBase ):
self._lock = threading.Lock()
# for every loginnable network_context, we need
# a login script
# hydrus script is different, obvs
# start with this
# domain login scripts are complicated
# say this is notimplemented yet, then convert hf and pixiv over
# current login status
# current login expiry
self._network_contexts_to_logins = {}
# essentially, we need to answer:
# needslogin
# canlogin
# dologin
# a login has:
# a network_context it works for (PRIMARY KEY)
# a login script
# rules to check validity in cookies in a current session (fold that into the login script, which may have several stages of this)
# current user/pass/whatever
# current script validity
# current credentials validity
# recent error? some way of dealing with 'domain is currently down, so try again later'
self._network_contexts_to_sessions = {}
def _GenerateSession( self, network_context ):
session = requests.Session()
session.headers.update( { 'User-Agent', 'hydrus/' + str( HC.NETWORK_VERSION ) } )
if network_context.context_type == CC.NETWORK_CONTEXT_HYDRUS:
session.verify = False
return session
# so, we fetch all the logins, ask them for the network contexts so we can set up the dict
def _GetSerialisableInfo( self ):
serialisable_network_contexts_to_sessions = [ ( network_context.GetSerialisableTuple(), cPickle.dumps( session ) ) for ( network_context, session ) in self._network_contexts_to_sessions.items() ]
return serialisable_network_contexts_to_sessions
return {}
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
serialisable_network_contexts_to_sessions = serialisable_info
for ( serialisable_network_context, pickled_session ) in serialisable_network_contexts_to_sessions:
network_context = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_network_context )
session = cPickle.loads( pickled_session )
self._network_contexts_to_sessions[ network_context ] = session
self._network_contexts_to_logins = {}
def ClearSession( self, network_context ):
def CanLogin( self, network_contexts ):
with self._lock:
if network_context in self._network_contexts_to_sessions:
del self._network_contexts_to_sessions[ network_context ]
# look them up in our structure
# if they have a login, is it valid?
# valid means we have tested credentials and it hasn't been invalidated by a parsing error or similar
# I think this just means saying Login.CanLogin( credentials )
return False
def GetSession( self, network_context ):
def GenerateLoginProcess( self, network_contexts ):
with self._lock:
if network_context not in self._network_contexts_to_sessions:
self._network_contexts_to_sessions[ network_context ] = self._GenerateSession( network_context )
return self._network_contexts_to_sessions[ network_context ]
# look up the logins
# login_process = Login.GenerateLoginProcess
# say CallToThread( login_process.start, engine, credentials )
# return login_process
# the login can update itself if there are problems. it should also inform the user
raise NotImplementedError()
def NeedsLogin( self, network_contexts ):
# look up the network contexts in our structure
# if they have a login, see if they match the 'is logged in' predicates
# otherwise:
return False
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_LOGIN_MANAGER ] = NetworkLoginManager
@ -2110,6 +2216,8 @@ class NetworkSessionManager( HydrusSerialisable.SerialisableBase ):
self.engine = None
self._dirty = False
self._lock = threading.Lock()
self._network_contexts_to_sessions = {}
@ -2119,7 +2227,7 @@ class NetworkSessionManager( HydrusSerialisable.SerialisableBase ):
session = requests.Session()
session.headers.update( { 'User-Agent', 'hydrus/' + str( HC.NETWORK_VERSION ) } )
session.headers.update( { 'User-Agent' : 'hydrus/' + str( HC.NETWORK_VERSION ) } )
if network_context.context_type == CC.NETWORK_CONTEXT_HYDRUS:
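
The one-character fix above deserves a note: the old code passed a set literal to headers.update, which raises a ValueError when requests tries to unpack each string as a key/value pair; the new code passes the intended dict. In isolation ('18' stands in for HC.NETWORK_VERSION):

    import requests
    
    session = requests.Session()
    
    # old, a set literal--raises ValueError inside update():
    # session.headers.update( { 'User-Agent', 'hydrus/18' } )
    
    # new, a dict--sets the header as intended:
    session.headers.update( { 'User-Agent' : 'hydrus/18' } )
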
@ -2143,12 +2251,17 @@ class NetworkSessionManager( HydrusSerialisable.SerialisableBase ):
for ( serialisable_network_context, pickled_session ) in serialisable_network_contexts_to_sessions:
network_context = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_network_context )
session = cPickle.loads( pickled_session )
session = cPickle.loads( str( pickled_session ) )
self._network_contexts_to_sessions[ network_context ] = session
def _SetDirty( self ):
self._dirty = True
def ClearSession( self, network_context ):
with self._lock:
@ -2169,8 +2282,26 @@ class NetworkSessionManager( HydrusSerialisable.SerialisableBase ):
self._network_contexts_to_sessions[ network_context ] = self._GenerateSession( network_context )
self._SetDirty()
return self._network_contexts_to_sessions[ network_context ]
def IsDirty( self ):
with self._lock:
return self._dirty
def SetClean( self ):
with self._lock:
self._dirty = False
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER ] = NetworkSessionManager

View File

@ -49,7 +49,7 @@ options = {}
# Misc
NETWORK_VERSION = 18
SOFTWARE_VERSION = 262
SOFTWARE_VERSION = 263
UNSCALED_THUMBNAIL_DIMENSIONS = ( 200, 200 )

View File

@ -239,7 +239,8 @@ class HydrusController( object ):
if not self._no_daemons:
self._daemons.append( HydrusThreading.DAEMONWorker( self, 'SleepCheck', HydrusDaemons.DAEMONSleepCheck, period = 120 ) )
self._daemons.append( HydrusThreading.DAEMONWorker( self, 'MaintainMemory', HydrusDaemons.DAEMONMaintainMemory, period = 300 ) )
self._daemons.append( HydrusThreading.DAEMONWorker( self, 'MaintainMemoryFast', HydrusDaemons.DAEMONMaintainMemoryFast, period = 60 ) )
self._daemons.append( HydrusThreading.DAEMONWorker( self, 'MaintainMemorySlow', HydrusDaemons.DAEMONMaintainMemorySlow, period = 300 ) )
self._daemons.append( HydrusThreading.DAEMONBackgroundWorker( self, 'MaintainDB', HydrusDaemons.DAEMONMaintainDB, period = 300, init_wait = 60 ) )
@ -262,7 +263,7 @@ class HydrusController( object ):
pass
def MaintainMemory( self ):
def MaintainMemorySlow( self ):
sys.stdout.flush()
sys.stderr.flush()

View File

@ -129,6 +129,7 @@ class HydrusDB( object ):
self._db_name = db_name
self._no_wal = no_wal
self._transaction_started = 0
self._in_transaction = False
self._connection_timestamp = 0
@ -288,6 +289,7 @@ class HydrusDB( object ):
self._c.execute( 'BEGIN IMMEDIATE;' )
self._transaction_started = HydrusData.GetNow()
self._in_transaction = True
@ -515,7 +517,7 @@ class HydrusDB( object ):
self._current_status = 'db write locked'
self._controller.pub( 'refresh_status' )
self.publish_status_update()
self._BeginImmediate()
@ -524,7 +526,7 @@ class HydrusDB( object ):
self._current_status = 'db read locked'
self._controller.pub( 'refresh_status' )
self.publish_status_update()
if job_type in ( 'read', 'read_write' ):
@ -539,7 +541,7 @@ class HydrusDB( object ):
self._current_status = 'db committing'
self._controller.pub( 'refresh_status' )
self.publish_status_update()
self._Commit()
@ -576,7 +578,7 @@ class HydrusDB( object ):
self._current_status = ''
self._controller.pub( 'refresh_status' )
self.publish_status_update()
@ -683,6 +685,11 @@ class HydrusDB( object ):
self._pubsubs.append( ( topic, args, kwargs ) )
def publish_status_update( self ):
pass
def CurrentlyDoingJob( self ):
return self._currently_doing_job
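
publish_status_update is a new no-op hook on the base db class, replacing the direct 'refresh_status' pubs below; the client subclass presumably overrides it to do the pub, while a headless server db keeps the no-op. A sketch of the assumed override (not shown in this diff):

    class ClientDB( HydrusDB.HydrusDB ):
        
        def publish_status_update( self ):
            
            # the gui wants the statusbar redrawn when the db lock state
            # changes; the server has no statusbar, so it keeps the base no-op
            
            self._controller.pub( 'refresh_status' )
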
@ -745,7 +752,7 @@ class HydrusDB( object ):
self._currently_doing_job = True
self._current_job_name = job.ToString()
self._controller.pub( 'refresh_status' )
self.publish_status_update()
self._pubsubs = []
@ -780,7 +787,7 @@ class HydrusDB( object ):
self._currently_doing_job = False
self._current_job_name = ''
self._controller.pub( 'refresh_status' )
self.publish_status_update()
except Queue.Empty:

View File

@ -11,9 +11,13 @@ def DAEMONMaintainDB( controller ):
controller.MaintainDB()
def DAEMONMaintainMemory( controller ):
def DAEMONMaintainMemoryFast( controller ):
controller.MaintainMemory()
controller.pub( 'memory_maintenance_pulse' )
def DAEMONMaintainMemorySlow( controller ):
controller.MaintainMemorySlow()
def DAEMONSleepCheck( controller ):

View File

@ -313,7 +313,7 @@ class BandwidthTracker( HydrusSerialisable.SerialisableBase ):
if time_delta < self.MAX_SECONDS_TIME_DELTA:
window = 1
window = 0
counter = self._seconds_bytes
elif time_delta < self.MAX_MINUTES_TIME_DELTA:
@ -336,7 +336,7 @@ class BandwidthTracker( HydrusSerialisable.SerialisableBase ):
if time_delta < self.MAX_SECONDS_TIME_DELTA:
window = 1
window = 0
counter = self._seconds_requests
elif time_delta < self.MAX_MINUTES_TIME_DELTA:
@ -391,7 +391,7 @@ class BandwidthTracker( HydrusSerialisable.SerialisableBase ):
if time_delta is not None and bandwidth_type == HC.BANDWIDTH_TYPE_DATA and time_delta <= 5:
usage = self._GetWeightedApproximateUsage( bandwidth_type, time_delta )
usage = self._GetWeightedApproximateUsage( time_delta )
else:
@ -403,26 +403,38 @@ class BandwidthTracker( HydrusSerialisable.SerialisableBase ):
return usage
def _GetWeightedApproximateUsage( self, bandwidth_type, time_delta ):
def _GetWeightedApproximateUsage( self, time_delta ):
LONG_DELTA = time_delta * 15
SHORT_DELTA = time_delta * 3
SEARCH_DELTA = time_delta * 5
SHORT_WEIGHT = 3
window = 0
counter = self._seconds_bytes
usage_long = self._GetRawUsage( bandwidth_type, LONG_DELTA )
usage_short = self._GetRawUsage( bandwidth_type, SHORT_DELTA )
SEARCH_DELTA += window
total_weighted_usage = usage_long + ( usage_short * SHORT_WEIGHT )
now = HydrusData.GetNow()
total_weight = LONG_DELTA + ( SHORT_DELTA * SHORT_WEIGHT )
since = now - SEARCH_DELTA
# since this is in bytes, an int for the final answer is fine and proper
usage = int( total_weighted_usage / total_weight )
valid_keys = [ key for key in counter.keys() if key >= since ]
# usage per sec would be this / time_delta
if len( valid_keys ) == 0:
return 0
return usage
# If we want the average speed over past five secs but nothing has happened in sec 4 and 5, we don't want to count them
# otherwise your 1MB/s counts as 200KB/s
earliest_timestamp = min( valid_keys )
SAMPLE_DELTA = max( now - earliest_timestamp, 1 )
total_bytes = sum( ( counter[ key ] for key in valid_keys ) )
time_delta_average = total_bytes / SAMPLE_DELTA
return time_delta_average
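
The new short-delta estimate drops the old long/short weighted blend for a sampled average: look back over five times the requested delta, find the earliest second that actually recorded traffic, and divide total bytes by that real elapsed span--so idle trailing seconds no longer turn a 1MB/s burst into a reported 200KB/s. In isolation (a sketch that ignores the counter-window offset the real code adds):

    def short_delta_speed_sketch( seconds_bytes, now, time_delta ):
        
        # seconds_bytes: dict of unix second -> bytes recorded in that second
        
        since = now - ( time_delta * 5 )
        
        valid_keys = [ key for key in seconds_bytes if key >= since ]
        
        if len( valid_keys ) == 0:
            
            return 0
            
        
        # average over the span that actually saw traffic
        
        sample_delta = max( now - min( valid_keys ), 1 )
        
        return sum( seconds_bytes[ key ] for key in valid_keys ) // sample_delta
    
    print( short_delta_speed_sketch( { 100 : 1048576 }, 101, 5 ) ) # 1048576, i.e. ~1MB/s
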
def _MaintainCache( self ):

View File

@ -579,7 +579,7 @@ def OpenFileLocation( path ):
if HC.PLATFORM_WINDOWS:
cmd = 'explorer /select,"' + path + '"'
cmd = 'explorer /select, "' + path + '"'
elif HC.PLATFORM_OSX:

View File

@ -103,18 +103,13 @@ class HydrusPubSub( object ):
# do this _outside_ the lock, lol
if HG.pubsub_profile_mode:
pubsub_profilable = topic != 'message'
if HG.pubsub_profile_mode and pubsub_profilable:
summary = 'Profiling ' + HydrusData.ConvertIntToPrettyString( len( callables ) ) + ' x ' + topic
if topic == 'message':
HydrusData.Print( summary )
else:
HydrusData.ShowText( summary )
HydrusData.ShowText( summary )
for callable in callables:

View File

@ -47,7 +47,7 @@ SERIALISABLE_TYPE_SHORTCUT = 41
SERIALISABLE_TYPE_APPLICATION_COMMAND = 42
SERIALISABLE_TYPE_DUPLICATE_ACTION_OPTIONS = 43
SERIALISABLE_TYPE_TAG_CENSOR = 44
SERIALISABLE_TYPE_BANDWIDTH_MANAGER = 45
SERIALISABLE_TYPE_NETWORK_BANDWIDTH_MANAGER = 45
SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER = 46
SERIALISABLE_TYPE_NETWORK_CONTEXT = 47
SERIALISABLE_TYPE_NETWORK_LOGIN_MANAGER = 48

View File

@ -176,7 +176,7 @@ def GetFFMPEGInfoLines( path, count_frames_manually = False ):
def GetFFMPEGVideoProperties( path, count_frames_manually = False ):
lines = GetFFMPEGInfoLines( path )
lines = GetFFMPEGInfoLines( path, count_frames_manually )
if not ParseFFMPEGHasVideo( lines ):
@ -325,6 +325,10 @@ def GetMime( path ):
return HC.AUDIO_MP3
elif 'mp4' in mime_text:
return HC.VIDEO_MP4
elif mime_text == 'ogg':
return HC.AUDIO_OGG

View File

@ -3,6 +3,7 @@ import ClientNetworking
import collections
import HydrusConstants as HC
import HydrusData
import HydrusExceptions
import HydrusNetworking
import os
import TestConstants
@ -19,6 +20,8 @@ GOOD_RESPONSE = ''.join( chr( i ) for i in range( 256 ) )
# 256KB of gumpf
LONG_GOOD_RESPONSE = GOOD_RESPONSE * 4 * 256
BAD_RESPONSE = '500, it done broke'
@all_requests
def catch_all( url, request ):
@ -30,12 +33,27 @@ MOCK_SUBDOMAIN = 'top.wew.lad'
MOCK_URL = 'https://wew.lad/folder/request&key1=value1&key2=value2'
MOCK_SUBURL = 'https://top.wew.lad/folder2/request&key1=value1&key2=value2'
MOCK_HYDRUS_SERVICE_KEY = HydrusData.GenerateKey()
MOCK_HYDRUS_ADDRESS = '123.45.67.89'
MOCK_HYDRUS_DOMAIN = '123.45.67.89:45871'
MOCK_HYDRUS_URL = 'https://123.45.67.89:45871/muh_hydrus_command'
@urlmatch( netloc = 'wew.lad' )
def catch_wew_error( url, request ):
return { 'status_code' : 500, 'reason' : 'Internal Server Error', 'content' : BAD_RESPONSE }
@urlmatch( netloc = 'wew.lad' )
def catch_wew_ok( url, request ):
return GOOD_RESPONSE
@urlmatch( netloc = '123.45.67.89:45871' )
@urlmatch( netloc = MOCK_HYDRUS_ADDRESS )
def catch_hydrus_error( url, request ):
return { 'status_code' : 500, 'reason' : 'Internal Server Error', 'content' : BAD_RESPONSE }
@urlmatch( netloc = MOCK_HYDRUS_ADDRESS )
def catch_hydrus_ok( url, request ):
return GOOD_RESPONSE
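
These tests route all network traffic through httmock: urlmatch filters handlers by netloc, and any requests call made inside a with HTTMock( ... ) block is answered by the first matching handler--returning a plain string means a 200 with that content, while a dict sets status_code/reason/content as in catch_wew_error above. In miniature:

    import requests
    
    from httmock import HTTMock, urlmatch
    
    @urlmatch( netloc = 'wew.lad' )
    def catch_ok( url, request ):
        
        return 'wew'
    
    with HTTMock( catch_ok ):
        
        r = requests.get( 'https://wew.lad/folder/request' )
        
    
    print( r.text ) # 'wew'
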
@ -68,6 +86,7 @@ class TestBandwidthManager( unittest.TestCase ):
GLOBAL_NETWORK_CONTEXTS = [ ClientNetworking.GLOBAL_NETWORK_CONTEXT ]
DOMAIN_NETWORK_CONTEXTS = [ ClientNetworking.GLOBAL_NETWORK_CONTEXT, DOMAIN_NETWORK_CONTEXT ]
SUBDOMAIN_NETWORK_CONTEXTS = [ ClientNetworking.GLOBAL_NETWORK_CONTEXT, DOMAIN_NETWORK_CONTEXT, SUBDOMAIN_NETWORK_CONTEXT ]
#
fast_forward = HydrusData.GetNow() + 3600
@ -93,25 +112,25 @@ class TestBandwidthManager( unittest.TestCase ):
#
bm.SetRules( None, EMPTY_RULES )
bm.SetRules( MOCK_DOMAIN, EMPTY_RULES )
bm.SetRules( MOCK_SUBDOMAIN, EMPTY_RULES )
bm.SetRules( ClientNetworking.GLOBAL_NETWORK_CONTEXT, EMPTY_RULES )
bm.SetRules( DOMAIN_NETWORK_CONTEXT, EMPTY_RULES )
bm.SetRules( SUBDOMAIN_NETWORK_CONTEXT, EMPTY_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
bm.SetRules( None, PERMISSIVE_DATA_RULES )
bm.SetRules( MOCK_DOMAIN, PERMISSIVE_DATA_RULES )
bm.SetRules( MOCK_SUBDOMAIN, PERMISSIVE_DATA_RULES )
bm.SetRules( ClientNetworking.GLOBAL_NETWORK_CONTEXT, PERMISSIVE_DATA_RULES )
bm.SetRules( DOMAIN_NETWORK_CONTEXT, PERMISSIVE_DATA_RULES )
bm.SetRules( SUBDOMAIN_NETWORK_CONTEXT, PERMISSIVE_DATA_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
bm.SetRules( None, PERMISSIVE_REQUEST_RULES )
bm.SetRules( MOCK_DOMAIN, PERMISSIVE_REQUEST_RULES )
bm.SetRules( MOCK_SUBDOMAIN, PERMISSIVE_REQUEST_RULES )
bm.SetRules( ClientNetworking.GLOBAL_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES )
bm.SetRules( DOMAIN_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES )
bm.SetRules( SUBDOMAIN_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
@ -119,19 +138,19 @@ class TestBandwidthManager( unittest.TestCase ):
#
bm.SetRules( MOCK_SUBDOMAIN, RESTRICTIVE_DATA_RULES )
bm.SetRules( SUBDOMAIN_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
bm.SetRules( MOCK_SUBDOMAIN, RESTRICTIVE_REQUEST_RULES )
bm.SetRules( SUBDOMAIN_NETWORK_CONTEXT, RESTRICTIVE_REQUEST_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
bm.SetRules( MOCK_SUBDOMAIN, PERMISSIVE_REQUEST_RULES )
bm.SetRules( SUBDOMAIN_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
@ -139,49 +158,49 @@ class TestBandwidthManager( unittest.TestCase ):
#
bm.SetRules( MOCK_DOMAIN, RESTRICTIVE_DATA_RULES )
bm.SetRules( DOMAIN_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
bm.SetRules( MOCK_DOMAIN, RESTRICTIVE_REQUEST_RULES )
bm.SetRules( DOMAIN_NETWORK_CONTEXT, RESTRICTIVE_REQUEST_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
bm.SetRules( MOCK_DOMAIN, PERMISSIVE_REQUEST_RULES )
bm.SetRules( DOMAIN_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( MOCK_SUBDOMAIN ) )
self.assertTrue( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
#
bm.SetRules( None, RESTRICTIVE_DATA_RULES )
bm.SetRules( ClientNetworking.GLOBAL_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES )
self.assertFalse( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
bm.SetRules( None, RESTRICTIVE_REQUEST_RULES )
bm.SetRules( ClientNetworking.GLOBAL_NETWORK_CONTEXT, RESTRICTIVE_REQUEST_RULES )
self.assertFalse( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
bm.SetRules( None, PERMISSIVE_REQUEST_RULES )
bm.SetRules( ClientNetworking.GLOBAL_NETWORK_CONTEXT, PERMISSIVE_REQUEST_RULES )
self.assertTrue( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
self.assertTrue( bm.CanStart( MOCK_SUBDOMAIN ) )
self.assertTrue( bm.CanStart( SUBDOMAIN_NETWORK_CONTEXTS ) )
# add some rules for all
#
bm.SetRules( None, RESTRICTIVE_DATA_RULES )
bm.SetRules( MOCK_DOMAIN, RESTRICTIVE_REQUEST_RULES )
bm.SetRules( MOCK_DOMAIN, EMPTY_RULES )
bm.SetRules( ClientNetworking.GLOBAL_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES )
bm.SetRules( DOMAIN_NETWORK_CONTEXT, RESTRICTIVE_REQUEST_RULES )
bm.SetRules( DOMAIN_NETWORK_CONTEXT, EMPTY_RULES )
self.assertFalse( bm.CanStart( GLOBAL_NETWORK_CONTEXTS ) )
self.assertFalse( bm.CanStart( DOMAIN_NETWORK_CONTEXTS ) )
@ -208,7 +227,7 @@ class TestNetworkingEngine( unittest.TestCase ):
self.assertFalse( engine.IsRunning() )
self.assertFalse( engine.IsShutdown() )
engine.Start()
mock_controller.CallToThread( engine.MainLoop )
time.sleep( 0.1 )
@ -237,7 +256,7 @@ class TestNetworkingEngine( unittest.TestCase ):
self.assertFalse( engine.IsRunning() )
self.assertFalse( engine.IsShutdown() )
engine.Start()
mock_controller.CallToThread( engine.MainLoop )
time.sleep( 0.1 )
@ -252,11 +271,56 @@ class TestNetworkingEngine( unittest.TestCase ):
self.assertTrue( engine.IsShutdown() )
def test_engine_simple_job( self ):
mock_controller = TestConstants.MockController()
bandwidth_manager = ClientNetworking.NetworkBandwidthManager()
session_manager = ClientNetworking.NetworkSessionManager()
login_manager = ClientNetworking.NetworkLoginManager()
engine = ClientNetworking.NetworkEngine( mock_controller, bandwidth_manager, session_manager, login_manager )
self.assertFalse( engine.IsRunning() )
self.assertFalse( engine.IsShutdown() )
mock_controller.CallToThread( engine.MainLoop )
#
with HTTMock( catch_all ):
with HTTMock( catch_wew_ok ):
job = ClientNetworking.NetworkJob( 'GET', MOCK_URL )
engine.AddJob( job )
time.sleep( 0.1 )
self.assertTrue( job.IsDone() )
self.assertFalse( job.HasError() )
engine._new_work_to_do.set()
time.sleep( 0.1 )
self.assertEqual( len( engine._jobs_bandwidth_throttled ), 0 )
self.assertEqual( len( engine._jobs_login_throttled ), 0 )
self.assertEqual( len( engine._jobs_ready_to_start ), 0 )
self.assertEqual( len( engine._jobs_downloading ), 0 )
#
engine.Shutdown()
class TestNetworkingJob( unittest.TestCase ):
def _GetJob( self ):
def _GetJob( self, for_login = False ):
job = ClientNetworking.NetworkJob( 'GET', MOCK_URL )
job = ClientNetworking.NetworkJob( 'GET', MOCK_URL, for_login = for_login )
mock_controller = TestConstants.MockController()
bandwidth_manager = ClientNetworking.NetworkBandwidthManager()
@ -314,35 +378,94 @@ class TestNetworkingJob( unittest.TestCase ):
class TestNetworkingJobWeb( unittest.TestCase ):
def _GetJob( self ):
def test_bandwidth_exceeded( self ):
job = ClientNetworking.NetworkJob( 'GET', MOCK_URL )
RESTRICTIVE_DATA_RULES = HydrusNetworking.BandwidthRules()
controller = TestConstants.MockController()
RESTRICTIVE_DATA_RULES.AddRule( HC.BANDWIDTH_TYPE_DATA, None, 10 )
job.controller = controller
DOMAIN_NETWORK_CONTEXT = ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN, MOCK_DOMAIN )
return job
#
job = self._GetJob()
self.assertEqual( job.BandwidthOK(), True )
job.engine.bandwidth_manager.ReportDataUsed( [ DOMAIN_NETWORK_CONTEXT ], 50 )
job.engine.bandwidth_manager.SetRules( DOMAIN_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES )
self.assertEqual( job.BandwidthOK(), False )
#
job = self._GetJob( for_login = True )
self.assertEqual( job.BandwidthOK(), True )
job.engine.bandwidth_manager.ReportDataUsed( [ DOMAIN_NETWORK_CONTEXT ], 50 )
job.engine.bandwidth_manager.SetRules( DOMAIN_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES )
self.assertEqual( job.BandwidthOK(), True )
def test_bandwidth_ok( self ):
# test bandwidth override
PERMISSIVE_DATA_RULES = HydrusNetworking.BandwidthRules()
# test bandwidth ok
# test it not ok
PERMISSIVE_DATA_RULES.AddRule( HC.BANDWIDTH_TYPE_DATA, None, 1048576 )
# repeat for the login one
DOMAIN_NETWORK_CONTEXT = ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN, MOCK_DOMAIN )
pass
#
job = self._GetJob()
job.engine.bandwidth_manager.ReportDataUsed( [ DOMAIN_NETWORK_CONTEXT ], 50 )
self.assertEqual( job.BandwidthOK(), True )
job.engine.bandwidth_manager.SetRules( DOMAIN_NETWORK_CONTEXT, PERMISSIVE_DATA_RULES )
self.assertEqual( job.BandwidthOK(), True )
#
job = self._GetJob( for_login = True )
job.engine.bandwidth_manager.ReportDataUsed( [ DOMAIN_NETWORK_CONTEXT ], 50 )
self.assertEqual( job.BandwidthOK(), True )
job.engine.bandwidth_manager.SetRules( DOMAIN_NETWORK_CONTEXT, PERMISSIVE_DATA_RULES )
self.assertEqual( job.BandwidthOK(), True )
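Both bandwidth tests follow the same pattern: build rules, attach them to a context through the manager, and ask the job whether it may run. A minimal sketch of that pattern, assuming AddRule's arguments are ( bandwidth type, time window in seconds or None for all-time, allowance ):

import ClientConstants as CC
import ClientNetworking
import HydrusConstants as HC
import HydrusNetworking

rules = HydrusNetworking.BandwidthRules()

rules.AddRule( HC.BANDWIDTH_TYPE_DATA, None, 10 ) # no more than 10 bytes, all-time

context = ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN, 'wew.lad' ) # hypothetical domain

bandwidth_manager = ClientNetworking.NetworkBandwidthManager()

bandwidth_manager.ReportDataUsed( [ context ], 50 ) # 50 bytes already spent here
bandwidth_manager.SetRules( context, rules ) # now capped at 10

# a normal job in this context now fails BandwidthOK(), while a
# for_login = True job still passes--login traffic is exempt so a
# session can always be (re)established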
def test_bandwidth_reported( self ):
with HTTMock( catch_all ):
with HTTMock( catch_wew_ok ):
job = self._GetJob()
job.Start()
bm = job.engine.bandwidth_manager
tracker = bm.GetTracker( ClientNetworking.GLOBAL_NETWORK_CONTEXT )
self.assertEqual( tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, None ), 1 )
self.assertEqual( tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, None ), 256 )
def test_done_ok( self ):
return # need to flush out session, bandwidth, login code
with HTTMock( catch_all ):
with HTTMock( catch_wew_ok ):
@@ -355,18 +478,32 @@ class TestNetworkingJobWeb( unittest.TestCase ):
self.assertEqual( job.GetContent(), GOOD_RESPONSE )
self.assertEqual( job.GetStatus(), ( 'done!', 256, 256, None ) )
def test_error( self ):
job = self._GetJob()
# do a requests job that cancels
# haserror
# geterrorexception
# geterrortext
with HTTMock( catch_all ):
with HTTMock( catch_wew_error ):
job = self._GetJob()
job.Start()
self.assertTrue( job.HasError() )
self.assertEqual( job.GetContent(), BAD_RESPONSE )
self.assertEqual( type( job.GetErrorException() ), HydrusExceptions.ServerException )
self.assertEqual( job.GetErrorText(), BAD_RESPONSE )
self.assertEqual( job.GetStatus(), ( '500 - Internal Server Error', 18, 18, None ) )
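Together the assertions map a finished job's whole error surface. A sketch of a consumer, assuming job is a NetworkJob whose Start() has already returned; GetStatus()'s fourth element is None in these tests and is not interpreted here:

if job.HasError():
    
    e = job.GetErrorException() # e.g. HydrusExceptions.ServerException for a 500
    
    error_text = job.GetErrorText() # human-friendly version of the failure
    
else:
    
    data = job.GetContent() # the raw response body

( status_text, bytes_done, bytes_total, extra ) = job.GetStatus() # extra is None here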
def test_generate_login_process( self ):
@@ -385,33 +522,95 @@ class TestNetworkingJobHydrus( unittest.TestCase ):
class TestNetworkingJobHydrus( unittest.TestCase ):
def _GetJob( self ):
def _GetJob( self, for_login = False ):
job = ClientNetworking.NetworkJob( 'GET', 'https://123.45.67.89:45871/muh_hydrus_command' )
job = ClientNetworking.NetworkJobHydrus( MOCK_HYDRUS_SERVICE_KEY, 'GET', MOCK_HYDRUS_URL, for_login = for_login )
controller = TestConstants.MockController()
mock_controller = TestConstants.MockController()
bandwidth_manager = ClientNetworking.NetworkBandwidthManager()
session_manager = ClientNetworking.NetworkSessionManager()
login_manager = ClientNetworking.NetworkLoginManager()
job.controller = controller
engine = ClientNetworking.NetworkEngine( mock_controller, bandwidth_manager, session_manager, login_manager )
job.engine = engine
return job
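This hunk is the refactor the rest of the diff leans on: a job no longer carries a bare controller but an engine that owns the bandwidth, session, and login managers. A sketch of the new wiring, with a hypothetical service key standing in for the test constant:

import os

import ClientNetworking
import TestConstants

MOCK_HYDRUS_SERVICE_KEY = os.urandom( 32 ) # assumed: a 32-byte service key
MOCK_HYDRUS_URL = 'https://123.45.67.89:45871/muh_hydrus_command'

mock_controller = TestConstants.MockController()

engine = ClientNetworking.NetworkEngine(
    mock_controller,
    ClientNetworking.NetworkBandwidthManager(),
    ClientNetworking.NetworkSessionManager(),
    ClientNetworking.NetworkLoginManager() )

job = ClientNetworking.NetworkJobHydrus( MOCK_HYDRUS_SERVICE_KEY, 'GET', MOCK_HYDRUS_URL )

job.engine = engine # jobs now consult the engine's managers, not a bare controller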
def test_bandwidth_exceeded( self ):
RESTRICTIVE_DATA_RULES = HydrusNetworking.BandwidthRules()
RESTRICTIVE_DATA_RULES.AddRule( HC.BANDWIDTH_TYPE_DATA, None, 10 )
HYDRUS_NETWORK_CONTEXT = ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_HYDRUS, MOCK_HYDRUS_SERVICE_KEY )
#
job = self._GetJob()
self.assertEqual( job.BandwidthOK(), True )
job.engine.bandwidth_manager.ReportDataUsed( [ HYDRUS_NETWORK_CONTEXT ], 50 )
job.engine.bandwidth_manager.SetRules( HYDRUS_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES )
self.assertEqual( job.BandwidthOK(), False )
#
job = self._GetJob( for_login = True )
self.assertEqual( job.BandwidthOK(), True )
job.engine.bandwidth_manager.ReportDataUsed( [ HYDRUS_NETWORK_CONTEXT ], 50 )
job.engine.bandwidth_manager.SetRules( HYDRUS_NETWORK_CONTEXT, RESTRICTIVE_DATA_RULES )
self.assertEqual( job.BandwidthOK(), True )
def test_bandwidth_ok( self ):
# test bandwidth override
PERMISSIVE_DATA_RULES = HydrusNetworking.BandwidthRules()
# test bandwidth ok
# test it not ok
PERMISSIVE_DATA_RULES.AddRule( HC.BANDWIDTH_TYPE_DATA, None, 1048576 )
# repeat for the login one
HYDRUS_NETWORK_CONTEXT = ClientNetworking.NetworkContext( CC.NETWORK_CONTEXT_HYDRUS, MOCK_HYDRUS_SERVICE_KEY )
#
job = self._GetJob()
job.engine.bandwidth_manager.ReportDataUsed( [ HYDRUS_NETWORK_CONTEXT ], 50 )
self.assertEqual( job.BandwidthOK(), True )
job.engine.bandwidth_manager.SetRules( HYDRUS_NETWORK_CONTEXT, PERMISSIVE_DATA_RULES )
self.assertEqual( job.BandwidthOK(), True )
#
job = self._GetJob( for_login = True )
job.engine.bandwidth_manager.ReportDataUsed( [ HYDRUS_NETWORK_CONTEXT ], 50 )
self.assertEqual( job.BandwidthOK(), True )
job.engine.bandwidth_manager.SetRules( HYDRUS_NETWORK_CONTEXT, PERMISSIVE_DATA_RULES )
self.assertEqual( job.BandwidthOK(), True )
def test_bandwidth_reported( self ):
pass
def test_done_ok( self ):
return # need to flush out session, bandwidth, login code
with HTTMock( catch_all ):
with HTTMock( catch_hydrus_ok ):
@@ -424,18 +623,32 @@ class TestNetworkingJobHydrus( unittest.TestCase ):
self.assertEqual( job.GetContent(), GOOD_RESPONSE )
self.assertEqual( job.GetStatus(), ( 'done!', 256, 256, None ) )
def test_error( self ):
job = self._GetJob()
# do a requests job that cancels
# haserror
# geterrorexception
# geterrortext
with HTTMock( catch_all ):
with HTTMock( catch_hydrus_error ):
job = self._GetJob()
job.Start()
self.assertTrue( job.HasError() )
self.assertEqual( job.GetContent(), BAD_RESPONSE )
self.assertEqual( type( job.GetErrorException() ), HydrusExceptions.ServerException )
self.assertEqual( job.GetErrorText(), BAD_RESPONSE )
self.assertEqual( job.GetStatus(), ( '500 - Internal Server Error', 18, 18, None ) )
def test_generate_login_process( self ):

View File

@@ -43,7 +43,7 @@ class MockController( object ):
def ModelIsShutdown( self ):
return self.model_is_shutdown
return self.model_is_shutdown or HG.test_controller.ModelIsShutdown()
class FakeHTTPConnectionManager():
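The one-line change makes the mock respect the global test harness: the mock now reports shutdown if either its own flag or the real test controller says so, so worker loops started via CallToThread exit when the suite tears down. A sketch of the idea; everything other than ModelIsShutdown is an assumed, abbreviated body:

import threading

import HydrusGlobals as HG

class MockController( object ):
    
    def __init__( self ):
        
        self.model_is_shutdown = False
        
    
    def CallToThread( self, callable, *args, **kwargs ):
        
        # assumed: fire-and-forget worker thread, as the engine tests use it
        t = threading.Thread( target = callable, args = args, kwargs = kwargs )
        t.daemon = True
        t.start()
        
    
    def ModelIsShutdown( self ):
        
        # defer to the real test controller so loops like NetworkEngine.MainLoop
        # stop when the whole suite shuts down, not just when this mock is flagged
        return self.model_is_shutdown or HG.test_controller.ModelIsShutdown()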

View File

@@ -15,6 +15,7 @@ import collections
import HydrusConstants as HC
import HydrusData
import HydrusExceptions
import HydrusVideoHandling
import HydrusGlobals as HG
import HydrusNetwork
import HydrusSerialisable
@@ -685,7 +686,13 @@ class TestClientDB( unittest.TestCase ):
test_files.append( ( 'muh_webm.webm', '55b6ce9d067326bf4b2fbe66b8f51f366bc6e5f776ba691b0351364383c43fcb', 84069, HC.VIDEO_WEBM, 640, 360, 4010, 120, None ) )
test_files.append( ( 'muh_jpg.jpg', '5d884d84813beeebd59a35e474fa3e4742d0f2b6679faa7609b245ddbbd05444', 42296, HC.IMAGE_JPEG, 392, 498, None, None, None ) )
test_files.append( ( 'muh_png.png', 'cdc67d3b377e6e1397ffa55edc5b50f6bdf4482c7a6102c6f27fa351429d6f49', 31452, HC.IMAGE_PNG, 191, 196, None, None, None ) )
test_files.append( ( 'muh_apng.png', '9e7b8b5abc7cb11da32db05671ce926a2a2b701415d1b2cb77a28deea51010c3', 616956, HC.IMAGE_APNG, 500, 500, 1880, 47, None ) )
if '3.2.4' in HydrusVideoHandling.GetFFMPEGVersion():
apng_duration = 3133
else:
apng_duration = 1880
test_files.append( ( 'muh_apng.png', '9e7b8b5abc7cb11da32db05671ce926a2a2b701415d1b2cb77a28deea51010c3', 616956, HC.IMAGE_APNG, 500, 500, apng_duration, 47, None ) )
test_files.append( ( 'muh_gif.gif', '00dd9e9611ebc929bfc78fde99a0c92800bbb09b9d18e0946cea94c099b211c2', 15660, HC.IMAGE_GIF, 329, 302, 600, 5, None ) )
for ( filename, hex_hash, size, mime, width, height, duration, num_frames, num_words ) in test_files:
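Different ffmpeg builds report different durations for the apng test file, so the expected value is now chosen at runtime rather than hardcoded. The same guard in isolation, assuming GetFFMPEGVersion returns a plain version string and that durations here are in milliseconds:

import HydrusVideoHandling

if '3.2.4' in HydrusVideoHandling.GetFFMPEGVersion():
    
    apng_duration = 3133 # what this particular build reports for the file
    
else:
    
    apng_duration = 1880 # what other builds report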

View File

@@ -452,10 +452,10 @@ class TestBandwidthTracker( unittest.TestCase ):
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 170 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 1 ), 1 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 85 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 1024 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 2 ), 1 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 6 ), 1024 )
@@ -477,10 +477,10 @@ class TestBandwidthTracker( unittest.TestCase ):
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 42 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 204 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 1 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 85 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 204 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 2 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 6 ), 1024 )
@@ -505,10 +505,10 @@ class TestBandwidthTracker( unittest.TestCase ):
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 0 ), 0 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 53 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ), 217 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 1 ), 2 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 90 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 2 ), 217 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, 2 ), 2 )
self.assertEqual( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 6 ), 1088 )
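For orientation, GetUsage takes a bandwidth type and a time window in seconds, with None meaning all-time, which is why the same report shows up under several windows above. A sketch, assuming a BandwidthTracker can be constructed directly and reports via ReportDataUsed/ReportRequestUsed, mirroring the manager's calls:

import HydrusConstants as HC
import HydrusNetworking

bandwidth_tracker = HydrusNetworking.BandwidthTracker() # assumed constructor

bandwidth_tracker.ReportDataUsed( 1024 ) # assumed per-tracker reporting call
bandwidth_tracker.ReportRequestUsed()

print( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_DATA, 1 ) ) # last second
print( bandwidth_tracker.GetUsage( HC.BANDWIDTH_TYPE_REQUESTS, None ) ) # all-time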