Version 415

closes #642, closes #393
This commit is contained in:
Hydrus Network Developer 2020-10-28 17:20:33 -05:00
parent 7c8714d1fc
commit 9e7325714e
18 changed files with 708 additions and 167 deletions

View File

@ -8,7 +8,18 @@
<div class="content">
<h3>changelog</h3>
<ul>
<li><h3>version 413</h3></li>
<li><h3>version 415</h3></li>
<ul>
<li>in _options->gui pages_ you can now set the main window's page tab alignment to top/left/right/bottom (previously it was just top/left). this property now updates for all pages of pages when the options dialog is OKed; it no longer needs a client restart (issue #642)</li>
<li>the maintenance task that migrates tag display from the current values to the ideal application now works in significantly smaller steps. big lag from adding hundreds of children to one parent (or similar siblings) should now be radically reduced</li>
<li>rejiggered some layout in the new tag display dialogs</li>
<li>added green/red texts to the new tag display dialogs to talk about when sync can work atm and how fast to expect changes to apply</li>
<li>reordered the new tag 'siblings/parents info' right-click menu so the dynamic 'has x siblings/parents' submenus are on the bottom</li>
<li>added basic client api calls for /add_files/..., delete_files, undelete_files, archive_files, and unarchive_files. they take 'hash' and 'hashes' parameters. I am throwing these out at the end of the week, so they don't have documentation or proper unit tests, but feel free to play with them (issue #393)</li>
<li>sped up some UI refresh on content update for very large sessions</li>
<li>sped up right-click tag/file menu any/all select actions on very large file sessions</li>
</ul>
<li><h3>version 414</h3></li>
<ul>
<li>tl;dr: you don't have to do anything. if you haven't heard of a tag parent before, no worries. the database should work better now</li>
<li>.</li>

View File

@ -42,6 +42,18 @@ DISCRIMINANT_ARCHIVE = 3
DISCRIMINANT_DOWNLOADING = 4
DISCRIMINANT_LOCAL_BUT_NOT_IN_TRASH = 5
# Cardinal directions used for main-window page tab alignment.
DIRECTION_UP = 0
DIRECTION_LEFT = 1
DIRECTION_RIGHT = 2
DIRECTION_DOWN = 3

# Maps a DIRECTION_* constant to the human-readable alignment label shown in the options UI.
directions_alignment_string_lookup = {
    DIRECTION_UP : 'top',
    DIRECTION_LEFT : 'left',
    DIRECTION_RIGHT : 'right',
    DIRECTION_DOWN : 'bottom'
}
DUMPER_NOT_DUMPED = 0
DUMPER_DUMPED_OK = 1
DUMPER_RECOVERABLE_ERROR = 2

View File

@ -891,7 +891,7 @@ class DB( HydrusDB.HydrusDB ):
other_implied_by_tag_ids = set( implied_by_tag_ids )
other_implied_by_tag_ids.discard( storage_tag_id )
# get the count of pending that are tagged by storage_tag_id but not tagged by any of the other implications
# get the count of pending that are tagged by storage_tag_id but not tagged by any of the other implied_by
num_pending_to_be_rescinded = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_PENDING, tag_service_id, ( storage_tag_id, ), other_implied_by_tag_ids, hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
@ -964,7 +964,7 @@ class DB( HydrusDB.HydrusDB ):
other_implied_by_tag_ids = set( implied_by_tag_ids )
other_implied_by_tag_ids.discard( storage_tag_id )
# get the count of current that are tagged by storage_tag_id but not tagged by any of the other implications
# get the count of current that are tagged by storage_tag_id but not tagged by any of the other implied_by
num_deletable = self._GetWithAndWithoutTagsForFilesFileCount( HC.CONTENT_STATUS_CURRENT, tag_service_id, ( storage_tag_id, ), other_implied_by_tag_ids, hash_ids, temp_hash_ids_table_name, file_service_ids_to_hash_ids )
@ -2341,36 +2341,38 @@ class DB( HydrusDB.HydrusDB ):
actual_sibling_rows = set( self._c.execute( 'SELECT bad_tag_id, ideal_tag_id FROM {};'.format( cache_actual_tag_siblings_lookup_table_name ) ) )
ideal_sibling_rows = set( self._c.execute( 'SELECT bad_tag_id, ideal_tag_id FROM {};'.format( cache_ideal_tag_siblings_lookup_table_name ) ) )
sibling_tag_ids_to_sync = { ideal_tag_id for ( bad_tag_id, ideal_tag_id ) in ideal_sibling_rows.symmetric_difference( actual_sibling_rows ) }
sibling_rows_to_remove = actual_sibling_rows.difference( ideal_sibling_rows )
sibling_rows_to_add = ideal_sibling_rows.difference( actual_sibling_rows )
( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = GenerateTagParentsLookupCacheTableNames( service_id )
actual_parent_rows = set( self._c.execute( 'SELECT child_tag_id, ancestor_tag_id FROM {};'.format( cache_actual_tag_parents_lookup_table_name ) ) )
ideal_parent_rows = set( self._c.execute( 'SELECT child_tag_id, ancestor_tag_id FROM {};'.format( cache_ideal_tag_parents_lookup_table_name ) ) )
parent_tag_ids_to_sync = { ancestor_tag_id for ( child_tag_id, ancestor_tag_id ) in ideal_parent_rows.symmetric_difference( actual_parent_rows ) }
parent_rows_to_remove = actual_parent_rows.difference( ideal_parent_rows )
parent_rows_to_add = ideal_parent_rows.difference( actual_parent_rows )
num_actual_rows = len( actual_sibling_rows ) + len( actual_parent_rows )
num_ideal_rows = len( ideal_sibling_rows ) + len( ideal_parent_rows )
self._service_ids_to_display_application_status[ service_id ] = ( sibling_tag_ids_to_sync, parent_tag_ids_to_sync, num_actual_rows, num_ideal_rows )
self._service_ids_to_display_application_status[ service_id ] = ( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows )
( sibling_tag_ids_to_sync, parent_tag_ids_to_sync, num_actual_rows, num_ideal_rows ) = self._service_ids_to_display_application_status[ service_id ]
( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows ) = self._service_ids_to_display_application_status[ service_id ]
return ( sibling_tag_ids_to_sync, parent_tag_ids_to_sync, num_actual_rows, num_ideal_rows )
return ( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows )
def _CacheTagDisplayGetApplicationStatusNumbers( self, service_key ):
service_id = self._GetServiceId( service_key )
( sibling_tag_ids_to_sync, parent_tag_ids_to_sync, num_actual_rows, num_ideal_rows ) = self._CacheTagDisplayGetApplicationStatus( service_id )
( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows ) = self._CacheTagDisplayGetApplicationStatus( service_id )
status = {}
status[ 'num_siblings_to_sync' ] = len( sibling_tag_ids_to_sync )
status[ 'num_parents_to_sync' ] = len( parent_tag_ids_to_sync )
status[ 'num_siblings_to_sync' ] = len( sibling_rows_to_add ) + len( sibling_rows_to_remove )
status[ 'num_parents_to_sync' ] = len( parent_rows_to_add ) + len( parent_rows_to_remove )
status[ 'num_actual_rows' ] = num_actual_rows
status[ 'num_ideal_rows' ] = num_ideal_rows
@ -2784,149 +2786,347 @@ class DB( HydrusDB.HydrusDB ):
time_started = HydrusData.GetNowFloat()
tag_ids_altered = set()
tag_service_id = self._GetServiceId( service_key )
( sibling_tag_ids_to_sync, parent_tag_ids_to_sync, num_actual_rows, num_ideal_rows ) = self._CacheTagDisplayGetApplicationStatus( tag_service_id )
all_tag_ids_altered = set()
while len( sibling_tag_ids_to_sync ) + len( parent_tag_ids_to_sync ) > 0 and not HydrusData.TimeHasPassedFloat( time_started + work_time ):
( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows ) = self._CacheTagDisplayGetApplicationStatus( tag_service_id )
while len( sibling_rows_to_add ) + len( sibling_rows_to_remove ) + len( parent_rows_to_add ) + len( parent_rows_to_remove ) > 0 and not HydrusData.TimeHasPassedFloat( time_started + work_time ):
# I had the idea that it might be easier to do siblings first, then parents, but now I have resolved to do this by chain, rather than portion of chain, that may not be true
# ok, so it turns out that migrating entire chains at once was sometimes laggy for certain large parent chains like 'azur lane'
# imagine the instance where we simply want to parent a hundred As to a single B--we obviously don't have to do all that in one go
# therefore, we are now going to break the migration into smaller pieces
if len( sibling_tag_ids_to_sync ) > 0:
( tag_id_to_sync, ) = random.sample( sibling_tag_ids_to_sync, 1 )
else:
( tag_id_to_sync, ) = random.sample( parent_tag_ids_to_sync, 1 )
# I spent a large amount of time trying to figure out a way to _completely_ sync subsets of a chain's tags. this was a gigantic logical pain and complete sync couldn't get neat subsets in certain situations
#█▓█▓███▓█▓███████████████████████████████▓▓▓███▓████████████████
#█▓▓█▓▓▓▓▓███████████████████▓▓▓▓▓▓▓▓▓██████▓▓███▓███████████████
#█▓███▓████████████████▓▒░ ░▒▓██████████████████████
#█▓▓▓▓██████████████▒ ░░░░░░░░░░░░ ▒▓███████████████████
#█▓█▓████████████▓░ ░░░░░░░░░░░░░░░░░ ░░░ ░▓█████████████████
#██████████████▓ ░░▒▒▒▒▒░░ ░░░ ░░ ░ ░░░░ ░████████████████
#█████████████▒ ░░░▒▒▒▒░░░░░░░░ ░ ░░░░ ████▓▓█████████
#▓▓██████████▒ ░░░░▒▓▒░▒▒░░ ░░░ ░ ░ ░░░░░ ███▓▓▓████████
#███▓███████▒ ▒▒▒░░▒▒▒▒░░░ ░ ░░░ ░░░ ███▓▓▓███████
#██████████▓ ▒▒░▒░▒░▒▒▒▒▒░▒░ ░░ ░░░░░ ░ ██▓▓▓███████
#█████▓▓▓█▒ ▒▒░▒░░░░▒▒░░░░░▒░ ░ ░ ▒▒▒ ██▓▓███████
#▓▓▓▓▓▓▓█░ ▒▓░░▒░░▒▒▒▒▓░░░░░▒░░ ░ ░░▒▒▒▒░ ▒██▓█▓▓▓▓▓▓
#▓▓▓▓███▓ ▒▒▒░░░▒▒░░▒░▒▒░░ ░░░░░ ░░░▒░ ▒░▒ ███▓▓▓▓▓▓▓
#███████▓░▒▒▒▒▒▒░░░▒▒▒░░░░ ░ ░░░ ░░░▒▒░ ░██▓████▓▓
#▓▓█▓███▒▒▒▓▒▒▓░░▒░▒▒▒▒░░░░░ ░ ░ ░ ░░░░░░▒░░░ ██▓█████▓
#▒▒▓▓▓▓▓▓▒▓▓░░▓▒ ▒▒░▒▒▒▒▒░░ ░░ ░░░▒░▒▓▓██████
#▒▒▓▓▓▓▓▓▒▒▒░▒▒▓░░░▒▒▒▒▒▒░ ░░░░▒▒░▒▓▓▓▓▓▓▓▓
#▓▒▓▓▓▓▓▓▒▓░ ▒▒▒▓▒▒░░▒▒▒▒▒▒░▒▒▒▒▒▒▒▒▒▒▒░░░░░▒░▒░░░▒░▒▒▒░▓█▓▓▓▓▓▓▓
#▓▒▒▓▓▓▓▓▓▓▓░ ▒▒▒▓▒▓▒▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▒▒▓▓▓▓▓▓▓▓▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓
#▓▓▓▓▓▓▓▓▓▓▓▓▒░▒▒▒░▒▒▓▒▒▒░░▒▓▓▓██▓▓▓░░░░░▒▒▒▓▓▒ ░▒▒▒▒▒▒▓▓▓▓▒▒▒▓▓▓
#█▓█▓▓▓▓▓▓▓▓▓▓▓▒▒▒▒▒▒▓▓▓▒▒▒▓▓▓▓▒▒▒▓█▓ ░▓▓▒▒▓█▓▒░▒▒▒▒▓█▓█▓▓▓▓▓▓▓
#█████▓▒▓▓▓▓▓▒▓▓▒▒▒▒▒▒▒▒▒▒▓▒░▒▓▒░░ ░▒▒ ░░░ ▓█▓▓▓▒▒▒▒█▓▒▒▒▓▓▓▓▓▒
#█████▓▓▓█▓▓▓▓▒▓▓▓▒▒▒▒▒▒░▒▒░░░░ ░░░▒░ ▒ ░ ░ ░▒░░▒▓▓▓▒▒▒▒▒▒▒▒░
#████▓▓▓███▓▓▓▓▓▓▓▒▒▒▒░░ ▒▒░ ░░░░▒▒ ░▒░▒░ ░░ ░▓█▓▓▒▒▒▒░░▒▒▒
#███▓▓▓█████▓▓▓▒▒▓▒▒▒▒▒░░ ░ ░░▒░ ░▒▒▒ ▒░░▒░░ ▒▓▒▒▒░▒▒▒▒▓▓▓▒
#████▓███████▓▒▒▒░▒▒▓▓▓▒▒░░ ░ ▒▒▓██▒▒▓▓░ ░░░░▒▒░▒▒▒▒▒▓▒▓▒▓▒▒
#████████████▒░▒██░▒▓▓▓▓▓▒▒▒░░░░ ▒▓▒▓▓▓▒░▒▒░ ▒▒▒▓▒▒▒▒▓▒▒▓▓▓▒▒▒▒
#████▓▓▓▓▓▓▒▓▒ ▓▓ ▒▓▓▓▓▓▓▒▒▒░░░░░ ░ ░░░▒░░▒▒▒▒▒▒ ▒▓▒▒▒▒▒▒▒▒▒
#▓░░░░░░░▒▒▓▓▓ ▒█▒ ▒▒▓▒▒▒▒▒▒░░░░ ░░░ ░ ░ ▒░▒▒▒▒▒░░▒▓▒▒▒▒▒▒▒▓▒
#▒▒░░░▒▒▒▒▓▒▒▓▒░ ░▒▒▒▒▓▒▒▒▒▒▒▒▒▒▒▒▓▓▓▓▒▒▓▓▓▓▒░▒▒▒▒▒░░▒▓▒▒▒▒▒▒▒▓▒▒
#▓▒▒▒▓▓▓▓▓▒▒▒▒▒▓▓▒▓██▓▓▓▒▒▒▒▒░░▒▒▒▒░░░▒▒░░▒▒▓▒░░▒▓▓▓▒▓▓▒▒▒▒▒▒▒▒▒▒
#▓▒▓▓▓▓▒▒▒▒▒▒▒▒▒▒▓▓▒▓▓▓▓▓▒▒▒▒░░░░░░▒▒▒▒▒▒░░ ░▒░░▒▒▒▒▒▒▒▒▒▒▓▒▓▓▓▓▒
#▓▒▒▒▒▒▓▓▓▒▓▓▓▓▓▓▓▒▒▒▓▓▓▓▓▒▒▒░░░░░░░ ░░░░░▒▒▓▒▒▒▒▒▒▒▒▓▓▓▓▓▓▓▓
#▓▓▓▓▓▓▓▓▒▒▒▒▒▓▓▓▒▓▒▒▓▓▓▓▓▓▓▒▒▒░░░░░░ ░░▒▒▒▒▓▒▒▒▒▒▒▒▓▒▒▓▓▓▓▓▓
#▓▓▓▓▓▓▓▒▒▒▒▓▓▓▓▒▒▒▓▓▓▓▓▓▓▓▓▓▓▓▓▒▒░░▒▒░░░▒▒▓▓▓▒▒█▓▒▓▒▒▒▓▓▒▒▓▓▓▓▓▓
#█▓▓▓▓▒▒▓▓▓▓▓▓▓▓▓▒▓▓▓▓▓▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓▓▓▓▒▒░█▓▓▓▓▓▒▒▒▒▒▒▓▓▓▓▓
#▓▓▓▒▒▒▒▒▓▓▓▓▓▒▓▓▓▒▒▒▒▒ ░▓▓▓▓▓▓▓▓▓██▓█▓▓▓▒▓▒░░░ ▓▓▒▓▒▒▒▒▒▒▒▒▒▓▓▓▒
#
# IN MEMORIAM
# tag_ids_to_trunkward_additional_implication_work_weight
#
# ok, so it is atm too complicated to figure out incremental sub-chain actual->ideal migration, so what we are going to do is just get all possible ideal/actual chains touched by our to-regen master id and clear and update them all
# it is possible that a current ideal chain has stuff in a current actual chain
# _and vice versa(!)_
# so we'll just do an exhaustive search
# I am now moving to table row addition/subtraction. we'll try to move one row at a time and do the smallest amount of work
tag_ids_to_sync = { tag_id_to_sync }
tag_ids_to_lookup = { tag_id_to_sync }
# There are potential multi-row optimisations here to reduce total work amount. Stuff like reordering existing chains, reassigning siblings.
# e.g. if sibling A->B moves to A->C, we now go:
# rescind A->B sibling: remove A->B, add A->A implications
# add A->C sibling: remove A->A, add A->C implications
# However, multi-row tech requires mixing removes and adds, which means we again stray into Hell Logic Zone 3000. We'll put the thought off.
while len( tag_ids_to_lookup ) > 0:
this_round_tag_ids = set( self._CacheTagDisplayGetChainsMembers( ClientTags.TAG_DISPLAY_IDEAL, tag_service_id, tag_ids_to_lookup ) )
this_round_tag_ids.update( self._CacheTagDisplayGetChainsMembers( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_ids_to_lookup ) )
new_tag_ids = this_round_tag_ids.difference( tag_ids_to_sync )
tag_ids_to_sync.update( new_tag_ids )
tag_ids_to_lookup = new_tag_ids
# I can always remove a sibling row from actual and stay valid. this does not invalidate ideals in parents table
# I can always remove a parent row from actual and stay valid
actual_tag_ids_to_implications = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_ids_to_sync )
ideal_tag_ids_to_implications = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_IDEAL, tag_service_id, tag_ids_to_sync )
# I know I can copy a parent to actual if the tags aren't in any pending removes
# I know I can copy a sibling to actual if the tags aren't in any pending removes (I would if there were pending removes indicating merges or something, but there won't be!)
# if I am feeling very clever, I could potentially add tag_ids_to_migrate_implications, which would be an UPDATE
# this would only work for tag_ids that have the same current implied by in actual and ideal (e.g. moving a tag sibling from A->B to B->A)
# may be better to do this in a merged add/deleteimplication function that would be able to well detect this with 'same current implied' of count > 0 for that domain
# we will remove surplus rows from actual and then add needed rows
tag_ids_to_delete_implications = {}
tag_ids_to_add_implications = {}
# There may be multi-row optimisations here to reduce total work amount, I am not sure. Probably for stuff like reordering existing chains. It probably requires mixing removes and adds, which means we stray into hell logic mode, so we'll put the thought off.
for tag_id in tag_ids_to_sync:
actual_implications = actual_tag_ids_to_implications[ tag_id ]
ideal_implications = ideal_tag_ids_to_implications[ tag_id ]
to_delete = actual_implications.difference( ideal_implications )
to_add = ideal_implications.difference( actual_implications )
if len( to_delete ) > 0:
tag_ids_to_delete_implications[ tag_id ] = to_delete
tag_ids_altered.add( tag_id )
tag_ids_altered.update( to_delete )
if len( to_add ) > 0:
tag_ids_to_add_implications[ tag_id ] = to_add
tag_ids_altered.add( tag_id )
tag_ids_altered.update( to_add )
# If we need to remove 1,000 mappings and then add 500 to be correct, we'll be doing 1,500 total no matter the order we do them in. This 1,000/500 is not the sum of all the current rows' individual current estimated work.
# When removing, the sum overestimates, when adding, the sum underestimates. The number of sibling/parent rows to change is obviously also the same.
# ok, now sync our actual tables to ideal
# When you remove a row, the other row estimates may stay as weighty, or they may get less. (e.g. removing sibling A->B makes the parent B->C easier to remove later)
# When you add a row, the other row estimates may stay as weighty, or they may get more. (e.g. adding parent A->B makes adding the sibling b->B more difficult later on)
# The main priority of this function is to reduce each piece of work time.
# When removing, we can break down the large jobs by doing small jobs. So, by doing small jobs first, we reduce max job time.
# However, if we try that strategy when adding, we actually increase max job time, as those delayed big jobs only have the option of staying the same or getting bigger! We get zoom speed and then clunk mode.
# Therefore, when adding, to limit max work time for the whole migration, we want to actually choose the largest jobs first! That work has to be done, and it doesn't get easier!
( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = GenerateTagSiblingsLookupCacheTableNames( tag_service_id )
( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = GenerateTagParentsLookupCacheTableNames( tag_service_id )
# delete all actual from actual
def GetWeightedSiblingRow( sibling_rows, index ):
# when you change the sibling A->B in the _lookup table_:
# you need to add/remove about A number of mappings for B and all it implies. the weight is: A * count( all the B->X implications )
ideal_tag_ids = { i for ( b, i ) in sibling_rows }
ideal_tag_ids_to_implies = self._CacheTagDisplayGetTagsToImplies( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, ideal_tag_ids )
bad_tag_ids = { b for ( b, i ) in sibling_rows }
bad_tag_ids_to_count = self._GetAutocompleteCountsEstimate( ClientTags.TAG_DISPLAY_STORAGE, tag_service_id, self._combined_file_service_id, bad_tag_ids, True, True )
weight_and_rows = [ ( bad_tag_ids_to_count[ b ] * len( ideal_tag_ids_to_implies[ i ] ) + 1, ( b, i ) ) for ( b, i ) in sibling_rows ]
weight_and_rows.sort()
return weight_and_rows[ index ]
self._c.executemany( 'DELETE FROM {} WHERE bad_tag_id = ? OR ideal_tag_id = ?;'.format( cache_actual_tag_siblings_lookup_table_name ), ( ( tag_id, tag_id ) for tag_id in tag_ids_to_sync ) )
def GetWeightedParentRow( parent_rows, index ):
# when you change the parent A->B in the _lookup table_:
# you need to add/remove mappings (of B) for all instances of A and all that implies it. the weight is: sum( all the X->A implications )
child_tag_ids = { c for ( c, a ) in parent_rows }
child_tag_ids_to_implied_by = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, child_tag_ids )
all_child_tags = set( child_tag_ids )
all_child_tags.update( itertools.chain.from_iterable( child_tag_ids_to_implied_by.values() ) )
child_tag_ids_to_count = self._GetAutocompleteCountsEstimate( ClientTags.TAG_DISPLAY_STORAGE, tag_service_id, self._combined_file_service_id, all_child_tags, True, True )
weight_and_rows = [ ( sum( ( child_tag_ids_to_count[ implied_by ] for implied_by in child_tag_ids_to_implied_by[ c ] ) ), ( c, p ) ) for ( c, p ) in parent_rows ]
weight_and_rows.sort()
return weight_and_rows[ index ]
actual_rows_delta = - self._GetRowCount()
# first up, the removees. what is in actual but not ideal
self._c.executemany( 'DELETE FROM {} WHERE child_tag_id = ? OR ancestor_tag_id = ?;'.format( cache_actual_tag_parents_lookup_table_name ), ( ( tag_id, tag_id ) for tag_id in tag_ids_to_sync ) )
# if I switch to storing the actual rows in cache, that's nice mate, I can just sample from there, no need for query every time
actual_rows_delta -= self._GetRowCount()
some_removee_sibling_rows = random.sample( sibling_rows_to_remove, min( len( sibling_rows_to_remove ), 20 ) )
some_removee_parent_rows = random.sample( parent_rows_to_remove, min( len( parent_rows_to_remove ), 20 ) )
# insert all ideal into actual
if len( some_removee_sibling_rows ) + len( some_removee_parent_rows ) > 0:
smallest_sibling_weight = None
smallest_sibling_row = None
smallest_parent_weight = None
smallest_parent_row = None
if len( some_removee_sibling_rows ) > 0:
( smallest_sibling_weight, smallest_sibling_row ) = GetWeightedSiblingRow( some_removee_sibling_rows, 0 )
if len( some_removee_parent_rows ) > 0:
( smallest_parent_weight, smallest_parent_row ) = GetWeightedParentRow( some_removee_parent_rows, 0 )
if smallest_sibling_weight is not None and smallest_parent_weight is not None:
if smallest_sibling_weight < smallest_parent_weight:
smallest_parent_weight = None
smallest_parent_row = None
else:
smallest_sibling_weight = None
smallest_sibling_row = None
if smallest_sibling_row is not None:
chain_tag_ids = self._CacheTagDisplayGetChainsMembers( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, smallest_sibling_row )
previous_chain_tag_ids_to_implied_by = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, chain_tag_ids )
self._c.execute( 'DELETE FROM {} WHERE bad_tag_id = ? AND ideal_tag_id = ?;'.format( cache_actual_tag_siblings_lookup_table_name ), smallest_sibling_row )
after_chain_tag_ids_to_implied_by = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, chain_tag_ids )
sibling_rows_to_remove.discard( smallest_sibling_row )
if smallest_parent_row is not None:
chain_tag_ids = self._CacheTagDisplayGetChainsMembers( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, smallest_parent_row )
previous_chain_tag_ids_to_implied_by = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, chain_tag_ids )
self._c.execute( 'DELETE FROM {} WHERE child_tag_id = ? AND ancestor_tag_id = ?;'.format( cache_actual_tag_parents_lookup_table_name ), smallest_parent_row )
after_chain_tag_ids_to_implied_by = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, chain_tag_ids )
parent_rows_to_remove.discard( smallest_parent_row )
num_actual_rows -= 1
else:
# there is nothing to remove, so we'll now go for what is in ideal but not actual
some_addee_sibling_rows = random.sample( sibling_rows_to_add, min( len( sibling_rows_to_add ), 20 ) )
some_addee_parent_rows = random.sample( parent_rows_to_add, min( len( parent_rows_to_add ), 20 ) )
if len( some_addee_sibling_rows ) + len( some_addee_parent_rows ) > 0:
largest_sibling_weight = None
largest_sibling_row = None
largest_parent_weight = None
largest_parent_row = None
if len( some_addee_sibling_rows ) > 0:
( largest_sibling_weight, largest_sibling_row ) = GetWeightedSiblingRow( some_addee_sibling_rows, -1 )
if len( some_addee_parent_rows ) > 0:
( largest_parent_weight, largest_parent_row ) = GetWeightedParentRow( some_addee_parent_rows, -1 )
if largest_sibling_weight is not None and largest_parent_weight is not None:
if largest_sibling_weight > largest_parent_weight:
largest_parent_weight = None
largest_parent_row = None
else:
largest_sibling_weight = None
largest_sibling_row = None
if largest_sibling_row is not None:
chain_tag_ids = self._CacheTagDisplayGetChainsMembers( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, largest_sibling_row )
previous_chain_tag_ids_to_implied_by = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, chain_tag_ids )
self._c.execute( 'INSERT OR IGNORE INTO {} ( bad_tag_id, ideal_tag_id ) VALUES ( ?, ? );'.format( cache_actual_tag_siblings_lookup_table_name ), largest_sibling_row )
after_chain_tag_ids_to_implied_by = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, chain_tag_ids )
sibling_rows_to_add.discard( largest_sibling_row )
if largest_parent_row is not None:
chain_tag_ids = self._CacheTagDisplayGetChainsMembers( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, largest_parent_row )
previous_chain_tag_ids_to_implied_by = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, chain_tag_ids )
self._c.execute( 'INSERT OR IGNORE INTO {} ( child_tag_id, ancestor_tag_id ) VALUES ( ?, ? );'.format( cache_actual_tag_parents_lookup_table_name ), largest_parent_row )
after_chain_tag_ids_to_implied_by = self._CacheTagDisplayGetTagsToImpliedBy( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, chain_tag_ids )
parent_rows_to_add.discard( largest_parent_row )
num_actual_rows += 1
else:
del self._service_ids_to_display_application_status[ tag_service_id ]
break
self._c.executemany( 'INSERT OR IGNORE INTO {} ( bad_tag_id, ideal_tag_id ) SELECT bad_tag_id, ideal_tag_id FROM {} WHERE bad_tag_id = ? OR ideal_tag_id = ?;'.format( cache_actual_tag_siblings_lookup_table_name, cache_ideal_tag_siblings_lookup_table_name ), ( ( tag_id, tag_id ) for tag_id in tag_ids_to_sync ) )
#
actual_rows_delta += self._GetRowCount()
tag_ids_to_delete_implied_by = collections.defaultdict( set )
tag_ids_to_add_implied_by = collections.defaultdict( set )
self._c.executemany( 'INSERT OR IGNORE INTO {} ( child_tag_id, ancestor_tag_id ) SELECT child_tag_id, ancestor_tag_id FROM {} WHERE child_tag_id = ? OR ancestor_tag_id = ?;'.format( cache_actual_tag_parents_lookup_table_name, cache_ideal_tag_parents_lookup_table_name ), ( ( tag_id, tag_id ) for tag_id in tag_ids_to_sync ) )
for tag_id in chain_tag_ids:
previous_implied_by = previous_chain_tag_ids_to_implied_by[ tag_id ]
after_implied_by = after_chain_tag_ids_to_implied_by[ tag_id ]
to_delete = previous_implied_by.difference( after_implied_by )
to_add = after_implied_by.difference( previous_implied_by )
if len( to_delete ) > 0:
tag_ids_to_delete_implied_by[ tag_id ] = to_delete
all_tag_ids_altered.add( tag_id )
all_tag_ids_altered.update( to_delete )
if len( to_add ) > 0:
tag_ids_to_add_implied_by[ tag_id ] = to_add
all_tag_ids_altered.add( tag_id )
all_tag_ids_altered.update( to_add )
actual_rows_delta += self._GetRowCount()
# now do the implications
# clear and copy siblings and parents and me from actual to ideal for those tags mate
# if I am feeling very clever, I could potentially add tag_ids_to_migrate_implied_by, which would be an UPDATE
# this would only work for tag_ids that have the same current implied by in actual and ideal (e.g. moving a tag sibling from A->B to B->A)
# may be better to do this in a merged add/deleteimplication function that would be able to well detect this with 'same current implied' of count > 0 for that domain
file_service_ids = self._GetServiceIds( HC.AUTOCOMPLETE_CACHE_SPECIFIC_FILE_SERVICES )
for file_service_id in file_service_ids:
for ( tag_id, implication_tag_ids ) in tag_ids_to_delete_implications.items():
for ( tag_id, implication_tag_ids ) in tag_ids_to_delete_implied_by.items():
self._CacheSpecificDisplayMappingsDeleteImplications( file_service_id, tag_service_id, implication_tag_ids, tag_id )
for ( tag_id, implication_tag_ids ) in tag_ids_to_add_implications.items():
for ( tag_id, implication_tag_ids ) in tag_ids_to_add_implied_by.items():
self._CacheSpecificDisplayMappingsAddImplications( file_service_id, tag_service_id, implication_tag_ids, tag_id )
for ( tag_id, implication_tag_ids ) in tag_ids_to_delete_implications.items():
for ( tag_id, implication_tag_ids ) in tag_ids_to_delete_implied_by.items():
self._CacheCombinedFilesDisplayMappingsDeleteImplications( tag_service_id, implication_tag_ids, tag_id )
for ( tag_id, implication_tag_ids ) in tag_ids_to_add_implications.items():
for ( tag_id, implication_tag_ids ) in tag_ids_to_add_implied_by.items():
self._CacheCombinedFilesDisplayMappingsAddImplications( tag_service_id, implication_tag_ids, tag_id )
sibling_tag_ids_to_sync = sibling_tag_ids_to_sync.difference( tag_ids_to_sync )
parent_tag_ids_to_sync = parent_tag_ids_to_sync.difference( tag_ids_to_sync )
num_actual_rows += actual_rows_delta
self._service_ids_to_display_application_status[ tag_service_id ] = ( sibling_tag_ids_to_sync, parent_tag_ids_to_sync, num_actual_rows, num_ideal_rows )
self._service_ids_to_display_application_status[ tag_service_id ] = ( sibling_rows_to_add, sibling_rows_to_remove, parent_rows_to_add, parent_rows_to_remove, num_actual_rows, num_ideal_rows )
if len( tag_ids_altered ) > 0:
if len( all_tag_ids_altered ) > 0:
self._regen_tags_managers_tag_ids.update( tag_ids_altered )
self._regen_tags_managers_tag_ids.update( all_tag_ids_altered )
self.pub_after_job( 'notify_new_tag_display_sync_status', service_key )
still_needs_work = len( sibling_tag_ids_to_sync ) + len( parent_tag_ids_to_sync ) > 0
still_needs_work = len( sibling_rows_to_add ) + len( sibling_rows_to_remove ) + len( parent_rows_to_add ) + len( parent_rows_to_remove ) > 0
return still_needs_work
@ -6852,6 +7052,54 @@ class DB( HydrusDB.HydrusDB ):
return ids_to_count
def _GetAutocompleteCountsEstimate( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ], include_current_tags: bool, include_pending_tags: bool ):
ids_to_count = collections.Counter()
if not include_current_tags and not include_pending_tags:
return ids_to_count
ids_to_count_statuses = self._GetAutocompleteCountsEstimateStatuses( tag_display_type, tag_service_id, file_service_id, tag_ids )
for ( tag_id, ( current_count, pending_count ) ) in ids_to_count_statuses.items():
count = 0
if include_current_tags:
count += current_count
if include_current_tags:
count += pending_count
ids_to_count[ tag_id ] = count
return ids_to_count
def _GetAutocompleteCountsEstimateStatuses( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ] ):
include_current_tags = True
include_pending_tags = True
ids_to_count_full = self._GetAutocompleteCounts( tag_display_type, tag_service_id, file_service_id, tag_ids, include_current_tags, include_pending_tags )
ids_to_count_statuses = { tag_id : ( 0, 0 ) for tag_id in tag_ids }
for ( tag_id, ( current_min, current_max, pending_min, pending_max ) ) in ids_to_count_full.items():
ids_to_count_statuses[ tag_id ] = ( current_min, pending_min )
return ids_to_count_statuses
def _GetAutocompleteCurrentPendingPositiveCountsAndWeights( self, tag_display_type, file_service_id, tag_service_id, tag_ids ):
include_current = True
@ -18447,8 +18695,6 @@ class DB( HydrusDB.HydrusDB ):
message = 'The new siblings/parents system calculates tags in pieces in the background. If you are on an SSD, this should be no big deal, but if you are on an HDD, it could make normal use laggy.'
message += os.linesep * 2
message += 'LATE EDIT: It seems that the PTR gives a one-time heavy period of lag. If you select SSD, be warned the client may lock up for a few minutes, one time, soon after boot. Just let it run, or select HDD here to have it do that work when you aren\'t looking.'
message += os.linesep * 2
message += 'Please let the client know if you are on an HDD to set the calculation to only happen in idle time.'
from hydrus.client.gui import ClientGUIDialogsQuick
@ -18570,6 +18816,28 @@ class DB( HydrusDB.HydrusDB ):
if version == 414:
try:
new_options = self._GetJSONDump( HydrusSerialisable.SERIALISABLE_TYPE_CLIENT_OPTIONS )
notebook_tabs_on_left = new_options.GetBoolean( 'notebook_tabs_on_left' )
if notebook_tabs_on_left:
new_options.SetInteger( 'notebook_tab_alignment', CC.DIRECTION_LEFT )
self._SetJSONDump( new_options )
except Exception as e:
# no worries
pass
self._controller.frame_splash_status.SetTitleText( 'updated db to v{}'.format( HydrusData.ToHumanInt( version + 1 ) ) )
self._c.execute( 'UPDATE version SET version = ?;', ( version + 1, ) )

View File

@ -50,7 +50,11 @@ class HydrusServiceClientAPI( HydrusClientService ):
root.putChild( b'add_files', add_files )
add_files.putChild( b'add_file', ClientLocalServerResources.HydrusResourceClientAPIRestrictedAddFile( self._service, self._client_requests_domain ) )
add_files.putChild( b'add_file', ClientLocalServerResources.HydrusResourceClientAPIRestrictedAddFilesAddFile( self._service, self._client_requests_domain ) )
add_files.putChild( b'delete_files', ClientLocalServerResources.HydrusResourceClientAPIRestrictedAddFilesDeleteFiles( self._service, self._client_requests_domain ) )
add_files.putChild( b'undelete_files', ClientLocalServerResources.HydrusResourceClientAPIRestrictedAddFilesUndeleteFiles( self._service, self._client_requests_domain ) )
add_files.putChild( b'archive_files', ClientLocalServerResources.HydrusResourceClientAPIRestrictedAddFilesArchiveFiles( self._service, self._client_requests_domain ) )
add_files.putChild( b'unarchive_files', ClientLocalServerResources.HydrusResourceClientAPIRestrictedAddFilesUnarchiveFiles( self._service, self._client_requests_domain ) )
add_tags = NoResource()

View File

@ -766,13 +766,15 @@ class HydrusResourceClientAPIRestrictedAccountVerify( HydrusResourceClientAPIRes
return response_context
class HydrusResourceClientAPIRestrictedAddFile( HydrusResourceClientAPIRestricted ):
class HydrusResourceClientAPIRestrictedAddFiles( HydrusResourceClientAPIRestricted ):
def _CheckAPIPermissions( self, request ):
request.client_api_permissions.CheckPermission( ClientAPI.CLIENT_API_PERMISSION_ADD_FILES )
class HydrusResourceClientAPIRestrictedAddFilesAddFile( HydrusResourceClientAPIRestrictedAddFiles ):
def _threadDoPOSTJob( self, request ):
if not hasattr( request, 'temp_file_info' ):
@ -821,6 +823,146 @@ class HydrusResourceClientAPIRestrictedAddFile( HydrusResourceClientAPIRestricte
return response_context
class HydrusResourceClientAPIRestrictedAddFilesArchiveFiles( HydrusResourceClientAPIRestrictedAddFiles ):
    
    def _threadDoPOSTJob( self, request ):
        """Archive the files named by the 'hash' and/or 'hashes' POST parameters.
        
        Both parameters are optional and may be combined; duplicates are collapsed
        into one set. Always responds 200.
        """
        
        hashes = set()
        
        if 'hash' in request.parsed_request_args:
            
            hash = request.parsed_request_args.GetValue( 'hash', bytes )
            
            hashes.add( hash )
            
        
        if 'hashes' in request.parsed_request_args:
            
            more_hashes = request.parsed_request_args.GetValue( 'hashes', list )
            
            hashes.update( more_hashes )
            
        
        # guard on the hashes, not the service_keys dict--that dict always has one entry,
        # so the old check could never skip an empty (no-op) synchronous db write
        if len( hashes ) > 0:
            
            content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ARCHIVE, hashes )
            
            service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ content_update ] }
            
            HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
            
        
        response_context = HydrusServerResources.ResponseContext( 200 )
        
        return response_context
        
    
class HydrusResourceClientAPIRestrictedAddFilesDeleteFiles( HydrusResourceClientAPIRestrictedAddFiles ):
    
    def _threadDoPOSTJob( self, request ):
        """Delete (send to trash) the files named by the 'hash' and/or 'hashes' POST parameters.
        
        Both parameters are optional and may be combined; duplicates are collapsed
        into one set. Always responds 200.
        """
        
        hashes = set()
        
        if 'hash' in request.parsed_request_args:
            
            hash = request.parsed_request_args.GetValue( 'hash', bytes )
            
            hashes.add( hash )
            
        
        if 'hashes' in request.parsed_request_args:
            
            more_hashes = request.parsed_request_args.GetValue( 'hashes', list )
            
            hashes.update( more_hashes )
            
        
        # guard on the hashes, not the service_keys dict--that dict always has one entry,
        # so the old check could never skip an empty (no-op) synchronous db write
        if len( hashes ) > 0:
            
            # expand this to take file service and reason
            
            content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, hashes )
            
            service_keys_to_content_updates = { CC.LOCAL_FILE_SERVICE_KEY : [ content_update ] }
            
            HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
            
        
        response_context = HydrusServerResources.ResponseContext( 200 )
        
        return response_context
        
    
class HydrusResourceClientAPIRestrictedAddFilesUnarchiveFiles( HydrusResourceClientAPIRestrictedAddFiles ):
    
    def _threadDoPOSTJob( self, request ):
        """Return the files named by the 'hash' and/or 'hashes' POST parameters to the inbox.
        
        Both parameters are optional and may be combined; duplicates are collapsed
        into one set. Always responds 200.
        """
        
        hashes = set()
        
        if 'hash' in request.parsed_request_args:
            
            hash = request.parsed_request_args.GetValue( 'hash', bytes )
            
            hashes.add( hash )
            
        
        if 'hashes' in request.parsed_request_args:
            
            more_hashes = request.parsed_request_args.GetValue( 'hashes', list )
            
            hashes.update( more_hashes )
            
        
        # guard on the hashes, not the service_keys dict--that dict always has one entry,
        # so the old check could never skip an empty (no-op) synchronous db write
        if len( hashes ) > 0:
            
            content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_INBOX, hashes )
            
            service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ content_update ] }
            
            HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
            
        
        response_context = HydrusServerResources.ResponseContext( 200 )
        
        return response_context
        
    
class HydrusResourceClientAPIRestrictedAddFilesUndeleteFiles( HydrusResourceClientAPIRestrictedAddFiles ):
    
    def _threadDoPOSTJob( self, request ):
        """Undelete (pull out of trash) the files named by the 'hash' and/or 'hashes' POST parameters.
        
        Both parameters are optional and may be combined; duplicates are collapsed
        into one set. Always responds 200.
        """
        
        hashes = set()
        
        if 'hash' in request.parsed_request_args:
            
            hash = request.parsed_request_args.GetValue( 'hash', bytes )
            
            hashes.add( hash )
            
        
        if 'hashes' in request.parsed_request_args:
            
            more_hashes = request.parsed_request_args.GetValue( 'hashes', list )
            
            hashes.update( more_hashes )
            
        
        # guard on the hashes, not the service_keys dict--that dict always has one entry,
        # so the old check could never skip an empty (no-op) synchronous db write
        if len( hashes ) > 0:
            
            # expand this to take file service, if and when we move to multiple trashes or whatever
            
            content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_UNDELETE, hashes )
            
            service_keys_to_content_updates = { CC.TRASH_SERVICE_KEY : [ content_update ] }
            
            HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
            
        
        response_context = HydrusServerResources.ResponseContext( 200 )
        
        return response_context
        
    
class HydrusResourceClientAPIRestrictedAddTags( HydrusResourceClientAPIRestricted ):
def _CheckAPIPermissions( self, request ):

View File

@ -180,8 +180,6 @@ class ClientOptions( HydrusSerialisable.SerialisableBase ):
self._dictionary[ 'booleans' ][ 'pause_all_watcher_checkers' ] = False
self._dictionary[ 'booleans' ][ 'pause_all_gallery_searches' ] = False
self._dictionary[ 'booleans' ][ 'notebook_tabs_on_left' ] = False
self._dictionary[ 'booleans' ][ 'popup_message_force_min_width' ] = False
self._dictionary[ 'booleans' ][ 'always_show_iso_time' ] = False
@ -282,6 +280,8 @@ class ClientOptions( HydrusSerialisable.SerialisableBase ):
self._dictionary[ 'integers' ] = {}
self._dictionary[ 'integers' ][ 'notebook_tab_alignment' ] = CC.DIRECTION_UP
self._dictionary[ 'integers' ][ 'video_buffer_size_mb' ] = 96
self._dictionary[ 'integers' ][ 'related_tags_search_1_duration_ms' ] = 250

View File

@ -106,7 +106,7 @@ class RatingLikeCanvas( ClientGUIRatings.RatingLike ):
hashes = content_update.GetHashes()
if len( self._hashes.intersection( hashes ) ) > 0:
if HydrusData.SetsIntersect( self._hashes, hashes ):
self._dirty = True
@ -218,7 +218,7 @@ class RatingNumericalCanvas( ClientGUIRatings.RatingNumerical ):
hashes = content_update.GetHashes()
if len( self._hashes.intersection( hashes ) ) > 0:
if HydrusData.SetsIntersect( self._hashes, hashes ):
self._dirty = True

View File

@ -673,6 +673,7 @@ class CheckboxManagerOptions( CheckboxManager ):
new_options.InvertBoolean( self._boolean_name )
HG.client_controller.pub( 'checkbox_manager_inverted' )
HG.client_controller.pub( 'notify_new_menu_option' )
class AlphaColourControl( QW.QWidget ):

View File

@ -936,6 +936,13 @@ class Page( QW.QSplitter ):
self._management_panel.REPEATINGPageUpdate()
# maps our internal tab-alignment direction constants to Qt's tab-bar positions
directions_for_notebook_tabs = {
    CC.DIRECTION_UP : QW.QTabWidget.North,
    CC.DIRECTION_LEFT : QW.QTabWidget.West,
    CC.DIRECTION_RIGHT : QW.QTabWidget.East,
    CC.DIRECTION_DOWN : QW.QTabWidget.South
}
class PagesNotebook( QP.TabWidgetWithDnD ):
def __init__( self, parent, controller, name ):
@ -944,15 +951,9 @@ class PagesNotebook( QP.TabWidgetWithDnD ):
self._parent_notebook = parent
# this is disabled for now because it seems borked in Qt
if controller.new_options.GetBoolean( 'notebook_tabs_on_left' ):
self.setTabPosition( QW.QTabWidget.West )
else:
self.setTabPosition( QW.QTabWidget.North )
direction = controller.new_options.GetInteger( 'notebook_tab_alignment' )
self.setTabPosition( directions_for_notebook_tabs[ direction ] )
self._controller = controller
@ -968,7 +969,7 @@ class PagesNotebook( QP.TabWidgetWithDnD ):
self._controller.sub( self, 'RefreshPageName', 'refresh_page_name' )
self._controller.sub( self, 'NotifyPageUnclosed', 'notify_page_unclosed' )
self._controller.sub( self, '_UpdatePageTabEliding', 'notify_new_options' )
self._controller.sub( self, '_UpdateOptions', 'notify_new_options' )
self.currentChanged.connect( self.pageJustChanged )
self.pageDragAndDropped.connect( self._RefreshPageNamesAfterDnD )
@ -981,7 +982,7 @@ class PagesNotebook( QP.TabWidgetWithDnD ):
self._previous_page_index = -1
self._UpdatePageTabEliding()
self._UpdateOptions()
def _RefreshPageNamesAfterDnD( self, page_widget, source_widget ):
@ -999,7 +1000,7 @@ class PagesNotebook( QP.TabWidgetWithDnD ):
def _UpdatePageTabEliding( self ):
def _UpdateOptions( self ):
if HG.client_controller.new_options.GetBoolean( 'elide_page_tab_names' ):
@ -1010,6 +1011,10 @@ class PagesNotebook( QP.TabWidgetWithDnD ):
self.tabBar().setElideMode( QC.Qt.ElideNone )
direction = HG.client_controller.new_options.GetInteger( 'notebook_tab_alignment' )
self.setTabPosition( directions_for_notebook_tabs[ direction ] )
def _UpdatePreviousPageIndex( self ):

View File

@ -1838,7 +1838,7 @@ class EditMediaViewOptionsPanel( ClientGUIScrolledPanels.EditPanel ):
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
if len( set( possible_show_actions ).intersection( { CC.MEDIA_VIEWER_ACTION_SHOW_WITH_NATIVE, CC.MEDIA_VIEWER_ACTION_SHOW_WITH_MPV } ) ) == 0:
if set( possible_show_actions ).isdisjoint( { CC.MEDIA_VIEWER_ACTION_SHOW_WITH_NATIVE, CC.MEDIA_VIEWER_ACTION_SHOW_WITH_MPV } ):
self._media_scale_up.hide()
self._media_scale_down.hide()

View File

@ -1427,7 +1427,12 @@ class ManageOptionsPanel( ClientGUIScrolledPanels.ManagePanel ):
self._default_new_page_goes.addItem( CC.new_page_goes_string_lookup[ value], value )
self._notebook_tabs_on_left = QW.QCheckBox( self._pages_panel )
self._notebook_tab_alignment = ClientGUICommon.BetterChoice( self._pages_panel )
for value in [ CC.DIRECTION_UP, CC.DIRECTION_LEFT, CC.DIRECTION_RIGHT, CC.DIRECTION_DOWN ]:
self._notebook_tab_alignment.addItem( CC.directions_alignment_string_lookup[ value ], value )
self._total_pages_warning = QP.MakeQSpinBox( self._pages_panel, min=5, max=500 )
@ -1497,7 +1502,7 @@ class ManageOptionsPanel( ClientGUIScrolledPanels.ManagePanel ):
self._default_new_page_goes.SetValue( self._new_options.GetInteger( 'default_new_page_goes' ) )
self._notebook_tabs_on_left.setChecked( self._new_options.GetBoolean( 'notebook_tabs_on_left' ) )
self._notebook_tab_alignment.SetValue( self._new_options.GetInteger( 'notebook_tab_alignment' ) )
self._max_page_name_chars.setValue( self._new_options.GetInteger( 'max_page_name_chars' ) )
@ -1537,7 +1542,7 @@ class ManageOptionsPanel( ClientGUIScrolledPanels.ManagePanel ):
rows = []
rows.append( ( 'By default, put new page tabs on (requires restart): ', self._default_new_page_goes ) )
rows.append( ( 'Line notebook tabs down the left: ', self._notebook_tabs_on_left ) )
rows.append( ( 'Notebook tab alignment: ', self._notebook_tab_alignment ) )
rows.append( ( 'Reverse page tab shift-drag behaviour: ', self._reverse_page_shift_drag_behaviour ) )
rows.append( ( 'Warn at this many total pages: ', self._total_pages_warning ) )
@ -1598,7 +1603,7 @@ class ManageOptionsPanel( ClientGUIScrolledPanels.ManagePanel ):
HC.options[ 'default_gui_session' ] = self._default_gui_session.currentText()
self._new_options.SetBoolean( 'notebook_tabs_on_left', self._notebook_tabs_on_left.isChecked() )
self._new_options.SetInteger( 'notebook_tab_alignment', self._notebook_tab_alignment.GetValue() )
self._new_options.SetInteger( 'last_session_save_period_minutes', self._last_session_save_period_minutes.value() )
@ -2151,7 +2156,7 @@ class ManageOptionsPanel( ClientGUIScrolledPanels.ManagePanel ):
pretty_preview_show_action += ', start with embed button'
no_show = len( set( ( media_show_action, preview_show_action ) ).intersection( { CC.MEDIA_VIEWER_ACTION_SHOW_WITH_NATIVE, CC.MEDIA_VIEWER_ACTION_SHOW_WITH_MPV } ) ) == 0
no_show = { media_show_action, preview_show_action }.isdisjoint( { CC.MEDIA_VIEWER_ACTION_SHOW_WITH_NATIVE, CC.MEDIA_VIEWER_ACTION_SHOW_WITH_MPV } )
if no_show:

View File

@ -415,7 +415,7 @@ class ManageAccountTypesPanel( ClientGUIScrolledPanels.ManagePanel ):
keys = set( self._deletee_account_type_keys_to_new_account_type_keys.keys() )
values = set( self._deletee_account_type_keys_to_new_account_type_keys.values() )
return len( keys.intersection( values ) ) > 0
return HydrusData.SetsIntersect( keys, values )
while key_transfer_not_collapsed():

View File

@ -250,6 +250,41 @@ class EditTagDisplayApplication( ClientGUIScrolledPanels.EditPanel ):
vbox = QP.VBoxLayout()
message = 'While a tag service normally applies its own siblings and parents to itself, it does not have to. If you want a different service\'s siblings (e.g. putting the PTR\'s siblings on your "my tags"), or multiple services\', then set it here. You can also apply no siblings or parents at all.'
message += os.linesep * 2
message += 'If there are conflicts, the services at the top of the list have precedence. Parents are collapsed by sibling rules before they are applied.'
self._message = ClientGUICommon.BetterStaticText( self, label = message )
self._message.setWordWrap( True )
self._sync_status = ClientGUICommon.BetterStaticText( self )
self._sync_status.setWordWrap( True )
if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
self._sync_status.setText( 'Siblings and parents are set to sync all the time. Changes will start applying as soon as you ok this dialog.' )
self._sync_status.setObjectName( 'HydrusValid' )
else:
if HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
self._sync_status.setText( 'Siblings and parents are only set to sync during idle time. Changes here will only start to apply when you are not using the client.' )
else:
self._sync_status.setText( 'Siblings and parents are not set to sync in the background at any time. If there is sync work to do, you will have to force it to run using the \'review\' window under _tags->siblings and parents sync_.' )
self._sync_status.setObjectName( 'HydrusWarning' )
self._sync_status.style().polish( self._sync_status )
QP.AddToLayout( vbox, self._message, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._sync_status, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._tag_services_notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
self.widget().setLayout( vbox )
@ -279,15 +314,6 @@ class EditTagDisplayApplication( ClientGUIScrolledPanels.EditPanel ):
self._master_service_key = master_service_key
message = 'While a tag service normally applies its own siblings and parents to itself, it does not have to. If you want a different service\'s siblings (e.g. putting the PTR\'s siblings on your "my tags"), or multiple services\', then set it here. You can also apply no siblings or parents at all.'
message += os.linesep * 2
message += 'If there are conflicts, the services at the top of the list have precedence. Parents are collapsed by sibling rules before they are applied.'
message += os.linesep * 2
message += 'New sibling and parent rules will be calculated in the background, so changes will not be instant. You can review the current \'sync\' under _services->display sync_.'
self._message = ClientGUICommon.BetterStaticText( self, label = message )
self._message.setWordWrap( True )
#
self._sibling_box = ClientGUICommon.StaticBox( self, 'sibling application' )
@ -322,7 +348,6 @@ class EditTagDisplayApplication( ClientGUIScrolledPanels.EditPanel ):
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._message, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._sibling_box, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( vbox, self._parent_box, CC.FLAGS_EXPAND_BOTH_WAYS )
@ -2563,7 +2588,7 @@ class ManageTagsPanel( ClientGUIScrolledPanels.ManagePanel ):
for m in self._media:
if len( m.GetHashes().intersection( content_update.GetHashes() ) ) > 0:
if HydrusData.SetsIntersect( m.GetHashes(), content_update.GetHashes() ):
m.GetMediaResult().ProcessContentUpdate( service_key, content_update )
@ -4431,14 +4456,48 @@ class ReviewTagDisplayMaintenancePanel( ClientGUIScrolledPanels.ReviewPanel ):
message = 'Figuring out how tags should appear according to sibling and parent application rules takes time. When you set new rules, the changes do not happen immediately--the client catches up in the background. You can review current progress and force faster sync here.'
st = ClientGUICommon.BetterStaticText( self, label = message )
st.setWordWrap( True )
self._message = ClientGUICommon.BetterStaticText( self, label = message )
self._message.setWordWrap( True )
QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
self._sync_status = ClientGUICommon.BetterStaticText( self )
self._sync_status.setWordWrap( True )
self._UpdateStatusText()
QP.AddToLayout( vbox, self._message, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._sync_status, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._tag_services_notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
self.widget().setLayout( vbox )
HG.client_controller.sub( self, '_UpdateStatusText', 'notify_new_menu_option' )
def _UpdateStatusText( self ):
    """Refresh the sync-status label and its green/red styling from the current
    tag display maintenance options."""
    
    new_options = HG.client_controller.new_options
    
    # pick the message and the QSS object name first, then apply both in one place
    if new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
        
        status_text = 'Siblings and parents are set to sync all the time. If there is work to do here, it should be cleared out in real time as you watch.'
        object_name = 'HydrusValid'
        
    else:
        
        if new_options.GetBoolean( 'tag_display_maintenance_during_idle' ):
            
            status_text = 'Siblings and parents are only set to sync during idle time. If there is work to do here, it should be cleared out when you are not using the client.'
            
        else:
            
            status_text = 'Siblings and parents are not set to sync in the background at any time. If there is work to do here, you can force it now by clicking \'work now!\' button.'
            
        
        object_name = 'HydrusWarning'
        
    
    self._sync_status.setText( status_text )
    
    self._sync_status.setObjectName( object_name )
    
    # re-polish so the new object name's stylesheet colour takes effect immediately
    self._sync_status.style().polish( self._sync_status )
    
class _Panel( QW.QWidget ):
@ -4591,7 +4650,14 @@ class ReviewTagDisplayMaintenancePanel( ClientGUIScrolledPanels.ReviewPanel ):
else:
self._go_faster_button.setText( 'work hard now!' )
if not HG.client_controller.new_options.GetBoolean( 'tag_display_maintenance_during_active' ):
self._go_faster_button.setText( 'work now!' )
else:
self._go_faster_button.setText( 'work hard now!' )

View File

@ -2276,8 +2276,27 @@ class ListBoxTags( ListBox ):
siblings_and_parents_menu = QW.QMenu( menu )
if can_launch_sibling_and_parent_dialogs:
if len( selected_actual_tags ) == 1:
( tag, ) = selected_actual_tags
text = tag
else:
text = 'selection'
ClientGUIMenus.AppendMenuItem( siblings_and_parents_menu, 'add siblings to ' + text, 'Add a sibling to this tag.', self._ProcessMenuTagEvent, 'sibling' )
ClientGUIMenus.AppendMenuItem( siblings_and_parents_menu, 'add parents to ' + text, 'Add a parent to this tag.', self._ProcessMenuTagEvent, 'parent' )
if can_show_siblings_and_parents:
ClientGUIMenus.AppendSeparator( siblings_and_parents_menu )
siblings_menu = QW.QMenu( siblings_and_parents_menu )
parents_menu = QW.QMenu( siblings_and_parents_menu )
@ -2376,25 +2395,6 @@ class ListBoxTags( ListBox ):
async_job.start()
if can_launch_sibling_and_parent_dialogs:
ClientGUIMenus.AppendSeparator( siblings_and_parents_menu )
if len( selected_actual_tags ) == 1:
( tag, ) = selected_actual_tags
text = tag
else:
text = 'selection'
ClientGUIMenus.AppendMenuItem( siblings_and_parents_menu, 'add siblings to ' + text, 'Add a sibling to this tag.', self._ProcessMenuTagEvent, 'sibling' )
ClientGUIMenus.AppendMenuItem( siblings_and_parents_menu, 'add parents to ' + text, 'Add a parent to this tag.', self._ProcessMenuTagEvent, 'parent' )
ClientGUIMenus.AppendMenu( menu, siblings_and_parents_menu, 'siblings and parents' )
@ -2611,7 +2611,7 @@ class ListBoxTags( ListBox ):
predicates_selection_string = 'selected'
some_selected_in_current = len( predicates.intersection( current_predicates ) ) > 0
some_selected_in_current = HydrusData.SetsIntersect( predicates, current_predicates )
if some_selected_in_current:
@ -2625,7 +2625,7 @@ class ListBoxTags( ListBox ):
ClientGUIMenus.AppendMenuItem( menu, 'require {} for current search'.format( predicates_selection_string ), 'Add the selected predicates from the current search.', self._ProcessMenuPredicateEvent, 'add_predicates' )
some_selected_are_excluded_explicitly = len( inverse_predicates.intersection( current_predicates ) ) > 0
some_selected_are_excluded_explicitly = HydrusData.SetsIntersect( inverse_predicates, current_predicates )
if some_selected_are_excluded_explicitly:

View File

@ -1134,11 +1134,13 @@ class MediaList( object ):
if and_or_or == 'AND':
return sum( ( 1 for m in flat_media if len( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ).intersection( select_tags ) ) == len( select_tags ) ) )
select_tags = set( select_tags )
return sum( ( 1 for m in flat_media if select_tags.issubset( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ) ) ) )
elif and_or_or == 'OR':
return sum( ( 1 for m in flat_media if len( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ).intersection( select_tags ) ) > 0 ) )
return sum( ( 1 for m in flat_media if HydrusData.SetsIntersect( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ), select_tags ) ) )
@ -1213,11 +1215,13 @@ class MediaList( object ):
if and_or_or == 'AND':
filtered_media = [ m for m in flat_media if len( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ).intersection( select_tags ) ) == len( select_tags ) ]
select_tags = set( select_tags )
filtered_media = [ m for m in flat_media if select_tags.issubset( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ) ) ]
elif and_or_or == 'OR':
filtered_media = [ m for m in flat_media if len( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ).intersection( select_tags ) ) > 0 ]
filtered_media = [ m for m in flat_media if HydrusData.SetsIntersect( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ), select_tags ) ]
@ -1277,11 +1281,13 @@ class MediaList( object ):
if and_or_or == 'AND':
filtered_media = { m for m in self._sorted_media if len( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ).intersection( select_tags ) ) == len( select_tags ) }
select_tags = set( select_tags )
filtered_media = { m for m in self._sorted_media if select_tags.issubset( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ) ) }
elif and_or_or == 'OR':
filtered_media = { m for m in self._sorted_media if len( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ).intersection( select_tags ) ) > 0 }
filtered_media = { m for m in self._sorted_media if HydrusData.SetsIntersect( m.GetTagsManager().GetCurrentAndPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_ACTUAL ), select_tags ) }

View File

@ -70,7 +70,7 @@ options = {}
# Misc
NETWORK_VERSION = 18
SOFTWARE_VERSION = 414
SOFTWARE_VERSION = 415
CLIENT_API_VERSION = 14
SERVER_THUMBNAIL_DIMENSIONS = ( 200, 200 )

View File

@ -1214,6 +1214,27 @@ def RestartProcess():
os.execv( exe, args )
def SetsIntersect( a, b ):
    """Return True if iterables *a* and *b* share at least one element.
    
    Equivalent to ``not set( a ).isdisjoint( b )`` but avoids building a second
    set: ``set.isdisjoint`` accepts any iterable for its argument and
    short-circuits on the first common element at C speed, so only *a* needs
    coercion.
    """
    
    if not isinstance( a, set ):
        
        a = set( a )
        
    
    return not a.isdisjoint( b )
    
def SmoothOutMappingIterator( xs, n ):
# de-spikifies mappings, so if there is ( tag, 20k files ), it breaks that up into manageable chunks

View File

@ -1814,7 +1814,7 @@ class Metadata( HydrusSerialisable.SerialisableBase ):
for ( update_index, ( update_hashes, begin, end ) ) in data:
if len( hashes.intersection( update_hashes ) ) > 0:
if HydrusData.SetsIntersect( hashes, update_hashes ):
return end