New Repair Code
This is the first version of significant changes to the database boot-repair code and the start of module-level self-repair responsibility. The database should now be able to boot with a missing client.caches.db and repair and repopulate almost everything.
This commit is contained in:
parent
4e416873fb
commit
b6876258a8
|
@ -1137,32 +1137,6 @@ class DB( HydrusDB.HydrusDB ):
|
|||
self._CacheCombinedFilesDisplayMappingsRegeneratePending( tag_service_id, status_hook = status_hook )
|
||||
|
||||
|
||||
def _CacheLocalHashIdsGenerate( self ):
|
||||
|
||||
self.modules_hashes_local_cache.ClearCache()
|
||||
|
||||
self._controller.frame_splash_status.SetSubtext( 'reading local file data' )
|
||||
|
||||
local_hash_ids = self.modules_files_storage.GetCurrentHashIdsList( self.modules_services.combined_local_file_service_id )
|
||||
|
||||
BLOCK_SIZE = 10000
|
||||
num_to_do = len( local_hash_ids )
|
||||
|
||||
for ( i, block_of_hash_ids ) in enumerate( HydrusData.SplitListIntoChunks( local_hash_ids, BLOCK_SIZE ) ):
|
||||
|
||||
self._controller.frame_splash_status.SetSubtext( 'caching local file data {}'.format( HydrusData.ConvertValueRangeToPrettyString( i * BLOCK_SIZE, num_to_do ) ) )
|
||||
|
||||
self.modules_hashes_local_cache.AddHashIdsToCache( block_of_hash_ids )
|
||||
|
||||
|
||||
table_names = self.modules_hashes_local_cache.GetExpectedTableNames()
|
||||
|
||||
for table_name in table_names:
|
||||
|
||||
self.modules_db_maintenance.AnalyzeTable( table_name )
|
||||
|
||||
|
||||
|
||||
def _CacheLocalTagIdsGenerate( self ):
|
||||
|
||||
# update this to be a thing for the self.modules_tags_local_cache, maybe give it the ac cach as a param, or just boot that lad with it
|
||||
|
@ -1189,7 +1163,7 @@ class DB( HydrusDB.HydrusDB ):
|
|||
self.modules_tags_local_cache.AddTagIdsToCache( block_of_tag_ids )
|
||||
|
||||
|
||||
table_names = self.modules_tags_local_cache.GetExpectedTableNames()
|
||||
table_names = self.modules_tags_local_cache.GetExpectedInitialTableNames()
|
||||
|
||||
for table_name in table_names:
|
||||
|
||||
|
@ -11668,20 +11642,22 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
#
|
||||
|
||||
self.modules_files_storage = ClientDBFilesStorage.ClientDBFilesStorage( self._c, self.modules_services, self.modules_texts )
|
||||
|
||||
self._modules.append( self.modules_files_storage )
|
||||
|
||||
#
|
||||
|
||||
self.modules_tags_local_cache = ClientDBDefinitionsCache.ClientDBCacheLocalTags( self._c, self.modules_tags )
|
||||
|
||||
self._modules.append( self.modules_tags_local_cache )
|
||||
|
||||
self.modules_hashes_local_cache = ClientDBDefinitionsCache.ClientDBCacheLocalHashes( self._c, self.modules_hashes )
|
||||
self.modules_hashes_local_cache = ClientDBDefinitionsCache.ClientDBCacheLocalHashes( self._c, self.modules_hashes, self.modules_services, self.modules_files_storage )
|
||||
|
||||
self._modules.append( self.modules_hashes_local_cache )
|
||||
|
||||
#
|
||||
|
||||
self.modules_files_storage = ClientDBFilesStorage.ClientDBFilesStorage( self._c, self.modules_services, self.modules_texts )
|
||||
|
||||
self._modules.append( self.modules_files_storage )
|
||||
|
||||
self.modules_mappings_storage = ClientDBMappingsStorage.ClientDBMappingsStorage( self._c, self.modules_services )
|
||||
|
||||
self._modules.append( self.modules_mappings_storage )
|
||||
|
@ -13244,7 +13220,7 @@ class DB( HydrusDB.HydrusDB ):
|
|||
job_key.SetVariable( 'popup_text_1', message )
|
||||
self._controller.frame_splash_status.SetSubtext( message )
|
||||
|
||||
self._CacheLocalHashIdsGenerate()
|
||||
self.modules_hashes_local_cache.Repopulate()
|
||||
|
||||
finally:
|
||||
|
||||
|
@ -13682,6 +13658,8 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
self._CacheSpecificMappingsGenerate( file_service_id, tag_service_id )
|
||||
|
||||
self._cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
for tag_service_id in tag_service_ids:
|
||||
|
||||
|
@ -13704,6 +13682,8 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
self._CacheCombinedFilesMappingsGenerate( tag_service_id )
|
||||
|
||||
self._cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
if tag_service_key is None:
|
||||
|
||||
|
@ -13889,221 +13869,23 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
|
||||
|
||||
def _RepairDB( self ):
|
||||
def _RepairDB( self, version ):
|
||||
|
||||
# migrate most of this gubbins to the new modules system, and HydrusDB tbh!
|
||||
|
||||
self._controller.frame_splash_status.SetText( 'checking database' )
|
||||
|
||||
( version, ) = self._Execute( 'SELECT version FROM version;' ).fetchone()
|
||||
|
||||
HydrusDB.HydrusDB._RepairDB( self )
|
||||
HydrusDB.HydrusDB._RepairDB( self, version )
|
||||
|
||||
self._weakref_media_result_cache = ClientMediaResultCache.MediaResultCache()
|
||||
|
||||
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
|
||||
file_service_ids = self.modules_services.GetServiceIds( HC.AUTOCOMPLETE_CACHE_SPECIFIC_FILE_SERVICES )
|
||||
|
||||
# master
|
||||
|
||||
existing_master_tables = self._STS( self._Execute( 'SELECT name FROM external_master.sqlite_master WHERE type = ?;', ( 'table', ) ) )
|
||||
|
||||
main_master_tables = set()
|
||||
|
||||
main_master_tables.add( 'hashes' )
|
||||
main_master_tables.add( 'namespaces' )
|
||||
main_master_tables.add( 'subtags' )
|
||||
main_master_tables.add( 'tags' )
|
||||
main_master_tables.add( 'texts' )
|
||||
|
||||
if version >= 396:
|
||||
|
||||
main_master_tables.add( 'labels' )
|
||||
main_master_tables.add( 'notes' )
|
||||
|
||||
|
||||
missing_main_tables = main_master_tables.difference( existing_master_tables )
|
||||
|
||||
if len( missing_main_tables ) > 0:
|
||||
|
||||
message = 'On boot, some required master tables were missing. This could be due to the entire \'master\' database file being missing or due to some other problem. Critical data is missing, so the client cannot boot! The exact missing tables were:'
|
||||
message += os.linesep * 2
|
||||
message += os.linesep.join( missing_main_tables )
|
||||
message += os.linesep * 2
|
||||
message += 'The boot will fail once you click ok. If you do not know what happened and how to fix this, please take a screenshot and contact hydrus dev.'
|
||||
|
||||
self._controller.SafeShowCriticalMessage( 'Error', message )
|
||||
|
||||
raise Exception( 'Master database was invalid!' )
|
||||
|
||||
|
||||
if 'local_hashes' not in existing_master_tables:
|
||||
|
||||
message = 'On boot, the \'local_hashes\' tables was missing.'
|
||||
message += os.linesep * 2
|
||||
message += 'If you wish, click ok on this message and the client will recreate it--empty, without data--which should at least let the client boot. The client can repopulate the table in through the file maintenance jobs, the \'regenerate non-standard hashes\' job. But if you want to solve this problem otherwise, kill the hydrus process now.'
|
||||
message += os.linesep * 2
|
||||
message += 'If you do not already know what caused this, it was likely a hard drive fault--either due to a recent abrupt power cut or actual hardware failure. Check \'help my db is broke.txt\' in the install_dir/db directory as soon as you can.'
|
||||
|
||||
BlockingSafeShowMessage( message )
|
||||
|
||||
self._Execute( 'CREATE TABLE external_master.local_hashes ( hash_id INTEGER PRIMARY KEY, md5 BLOB_BYTES, sha1 BLOB_BYTES, sha512 BLOB_BYTES );' )
|
||||
|
||||
|
||||
self._CreateIndex( 'external_master.local_hashes', [ 'md5' ] )
|
||||
self._CreateIndex( 'external_master.local_hashes', [ 'sha1' ] )
|
||||
self._CreateIndex( 'external_master.local_hashes', [ 'sha512' ] )
|
||||
|
||||
# mappings
|
||||
|
||||
existing_mapping_tables = self._STS( self._Execute( 'SELECT name FROM external_mappings.sqlite_master WHERE type = ?;', ( 'table', ) ) )
|
||||
|
||||
main_mappings_tables = set()
|
||||
|
||||
for service_id in tag_service_ids:
|
||||
|
||||
main_mappings_tables.update( ( name.split( '.' )[1] for name in ClientDBMappingsStorage.GenerateMappingsTableNames( service_id ) ) )
|
||||
|
||||
|
||||
missing_main_tables = sorted( main_mappings_tables.difference( existing_mapping_tables ) )
|
||||
|
||||
if len( missing_main_tables ) > 0:
|
||||
|
||||
message = 'On boot, some important mappings tables were missing! This could be due to the entire \'mappings\' database file being missing or some other problem. The tags in these tables are lost. The exact missing tables were:'
|
||||
message += os.linesep * 2
|
||||
message += os.linesep.join( missing_main_tables )
|
||||
message += os.linesep * 2
|
||||
message += 'If you wish, click ok on this message and the client will recreate these tables--empty, without data--which should at least let the client boot. If the affected tag service(s) are tag repositories, you will want to reset the processing cache so the client can repopulate the tables from your cached update files. But if you want to solve this problem otherwise, kill the hydrus process now.'
|
||||
message += os.linesep * 2
|
||||
message += 'If you do not already know what caused this, it was likely a hard drive fault--either due to a recent abrupt power cut or actual hardware failure. Check \'help my db is broke.txt\' in the install_dir/db directory as soon as you can.'
|
||||
|
||||
BlockingSafeShowMessage( message )
|
||||
|
||||
for service_id in tag_service_ids:
|
||||
|
||||
self.modules_mappings_storage.GenerateMappingsTables( service_id )
|
||||
|
||||
|
||||
|
||||
# caches
|
||||
|
||||
existing_cache_tables = self._STS( self._Execute( 'SELECT name FROM external_caches.sqlite_master WHERE type = ?;', ( 'table', ) ) )
|
||||
|
||||
main_cache_tables = set()
|
||||
|
||||
main_cache_tables.add( 'shape_vptree' )
|
||||
main_cache_tables.add( 'shape_maintenance_branch_regen' )
|
||||
|
||||
missing_main_tables = sorted( main_cache_tables.difference( existing_cache_tables ) )
|
||||
|
||||
if len( missing_main_tables ) > 0:
|
||||
|
||||
message = 'On boot, some important caches tables were missing! This could be due to the entire \'caches\' database file being missing or some other problem. Data related to duplicate file search may have been lost. The exact missing tables were:'
|
||||
message += os.linesep * 2
|
||||
message += os.linesep.join( missing_main_tables )
|
||||
message += os.linesep * 2
|
||||
message += 'If you wish, click ok on this message and the client will recreate these tables--empty, without data--which should at least let the client boot. But if you want to solve this problem otherwise, kill the hydrus process now.'
|
||||
message += os.linesep * 2
|
||||
message += 'If you do not already know what caused this, it was likely a hard drive fault--either due to a recent abrupt power cut or actual hardware failure. Check \'help my db is broke.txt\' in the install_dir/db directory as soon as you can.'
|
||||
|
||||
BlockingSafeShowMessage( message )
|
||||
|
||||
|
||||
if version >= 414:
|
||||
|
||||
# tag display caches
|
||||
|
||||
tag_display_cache_service_ids = list( self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ) )
|
||||
|
||||
missing_tag_sibling_cache_tables = []
|
||||
|
||||
for tag_service_id in tag_display_cache_service_ids:
|
||||
|
||||
( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = ClientDBTagSiblings.GenerateTagSiblingsLookupCacheTableNames( tag_service_id )
|
||||
|
||||
actual_missing = cache_actual_tag_siblings_lookup_table_name.split( '.' )[1] not in existing_cache_tables
|
||||
|
||||
ideal_missing = cache_ideal_tag_siblings_lookup_table_name.split( '.' )[1] not in existing_cache_tables
|
||||
|
||||
if actual_missing:
|
||||
|
||||
missing_tag_sibling_cache_tables.append( cache_actual_tag_siblings_lookup_table_name )
|
||||
|
||||
|
||||
if ideal_missing:
|
||||
|
||||
missing_tag_sibling_cache_tables.append( cache_ideal_tag_siblings_lookup_table_name )
|
||||
|
||||
|
||||
if actual_missing or ideal_missing:
|
||||
|
||||
self.modules_tag_siblings.Generate( tag_service_id )
|
||||
|
||||
|
||||
self._CreateIndex( cache_actual_tag_siblings_lookup_table_name, [ 'ideal_tag_id' ] )
|
||||
self._CreateIndex( cache_ideal_tag_siblings_lookup_table_name, [ 'ideal_tag_id' ] )
|
||||
|
||||
|
||||
if len( missing_tag_sibling_cache_tables ) > 0:
|
||||
|
||||
missing_tag_sibling_cache_tables.sort()
|
||||
|
||||
message = 'On boot, some important tag sibling cache tables were missing! This could be due to the entire \'caches\' database file being missing or some other problem. All of this data can be regenerated. The exact missing tables were:'
|
||||
message += os.linesep * 2
|
||||
message += os.linesep.join( missing_tag_sibling_cache_tables )
|
||||
message += os.linesep * 2
|
||||
message += 'If you wish, click ok on this message and the client will recreate and repopulate these tables with the correct data. But if you want to solve this problem otherwise, kill the hydrus process now.'
|
||||
message += os.linesep * 2
|
||||
message += 'If you do not already know what caused this, it was likely a hard drive fault--either due to a recent abrupt power cut or actual hardware failure. Check \'help my db is broke.txt\' in the install_dir/db directory as soon as you can.'
|
||||
|
||||
BlockingSafeShowMessage( message )
|
||||
|
||||
|
||||
missing_tag_parent_cache_tables = []
|
||||
|
||||
for tag_service_id in tag_display_cache_service_ids:
|
||||
|
||||
( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = ClientDBTagParents.GenerateTagParentsLookupCacheTableNames( tag_service_id )
|
||||
|
||||
actual_missing = cache_actual_tag_parents_lookup_table_name.split( '.' )[1] not in existing_cache_tables
|
||||
|
||||
ideal_missing = cache_ideal_tag_parents_lookup_table_name.split( '.' )[1] not in existing_cache_tables
|
||||
|
||||
if actual_missing:
|
||||
|
||||
missing_tag_parent_cache_tables.append( cache_actual_tag_parents_lookup_table_name )
|
||||
|
||||
|
||||
if ideal_missing:
|
||||
|
||||
missing_tag_parent_cache_tables.append( cache_ideal_tag_parents_lookup_table_name )
|
||||
|
||||
|
||||
if actual_missing or ideal_missing:
|
||||
|
||||
self.modules_tag_parents.Generate( tag_service_id )
|
||||
|
||||
|
||||
self._CreateIndex( cache_actual_tag_parents_lookup_table_name, [ 'ancestor_tag_id' ] )
|
||||
self._CreateIndex( cache_ideal_tag_parents_lookup_table_name, [ 'ancestor_tag_id' ] )
|
||||
|
||||
|
||||
if len( missing_tag_parent_cache_tables ) > 0:
|
||||
|
||||
missing_tag_parent_cache_tables.sort()
|
||||
|
||||
message = 'On boot, some important tag parent cache tables were missing! This could be due to the entire \'caches\' database file being missing or some other problem. All of this data can be regenerated. The exact missing tables were:'
|
||||
message += os.linesep * 2
|
||||
message += os.linesep.join( missing_tag_parent_cache_tables )
|
||||
message += os.linesep * 2
|
||||
message += 'If you wish, click ok on this message and the client will recreate and repopulate these tables with the correct data. But if you want to solve this problem otherwise, kill the hydrus process now.'
|
||||
message += os.linesep * 2
|
||||
message += 'If you do not already know what caused this, it was likely a hard drive fault--either due to a recent abrupt power cut or actual hardware failure. Check \'help my db is broke.txt\' in the install_dir/db directory as soon as you can.'
|
||||
|
||||
BlockingSafeShowMessage( message )
|
||||
|
||||
|
||||
|
||||
mappings_cache_tables = set()
|
||||
|
||||
for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
|
||||
|
@ -14140,12 +13922,10 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
BlockingSafeShowMessage( message )
|
||||
|
||||
# quick hack
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_caches.local_tags_cache ( tag_id INTEGER PRIMARY KEY, tag TEXT UNIQUE );' )
|
||||
|
||||
self._RegenerateTagMappingsCache()
|
||||
|
||||
|
||||
# delete this when mappings caches are moved to modules that will auto-heal this!
|
||||
for ( file_service_id, tag_service_id ) in itertools.product( file_service_ids, tag_service_ids ):
|
||||
|
||||
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
|
||||
|
@ -14162,6 +13942,8 @@ class DB( HydrusDB.HydrusDB ):
|
|||
self._CreateIndex( cache_display_pending_mappings_table_name, [ 'tag_id', 'hash_id' ], unique = True )
|
||||
|
||||
|
||||
self._cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
if version >= 424:
|
||||
|
||||
|
@ -14226,6 +14008,8 @@ class DB( HydrusDB.HydrusDB ):
|
|||
self._CacheTagsGenerate( file_service_id, tag_service_id )
|
||||
self._CacheTagsPopulate( file_service_id, tag_service_id )
|
||||
|
||||
self._cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -16391,7 +16175,7 @@ class DB( HydrusDB.HydrusDB ):
|
|||
|
||||
#
|
||||
|
||||
self._CacheLocalHashIdsGenerate()
|
||||
self.modules_hashes_local_cache.Repopulate()
|
||||
|
||||
|
||||
if version == 447:
|
||||
|
|
|
@ -1,29 +1,35 @@
|
|||
import sqlite3
|
||||
import typing
|
||||
|
||||
from hydrus.core import HydrusDB
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDBBase
|
||||
from hydrus.core import HydrusExceptions
|
||||
from hydrus.core import HydrusGlobals as HG
|
||||
from hydrus.core import HydrusTags
|
||||
|
||||
from hydrus.client.db import ClientDBFilesStorage
|
||||
from hydrus.client.db import ClientDBMaster
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.db import ClientDBServices
|
||||
|
||||
class ClientDBCacheLocalHashes( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBCacheLocalHashes( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor, modules_hashes: ClientDBMaster.ClientDBMasterHashes ):
|
||||
def __init__( self, cursor: sqlite3.Cursor, modules_hashes: ClientDBMaster.ClientDBMasterHashes, modules_services: ClientDBServices.ClientDBMasterServices, modules_files_storage: ClientDBFilesStorage.ClientDBFilesStorage ):
|
||||
|
||||
self.modules_hashes = modules_hashes
|
||||
self.modules_services = modules_services
|
||||
self.modules_files_storage = modules_files_storage
|
||||
|
||||
self._hash_ids_to_hashes_cache = {}
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client hashes local cache', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client hashes local cache', cursor )
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
|
||||
return index_generation_tuples
|
||||
return {
|
||||
'external_caches.local_hashes_cache' : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, hash BLOB_BYTES UNIQUE );', 429 )
|
||||
}
|
||||
|
||||
|
||||
def _PopulateHashIdsToHashesCache( self, hash_ids ):
|
||||
|
@ -71,9 +77,9 @@ class ClientDBCacheLocalHashes( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
def _RepairRepopulateTables( self, table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_caches.local_hashes_cache ( hash_id INTEGER PRIMARY KEY, hash BLOB_BYTES UNIQUE );' )
|
||||
self.Repopulate()
|
||||
|
||||
|
||||
def AddHashIdsToCache( self, hash_ids ):
|
||||
|
@ -93,15 +99,6 @@ class ClientDBCacheLocalHashes( HydrusDBModule.HydrusDBModule ):
|
|||
self._ExecuteMany( 'DELETE FROM local_hashes_cache WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'external_caches.local_hashes_cache'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetHash( self, hash_id ) -> str:
|
||||
|
||||
self._PopulateHashIdsToHashesCache( ( hash_id, ) )
|
||||
|
@ -196,7 +193,26 @@ class ClientDBCacheLocalHashes( HydrusDBModule.HydrusDBModule ):
|
|||
return result is not None
|
||||
|
||||
|
||||
class ClientDBCacheLocalTags( HydrusDBModule.HydrusDBModule ):
|
||||
def Repopulate( self ):
|
||||
|
||||
self.ClearCache()
|
||||
|
||||
HG.client_controller.frame_splash_status.SetSubtext( 'reading local file data' )
|
||||
|
||||
local_hash_ids = self.modules_files_storage.GetCurrentHashIdsList( self.modules_services.combined_local_file_service_id )
|
||||
|
||||
BLOCK_SIZE = 10000
|
||||
num_to_do = len( local_hash_ids )
|
||||
|
||||
for ( i, block_of_hash_ids ) in enumerate( HydrusData.SplitListIntoChunks( local_hash_ids, BLOCK_SIZE ) ):
|
||||
|
||||
HG.client_controller.frame_splash_status.SetSubtext( 'caching local file data {}'.format( HydrusData.ConvertValueRangeToPrettyString( i * BLOCK_SIZE, num_to_do ) ) )
|
||||
|
||||
self.AddHashIdsToCache( block_of_hash_ids )
|
||||
|
||||
|
||||
|
||||
class ClientDBCacheLocalTags( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor, modules_tags: ClientDBMaster.ClientDBMasterTags ):
|
||||
|
||||
|
@ -204,14 +220,14 @@ class ClientDBCacheLocalTags( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
self._tag_ids_to_tags_cache = {}
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client tags local cache', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client tags local cache', cursor )
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
|
||||
return index_generation_tuples
|
||||
return {
|
||||
'external_caches.local_tags_cache' : ( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER PRIMARY KEY, tag TEXT UNIQUE );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def _PopulateTagIdsToTagsCache( self, tag_ids ):
|
||||
|
@ -259,9 +275,9 @@ class ClientDBCacheLocalTags( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
def _RepairRepopulateTables( self, table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_caches.local_tags_cache ( tag_id INTEGER PRIMARY KEY, tag TEXT UNIQUE );' )
|
||||
ClientDBModule.BlockingSafeShowMessage( 'Unfortunately, the local tag cache cannot repopulate itself yet during repair. Once you boot, please run _database->regenerate->local tag cache_.' )
|
||||
|
||||
|
||||
def AddTagIdsToCache( self, tag_ids ):
|
||||
|
@ -281,15 +297,6 @@ class ClientDBCacheLocalTags( HydrusDBModule.HydrusDBModule ):
|
|||
self._ExecuteMany( 'DELETE FROM local_tags_cache WHERE tag_id = ?;', ( ( tag_id, ) for tag_id in tag_ids ) )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'external_caches.local_tags_cache'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
|
||||
|
||||
# we actually provide a backup, which we may want to automate later in mappings caches etc...
|
||||
|
|
|
@ -3,17 +3,17 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusGlobals as HG
|
||||
|
||||
from hydrus.client import ClientFiles
|
||||
from hydrus.client.db import ClientDBDefinitionsCache
|
||||
from hydrus.client.db import ClientDBFilesMetadataBasic
|
||||
from hydrus.client.db import ClientDBMaster
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.db import ClientDBSimilarFiles
|
||||
from hydrus.client.media import ClientMediaResultCache
|
||||
|
||||
class ClientDBFilesMaintenance( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBFilesMaintenance( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
|
@ -25,7 +25,7 @@ class ClientDBFilesMaintenance( HydrusDBModule.HydrusDBModule ):
|
|||
weakref_media_result_cache: ClientMediaResultCache.MediaResultCache
|
||||
):
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client files maintenance', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client files maintenance', cursor )
|
||||
|
||||
self.modules_hashes = modules_hashes
|
||||
self.modules_hashes_local_cache = modules_hashes_local_cache
|
||||
|
@ -34,11 +34,11 @@ class ClientDBFilesMaintenance( HydrusDBModule.HydrusDBModule ):
|
|||
self._weakref_media_result_cache = weakref_media_result_cache
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
|
||||
return index_generation_tuples
|
||||
return {
|
||||
'external_caches.file_maintenance_jobs' : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, job_type INTEGER, time_can_start INTEGER, PRIMARY KEY ( hash_id, job_type ) );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def AddJobs( self, hash_ids, job_type, time_can_start = 0 ):
|
||||
|
@ -72,11 +72,6 @@ class ClientDBFilesMaintenance( HydrusDBModule.HydrusDBModule ):
|
|||
self._Execute( 'DELETE FROM file_maintenance_jobs WHERE job_type = ?;', ( job_type, ) )
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_caches.file_maintenance_jobs ( hash_id INTEGER, job_type INTEGER, time_can_start INTEGER, PRIMARY KEY ( hash_id, job_type ) );' )
|
||||
|
||||
|
||||
def ClearJobs( self, cleared_job_tuples ):
|
||||
|
||||
new_file_info = set()
|
||||
|
@ -194,15 +189,6 @@ class ClientDBFilesMaintenance( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'external_caches.file_maintenance_jobs'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetJob( self, job_types = None ):
|
||||
|
||||
if job_types is None:
|
||||
|
|
|
@ -4,32 +4,43 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusDB
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusExceptions
|
||||
|
||||
class ClientDBFilesMetadataBasic( HydrusDBModule.HydrusDBModule ):
|
||||
from hydrus.client.db import ClientDBModule
|
||||
|
||||
class ClientDBFilesMetadataBasic( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor ):
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client files metadata', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client files metadata', cursor )
|
||||
|
||||
self.inbox_hash_ids = set()
|
||||
|
||||
self._InitCaches()
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialIndexGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_tuples.append( ( 'files_info', [ 'size' ], False ) )
|
||||
index_generation_tuples.append( ( 'files_info', [ 'mime' ], False ) )
|
||||
index_generation_tuples.append( ( 'files_info', [ 'width' ], False ) )
|
||||
index_generation_tuples.append( ( 'files_info', [ 'height' ], False ) )
|
||||
index_generation_tuples.append( ( 'files_info', [ 'duration' ], False ) )
|
||||
index_generation_tuples.append( ( 'files_info', [ 'num_frames' ], False ) )
|
||||
index_generation_dict[ 'main.files_info' ] = [
|
||||
( [ 'size' ], False, 400 ),
|
||||
( [ 'mime' ], False, 400 ),
|
||||
( [ 'width' ], False, 400 ),
|
||||
( [ 'height' ], False, 400 ),
|
||||
( [ 'duration' ], False, 400 ),
|
||||
( [ 'num_frames' ], False, 400 )
|
||||
]
|
||||
|
||||
return index_generation_tuples
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
return {
|
||||
'main.file_inbox' : ( 'CREATE TABLE {} ( hash_id INTEGER PRIMARY KEY );', 400 ),
|
||||
'main.files_info' : ( 'CREATE TABLE {} ( hash_id INTEGER PRIMARY KEY, size INTEGER, mime INTEGER, width INTEGER, height INTEGER, duration INTEGER, num_frames INTEGER, has_audio INTEGER_BOOLEAN, num_words INTEGER );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def _InitCaches( self ):
|
||||
|
@ -74,22 +85,6 @@ class ClientDBFilesMetadataBasic( HydrusDBModule.HydrusDBModule ):
|
|||
return archiveable_hash_ids
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE file_inbox ( hash_id INTEGER PRIMARY KEY );' )
|
||||
self._Execute( 'CREATE TABLE files_info ( hash_id INTEGER PRIMARY KEY, size INTEGER, mime INTEGER, width INTEGER, height INTEGER, duration INTEGER, num_frames INTEGER, has_audio INTEGER_BOOLEAN, num_words INTEGER );' )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'file_inbox',
|
||||
'files_info'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetMime( self, hash_id: int ) -> int:
|
||||
|
||||
result = self._Execute( 'SELECT mime FROM files_info WHERE hash_id = ?;', ( hash_id, ) ).fetchone()
|
||||
|
|
|
@ -5,24 +5,24 @@ import typing
|
|||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDB
|
||||
from hydrus.core import HydrusDBModule
|
||||
|
||||
from hydrus.client import ClientConstants as CC
|
||||
from hydrus.client import ClientSearch
|
||||
from hydrus.client.db import ClientDBMaster
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.db import ClientDBServices
|
||||
|
||||
def GenerateFilesTableNames( service_id: int ) -> typing.Tuple[ str, str, str, str ]:
|
||||
|
||||
suffix = str( service_id )
|
||||
|
||||
current_files_table_name = 'current_files_{}'.format( suffix )
|
||||
current_files_table_name = 'main.current_files_{}'.format( suffix )
|
||||
|
||||
deleted_files_table_name = 'deleted_files_{}'.format( suffix )
|
||||
deleted_files_table_name = 'main.deleted_files_{}'.format( suffix )
|
||||
|
||||
pending_files_table_name = 'pending_files_{}'.format( suffix )
|
||||
pending_files_table_name = 'main.pending_files_{}'.format( suffix )
|
||||
|
||||
petitioned_files_table_name = 'petitioned_files_{}'.format( suffix )
|
||||
petitioned_files_table_name = 'main.petitioned_files_{}'.format( suffix )
|
||||
|
||||
return ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name )
|
||||
|
||||
|
@ -85,23 +85,62 @@ class DBLocationSearchContext( object ):
|
|||
|
||||
|
||||
|
||||
class ClientDBFilesStorage( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBFilesStorage( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices, modules_texts: ClientDBMaster.ClientDBMasterTexts ):
|
||||
|
||||
self.modules_services = modules_services
|
||||
self.modules_texts = modules_texts
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client files storage', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client files storage', cursor )
|
||||
|
||||
self.temp_file_storage_table_name = None
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
return {
|
||||
'main.local_file_deletion_reasons' : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, reason_id INTEGER );', 400 )
|
||||
}
|
||||
|
||||
return index_generation_tuples
|
||||
|
||||
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
|
||||
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_dict[ current_files_table_name ] = [
|
||||
( [ 'timestamp' ], False, 447 )
|
||||
]
|
||||
|
||||
index_generation_dict[ deleted_files_table_name ] = [
|
||||
( [ 'timestamp' ], False, 447 ),
|
||||
( [ 'original_timestamp' ], False, 447 )
|
||||
]
|
||||
|
||||
index_generation_dict[ petitioned_files_table_name ] = [
|
||||
( [ 'reason_id' ], False, 447 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
|
||||
|
||||
return {
|
||||
current_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, timestamp INTEGER );', 447 ),
|
||||
deleted_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, timestamp INTEGER, original_timestamp INTEGER );', 447 ),
|
||||
pending_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );', 447 ),
|
||||
petitioned_files_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, reason_id INTEGER );', 447 )
|
||||
}
|
||||
|
||||
|
||||
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
|
||||
|
||||
return self.modules_services.GetServiceIds( HC.SPECIFIC_FILE_SERVICES )
|
||||
|
||||
|
||||
def AddFiles( self, service_id, insert_rows ):
|
||||
|
@ -194,11 +233,6 @@ class ClientDBFilesStorage( HydrusDBModule.HydrusDBModule ):
|
|||
return service_ids_to_nums_cleared
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE local_file_deletion_reasons ( hash_id INTEGER PRIMARY KEY, reason_id INTEGER );' )
|
||||
|
||||
|
||||
def DeletePending( self, service_id: int ):
|
||||
|
||||
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
|
||||
|
@ -309,19 +343,19 @@ class ClientDBFilesStorage( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
def GenerateFilesTables( self, service_id: int ):
|
||||
|
||||
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name ) = GenerateFilesTableNames( service_id )
|
||||
table_generation_dict = self._GetServiceTableGenerationDict( service_id )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, timestamp INTEGER );'.format( current_files_table_name ) )
|
||||
self._CreateIndex( current_files_table_name, [ 'timestamp' ] )
|
||||
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
|
||||
|
||||
self._Execute( create_query_without_name.format( table_name ) )
|
||||
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, timestamp INTEGER, original_timestamp INTEGER );'.format( deleted_files_table_name ) )
|
||||
self._CreateIndex( deleted_files_table_name, [ 'timestamp' ] )
|
||||
self._CreateIndex( deleted_files_table_name, [ 'original_timestamp' ] )
|
||||
index_generation_dict = self._GetServiceIndexGenerationDict( service_id )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );'.format( pending_files_table_name ) )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, reason_id INTEGER );'.format( petitioned_files_table_name ) )
|
||||
self._CreateIndex( petitioned_files_table_name, [ 'reason_id' ] )
|
||||
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
|
||||
|
||||
self._CreateIndex( table_name, columns, unique = unique )
|
||||
|
||||
|
||||
|
||||
def GetAPendingHashId( self, service_id ):
|
||||
|
@ -552,15 +586,6 @@ class ClientDBFilesStorage( HydrusDBModule.HydrusDBModule ):
|
|||
return db_location_search_context
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'local_file_deletion_reasons',
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetHashIdsToCurrentServiceIds( self, temp_hash_ids_table_name ):
|
||||
|
||||
hash_ids_to_current_file_service_ids = collections.defaultdict( list )
|
||||
|
|
|
@ -6,26 +6,28 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusGlobals as HG
|
||||
|
||||
from hydrus.client import ClientThreading
|
||||
from hydrus.client.db import ClientDBModule
|
||||
|
||||
class ClientDBMaintenance( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBMaintenance( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor, db_dir: str, db_filenames: typing.Collection[ str ] ):
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client db maintenance', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client db maintenance', cursor )
|
||||
|
||||
self._db_dir = db_dir
|
||||
self._db_filenames = db_filenames
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
|
||||
return index_generation_tuples
|
||||
return {
|
||||
'main.last_shutdown_work_time' : ( 'CREATE TABLE {} ( last_shutdown_work_time INTEGER );', 400 ),
|
||||
'main.analyze_timestamps' : ( 'CREATE TABLE {} ( name TEXT, num_rows INTEGER, timestamp INTEGER );', 400 ),
|
||||
'main.vacuum_timestamps' : ( 'CREATE TABLE {} ( name TEXT, timestamp INTEGER );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def _TableHasAtLeastRowCount( self, name, row_count ):
|
||||
|
@ -140,25 +142,6 @@ class ClientDBMaintenance( HydrusDBModule.HydrusDBModule ):
|
|||
self._Execute( 'INSERT OR IGNORE INTO analyze_timestamps ( name, num_rows, timestamp ) VALUES ( ?, ?, ? );', ( name, num_rows, HydrusData.GetNow() ) )
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE last_shutdown_work_time ( last_shutdown_work_time INTEGER );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE analyze_timestamps ( name TEXT, num_rows INTEGER, timestamp INTEGER );' )
|
||||
self._Execute( 'CREATE TABLE vacuum_timestamps ( name TEXT, timestamp INTEGER );' )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'last_shutdown_work_time',
|
||||
'analyze_timestamps',
|
||||
'vacuum_timestamps'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetLastShutdownWorkTime( self ):
|
||||
|
||||
result = self._Execute( 'SELECT last_shutdown_work_time FROM last_shutdown_work_time;' ).fetchone()
|
||||
|
|
|
@ -2,8 +2,8 @@ import sqlite3
|
|||
import typing
|
||||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusDBModule
|
||||
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.db import ClientDBServices
|
||||
|
||||
def GenerateMappingsTableNames( service_id: int ) -> typing.Tuple[ str, str, str, str ]:
|
||||
|
@ -20,32 +20,55 @@ def GenerateMappingsTableNames( service_id: int ) -> typing.Tuple[ str, str, str
|
|||
|
||||
return ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name )
|
||||
|
||||
class ClientDBMappingsStorage( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBMappingsStorage( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices ):
|
||||
|
||||
self.modules_services = modules_services
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client mappings storage', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client mappings storage', cursor )
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
|
||||
|
||||
return index_generation_tuples
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_dict[ current_mappings_table_name ] = [
|
||||
( [ 'hash_id', 'tag_id' ], True, 400 )
|
||||
]
|
||||
|
||||
index_generation_dict[ deleted_mappings_table_name ] = [
|
||||
( [ 'hash_id', 'tag_id' ], True, 400 )
|
||||
]
|
||||
|
||||
index_generation_dict[ pending_mappings_table_name ] = [
|
||||
( [ 'hash_id', 'tag_id' ], True, 400 )
|
||||
]
|
||||
|
||||
index_generation_dict[ petitioned_mappings_table_name ] = [
|
||||
( [ 'hash_id', 'tag_id' ], True, 400 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
pass
|
||||
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
|
||||
|
||||
return {
|
||||
current_mappings_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;', 400 ),
|
||||
deleted_mappings_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;', 400 ),
|
||||
pending_mappings_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;', 400 ),
|
||||
petitioned_mappings_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, reason_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;', 400 )
|
||||
}
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
|
||||
|
||||
expected_table_names = []
|
||||
|
||||
return expected_table_names
|
||||
return self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
|
||||
|
||||
|
||||
def ClearMappingsTables( self, service_id: int ):
|
||||
|
@ -70,19 +93,19 @@ class ClientDBMappingsStorage( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
def GenerateMappingsTables( self, service_id: int ):
|
||||
|
||||
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
|
||||
table_generation_dict = self._GetServiceTableGenerationDict( service_id )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;'.format( current_mappings_table_name ) )
|
||||
self._CreateIndex( current_mappings_table_name, [ 'hash_id', 'tag_id' ], unique = True )
|
||||
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
|
||||
|
||||
self._Execute( create_query_without_name.format( table_name ) )
|
||||
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;'.format( deleted_mappings_table_name ) )
|
||||
self._CreateIndex( deleted_mappings_table_name, [ 'hash_id', 'tag_id' ], unique = True )
|
||||
index_generation_dict = self._GetServiceIndexGenerationDict( service_id )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;'.format( pending_mappings_table_name ) )
|
||||
self._CreateIndex( pending_mappings_table_name, [ 'hash_id', 'tag_id' ], unique = True )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, reason_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;'.format( petitioned_mappings_table_name ) )
|
||||
self._CreateIndex( petitioned_mappings_table_name, [ 'hash_id', 'tag_id' ], unique = True )
|
||||
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
|
||||
|
||||
self._CreateIndex( table_name, columns, unique = unique )
|
||||
|
||||
|
||||
|
||||
def GetCurrentFilesCount( self, service_id: int ) -> int:
|
||||
|
|
|
@ -4,31 +4,48 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDB
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusDBBase
|
||||
from hydrus.core import HydrusExceptions
|
||||
from hydrus.core import HydrusTags
|
||||
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.networking import ClientNetworkingDomain
|
||||
|
||||
class ClientDBMasterHashes( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBMasterHashes( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor ):
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client hashes master', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client hashes master', cursor )
|
||||
|
||||
self._hash_ids_to_hashes_cache = {}
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetCriticalTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
index_generation_tuples = []
|
||||
return {
|
||||
'external_master.hashes'
|
||||
}
|
||||
|
||||
index_generation_tuples.append( ( 'external_master.local_hashes', [ 'md5' ], False ) )
|
||||
index_generation_tuples.append( ( 'external_master.local_hashes', [ 'sha1' ], False ) )
|
||||
index_generation_tuples.append( ( 'external_master.local_hashes', [ 'sha512' ], False ) )
|
||||
|
||||
def _GetInitialIndexGenerationDict( self ) -> dict:
|
||||
|
||||
return index_generation_tuples
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_dict[ 'external_master.local_hashes' ] = [
|
||||
( [ 'md5' ], False, 400 ),
|
||||
( [ 'sha1' ], False, 400 ),
|
||||
( [ 'sha512' ], False, 400 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
return {
|
||||
'external_master.hashes' : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, hash BLOB_BYTES UNIQUE );', 400 ),
|
||||
'external_master.local_hashes' : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, md5 BLOB_BYTES, sha1 BLOB_BYTES, sha512 BLOB_BYTES );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def _PopulateHashIdsToHashesCache( self, hash_ids, exception_on_error = False ):
|
||||
|
@ -98,23 +115,6 @@ class ClientDBMasterHashes( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.hashes ( hash_id INTEGER PRIMARY KEY, hash BLOB_BYTES UNIQUE );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.local_hashes ( hash_id INTEGER PRIMARY KEY, md5 BLOB_BYTES, sha1 BLOB_BYTES, sha512 BLOB_BYTES );' )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'external_master.hashes',
|
||||
'external_master.local_hashes'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetExtraHash( self, hash_type, hash_id ) -> bytes:
|
||||
|
||||
result = self._Execute( 'SELECT {} FROM local_hashes WHERE hash_id = ?;'.format( hash_type ), ( hash_id, ) ).fetchone()
|
||||
|
@ -312,41 +312,29 @@ class ClientDBMasterHashes( HydrusDBModule.HydrusDBModule ):
|
|||
self._Execute( 'INSERT OR IGNORE INTO local_hashes ( hash_id, md5, sha1, sha512 ) VALUES ( ?, ?, ?, ? );', ( hash_id, sqlite3.Binary( md5 ), sqlite3.Binary( sha1 ), sqlite3.Binary( sha512 ) ) )
|
||||
|
||||
|
||||
class ClientDBMasterTexts( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBMasterTexts( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor ):
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client texts master', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client texts master', cursor )
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
|
||||
return index_generation_tuples
|
||||
return {
|
||||
'external_master.labels' : ( 'CREATE TABLE IF NOT EXISTS {} ( label_id INTEGER PRIMARY KEY, label TEXT UNIQUE );', 400 ),
|
||||
'external_master.notes' : ( 'CREATE TABLE IF NOT EXISTS {} ( note_id INTEGER PRIMARY KEY, note TEXT UNIQUE );', 400 ),
|
||||
'external_master.texts' : ( 'CREATE TABLE IF NOT EXISTS {} ( text_id INTEGER PRIMARY KEY, text TEXT UNIQUE );', 400 ),
|
||||
'external_caches.notes_fts4' : ( 'CREATE VIRTUAL TABLE IF NOT EXISTS {} USING fts4( note );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
def _RepairRepopulateTables( self, repopulate_table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.labels ( label_id INTEGER PRIMARY KEY, label TEXT UNIQUE );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.notes ( note_id INTEGER PRIMARY KEY, note TEXT UNIQUE );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.texts ( text_id INTEGER PRIMARY KEY, text TEXT UNIQUE );' )
|
||||
|
||||
self._Execute( 'CREATE VIRTUAL TABLE IF NOT EXISTS external_caches.notes_fts4 USING fts4( note );' )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'external_master.labels',
|
||||
'external_master.notes',
|
||||
'external_master.texts',
|
||||
'external_caches.notes_fts4'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
if 'external_caches.notes_fts4' in repopulate_table_names:
|
||||
|
||||
self._Execute( 'REPLACE INTO notes_fts4 ( docid, note ) SELECT note_id, note FROM notes;' )
|
||||
|
||||
|
||||
|
||||
def GetLabelId( self, label ):
|
||||
|
@ -424,25 +412,45 @@ class ClientDBMasterTexts( HydrusDBModule.HydrusDBModule ):
|
|||
return text_id
|
||||
|
||||
|
||||
class ClientDBMasterTags( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBMasterTags( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor ):
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client tags master', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client tags master', cursor )
|
||||
|
||||
self.null_namespace_id = None
|
||||
|
||||
self._tag_ids_to_tags_cache = {}
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetCriticalTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
index_generation_tuples = []
|
||||
return {
|
||||
'external_master.namespaces',
|
||||
'external_master.subtags',
|
||||
'external_master.tags'
|
||||
}
|
||||
|
||||
index_generation_tuples.append( ( 'external_master.tags', [ 'subtag_id' ], False ) )
|
||||
index_generation_tuples.append( ( 'external_master.tags', [ 'namespace_id', 'subtag_id' ], True ) )
|
||||
|
||||
def _GetInitialIndexGenerationDict( self ) -> dict:
|
||||
|
||||
return index_generation_tuples
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_dict[ 'external_master.tags' ] = [
|
||||
( [ 'subtag_id' ], False, 400 ),
|
||||
( [ 'namespace_id', 'subtag_id' ], True, 412 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
return {
|
||||
'external_master.namespaces' : ( 'CREATE TABLE IF NOT EXISTS {} ( namespace_id INTEGER PRIMARY KEY, namespace TEXT UNIQUE );', 400 ),
|
||||
'external_master.subtags' : ( 'CREATE TABLE IF NOT EXISTS {} ( subtag_id INTEGER PRIMARY KEY, subtag TEXT UNIQUE );', 400 ),
|
||||
'external_master.tags' : ( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER PRIMARY KEY, namespace_id INTEGER, subtag_id INTEGER );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def _PopulateTagIdsToTagsCache( self, tag_ids ):
|
||||
|
@ -502,26 +510,6 @@ class ClientDBMasterTags( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.namespaces ( namespace_id INTEGER PRIMARY KEY, namespace TEXT UNIQUE );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.subtags ( subtag_id INTEGER PRIMARY KEY, subtag TEXT UNIQUE );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.tags ( tag_id INTEGER PRIMARY KEY, namespace_id INTEGER, subtag_id INTEGER );' )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'external_master.namespaces',
|
||||
'external_master.subtags',
|
||||
'external_master.tags'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetNamespaceId( self, namespace ) -> int:
|
||||
|
||||
if namespace == '':
|
||||
|
@ -738,37 +726,30 @@ class ClientDBMasterTags( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
class ClientDBMasterURLs( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBMasterURLs( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor ):
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client urls master', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client urls master', cursor )
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialIndexGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_tuples.append( ( 'external_master.urls', [ 'domain_id' ], False ) )
|
||||
|
||||
return index_generation_tuples
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.url_domains ( domain_id INTEGER PRIMARY KEY, domain TEXT UNIQUE );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.urls ( url_id INTEGER PRIMARY KEY, domain_id INTEGER, url TEXT UNIQUE );' )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'external_master.url_domains',
|
||||
'external_master.urls'
|
||||
index_generation_dict[ 'external_master.urls' ] = [
|
||||
( [ 'domain_id' ], False, 400 )
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
return {
|
||||
'external_master.url_domains' : ( 'CREATE TABLE IF NOT EXISTS {} ( domain_id INTEGER PRIMARY KEY, domain TEXT UNIQUE );', 400 ),
|
||||
'external_master.urls' : ( 'CREATE TABLE IF NOT EXISTS {} ( url_id INTEGER PRIMARY KEY, domain_id INTEGER, url TEXT UNIQUE );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
import os
|
||||
import sqlite3
|
||||
import typing
|
||||
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusExceptions
|
||||
from hydrus.core import HydrusGlobals as HG
|
||||
|
||||
def BlockingSafeShowMessage( message ):
|
||||
|
||||
from qtpy import QtWidgets as QW
|
||||
|
||||
HG.client_controller.CallBlockingToQt( HG.client_controller.app, QW.QMessageBox.warning, None, 'Warning', message )
|
||||
|
||||
class ClientDBModule( HydrusDBModule.HydrusDBModule ):
|
||||
|
||||
def _PresentMissingIndicesWarningToUser( self, index_names ):
|
||||
|
||||
index_names = sorted( index_names )
|
||||
|
||||
HydrusData.DebugPrint( 'The "{}" database module is missing the following indices:'.format( self.name ) )
|
||||
HydrusData.DebugPrint( os.linesep.join( index_names ) )
|
||||
|
||||
message = 'Your "{}" database module was missing {} indices. More information has been written to the log. This may or may not be a big deal, and on its own is completely recoverable. If you do not have further problems, hydev does not need to know about it. The indices will be regenerated once you proceed--it may take some time.'.format( self.name, len( index_names ) )
|
||||
|
||||
BlockingSafeShowMessage( message )
|
||||
|
||||
HG.client_controller.frame_splash_status.SetText( 'recreating indices' )
|
||||
|
||||
|
||||
def _PresentMissingTablesWarningToUser( self, table_names ):
|
||||
|
||||
table_names = sorted( table_names )
|
||||
|
||||
HydrusData.DebugPrint( 'The "{}" database module is missing the following tables:'.format( self.name ) )
|
||||
HydrusData.DebugPrint( os.linesep.join( table_names ) )
|
||||
|
||||
message = 'Your "{}" database module was missing {} tables. More information has been written to the log. This is a serious problem and possibly due to hard drive damage. You should check "install_dir/db/help my db is broke.txt" for background reading. If you have a functional backup, kill the hydrus process now and rollback to that backup.'.format( self.name, len( table_names ) )
|
||||
message += os.linesep * 2
|
||||
message += 'Otherwise, proceed and the missing tables will be recreated. Your client should be able to boot, but full automatic recovery may not be possible and you may encounter further errors. A database maintenance task or repository processing reset may be able to fix you up once the client boots. Hydev will be able to help if you run into trouble.'
|
||||
|
||||
BlockingSafeShowMessage( message )
|
||||
|
||||
HG.client_controller.frame_splash_status.SetText( 'recreating tables' )
|
||||
|
||||
|
|
@ -6,8 +6,7 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDB
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusDBBase
|
||||
from hydrus.core import HydrusExceptions
|
||||
from hydrus.core.networking import HydrusNetwork
|
||||
|
||||
|
@ -16,6 +15,7 @@ from hydrus.client.db import ClientDBDefinitionsCache
|
|||
from hydrus.client.db import ClientDBFilesMaintenance
|
||||
from hydrus.client.db import ClientDBFilesMetadataBasic
|
||||
from hydrus.client.db import ClientDBFilesStorage
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.db import ClientDBServices
|
||||
|
||||
def GenerateRepositoryDefinitionTableNames( service_id: int ):
|
||||
|
@ -47,12 +47,12 @@ def GenerateRepositoryUpdatesTableNames( service_id: int ):
|
|||
|
||||
return ( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name )
|
||||
|
||||
class ClientDBRepositories( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBRepositories( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
cursor: sqlite3.Cursor,
|
||||
cursor_transaction_wrapper: HydrusDB.DBCursorTransactionWrapper,
|
||||
cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper,
|
||||
modules_services: ClientDBServices.ClientDBMasterServices,
|
||||
modules_files_storage: ClientDBFilesStorage.ClientDBFilesStorage,
|
||||
modules_files_metadata_basic: ClientDBFilesMetadataBasic.ClientDBFilesMetadataBasic,
|
||||
|
@ -63,7 +63,7 @@ class ClientDBRepositories( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
# since we'll mostly be talking about hashes and tags we don't have locally, I think we shouldn't use the local caches
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client repositories', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client repositories', cursor )
|
||||
|
||||
self._cursor_transaction_wrapper = cursor_transaction_wrapper
|
||||
self.modules_services = modules_services
|
||||
|
@ -96,11 +96,40 @@ class ClientDBRepositories( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
|
||||
|
||||
return index_generation_tuples
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_dict[ repository_updates_table_name ] = [
|
||||
( [ 'hash_id' ], True, 449 )
|
||||
]
|
||||
|
||||
index_generation_dict[ repository_updates_processed_table_name ] = [
|
||||
( [ 'content_type' ], False, 449 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
|
||||
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryDefinitionTableNames( service_id )
|
||||
|
||||
return {
|
||||
repository_updates_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( update_index INTEGER, hash_id INTEGER, PRIMARY KEY ( update_index, hash_id ) );', 449 ),
|
||||
repository_unregistered_updates_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );', 449 ),
|
||||
repository_updates_processed_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, content_type INTEGER, processed INTEGER_BOOLEAN, PRIMARY KEY ( hash_id, content_type ) );', 449 ),
|
||||
hash_id_map_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( service_hash_id INTEGER PRIMARY KEY, hash_id INTEGER );', 400 ),
|
||||
tag_id_map_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( service_tag_id INTEGER PRIMARY KEY, tag_id INTEGER );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
|
||||
|
||||
return self.modules_services.GetServiceIds( HC.REPOSITORIES )
|
||||
|
||||
|
||||
def _HandleCriticalRepositoryDefinitionError( self, service_id, name, bad_ids ):
|
||||
|
@ -216,11 +245,6 @@ class ClientDBRepositories( HydrusDBModule.HydrusDBModule ):
|
|||
self._RegisterUpdates( service_id )
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def DropRepositoryTables( self, service_id: int ):
|
||||
|
||||
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
|
||||
|
@ -247,28 +271,19 @@ class ClientDBRepositories( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
def GenerateRepositoryTables( self, service_id: int ):
|
||||
|
||||
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
|
||||
table_generation_dict = self._GetServiceTableGenerationDict( service_id )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( update_index INTEGER, hash_id INTEGER, PRIMARY KEY ( update_index, hash_id ) );'.format( repository_updates_table_name ) )
|
||||
self._CreateIndex( repository_updates_table_name, [ 'hash_id' ] )
|
||||
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
|
||||
|
||||
self._Execute( create_query_without_name.format( table_name ) )
|
||||
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );'.format( repository_unregistered_updates_table_name ) )
|
||||
index_generation_dict = self._GetServiceIndexGenerationDict( service_id )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, content_type INTEGER, processed INTEGER_BOOLEAN, PRIMARY KEY ( hash_id, content_type ) );'.format( repository_updates_processed_table_name ) )
|
||||
self._CreateIndex( repository_updates_processed_table_name, [ 'content_type' ] )
|
||||
|
||||
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryDefinitionTableNames( service_id )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( service_hash_id INTEGER PRIMARY KEY, hash_id INTEGER );'.format( hash_id_map_table_name ) )
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( service_tag_id INTEGER PRIMARY KEY, tag_id INTEGER );'.format( tag_id_map_table_name ) )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
|
||||
|
||||
self._CreateIndex( table_name, columns, unique = unique )
|
||||
|
||||
|
||||
|
||||
def GetRepositoryProgress( self, service_key: bytes ):
|
||||
|
|
|
@ -6,13 +6,13 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDB
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusDBBase
|
||||
from hydrus.core import HydrusExceptions
|
||||
from hydrus.core import HydrusGlobals as HG
|
||||
from hydrus.core import HydrusSerialisable
|
||||
|
||||
from hydrus.client import ClientConstants as CC
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.db import ClientDBServices
|
||||
|
||||
YAML_DUMP_ID_SINGLE = 0
|
||||
|
@ -142,32 +142,35 @@ class MaintenanceTracker( object ):
|
|||
self._total_new_hashed_serialisable_bytes += num_bytes
|
||||
|
||||
|
||||
class ClientDBSerialisable( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBSerialisable( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor, db_dir, cursor_transaction_wrapper: HydrusDB.DBCursorTransactionWrapper, modules_services: ClientDBServices.ClientDBMasterServices ):
|
||||
def __init__( self, cursor: sqlite3.Cursor, db_dir, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper, modules_services: ClientDBServices.ClientDBMasterServices ):
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client serialisable', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client serialisable', cursor )
|
||||
|
||||
self._db_dir = db_dir
|
||||
self._cursor_transaction_wrapper = cursor_transaction_wrapper
|
||||
self.modules_services = modules_services
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetCriticalTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
index_generation_tuples = []
|
||||
|
||||
return index_generation_tuples
|
||||
return {
|
||||
'main.json_dict',
|
||||
'main.json_dumps',
|
||||
'main.yaml_dumps'
|
||||
}
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
self._Execute( 'CREATE TABLE json_dict ( name TEXT PRIMARY KEY, dump BLOB_BYTES );' )
|
||||
self._Execute( 'CREATE TABLE json_dumps ( dump_type INTEGER PRIMARY KEY, version INTEGER, dump BLOB_BYTES );' )
|
||||
self._Execute( 'CREATE TABLE json_dumps_named ( dump_type INTEGER, dump_name TEXT, version INTEGER, timestamp INTEGER, dump BLOB_BYTES, PRIMARY KEY ( dump_type, dump_name, timestamp ) );' )
|
||||
self._Execute( 'CREATE TABLE json_dumps_hashed ( hash BLOB_BYTES PRIMARY KEY, dump_type INTEGER, version INTEGER, dump BLOB_BYTES );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE yaml_dumps ( dump_type INTEGER, dump_name TEXT, dump TEXT_YAML, PRIMARY KEY ( dump_type, dump_name ) );' )
|
||||
return {
|
||||
'main.json_dict' : ( 'CREATE TABLE {} ( name TEXT PRIMARY KEY, dump BLOB_BYTES );', 400 ),
|
||||
'main.json_dumps' : ( 'CREATE TABLE {} ( dump_type INTEGER PRIMARY KEY, version INTEGER, dump BLOB_BYTES );', 400 ),
|
||||
'main.json_dumps_named' : ( 'CREATE TABLE {} ( dump_type INTEGER, dump_name TEXT, version INTEGER, timestamp INTEGER, dump BLOB_BYTES, PRIMARY KEY ( dump_type, dump_name, timestamp ) );', 400 ),
|
||||
'main.json_dumps_hashed' : ( 'CREATE TABLE {} ( hash BLOB_BYTES PRIMARY KEY, dump_type INTEGER, version INTEGER, dump BLOB_BYTES );', 442 ),
|
||||
'main.yaml_dumps' : ( 'CREATE TABLE {} ( dump_type INTEGER, dump_name TEXT, dump TEXT_YAML, PRIMARY KEY ( dump_type, dump_name ) );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def DeleteJSONDump( self, dump_type ):
|
||||
|
@ -231,18 +234,6 @@ class ClientDBSerialisable( HydrusDBModule.HydrusDBModule ):
|
|||
return all_expected_hashes
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'json_dict',
|
||||
'json_dumps',
|
||||
'json_dumps_named',
|
||||
'yaml_dumps'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetHashedJSONDumps( self, hashes ):
|
||||
|
||||
shown_missing_dump_message = False
|
||||
|
|
|
@ -3,19 +3,19 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusExceptions
|
||||
from hydrus.core import HydrusSerialisable
|
||||
|
||||
from hydrus.client import ClientConstants as CC
|
||||
from hydrus.client import ClientSearch
|
||||
from hydrus.client import ClientServices
|
||||
from hydrus.client.db import ClientDBModule
|
||||
|
||||
class ClientDBMasterServices( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBMasterServices( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor ):
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client services master', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client services master', cursor )
|
||||
|
||||
self._service_ids_to_services = {}
|
||||
self._service_keys_to_service_ids = {}
|
||||
|
@ -29,11 +29,18 @@ class ClientDBMasterServices( HydrusDBModule.HydrusDBModule ):
|
|||
self._InitCaches()
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetCriticalTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
index_generation_tuples = []
|
||||
return {
|
||||
'main.services'
|
||||
}
|
||||
|
||||
return index_generation_tuples
|
||||
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
return {
|
||||
'main.services' : ( 'CREATE TABLE {} ( service_id INTEGER PRIMARY KEY AUTOINCREMENT, service_key BLOB_BYTES UNIQUE, service_type INTEGER, name TEXT, dictionary_string TEXT );', 400 )
|
||||
}
|
||||
|
||||
|
||||
def _InitCaches( self ):
|
||||
|
@ -61,20 +68,6 @@ class ClientDBMasterServices( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE services ( service_id INTEGER PRIMARY KEY AUTOINCREMENT, service_key BLOB_BYTES UNIQUE, service_type INTEGER, name TEXT, dictionary_string TEXT );' )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'services'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def AddService( self, service_key, service_type, name, dictionary: HydrusSerialisable.SerialisableBase ) -> int:
|
||||
|
||||
dictionary_string = dictionary.DumpToString()
|
||||
|
|
|
@ -5,22 +5,22 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDB
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusDBBase
|
||||
from hydrus.core import HydrusGlobals as HG
|
||||
|
||||
from hydrus.client import ClientThreading
|
||||
from hydrus.client.db import ClientDBFilesStorage
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.db import ClientDBServices
|
||||
|
||||
class ClientDBSimilarFiles( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBSimilarFiles( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices, modules_files_storage: ClientDBFilesStorage.ClientDBFilesStorage ):
|
||||
|
||||
self.modules_services = modules_services
|
||||
self.modules_files_storage = modules_files_storage
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client similar files', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client similar files', cursor )
|
||||
|
||||
|
||||
def _AddLeaf( self, phash_id, phash ):
|
||||
|
@ -193,14 +193,30 @@ class ClientDBSimilarFiles( HydrusDBModule.HydrusDBModule ):
|
|||
self._ExecuteMany( 'INSERT OR REPLACE INTO shape_vptree ( phash_id, parent_id, radius, inner_id, inner_population, outer_id, outer_population ) VALUES ( ?, ?, ?, ?, ?, ?, ? );', insert_rows )
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialIndexGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = []
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_tuples.append( ( 'external_master.shape_perceptual_hash_map', [ 'hash_id' ], False ) )
|
||||
index_generation_tuples.append( ( 'external_caches.shape_vptree', [ 'parent_id' ], False ) )
|
||||
index_generation_dict[ 'external_master.shape_perceptual_hash_map' ] = [
|
||||
( [ 'hash_id' ], False, 451 )
|
||||
]
|
||||
|
||||
return index_generation_tuples
|
||||
index_generation_dict[ 'external_caches.shape_vptree' ] = [
|
||||
( [ 'parent_id' ], False, 400 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
return {
|
||||
'external_master.shape_perceptual_hashes' : ( 'CREATE TABLE IF NOT EXISTS {} ( phash_id INTEGER PRIMARY KEY, phash BLOB_BYTES UNIQUE );', 451 ),
|
||||
'external_master.shape_perceptual_hash_map' : ( 'CREATE TABLE IF NOT EXISTS {} ( phash_id INTEGER, hash_id INTEGER, PRIMARY KEY ( phash_id, hash_id ) );', 451 ),
|
||||
'external_caches.shape_vptree' : ( 'CREATE TABLE IF NOT EXISTS {} ( phash_id INTEGER PRIMARY KEY, parent_id INTEGER, radius INTEGER, inner_id INTEGER, inner_population INTEGER, outer_id INTEGER, outer_population INTEGER );', 400 ),
|
||||
'external_caches.shape_maintenance_branch_regen' : ( 'CREATE TABLE IF NOT EXISTS {} ( phash_id INTEGER PRIMARY KEY );', 400 ),
|
||||
'main.shape_search_cache' : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY, searched_distance INTEGER );', 451 )
|
||||
}
|
||||
|
||||
|
||||
def _GetPHashId( self, phash ):
|
||||
|
@ -383,6 +399,14 @@ class ClientDBSimilarFiles( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def _RepairRepopulateTables( self, repopulate_table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
|
||||
|
||||
if 'external_caches.shape_vptree' in repopulate_table_names or 'external_caches.shape_maintenance_branch_regen' in repopulate_table_names:
|
||||
|
||||
self.RegenerateTree()
|
||||
|
||||
|
||||
|
||||
def AssociatePHashes( self, hash_id, phashes ):
|
||||
|
||||
phash_ids = set()
|
||||
|
@ -404,19 +428,6 @@ class ClientDBSimilarFiles( HydrusDBModule.HydrusDBModule ):
|
|||
return phash_ids
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.shape_perceptual_hashes ( phash_id INTEGER PRIMARY KEY, phash BLOB_BYTES UNIQUE );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_master.shape_perceptual_hash_map ( phash_id INTEGER, hash_id INTEGER, PRIMARY KEY ( phash_id, hash_id ) );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_caches.shape_vptree ( phash_id INTEGER PRIMARY KEY, parent_id INTEGER, radius INTEGER, inner_id INTEGER, inner_population INTEGER, outer_id INTEGER, outer_population INTEGER );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS external_caches.shape_maintenance_branch_regen ( phash_id INTEGER PRIMARY KEY );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS shape_search_cache ( hash_id INTEGER PRIMARY KEY, searched_distance INTEGER );' )
|
||||
|
||||
|
||||
def DisassociatePHashes( self, hash_id, phash_ids ):
|
||||
|
||||
self._ExecuteMany( 'DELETE FROM shape_perceptual_hash_map WHERE phash_id = ? AND hash_id = ?;', ( ( phash_id, hash_id ) for phash_id in phash_ids ) )
|
||||
|
@ -435,14 +446,6 @@ class ClientDBSimilarFiles( HydrusDBModule.HydrusDBModule ):
|
|||
return result is not None
|
||||
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = []
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetMaintenanceStatus( self ):
|
||||
|
||||
searched_distances_to_count = collections.Counter( dict( self._Execute( 'SELECT searched_distance, COUNT( * ) FROM shape_search_cache GROUP BY searched_distance;' ) ) )
|
||||
|
|
|
@ -5,9 +5,10 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusDBBase
|
||||
|
||||
from hydrus.client.db import ClientDBDefinitionsCache
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.db import ClientDBServices
|
||||
from hydrus.client.db import ClientDBTagSiblings
|
||||
from hydrus.client.metadata import ClientTags
|
||||
|
@ -33,7 +34,7 @@ def GenerateTagParentsLookupCacheTableNames( service_id ):
|
|||
|
||||
return ( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name )
|
||||
|
||||
class ClientDBTagParents( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBTagParents( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
|
@ -52,17 +53,85 @@ class ClientDBTagParents( HydrusDBModule.HydrusDBModule ):
|
|||
self._service_ids_to_applicable_service_ids = None
|
||||
self._service_ids_to_interested_service_ids = None
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client tag parents', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client tag parents', cursor )
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialIndexGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = [
|
||||
( 'tag_parents', [ 'service_id', 'parent_tag_id' ], False ),
|
||||
( 'tag_parent_petitions', [ 'service_id', 'parent_tag_id' ], False ),
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_dict[ 'tag_parents' ] = [
|
||||
( [ 'service_id', 'parent_tag_id' ], False, 420 )
|
||||
]
|
||||
|
||||
return index_generation_tuples
|
||||
index_generation_dict[ 'tag_parent_petitions' ] = [
|
||||
( [ 'service_id', 'parent_tag_id' ], False, 420 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
return {
|
||||
'main.tag_parents' : ( 'CREATE TABLE {} ( service_id INTEGER, child_tag_id INTEGER, parent_tag_id INTEGER, status INTEGER, PRIMARY KEY ( service_id, child_tag_id, parent_tag_id, status ) );', 414 ),
|
||||
'main.tag_parent_petitions' : ( 'CREATE TABLE {} ( service_id INTEGER, child_tag_id INTEGER, parent_tag_id INTEGER, status INTEGER, reason_id INTEGER, PRIMARY KEY ( service_id, child_tag_id, parent_tag_id, status ) );', 414 ),
|
||||
'main.tag_parent_application' : ( 'CREATE TABLE {} ( master_service_id INTEGER, service_index INTEGER, application_service_id INTEGER, PRIMARY KEY ( master_service_id, service_index ) );', 414 )
|
||||
}
|
||||
|
||||
|
||||
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = GenerateTagParentsLookupCacheTableNames( service_id )
|
||||
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_dict[ cache_actual_tag_parents_lookup_table_name ] = [
|
||||
( [ 'ancestor_tag_id' ], False, 414 )
|
||||
]
|
||||
|
||||
index_generation_dict[ cache_ideal_tag_parents_lookup_table_name ] = [
|
||||
( [ 'ancestor_tag_id' ], False, 414 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = GenerateTagParentsLookupCacheTableNames( service_id )
|
||||
|
||||
return {
|
||||
cache_actual_tag_parents_lookup_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( child_tag_id INTEGER, ancestor_tag_id INTEGER, PRIMARY KEY ( child_tag_id, ancestor_tag_id ) );', 414 ),
|
||||
cache_ideal_tag_parents_lookup_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( child_tag_id INTEGER, ancestor_tag_id INTEGER, PRIMARY KEY ( child_tag_id, ancestor_tag_id ) );', 414 )
|
||||
}
|
||||
|
||||
|
||||
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
|
||||
|
||||
return self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
|
||||
|
||||
|
||||
def _RepairRepopulateTables( self, repopulate_table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
|
||||
|
||||
for service_id in self._GetServiceIdsWeGenerateDynamicTablesFor():
|
||||
|
||||
table_generation_dict = self._GetServiceTableGenerationDict( service_id )
|
||||
|
||||
this_service_table_names = set( table_generation_dict.keys() )
|
||||
|
||||
this_service_needs_repopulation = len( this_service_table_names.intersection( repopulate_table_names ) ) > 0
|
||||
|
||||
if this_service_needs_repopulation:
|
||||
|
||||
self._service_ids_to_applicable_service_ids = None
|
||||
self._service_ids_to_interested_service_ids = None
|
||||
|
||||
self.Regen( ( service_id, ) )
|
||||
|
||||
cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
|
||||
|
||||
def AddTagParents( self, service_id, pairs ):
|
||||
|
@ -85,14 +154,6 @@ class ClientDBTagParents( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE tag_parents ( service_id INTEGER, child_tag_id INTEGER, parent_tag_id INTEGER, status INTEGER, PRIMARY KEY ( service_id, child_tag_id, parent_tag_id, status ) );' )
|
||||
self._Execute( 'CREATE TABLE tag_parent_petitions ( service_id INTEGER, child_tag_id INTEGER, parent_tag_id INTEGER, status INTEGER, reason_id INTEGER, PRIMARY KEY ( service_id, child_tag_id, parent_tag_id, status ) );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE tag_parent_application ( master_service_id INTEGER, service_index INTEGER, application_service_id INTEGER, PRIMARY KEY ( master_service_id, service_index ) );' )
|
||||
|
||||
|
||||
def DeleteTagParents( self, service_id, pairs ):
|
||||
|
||||
self._ExecuteMany( 'DELETE FROM tag_parents WHERE service_id = ? AND child_tag_id = ? AND parent_tag_id = ?;', ( ( service_id, child_tag_id, parent_tag_id ) for ( child_tag_id, parent_tag_id ) in pairs ) )
|
||||
|
@ -155,13 +216,19 @@ class ClientDBTagParents( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
def Generate( self, tag_service_id ):
|
||||
|
||||
( cache_ideal_tag_parents_lookup_table_name, cache_actual_tag_parents_lookup_table_name ) = GenerateTagParentsLookupCacheTableNames( tag_service_id )
|
||||
table_generation_dict = self._GetServiceTableGenerationDict( tag_service_id )
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( child_tag_id INTEGER, ancestor_tag_id INTEGER, PRIMARY KEY ( child_tag_id, ancestor_tag_id ) );'.format( cache_actual_tag_parents_lookup_table_name ) )
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( child_tag_id INTEGER, ancestor_tag_id INTEGER, PRIMARY KEY ( child_tag_id, ancestor_tag_id ) );'.format( cache_ideal_tag_parents_lookup_table_name ) )
|
||||
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
|
||||
|
||||
self._Execute( create_query_without_name.format( table_name ) )
|
||||
|
||||
|
||||
self._CreateIndex( cache_actual_tag_parents_lookup_table_name, [ 'ancestor_tag_id' ] )
|
||||
self._CreateIndex( cache_ideal_tag_parents_lookup_table_name, [ 'ancestor_tag_id' ] )
|
||||
index_generation_dict = self._GetServiceIndexGenerationDict( tag_service_id )
|
||||
|
||||
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
|
||||
|
||||
self._CreateIndex( table_name, columns, unique = unique )
|
||||
|
||||
|
||||
self._Execute( 'INSERT OR IGNORE INTO tag_parent_application ( master_service_id, service_index, application_service_id ) VALUES ( ?, ?, ? );', ( tag_service_id, 0, tag_service_id ) )
|
||||
|
||||
|
@ -364,17 +431,6 @@ class ClientDBTagParents( HydrusDBModule.HydrusDBModule ):
|
|||
return descendant_ids
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'tag_parents',
|
||||
'tag_parent_petitions',
|
||||
'tag_parent_application'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetInterestedServiceIds( self, tag_service_id ):
|
||||
|
||||
if self._service_ids_to_interested_service_ids is None:
|
||||
|
|
|
@ -5,11 +5,12 @@ import typing
|
|||
|
||||
from hydrus.core import HydrusConstants as HC
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusDBModule
|
||||
from hydrus.core import HydrusDBBase
|
||||
|
||||
from hydrus.client import ClientConstants as CC
|
||||
from hydrus.client.db import ClientDBDefinitionsCache
|
||||
from hydrus.client.db import ClientDBMaster
|
||||
from hydrus.client.db import ClientDBModule
|
||||
from hydrus.client.db import ClientDBServices
|
||||
from hydrus.client.metadata import ClientTags
|
||||
from hydrus.client.metadata import ClientTagsHandling
|
||||
|
@ -34,7 +35,7 @@ def GenerateTagSiblingsLookupCacheTableNames( service_id ):
|
|||
|
||||
return ( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name )
|
||||
|
||||
class ClientDBTagSiblings( HydrusDBModule.HydrusDBModule ):
|
||||
class ClientDBTagSiblings( ClientDBModule.ClientDBModule ):
|
||||
|
||||
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices, modules_tags: ClientDBMaster.ClientDBMasterTags, modules_tags_local_cache: ClientDBDefinitionsCache.ClientDBCacheLocalTags ):
|
||||
|
||||
|
@ -47,7 +48,7 @@ class ClientDBTagSiblings( HydrusDBModule.HydrusDBModule ):
|
|||
self._service_ids_to_applicable_service_ids = None
|
||||
self._service_ids_to_interested_service_ids = None
|
||||
|
||||
HydrusDBModule.HydrusDBModule.__init__( self, 'client tag siblings', cursor )
|
||||
ClientDBModule.ClientDBModule.__init__( self, 'client tag siblings', cursor )
|
||||
|
||||
|
||||
def _GenerateApplicationDicts( self ):
|
||||
|
@ -69,14 +70,82 @@ class ClientDBTagSiblings( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _GetInitialIndexGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_tuples = [
|
||||
( 'tag_siblings', [ 'service_id', 'good_tag_id' ], False ),
|
||||
( 'tag_sibling_petitions', [ 'service_id', 'good_tag_id' ], False ),
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_dict[ 'tag_siblings' ] = [
|
||||
( [ 'service_id', 'good_tag_id' ], False, 420 )
|
||||
]
|
||||
|
||||
return index_generation_tuples
|
||||
index_generation_dict[ 'tag_sibling_petitions' ] = [
|
||||
( [ 'service_id', 'good_tag_id' ], False, 420 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
return {
|
||||
'main.tag_siblings' : ( 'CREATE TABLE {} ( service_id INTEGER, bad_tag_id INTEGER, good_tag_id INTEGER, status INTEGER, PRIMARY KEY ( service_id, bad_tag_id, status ) );', 414 ),
|
||||
'main.tag_sibling_petitions' : ( 'CREATE TABLE {} ( service_id INTEGER, bad_tag_id INTEGER, good_tag_id INTEGER, status INTEGER, reason_id INTEGER, PRIMARY KEY ( service_id, bad_tag_id, status ) );', 414 ),
|
||||
'main.tag_sibling_application' : ( 'CREATE TABLE {} ( master_service_id INTEGER, service_index INTEGER, application_service_id INTEGER, PRIMARY KEY ( master_service_id, service_index ) );', 414 )
|
||||
}
|
||||
|
||||
|
||||
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = GenerateTagSiblingsLookupCacheTableNames( service_id )
|
||||
|
||||
index_generation_dict = {}
|
||||
|
||||
index_generation_dict[ cache_actual_tag_siblings_lookup_table_name ] = [
|
||||
( [ 'ideal_tag_id' ], False, 414 )
|
||||
]
|
||||
|
||||
index_generation_dict[ cache_ideal_tag_siblings_lookup_table_name ] = [
|
||||
( [ 'ideal_tag_id' ], False, 414 )
|
||||
]
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = GenerateTagSiblingsLookupCacheTableNames( service_id )
|
||||
|
||||
return {
|
||||
cache_actual_tag_siblings_lookup_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( bad_tag_id INTEGER PRIMARY KEY, ideal_tag_id INTEGER );', 414 ),
|
||||
cache_ideal_tag_siblings_lookup_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( bad_tag_id INTEGER PRIMARY KEY, ideal_tag_id INTEGER );', 414 )
|
||||
}
|
||||
|
||||
|
||||
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
|
||||
|
||||
return self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
|
||||
|
||||
|
||||
def _RepairRepopulateTables( self, repopulate_table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
|
||||
|
||||
for service_id in self._GetServiceIdsWeGenerateDynamicTablesFor():
|
||||
|
||||
table_generation_dict = self._GetServiceTableGenerationDict( service_id )
|
||||
|
||||
this_service_table_names = set( table_generation_dict.keys() )
|
||||
|
||||
this_service_needs_repopulation = len( this_service_table_names.intersection( repopulate_table_names ) ) > 0
|
||||
|
||||
if this_service_needs_repopulation:
|
||||
|
||||
self._service_ids_to_applicable_service_ids = None
|
||||
self._service_ids_to_interested_service_ids = None
|
||||
|
||||
self.Regen( ( service_id, ) )
|
||||
|
||||
cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
|
||||
|
||||
def AddTagSiblings( self, service_id, pairs ):
|
||||
|
@ -99,14 +168,6 @@ class ClientDBTagSiblings( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
|
||||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
self._Execute( 'CREATE TABLE tag_siblings ( service_id INTEGER, bad_tag_id INTEGER, good_tag_id INTEGER, status INTEGER, PRIMARY KEY ( service_id, bad_tag_id, status ) );' )
|
||||
self._Execute( 'CREATE TABLE tag_sibling_petitions ( service_id INTEGER, bad_tag_id INTEGER, good_tag_id INTEGER, status INTEGER, reason_id INTEGER, PRIMARY KEY ( service_id, bad_tag_id, status ) );' )
|
||||
|
||||
self._Execute( 'CREATE TABLE tag_sibling_application ( master_service_id INTEGER, service_index INTEGER, application_service_id INTEGER, PRIMARY KEY ( master_service_id, service_index ) );' )
|
||||
|
||||
|
||||
def DeleteTagSiblings( self, service_id, pairs ):
|
||||
|
||||
self._ExecuteMany( 'DELETE FROM tag_siblings WHERE service_id = ? AND bad_tag_id = ? AND good_tag_id = ?;', ( ( service_id, bad_tag_id, good_tag_id ) for ( bad_tag_id, good_tag_id ) in pairs ) )
|
||||
|
@ -195,16 +256,19 @@ class ClientDBTagSiblings( HydrusDBModule.HydrusDBModule ):
|
|||
|
||||
def Generate( self, tag_service_id ):
|
||||
|
||||
self._service_ids_to_applicable_service_ids = None
|
||||
self._service_ids_to_interested_service_ids = None
|
||||
table_generation_dict = self._GetServiceTableGenerationDict( tag_service_id )
|
||||
|
||||
( cache_ideal_tag_siblings_lookup_table_name, cache_actual_tag_siblings_lookup_table_name ) = GenerateTagSiblingsLookupCacheTableNames( tag_service_id )
|
||||
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
|
||||
|
||||
self._Execute( create_query_without_name.format( table_name ) )
|
||||
|
||||
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( bad_tag_id INTEGER PRIMARY KEY, ideal_tag_id INTEGER );'.format( cache_actual_tag_siblings_lookup_table_name ) )
|
||||
self._Execute( 'CREATE TABLE IF NOT EXISTS {} ( bad_tag_id INTEGER PRIMARY KEY, ideal_tag_id INTEGER );'.format( cache_ideal_tag_siblings_lookup_table_name ) )
|
||||
index_generation_dict = self._GetServiceIndexGenerationDict( tag_service_id )
|
||||
|
||||
self._CreateIndex( cache_actual_tag_siblings_lookup_table_name, [ 'ideal_tag_id' ] )
|
||||
self._CreateIndex( cache_ideal_tag_siblings_lookup_table_name, [ 'ideal_tag_id' ] )
|
||||
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
|
||||
|
||||
self._CreateIndex( table_name, columns, unique = unique )
|
||||
|
||||
|
||||
self._Execute( 'INSERT OR IGNORE INTO tag_sibling_application ( master_service_id, service_index, application_service_id ) VALUES ( ?, ?, ? );', ( tag_service_id, 0, tag_service_id ) )
|
||||
|
||||
|
@ -335,17 +399,6 @@ class ClientDBTagSiblings( HydrusDBModule.HydrusDBModule ):
|
|||
self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id ) SELECT bad_tag_id FROM {} CROSS JOIN {} USING ( ideal_tag_id );'.format( results_table_name, ideal_tag_ids_table_name, cache_tag_siblings_lookup_table_name ) )
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
expected_table_names = [
|
||||
'tag_siblings',
|
||||
'tag_sibling_petitions',
|
||||
'tag_sibling_application'
|
||||
]
|
||||
|
||||
return expected_table_names
|
||||
|
||||
|
||||
def GetIdeal( self, display_type, tag_service_id, tag_id ) -> int:
|
||||
|
||||
cache_tag_siblings_lookup_table_name = GenerateTagSiblingsLookupCacheTableName( display_type, tag_service_id )
|
||||
|
|
|
@ -158,119 +158,6 @@ def VacuumDB( db_path ):
|
|||
|
||||
c.execute( 'PRAGMA journal_mode = {};'.format( HG.db_journal_mode ) )
|
||||
|
||||
class DBCursorTransactionWrapper( HydrusDBBase.DBBase ):
|
||||
|
||||
def __init__( self, c: sqlite3.Cursor, transaction_commit_period: int ):
|
||||
|
||||
HydrusDBBase.DBBase.__init__( self )
|
||||
|
||||
self._SetCursor( c )
|
||||
|
||||
self._transaction_commit_period = transaction_commit_period
|
||||
|
||||
self._transaction_start_time = 0
|
||||
self._in_transaction = False
|
||||
self._transaction_contains_writes = False
|
||||
|
||||
self._last_mem_refresh_time = HydrusData.GetNow()
|
||||
self._last_wal_checkpoint_time = HydrusData.GetNow()
|
||||
|
||||
|
||||
def BeginImmediate( self ):
|
||||
|
||||
if not self._in_transaction:
|
||||
|
||||
self._Execute( 'BEGIN IMMEDIATE;' )
|
||||
self._Execute( 'SAVEPOINT hydrus_savepoint;' )
|
||||
|
||||
self._transaction_start_time = HydrusData.GetNow()
|
||||
self._in_transaction = True
|
||||
self._transaction_contains_writes = False
|
||||
|
||||
|
||||
|
||||
def Commit( self ):
|
||||
|
||||
if self._in_transaction:
|
||||
|
||||
self._Execute( 'COMMIT;' )
|
||||
|
||||
self._in_transaction = False
|
||||
self._transaction_contains_writes = False
|
||||
|
||||
if HG.db_journal_mode == 'WAL' and HydrusData.TimeHasPassed( self._last_wal_checkpoint_time + 1800 ):
|
||||
|
||||
self._Execute( 'PRAGMA wal_checkpoint(PASSIVE);' )
|
||||
|
||||
self._last_wal_checkpoint_time = HydrusData.GetNow()
|
||||
|
||||
|
||||
if HydrusData.TimeHasPassed( self._last_mem_refresh_time + 600 ):
|
||||
|
||||
self._Execute( 'DETACH mem;' )
|
||||
self._Execute( 'ATTACH ":memory:" AS mem;' )
|
||||
|
||||
HydrusDBBase.TemporaryIntegerTableNameCache.instance().Clear()
|
||||
|
||||
self._last_mem_refresh_time = HydrusData.GetNow()
|
||||
|
||||
|
||||
else:
|
||||
|
||||
HydrusData.Print( 'Received a call to commit, but was not in a transaction!' )
|
||||
|
||||
|
||||
|
||||
def CommitAndBegin( self ):
|
||||
|
||||
if self._in_transaction:
|
||||
|
||||
self.Commit()
|
||||
|
||||
self.BeginImmediate()
|
||||
|
||||
|
||||
|
||||
def InTransaction( self ):
|
||||
|
||||
return self._in_transaction
|
||||
|
||||
|
||||
def NotifyWriteOccuring( self ):
|
||||
|
||||
self._transaction_contains_writes = True
|
||||
|
||||
|
||||
def Rollback( self ):
|
||||
|
||||
if self._in_transaction:
|
||||
|
||||
self._Execute( 'ROLLBACK TO hydrus_savepoint;' )
|
||||
|
||||
# any temp int tables created in this lad will be rolled back, so 'initialised' can't be trusted. just reset, no big deal
|
||||
HydrusDBBase.TemporaryIntegerTableNameCache.instance().Clear()
|
||||
|
||||
# still in transaction
|
||||
# transaction may no longer contain writes, but it isn't important to figure out that it doesn't
|
||||
|
||||
else:
|
||||
|
||||
HydrusData.Print( 'Received a call to rollback, but was not in a transaction!' )
|
||||
|
||||
|
||||
|
||||
def Save( self ):
|
||||
|
||||
self._Execute( 'RELEASE hydrus_savepoint;' )
|
||||
|
||||
self._Execute( 'SAVEPOINT hydrus_savepoint;' )
|
||||
|
||||
|
||||
def TimeToCommit( self ):
|
||||
|
||||
return self._in_transaction and self._transaction_contains_writes and HydrusData.TimeHasPassed( self._transaction_start_time + self._transaction_commit_period )
|
||||
|
||||
|
||||
class HydrusDB( HydrusDBBase.DBBase ):
|
||||
|
||||
READ_WRITE_ACTIONS = []
|
||||
|
@ -361,7 +248,7 @@ class HydrusDB( HydrusDBBase.DBBase ):
|
|||
raise Exception( 'Your current database version of hydrus ' + str( version ) + ' is too old for this software version ' + str( HC.SOFTWARE_VERSION ) + ' to update. Please try updating with version ' + str( version + 45 ) + ' or earlier first.' )
|
||||
|
||||
|
||||
self._RepairDB()
|
||||
self._RepairDB( version )
|
||||
|
||||
while version < HC.SOFTWARE_VERSION:
|
||||
|
||||
|
@ -575,7 +462,7 @@ class HydrusDB( HydrusDBBase.DBBase ):
|
|||
|
||||
self._is_connected = True
|
||||
|
||||
self._cursor_transaction_wrapper = DBCursorTransactionWrapper( self._c, HG.db_transaction_commit_period )
|
||||
self._cursor_transaction_wrapper = HydrusDBBase.DBCursorTransactionWrapper( self._c, HG.db_transaction_commit_period )
|
||||
|
||||
self._LoadModules()
|
||||
|
||||
|
@ -743,9 +630,12 @@ class HydrusDB( HydrusDBBase.DBBase ):
|
|||
raise NotImplementedError()
|
||||
|
||||
|
||||
def _RepairDB( self ):
|
||||
def _RepairDB( self, version ):
|
||||
|
||||
pass
|
||||
for module in self._modules:
|
||||
|
||||
module.Repair( version, self._cursor_transaction_wrapper )
|
||||
|
||||
|
||||
|
||||
def _ReportOverupdatedDB( self, version ):
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
import collections
|
||||
import sqlite3
|
||||
|
||||
from hydrus.core import HydrusData
|
||||
from hydrus.core import HydrusGlobals as HG
|
||||
|
||||
class TemporaryIntegerTableNameCache( object ):
|
||||
|
@ -179,11 +180,6 @@ class DBBase( object ):
|
|||
self._c.executemany( query, args_iterator )
|
||||
|
||||
|
||||
def _GenerateIndexName( self, table_name, columns ):
|
||||
|
||||
return '{}_{}_index'.format( table_name, '_'.join( columns ) )
|
||||
|
||||
|
||||
def _ExecuteManySelectSingleParam( self, query, single_param_iterator ):
|
||||
|
||||
select_args_iterator = ( ( param, ) for param in single_param_iterator )
|
||||
|
@ -208,6 +204,27 @@ class DBBase( object ):
|
|||
|
||||
|
||||
|
||||
def _GenerateIndexName( self, table_name, columns ):
|
||||
|
||||
return '{}_{}_index'.format( table_name, '_'.join( columns ) )
|
||||
|
||||
|
||||
def _GetAttachedDatabaseNames( self, include_temp = False ):
|
||||
|
||||
if include_temp:
|
||||
|
||||
f = lambda schema_name, path: True
|
||||
|
||||
else:
|
||||
|
||||
f = lambda schema_name, path: schema_name != 'temp' and path != ''
|
||||
|
||||
|
||||
names = [ schema_name for ( number, schema_name, path ) in self._Execute( 'PRAGMA database_list;' ) if f( schema_name, path ) ]
|
||||
|
||||
return names
|
||||
|
||||
|
||||
def _GetLastRowId( self ) -> int:
|
||||
|
||||
return self._c.lastrowid
|
||||
|
@ -227,6 +244,13 @@ class DBBase( object ):
|
|||
|
||||
|
||||
|
||||
def _IndexExists( self, table_name, columns ):
|
||||
|
||||
index_name = self._GenerateIndexName( table_name, columns )
|
||||
|
||||
return self._TableOrIndexExists( index_name, 'index' )
|
||||
|
||||
|
||||
def _MakeTemporaryIntegerTable( self, integer_iterable, column_name ):
|
||||
|
||||
return TemporaryIntegerTable( self._c, integer_iterable, column_name )
|
||||
|
@ -257,4 +281,149 @@ class DBBase( object ):
|
|||
|
||||
return { item for ( item, ) in iterable_cursor }
|
||||
|
||||
|
||||
def _TableExists( self, table_name ):
|
||||
|
||||
return self._TableOrIndexExists( table_name, 'table' )
|
||||
|
||||
|
||||
def _TableOrIndexExists( self, name, item_type ):
|
||||
|
||||
if '.' in name:
|
||||
|
||||
( schema, name ) = name.split( '.', 1 )
|
||||
|
||||
search_schemas = [ schema ]
|
||||
|
||||
else:
|
||||
|
||||
search_schemas = self._GetAttachedDatabaseNames()
|
||||
|
||||
|
||||
for schema in search_schemas:
|
||||
|
||||
result = self._Execute( 'SELECT 1 FROM {}.sqlite_master WHERE name = ? AND type = ?;'.format( schema ), ( name, item_type ) ).fetchone()
|
||||
|
||||
if result is not None:
|
||||
|
||||
return True
|
||||
|
||||
|
||||
|
||||
return False
|
||||
|
||||
|
||||
class DBCursorTransactionWrapper( DBBase ):
|
||||
|
||||
def __init__( self, c: sqlite3.Cursor, transaction_commit_period: int ):
|
||||
|
||||
DBBase.__init__( self )
|
||||
|
||||
self._SetCursor( c )
|
||||
|
||||
self._transaction_commit_period = transaction_commit_period
|
||||
|
||||
self._transaction_start_time = 0
|
||||
self._in_transaction = False
|
||||
self._transaction_contains_writes = False
|
||||
|
||||
self._last_mem_refresh_time = HydrusData.GetNow()
|
||||
self._last_wal_checkpoint_time = HydrusData.GetNow()
|
||||
|
||||
|
||||
def BeginImmediate( self ):
|
||||
|
||||
if not self._in_transaction:
|
||||
|
||||
self._Execute( 'BEGIN IMMEDIATE;' )
|
||||
self._Execute( 'SAVEPOINT hydrus_savepoint;' )
|
||||
|
||||
self._transaction_start_time = HydrusData.GetNow()
|
||||
self._in_transaction = True
|
||||
self._transaction_contains_writes = False
|
||||
|
||||
|
||||
|
||||
def Commit( self ):
|
||||
|
||||
if self._in_transaction:
|
||||
|
||||
self._Execute( 'COMMIT;' )
|
||||
|
||||
self._in_transaction = False
|
||||
self._transaction_contains_writes = False
|
||||
|
||||
if HG.db_journal_mode == 'WAL' and HydrusData.TimeHasPassed( self._last_wal_checkpoint_time + 1800 ):
|
||||
|
||||
self._Execute( 'PRAGMA wal_checkpoint(PASSIVE);' )
|
||||
|
||||
self._last_wal_checkpoint_time = HydrusData.GetNow()
|
||||
|
||||
|
||||
if HydrusData.TimeHasPassed( self._last_mem_refresh_time + 600 ):
|
||||
|
||||
self._Execute( 'DETACH mem;' )
|
||||
self._Execute( 'ATTACH ":memory:" AS mem;' )
|
||||
|
||||
TemporaryIntegerTableNameCache.instance().Clear()
|
||||
|
||||
self._last_mem_refresh_time = HydrusData.GetNow()
|
||||
|
||||
|
||||
else:
|
||||
|
||||
HydrusData.Print( 'Received a call to commit, but was not in a transaction!' )
|
||||
|
||||
|
||||
|
||||
def CommitAndBegin( self ):
|
||||
|
||||
if self._in_transaction:
|
||||
|
||||
self.Commit()
|
||||
|
||||
self.BeginImmediate()
|
||||
|
||||
|
||||
|
||||
def InTransaction( self ):
|
||||
|
||||
return self._in_transaction
|
||||
|
||||
|
||||
def NotifyWriteOccuring( self ):
|
||||
|
||||
self._transaction_contains_writes = True
|
||||
|
||||
|
||||
def Rollback( self ):
|
||||
|
||||
if self._in_transaction:
|
||||
|
||||
self._Execute( 'ROLLBACK TO hydrus_savepoint;' )
|
||||
|
||||
# any temp int tables created in this lad will be rolled back, so 'initialised' can't be trusted. just reset, no big deal
|
||||
TemporaryIntegerTableNameCache.instance().Clear()
|
||||
|
||||
# still in transaction
|
||||
# transaction may no longer contain writes, but it isn't important to figure out that it doesn't
|
||||
|
||||
else:
|
||||
|
||||
HydrusData.Print( 'Received a call to rollback, but was not in a transaction!' )
|
||||
|
||||
|
||||
|
||||
def Save( self ):
|
||||
|
||||
self._Execute( 'RELEASE hydrus_savepoint;' )
|
||||
|
||||
self._Execute( 'SAVEPOINT hydrus_savepoint;' )
|
||||
|
||||
|
||||
def TimeToCommit( self ):
|
||||
|
||||
return self._in_transaction and self._transaction_contains_writes and HydrusData.TimeHasPassed( self._transaction_start_time + self._transaction_commit_period )
|
||||
|
||||
|
||||
|
|
@ -2,6 +2,7 @@ import sqlite3
|
|||
import typing
|
||||
|
||||
from hydrus.core import HydrusDBBase
|
||||
from hydrus.core import HydrusExceptions
|
||||
|
||||
class HydrusDBModule( HydrusDBBase.DBBase ):
|
||||
|
||||
|
@ -14,16 +15,92 @@ class HydrusDBModule( HydrusDBBase.DBBase ):
|
|||
self._SetCursor( cursor )
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationTuples( self ):
|
||||
def _FlattenIndexGenerationDict( self, index_generation_dict: dict ):
|
||||
|
||||
tuples = []
|
||||
|
||||
for ( table_name, index_rows ) in index_generation_dict.items():
|
||||
|
||||
tuples.extend( ( ( table_name, columns, unique, version_added ) for ( columns, unique, version_added ) in index_rows ) )
|
||||
|
||||
|
||||
return tuples
|
||||
|
||||
|
||||
def _GetCriticalTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
return set()
|
||||
|
||||
|
||||
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def _GetServicesIndexGenerationDict( self ) -> dict:
|
||||
|
||||
index_generation_dict = {}
|
||||
|
||||
for service_id in self._GetServiceIdsWeGenerateDynamicTablesFor():
|
||||
|
||||
index_generation_dict.update( self._GetServiceIndexGenerationDict( service_id ) )
|
||||
|
||||
|
||||
return index_generation_dict
|
||||
|
||||
|
||||
def _GetServicesTableGenerationDict( self ) -> dict:
|
||||
|
||||
table_generation_dict = {}
|
||||
|
||||
for service_id in self._GetServiceIdsWeGenerateDynamicTablesFor():
|
||||
|
||||
table_generation_dict.update( self._GetServiceTableGenerationDict( service_id ) )
|
||||
|
||||
|
||||
return table_generation_dict
|
||||
|
||||
|
||||
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
|
||||
|
||||
return []
|
||||
|
||||
|
||||
def _GetInitialIndexGenerationDict( self ) -> dict:
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def _GetInitialTableGenerationDict( self ) -> dict:
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def _PresentMissingIndicesWarningToUser( self, index_names ):
|
||||
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
def _PresentMissingTablesWarningToUser( self, table_names ):
|
||||
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
def _RepairRepopulateTables( self, table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
|
||||
|
||||
pass
|
||||
|
||||
|
||||
def CreateInitialIndices( self ):
|
||||
|
||||
index_generation_tuples = self._GetInitialIndexGenerationTuples()
|
||||
index_generation_dict = self._GetInitialIndexGenerationDict()
|
||||
|
||||
for ( table_name, columns, unique ) in index_generation_tuples:
|
||||
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
|
||||
|
||||
self._CreateIndex( table_name, columns, unique = unique )
|
||||
|
||||
|
@ -31,21 +108,54 @@ class HydrusDBModule( HydrusDBBase.DBBase ):
|
|||
|
||||
def CreateInitialTables( self ):
|
||||
|
||||
raise NotImplementedError()
|
||||
table_generation_dict = self._GetInitialTableGenerationDict()
|
||||
|
||||
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
|
||||
|
||||
self._Execute( create_query_without_name.format( table_name ) )
|
||||
|
||||
|
||||
|
||||
def GetExpectedIndexNames( self ) -> typing.Collection[ str ]:
|
||||
def GetExpectedServiceIndexNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
index_generation_tuples = self._GetInitialIndexGenerationTuples()
|
||||
index_generation_dict = self._GetServicesIndexGenerationDict()
|
||||
|
||||
expected_index_names = [ self._GenerateIndexName( table_name, columns ) for ( table_name, columns, unique ) in index_generation_tuples ]
|
||||
expected_index_names = []
|
||||
|
||||
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
|
||||
|
||||
expected_index_names.append( self._GenerateIndexName( table_name, columns ) )
|
||||
|
||||
|
||||
return expected_index_names
|
||||
|
||||
|
||||
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
|
||||
def GetExpectedInitialIndexNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
raise NotImplementedError()
|
||||
index_generation_dict = self._GetInitialIndexGenerationDict()
|
||||
|
||||
expected_index_names = []
|
||||
|
||||
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
|
||||
|
||||
expected_index_names.append( self._GenerateIndexName( table_name, columns ) )
|
||||
|
||||
|
||||
return expected_index_names
|
||||
|
||||
|
||||
def GetExpectedServiceTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
table_generation_dict = self._GetServicesTableGenerationDict()
|
||||
|
||||
return list( table_generation_dict.keys() )
|
||||
|
||||
|
||||
def GetExpectedInitialTableNames( self ) -> typing.Collection[ str ]:
|
||||
|
||||
table_generation_dict = self._GetInitialTableGenerationDict()
|
||||
|
||||
return list( table_generation_dict.keys() )
|
||||
|
||||
|
||||
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
|
||||
|
@ -55,3 +165,100 @@ class HydrusDBModule( HydrusDBBase.DBBase ):
|
|||
raise NotImplementedError()
|
||||
|
||||
|
||||
def Repair( self, current_db_version, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
|
||||
|
||||
# core, initial tables first
|
||||
|
||||
table_generation_dict = self._GetInitialTableGenerationDict()
|
||||
|
||||
missing_table_rows = [ ( table_name, create_query_without_name ) for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items() if version_added <= current_db_version and not self._TableExists( table_name ) ]
|
||||
|
||||
if len( missing_table_rows ) > 0:
|
||||
|
||||
missing_table_names = sorted( [ missing_table_row[0] for missing_table_row in missing_table_rows ] )
|
||||
|
||||
critical_table_names = self._GetCriticalTableNames()
|
||||
|
||||
missing_critical_table_names = set( missing_table_names ).intersection( critical_table_names )
|
||||
|
||||
if len( missing_critical_table_names ) > 0:
|
||||
|
||||
message = 'Unfortunately, this database is missing one or more critical tables! This database is non functional and cannot be repaired. Please check out "install_dir/db/help my db is broke.txt" for the next steps.'
|
||||
|
||||
raise HydrusExceptions.DBAccessException( message )
|
||||
|
||||
|
||||
self._PresentMissingTablesWarningToUser( missing_table_names )
|
||||
|
||||
for ( table_name, create_query_without_name ) in missing_table_rows:
|
||||
|
||||
self._Execute( create_query_without_name.format( table_name ) )
|
||||
|
||||
cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
self._RepairRepopulateTables( missing_table_names, cursor_transaction_wrapper )
|
||||
|
||||
cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
# now indices for those tables
|
||||
|
||||
index_generation_dict = self._GetInitialIndexGenerationDict()
|
||||
|
||||
missing_index_rows = [ ( self._GenerateIndexName( table_name, columns ), table_name, columns, unique ) for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ) if version_added <= current_db_version and not self._IndexExists( table_name, columns ) ]
|
||||
|
||||
if len( missing_index_rows ):
|
||||
|
||||
self._PresentMissingIndicesWarningToUser( sorted( [ index_name for ( index_name, table_name, columns, unique ) in missing_index_rows ] ) )
|
||||
|
||||
for ( index_name, table_name, columns, unique ) in missing_index_rows:
|
||||
|
||||
self._CreateIndex( table_name, columns, unique = unique )
|
||||
|
||||
cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
|
||||
# now do service tables, same thing over again
|
||||
|
||||
table_generation_dict = self._GetServicesTableGenerationDict()
|
||||
|
||||
missing_table_rows = [ ( table_name, create_query_without_name ) for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items() if version_added <= current_db_version and not self._TableExists( table_name ) ]
|
||||
|
||||
if len( missing_table_rows ) > 0:
|
||||
|
||||
missing_table_names = sorted( [ missing_table_row[0] for missing_table_row in missing_table_rows ] )
|
||||
|
||||
self._PresentMissingTablesWarningToUser( missing_table_names )
|
||||
|
||||
for ( table_name, create_query_without_name ) in missing_table_rows:
|
||||
|
||||
self._Execute( create_query_without_name.format( table_name ) )
|
||||
|
||||
cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
self._RepairRepopulateTables( missing_table_names, cursor_transaction_wrapper )
|
||||
|
||||
cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
# now indices for those tables
|
||||
|
||||
index_generation_dict = self._GetServicesIndexGenerationDict()
|
||||
|
||||
missing_index_rows = [ ( self._GenerateIndexName( table_name, columns ), table_name, columns, unique ) for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ) if version_added <= current_db_version and not self._IndexExists( table_name, columns ) ]
|
||||
|
||||
if len( missing_index_rows ):
|
||||
|
||||
self._PresentMissingIndicesWarningToUser( sorted( [ index_name for ( index_name, table_name, columns, unique ) in missing_index_rows ] ) )
|
||||
|
||||
for ( index_name, table_name, columns, unique ) in missing_index_rows:
|
||||
|
||||
self._CreateIndex( table_name, columns, unique = unique )
|
||||
|
||||
cursor_transaction_wrapper.CommitAndBegin()
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -30,8 +30,8 @@ export_folders_running = False
|
|||
profile_mode = False
|
||||
|
||||
db_profile_min_job_time_ms = 16
|
||||
callto_profile_min_job_time_ms = 5
|
||||
server_profile_min_job_time_ms = 5
|
||||
callto_profile_min_job_time_ms = 10
|
||||
server_profile_min_job_time_ms = 10
|
||||
menu_profile_min_job_time_ms = 16
|
||||
pubsub_profile_min_job_time_ms = 5
|
||||
ui_timer_profile_min_job_time_ms = 5
|
||||
|
|
Loading…
Reference in New Issue