hydrus/include/ServerDB.py

import collections
import hashlib
import HydrusConstants as HC
import HydrusDB
import HydrusEncryption
import HydrusExceptions
import HydrusFileHandling
import HydrusNATPunch
import HydrusNetwork
import HydrusNetworking
import HydrusPaths
import HydrusSerialisable
import itertools
import json
import os
import Queue
import random
import ServerFiles
import shutil
import sqlite3
import stat
import sys
import threading
import time
import traceback
import HydrusData
import HydrusTags
import HydrusGlobals as HG

def GenerateRepositoryMasterMapTableNames( service_id ):
    
    suffix = str( service_id )
    
    hash_id_map_table_name = 'external_master.repository_hash_id_map_' + suffix
    tag_id_map_table_name = 'external_master.repository_tag_id_map_' + suffix
    
    return ( hash_id_map_table_name, tag_id_map_table_name )
    
def GenerateRepositoryFilesTableNames( service_id ):
    
    suffix = str( service_id )
    
    current_files_table_name = 'current_files_' + suffix
    deleted_files_table_name = 'deleted_files_' + suffix
    pending_files_table_name = 'pending_files_' + suffix
    petitioned_files_table_name = 'petitioned_files_' + suffix
    ip_addresses_table_name = 'ip_addresses_' + suffix
    
    return ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name )
    
def GenerateRepositoryMappingsTableNames( service_id ):
    
    suffix = str( service_id )
    
    current_mappings_table_name = 'external_mappings.current_mappings_' + suffix
    deleted_mappings_table_name = 'external_mappings.deleted_mappings_' + suffix
    pending_mappings_table_name = 'external_mappings.pending_mappings_' + suffix
    petitioned_mappings_table_name = 'external_mappings.petitioned_mappings_' + suffix
    
    return ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name )
    
def GenerateRepositoryTagParentsTableNames( service_id ):
    
    suffix = str( service_id )
    
    current_tag_parents_table_name = 'current_tag_parents_' + suffix
    deleted_tag_parents_table_name = 'deleted_tag_parents_' + suffix
    pending_tag_parents_table_name = 'pending_tag_parents_' + suffix
    petitioned_tag_parents_table_name = 'petitioned_tag_parents_' + suffix
    
    return ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name )
    
def GenerateRepositoryTagSiblingsTableNames( service_id ):
    
    suffix = str( service_id )
    
    current_tag_siblings_table_name = 'current_tag_siblings_' + suffix
    deleted_tag_siblings_table_name = 'deleted_tag_siblings_' + suffix
    pending_tag_siblings_table_name = 'pending_tag_siblings_' + suffix
    petitioned_tag_siblings_table_name = 'petitioned_tag_siblings_' + suffix
    
    return ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name )
    
def GenerateRepositoryUpdateTableName( service_id ):
    
    return 'updates_' + str( service_id )
    
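# Illustrative note (added commentary, not part of the original file): for a hypothetical
# service_id of 7, the helpers above produce per-service table names such as
# 'current_files_7', 'external_mappings.current_mappings_7' and 'updates_7'. Each repository
# service therefore gets its own set of content tables, with mappings and master data kept in
# the attached 'external_mappings' and 'external_master' databases respectively.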
class DB( HydrusDB.HydrusDB ):
    
    READ_WRITE_ACTIONS = [ 'access_key', 'immediate_content_update', 'registration_keys' ]
    
    TRANSACTION_COMMIT_TIME = 120
    
    def __init__( self, controller, db_dir, db_name, no_wal = False ):
        
        self._files_dir = os.path.join( db_dir, 'server_files' )
        
        self._ssl_cert_filename = 'server.crt'
        self._ssl_key_filename = 'server.key'
        
        self._ssl_cert_path = os.path.join( db_dir, self._ssl_cert_filename )
        self._ssl_key_path = os.path.join( db_dir, self._ssl_key_filename )
        
        self._account_type_cache = {}
        
        HydrusDB.HydrusDB.__init__( self, controller, db_dir, db_name, no_wal = no_wal )
        
    def _AddAccountType( self, service_id, account_type ):
        
        ( account_type_key, title, dictionary ) = account_type.ToDictionaryTuple()
        
        dictionary_string = dictionary.DumpToString()
        
        self._c.execute( 'INSERT INTO account_types ( service_id, account_type_key, title, dictionary_string ) VALUES ( ?, ?, ?, ? );', ( service_id, sqlite3.Binary( account_type_key ), title, dictionary_string ) )
        
        account_type_id = self._c.lastrowid
        
        return account_type_id
        
    def _AddFile( self, file_dict ):
        
        hash = file_dict[ 'hash' ]
        
        master_hash_id = self._GetMasterHashId( hash )
        
        result = self._c.execute( 'SELECT 1 FROM files_info WHERE master_hash_id = ?;', ( master_hash_id, ) ).fetchone()
        
        if result is None:
            
            size = file_dict[ 'size' ]
            mime = file_dict[ 'mime' ]
            
            if 'width' in file_dict: width = file_dict[ 'width' ]
            else: width = None
            
            if 'height' in file_dict: height = file_dict[ 'height' ]
            else: height = None
            
            if 'duration' in file_dict: duration = file_dict[ 'duration' ]
            else: duration = None
            
            if 'num_frames' in file_dict: num_frames = file_dict[ 'num_frames' ]
            else: num_frames = None
            
            if 'num_words' in file_dict: num_words = file_dict[ 'num_words' ]
            else: num_words = None
            
            source_path = file_dict[ 'path' ]
            
            dest_path = ServerFiles.GetExpectedFilePath( hash )
            
            HydrusPaths.MirrorFile( source_path, dest_path )
            
            if 'thumbnail' in file_dict:
                
                thumbnail_dest_path = ServerFiles.GetExpectedThumbnailPath( hash )
                
                thumbnail = file_dict[ 'thumbnail' ]
                
                with open( thumbnail_dest_path, 'wb' ) as f:
                    
                    f.write( thumbnail )
                    
                
            
            self._c.execute( 'INSERT OR IGNORE INTO files_info ( master_hash_id, size, mime, width, height, duration, num_frames, num_words ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? );', ( master_hash_id, size, mime, width, height, duration, num_frames, num_words ) )
            
        
        return master_hash_id
        
    def _AddService( self, service ):
        
        ( service_key, service_type, name, port, dictionary ) = service.ToTuple()
        
        dictionary_string = dictionary.DumpToString()
        
        self._c.execute( 'INSERT INTO services ( service_key, service_type, name, port, dictionary_string ) VALUES ( ?, ?, ?, ?, ? );', ( sqlite3.Binary( service_key ), service_type, name, port, dictionary_string ) )
        
        service_id = self._c.lastrowid
        
        service_admin_account_type = HydrusNetwork.AccountType.GenerateAdminAccountType( service_type )
        
        service_admin_account_type_id = self._AddAccountType( service_id, service_admin_account_type )
        
        if service_type == HC.SERVER_ADMIN:
            
            force_registration_key = 'init'
            
        else:
            
            force_registration_key = None
            
        
        [ registration_key ] = self._GenerateRegistrationKeys( service_id, 1, service_admin_account_type_id, None, force_registration_key )
        
        access_key = self._GetAccessKey( service_key, registration_key )
        
        if service_type in HC.REPOSITORIES:
            
            self._RepositoryCreate( service_id )
            
        
        return access_key
        
    def _AddSession( self, session_key, service_key, account_key, expires ):
        
        service_id = self._GetServiceId( service_key )
        
        account_id = self._GetAccountId( account_key )
        
        self._c.execute( 'INSERT INTO sessions ( session_key, service_id, account_id, expires ) VALUES ( ?, ?, ?, ? );', ( sqlite3.Binary( session_key ), service_id, account_id, expires ) )
        
    def _Analyze( self, stop_time ):
        
        stale_time_delta = 30 * 86400
        
        existing_names_to_timestamps = dict( self._c.execute( 'SELECT name, timestamp FROM analyze_timestamps;' ).fetchall() )
        
        db_names = [ name for ( index, name, path ) in self._c.execute( 'PRAGMA database_list;' ) if name not in ( 'mem', 'temp' ) ]
        
        all_names = set()
        
        for db_name in db_names:
            
            all_names.update( ( name for ( name, ) in self._c.execute( 'SELECT name FROM ' + db_name + '.sqlite_master WHERE type = ?;', ( 'table', ) ) ) )
            
        
        all_names.discard( 'sqlite_stat1' )
        
        names_to_analyze = [ name for name in all_names if name not in existing_names_to_timestamps or HydrusData.TimeHasPassed( existing_names_to_timestamps[ name ] + stale_time_delta ) ]
        
        random.shuffle( names_to_analyze )
        
        if len( names_to_analyze ) > 0:
            
            HG.server_busy = True
            
            for name in names_to_analyze:
                
                started = HydrusData.GetNowPrecise()
                
                self._c.execute( 'ANALYZE ' + name + ';' )
                
                self._c.execute( 'DELETE FROM analyze_timestamps WHERE name = ?;', ( name, ) )
                self._c.execute( 'INSERT OR IGNORE INTO analyze_timestamps ( name, timestamp ) VALUES ( ?, ? );', ( name, HydrusData.GetNow() ) )
                
                time_took = HydrusData.GetNowPrecise() - started
                
                if time_took > 1:
                    
                    HydrusData.Print( 'Analyzed ' + name + ' in ' + HydrusData.TimeDeltaToPrettyTimeDelta( time_took ) )
                    
                
                if HydrusData.TimeHasPassed( stop_time ):
                    
                    break
                    
                
            
            self._c.execute( 'ANALYZE sqlite_master;' ) # this reloads the current stats into the query planner
            
            HG.server_busy = False
            
        
    def _Backup( self ):
        
        self._CloseDBCursor()
        
        HG.server_busy = True
        
        try:
            
            stop_time = HydrusData.GetNow() + 300
            
            for filename in self._db_filenames.values():
                
                db_path = os.path.join( self._db_dir, filename )
                
                if HydrusDB.CanVacuum( db_path, stop_time ):
                    
                    HydrusData.Print( 'backing up: vacuuming ' + filename )
                    
                    HydrusDB.VacuumDB( db_path )
                    
                
            
            backup_path = os.path.join( self._db_dir, 'server_backup' )
            
            HydrusPaths.MakeSureDirectoryExists( backup_path )
            
            for filename in self._db_filenames.values():
                
                HydrusData.Print( 'backing up: copying ' + filename )
                
                source = os.path.join( self._db_dir, filename )
                dest = os.path.join( backup_path, filename )
                
                HydrusPaths.MirrorFile( source, dest )
                
            
            for filename in [ self._ssl_cert_filename, self._ssl_key_filename ]:
                
                HydrusData.Print( 'backing up: copying ' + filename )
                
                source = os.path.join( self._db_dir, filename )
                dest = os.path.join( backup_path, filename )
                
                HydrusPaths.MirrorFile( source, dest )
                
            
            HydrusData.Print( 'backing up: copying files' )
            
            HydrusPaths.MirrorTree( self._files_dir, os.path.join( backup_path, 'server_files' ) )
            
        finally:
            
            HG.server_busy = False
            
        
        self._InitDBCursor()
        
        HydrusData.Print( 'backing up: done!' )
        
    def _CreateDB( self ):
        
        HydrusPaths.MakeSureDirectoryExists( self._files_dir )
        
        for prefix in HydrusData.IterateHexPrefixes():
            
            new_dir = os.path.join( self._files_dir, prefix )
            
            HydrusPaths.MakeSureDirectoryExists( new_dir )
            
        
        self._c.execute( 'CREATE TABLE services ( service_id INTEGER PRIMARY KEY, service_key BLOB_BYTES, service_type INTEGER, name TEXT, port INTEGER, dictionary_string TEXT );' )
        
        self._c.execute( 'CREATE TABLE accounts ( account_id INTEGER PRIMARY KEY, service_id INTEGER, account_key BLOB_BYTES, hashed_access_key BLOB_BYTES, account_type_id INTEGER, created INTEGER, expires INTEGER, dictionary_string TEXT );' )
        self._c.execute( 'CREATE UNIQUE INDEX accounts_account_key_index ON accounts ( account_key );' )
        self._c.execute( 'CREATE UNIQUE INDEX accounts_hashed_access_key_index ON accounts ( hashed_access_key );' )
        
        self._c.execute( 'CREATE TABLE account_scores ( service_id INTEGER, account_id INTEGER, score_type INTEGER, score INTEGER, PRIMARY KEY ( service_id, account_id, score_type ) );' )
        
        self._c.execute( 'CREATE TABLE account_types ( account_type_id INTEGER PRIMARY KEY, service_id INTEGER, account_type_key BLOB_BYTES, title TEXT, dictionary_string TEXT );' )
        
        self._c.execute( 'CREATE TABLE analyze_timestamps ( name TEXT, timestamp INTEGER );' )
        
        self._c.execute( 'CREATE TABLE files_info ( master_hash_id INTEGER PRIMARY KEY, size INTEGER, mime INTEGER, width INTEGER, height INTEGER, duration INTEGER, num_frames INTEGER, num_words INTEGER );' )
        
        self._c.execute( 'CREATE TABLE reasons ( reason_id INTEGER PRIMARY KEY, reason TEXT );' )
        self._c.execute( 'CREATE UNIQUE INDEX reasons_reason_index ON reasons ( reason );' )
        
        self._c.execute( 'CREATE TABLE registration_keys ( registration_key BLOB_BYTES PRIMARY KEY, service_id INTEGER, account_type_id INTEGER, account_key BLOB_BYTES, access_key BLOB_BYTES UNIQUE, expires INTEGER );' )
        
        self._c.execute( 'CREATE TABLE sessions ( session_key BLOB_BYTES, service_id INTEGER, account_id INTEGER, expires INTEGER );' )
        
        self._c.execute( 'CREATE TABLE version ( version INTEGER, year INTEGER, month INTEGER );' )
        
        # master
        
        self._c.execute( 'CREATE TABLE IF NOT EXISTS external_master.hashes ( master_hash_id INTEGER PRIMARY KEY, hash BLOB_BYTES UNIQUE );' )
        
        self._c.execute( 'CREATE TABLE IF NOT EXISTS external_master.tags ( master_tag_id INTEGER PRIMARY KEY, tag TEXT UNIQUE );' )
        
        # inserts
        
        current_time_struct = time.localtime()
        
        ( current_year, current_month ) = ( current_time_struct.tm_year, current_time_struct.tm_mon )
        
        self._c.execute( 'INSERT INTO version ( version, year, month ) VALUES ( ?, ?, ? );', ( HC.SOFTWARE_VERSION, current_year, current_month ) )
        
        # create ssl keys
        
        HydrusEncryption.GenerateOpenSSLCertAndKeyFile( self._ssl_cert_path, self._ssl_key_path )
        
        # set up server admin
        
        admin_service = HydrusNetwork.GenerateService( HC.SERVER_ADMIN_KEY, HC.SERVER_ADMIN, 'server admin', HC.DEFAULT_SERVER_ADMIN_PORT )
        
        self._AddService( admin_service ) # this sets up the admin account and a registration key by itself
        
    def _DeleteAllAccountContributions( self, service_key, account, subject_accounts, superban ):
        
        account.CheckPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_OVERRULE )
        
        service_id = self._GetServiceId( service_key )
        
        subject_account_keys = [ subject_account.GetAccountKey() for subject_account in subject_accounts ]
        
        subject_account_ids = [ self._GetAccountId( subject_account_key ) for subject_account_key in subject_account_keys ]
        
        self._RepositoryBan( service_id, subject_account_ids )
        
        if superban:
            
            account_key = account.GetAccountKey()
            
            account_id = self._GetAccountId( account_key )
            
            self._RepositorySuperBan( service_id, account_id, subject_account_ids )
            
        
    def _DeleteOrphans( self ):
        
        # make a table for files
        # make a table for thumbnails
        
        # populate both tables with what you have in your hdd
        # if the filename isn't even a hash, schedule it for immediate deletion instead
        
        # delete from the tables based on what is in current and pending repo file tables
        # delete from the file tables based on what is in update tables
        
        # delete whatever is left
        
        # might want to split this up into 256 jobs--depends on how fast its bits run
        # might also want to set server_busy, if it isn't already
        
        # also think about how often it runs--maybe only once a month is appropriate
        
        return # return to this to fix it for new system
        
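        # A rough comment-only sketch of the plan above (added commentary and an assumption,
        # not the original implementation):
        #
        #   disk_hashes       = hashes parsed from the filenames found under self._files_dir
        #   referenced_hashes = hashes referenced by each service's current and pending file
        #                       tables, plus the hashes of stored update files
        #   orphan_hashes     = disk_hashes - referenced_hashes
        #
        #   then delete the file and thumbnail for every orphan_hash, working in small batches
        #   (and with HG.server_busy set) so the server stays responsive.
        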
    def _DeleteService( self, service_key ):
        
        # assume foreign keys is on here
        
        service_id = self._GetServiceId( service_key )
        service_type = self._GetServiceType( service_id )
        
        self._c.execute( 'DELETE FROM services WHERE service_id = ?;', ( service_id, ) )
        
        self._c.execute( 'DELETE FROM accounts WHERE service_id = ?;', ( service_id, ) )
        self._c.execute( 'DELETE FROM account_types WHERE service_id = ?;', ( service_id, ) )
        self._c.execute( 'DELETE FROM account_scores WHERE service_id = ?;', ( service_id, ) )
        self._c.execute( 'DELETE FROM registration_keys WHERE service_id = ?;', ( service_id, ) )
        self._c.execute( 'DELETE FROM sessions WHERE service_id = ?;', ( service_id, ) )
        
        if service_type in HC.REPOSITORIES:
            
            self._RepositoryDrop( service_id )
            
        
    def _GenerateRegistrationKeysFromAccount( self, service_key, account, num, account_type_key, expires ):
        
        account.CheckPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_CREATE )
        
        service_id = self._GetServiceId( service_key )
        
        account_type_id = self._GetAccountTypeId( service_id, account_type_key )
        
        return self._GenerateRegistrationKeys( service_id, num, account_type_id, expires )
        
    def _GenerateRegistrationKeys( self, service_id, num, account_type_id, expires, force_registration_key = None ):
        
        if force_registration_key is None:
            
            keys = [ ( os.urandom( HC.HYDRUS_KEY_LENGTH ), os.urandom( HC.HYDRUS_KEY_LENGTH ), os.urandom( HC.HYDRUS_KEY_LENGTH ) ) for i in range( num ) ]
            
        else:
            
            keys = [ ( force_registration_key, os.urandom( HC.HYDRUS_KEY_LENGTH ), os.urandom( HC.HYDRUS_KEY_LENGTH ) ) for i in range( num ) ]
            
        
        self._c.executemany( 'INSERT INTO registration_keys ( registration_key, service_id, account_type_id, account_key, access_key, expires ) VALUES ( ?, ?, ?, ?, ?, ? );', [ ( sqlite3.Binary( hashlib.sha256( registration_key ).digest() ), service_id, account_type_id, sqlite3.Binary( account_key ), sqlite3.Binary( access_key ), expires ) for ( registration_key, account_key, access_key ) in keys ] )
        
        return [ registration_key for ( registration_key, account_key, access_key ) in keys ]
        
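    # Descriptive note (added commentary, not part of the original file): each generated tuple
    # above is ( registration_key, account_key, access_key ). Only the SHA-256 digest of the
    # registration_key is stored in the registration_keys table, so the database never holds
    # the raw key that is handed out; the raw registration keys are what get returned to the
    # caller.
    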
    def _GetAccessKey( self, service_key, registration_key ):
        
        service_id = self._GetServiceId( service_key )
        
        # we generate a new access_key every time this is requested so that no one with access to the registration key can peek at the access_key before the legit user fetches it for real
        # the reg_key is deleted when the last-requested access_key is used to create a session, which calls getaccountkeyfromaccesskey
        
        registration_key_sha256 = hashlib.sha256( registration_key ).digest()
        
        result = self._c.execute( 'SELECT 1 FROM registration_keys WHERE service_id = ? AND registration_key = ?;', ( service_id, sqlite3.Binary( registration_key_sha256 ) ) ).fetchone()
        
        if result is None:
            
            raise HydrusExceptions.ForbiddenException( 'The service could not find that registration key in its database.' )
            
        
        new_access_key = os.urandom( HC.HYDRUS_KEY_LENGTH )
        
        self._c.execute( 'UPDATE registration_keys SET access_key = ? WHERE service_id = ? AND registration_key = ?;', ( sqlite3.Binary( new_access_key ), service_id, sqlite3.Binary( registration_key_sha256 ) ) )
        
        return new_access_key
        
    def _GetAccount( self, service_id, account_id ):
        
        ( account_key, account_type_id, created, expires, dictionary_string ) = self._c.execute( 'SELECT account_key, account_type_id, created, expires, dictionary_string FROM accounts WHERE service_id = ? AND account_id = ?;', ( service_id, account_id ) ).fetchone()
        
        account_type = self._GetAccountTypeFromCache( service_id, account_type_id )
        
        dictionary = HydrusSerialisable.CreateFromString( dictionary_string )
        
        return HydrusNetwork.Account.GenerateAccountFromTuple( ( account_key, account_type, created, expires, dictionary ) )
        
    def _GetAccountFromAccountKey( self, service_key, account_key ):
        
        service_id = self._GetServiceId( service_key )
        
        account_id = self._GetAccountId( account_key )
        
        return self._GetAccount( service_id, account_id )
        
    def _GetAccountFromAccountKeyAdmin( self, admin_account, service_key, account_key ):
        
        # check admin account
        
        service_id = self._GetServiceId( service_key )
        
        account_id = self._GetAccountId( account_key )
        
        return self._GetAccount( service_id, account_id )
        
    def _GetAccountKeyFromAccessKey( self, service_key, access_key ):
        
        service_id = self._GetServiceId( service_key )
        
        result = self._c.execute( 'SELECT account_key FROM accounts WHERE service_id = ? AND hashed_access_key = ?;', ( service_id, sqlite3.Binary( hashlib.sha256( access_key ).digest() ) ) ).fetchone()
        
        if result is None:
            
            # we do not delete the registration_key (and hence the raw unhashed access_key)
            # until the first attempt to create a session to make sure the user
            # has the access_key saved
            
            try:
                
                ( account_type_id, account_key, expires ) = self._c.execute( 'SELECT account_type_id, account_key, expires FROM registration_keys WHERE access_key = ?;', ( sqlite3.Binary( access_key ), ) ).fetchone()
                
            except:
                
                raise HydrusExceptions.ForbiddenException( 'The service could not find that account in its database.' )
                
            
            self._c.execute( 'DELETE FROM registration_keys WHERE access_key = ?;', ( sqlite3.Binary( access_key ), ) )
            
            #
            
            hashed_access_key = hashlib.sha256( access_key ).digest()
            
            account_type = self._GetAccountTypeFromCache( service_id, account_type_id )
            
            created = HydrusData.GetNow()
            
            account = HydrusNetwork.Account( account_key, account_type, created, expires )
            
            ( account_key, account_type, created, expires, dictionary ) = HydrusNetwork.Account.GenerateTupleFromAccount( account )
            
            dictionary_string = dictionary.DumpToString()
            
            self._c.execute( 'INSERT INTO accounts ( service_id, account_key, hashed_access_key, account_type_id, created, expires, dictionary_string ) VALUES ( ?, ?, ?, ?, ?, ?, ? );', ( service_id, sqlite3.Binary( account_key ), sqlite3.Binary( hashed_access_key ), account_type_id, created, expires, dictionary_string ) )
            
        else:
            
            ( account_key, ) = result
            
        
        return account_key
        
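    # Added note (not original commentary): the first time an access key is presented here, the
    # matching registration_keys row is consumed and a permanent accounts row is created; from
    # then on the service only stores the SHA-256 of the access key, in hashed_access_key.
    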
    def _GetAccountKeyFromAccountId( self, account_id ):
        
        try:
            
            ( account_key, ) = self._c.execute( 'SELECT account_key FROM accounts WHERE account_id = ?;', ( account_id, ) ).fetchone()
            
        except:
            
            raise HydrusExceptions.ForbiddenException( 'The service could not find that account_id in its database.' )
            
        
        return account_key
        
    def _GetAccountFromContent( self, admin_account, service_key, content ):
        
        # check admin account
        
        service_id = self._GetServiceId( service_key )
        
        content_type = content.GetContentType()
        content_data = content.GetContentData()
        
        if content_type == HC.CONTENT_TYPE_FILES:
            
            hash = content_data[0]
            
            if not self._MasterHashExists( hash ):
                
                raise HydrusExceptions.NotFoundException( 'The service could not find that hash in its database.' )
                
            
            master_hash_id = self._GetMasterHashId( hash )
            
            if not self._RepositoryServiceHashIdExists( service_id, master_hash_id ):
                
                raise HydrusExceptions.NotFoundException( 'The service could not find that service hash in its database.' )
                
            
            service_hash_id = self._RepositoryGetServiceHashId( service_id, master_hash_id )
            
            ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
            
            result = self._c.execute( 'SELECT account_id FROM ' + current_files_table_name + ' WHERE service_hash_id = ?;', ( service_hash_id, ) ).fetchone()
            
            if result is None:
                
                result = self._c.execute( 'SELECT account_id FROM ' + deleted_files_table_name + ' WHERE service_hash_id = ?;', ( service_hash_id, ) ).fetchone()
                
                if result is None:
                    
                    raise HydrusExceptions.NotFoundException( 'The service could not find that hash in its database.' )
                    
                
            
        elif content_type == HC.CONTENT_TYPE_MAPPING:
            
            ( tag, hash ) = content_data
            
            if not self._MasterHashExists( hash ):
                
                raise HydrusExceptions.NotFoundException( 'The service could not find that hash in its database.' )
                
            
            master_hash_id = self._GetMasterHashId( hash )
            
            if not self._RepositoryServiceHashIdExists( service_id, master_hash_id ):
                
                raise HydrusExceptions.NotFoundException( 'The service could not find that service hash in its database.' )
                
            
            service_hash_id = self._RepositoryGetServiceHashId( service_id, master_hash_id )
            
            if not self._MasterTagExists( tag ):
                
                raise HydrusExceptions.NotFoundException( 'The service could not find that tag in its database.' )
                
            
            master_tag_id = self._GetMasterTagId( tag )
            
            if not self._RepositoryServiceTagIdExists( service_id, master_tag_id ):
                
                raise HydrusExceptions.NotFoundException( 'The service could not find that service tag in its database.' )
                
            
            service_tag_id = self._RepositoryGetServiceTagId( service_id, master_tag_id )
            
            ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
            
            result = self._c.execute( 'SELECT account_id FROM ' + current_mappings_table_name + ' WHERE service_tag_id = ? AND service_hash_id = ?;', ( service_tag_id, service_hash_id ) ).fetchone()
            
            if result is None:
                
                result = self._c.execute( 'SELECT account_id FROM ' + deleted_mappings_table_name + ' WHERE service_tag_id = ? AND service_hash_id = ?;', ( service_tag_id, service_hash_id ) ).fetchone()
                
                if result is None:
                    
                    raise HydrusExceptions.NotFoundException( 'The service could not find that mapping in its database.' )
                    
                
            
        else:
            
            raise HydrusExceptions.NotFoundException( 'The service could not understand the submitted content.' )
            
        
        ( account_id, ) = result
        
        account = self._GetAccount( service_id, account_id )
        
        return account
        
    def _GetAccountId( self, account_key ):
        
        result = self._c.execute( 'SELECT account_id FROM accounts WHERE account_key = ?;', ( sqlite3.Binary( account_key ), ) ).fetchone()
        
        if result is None:
            
            raise HydrusExceptions.ForbiddenException( 'The service could not find that account key in its database.' )
            
        
        ( account_id, ) = result
        
        return account_id
        
    def _GetAccountInfo( self, service_key, account, subject_account ):
        
        account.CheckPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_OVERRULE )
        
        service_id = self._GetServiceId( service_key )
        
        subject_account_key = subject_account.GetAccountKey()
        
        subject_account_id = self._GetAccountId( subject_account_key )
        
        service_type = self._GetServiceType( service_id )
        
        if service_type in HC.REPOSITORIES:
            
            account_info = self._RepositoryGetAccountInfo( service_id, subject_account_id )
            
        else:
            
            account_info = {}
            
        
        return account_info
        
    def _GetAccountTypeId( self, service_id, account_type_key ):
        
        result = self._c.execute( 'SELECT account_type_id FROM account_types WHERE service_id = ? AND account_type_key = ?;', ( service_id, sqlite3.Binary( account_type_key ) ) ).fetchone()
        
        if result is None:
            
            raise HydrusExceptions.NotFoundException( 'Could not find that account type in db for this service.' )
            
        
        ( account_type_id, ) = result
        
        return account_type_id
        
    def _GetAccountTypes( self, service_key, account ):
        
        account.CheckPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_CREATE )
        
        service_id = self._GetServiceId( service_key )
        
        return self._GetAccountTypesFromCache( service_id )
        
    def _GetAccountTypeFromCache( self, service_id, account_type_id ):
        
        if service_id not in self._account_type_cache:
            
            self._RefreshAccountTypeCache( service_id )
            
        
        return self._account_type_cache[ service_id ][ account_type_id ]
        
    def _GetAccountTypesFromCache( self, service_id ):
        
        if service_id not in self._account_type_cache:
            
            self._RefreshAccountTypeCache( service_id )
            
        
        return self._account_type_cache[ service_id ].values()
        
    def _GetFile( self, hash ):
        
        path = ServerFiles.GetFilePath( hash )
        
        with open( path, 'rb' ) as f:
            
            data = f.read()
            
        
        return data
        
    def _GetHash( self, master_hash_id ):
        
        result = self._c.execute( 'SELECT hash FROM hashes WHERE master_hash_id = ?;', ( master_hash_id, ) ).fetchone()
        
        if result is None:
            
            raise Exception( 'File hash error in database' )
            
        
        ( hash, ) = result
        
        return hash
        
    def _GetHashes( self, master_hash_ids ):
        
        select_statement = 'SELECT hash FROM hashes WHERE master_hash_id IN %s;'
        
        return [ hash for ( hash, ) in self._SelectFromList( select_statement, master_hash_ids ) ]
        
    def _GetMasterHashId( self, hash ):
        
        result = self._c.execute( 'SELECT master_hash_id FROM hashes WHERE hash = ?;', ( sqlite3.Binary( hash ), ) ).fetchone()
        
        if result is None:
            
            self._c.execute( 'INSERT INTO hashes ( hash ) VALUES ( ? );', ( sqlite3.Binary( hash ), ) )
            
            master_hash_id = self._c.lastrowid
            
            return master_hash_id
            
        else:
            
            ( master_hash_id, ) = result
            
            return master_hash_id
            
        
    def _GetMasterHashIds( self, hashes ):
        
        master_hash_ids = set()
        hashes_not_in_db = set()
        
        for hash in hashes:
            
            if hash is None:
                
                continue
                
            
            result = self._c.execute( 'SELECT master_hash_id FROM hashes WHERE hash = ?;', ( sqlite3.Binary( hash ), ) ).fetchone()
            
            if result is None:
                
                hashes_not_in_db.add( hash )
                
            else:
                
                ( master_hash_id, ) = result
                
                master_hash_ids.add( master_hash_id )
                
            
        
        if len( hashes_not_in_db ) > 0:
            
            self._c.executemany( 'INSERT INTO hashes ( hash ) VALUES ( ? );', ( ( sqlite3.Binary( hash ), ) for hash in hashes_not_in_db ) )
            
            for hash in hashes_not_in_db:
                
                ( master_hash_id, ) = self._c.execute( 'SELECT master_hash_id FROM hashes WHERE hash = ?;', ( sqlite3.Binary( hash ), ) ).fetchone()
                
                master_hash_ids.add( master_hash_id )
                
            
        
        return master_hash_ids
        
    def _GetMasterTagId( self, tag ):
        
        tag = HydrusTags.CleanTag( tag )
        
        HydrusTags.CheckTagNotEmpty( tag )
        
        result = self._c.execute( 'SELECT master_tag_id FROM tags WHERE tag = ?;', ( tag, ) ).fetchone()
        
        if result is None:
            
            self._c.execute( 'INSERT INTO tags ( tag ) VALUES ( ? );', ( tag, ) )
            
            master_tag_id = self._c.lastrowid
            
            return master_tag_id
            
        else:
            
            ( master_tag_id, ) = result
            
            return master_tag_id
            
        
    def _GetOptions( self, service_key ):
        
        # note: the services table created in _CreateDB has no 'options' column (it stores
        # dictionary_string), so this method looks stale
        
        service_id = self._GetServiceId( service_key )
        
        ( options, ) = self._c.execute( 'SELECT options FROM services WHERE service_id = ?;', ( service_id, ) ).fetchone()
        
        return options
        
    def _GetReason( self, reason_id ):
        
        result = self._c.execute( 'SELECT reason FROM reasons WHERE reason_id = ?;', ( reason_id, ) ).fetchone()
        
        if result is None:
            
            raise Exception( 'Reason error in database' )
            
        
        ( reason, ) = result
        
        return reason
        
    def _GetReasonId( self, reason ):
        
        result = self._c.execute( 'SELECT reason_id FROM reasons WHERE reason = ?;', ( reason, ) ).fetchone()
        
        if result is None:
            
            self._c.execute( 'INSERT INTO reasons ( reason ) VALUES ( ? );', ( reason, ) )
            
            reason_id = self._c.lastrowid
            
            return reason_id
            
        else:
            
            ( reason_id, ) = result
            
            return reason_id
            
        
    def _GetServiceId( self, service_key ):
        
        result = self._c.execute( 'SELECT service_id FROM services WHERE service_key = ?;', ( sqlite3.Binary( service_key ), ) ).fetchone()
        
        if result is None:
            
            raise HydrusExceptions.DataMissing( 'Service id error in database' )
            
        
        ( service_id, ) = result
        
        return service_id
        
    def _GetServiceIds( self, limited_types = HC.ALL_SERVICES ):
        
        return [ service_id for ( service_id, ) in self._c.execute( 'SELECT service_id FROM services WHERE service_type IN ' + HydrusData.SplayListForDB( limited_types ) + ';' ) ]
        
    def _GetServiceKey( self, service_id ):
        
        ( service_key, ) = self._c.execute( 'SELECT service_key FROM services WHERE service_id = ?;', ( service_id, ) ).fetchone()
        
        return service_key
        
    def _GetServiceKeys( self, limited_types = HC.ALL_SERVICES ):
        
        return [ service_key for ( service_key, ) in self._c.execute( 'SELECT service_key FROM services WHERE service_type IN ' + HydrusData.SplayListForDB( limited_types ) + ';' ) ]
        
    def _GetServiceType( self, service_id ):
        
        result = self._c.execute( 'SELECT service_type FROM services WHERE service_id = ?;', ( service_id, ) ).fetchone()
        
        if result is None:
            
            raise Exception( 'Service id error in database' )
            
        
        ( service_type, ) = result
        
        return service_type
        
    def _GetServices( self, limited_types = HC.ALL_SERVICES ):
        
        services = []
        
        service_info = self._c.execute( 'SELECT service_key, service_type, name, port, dictionary_string FROM services WHERE service_type IN ' + HydrusData.SplayListForDB( limited_types ) + ';' ).fetchall()
        
        for ( service_key, service_type, name, port, dictionary_string ) in service_info:
            
            dictionary = HydrusSerialisable.CreateFromString( dictionary_string )
            
            service = HydrusNetwork.GenerateService( service_key, service_type, name, port, dictionary )
            
            services.append( service )
            
        
        return services
        
    def _GetServicesFromAccount( self, account ):
        
        account.CheckPermission( HC.CONTENT_TYPE_SERVICES, HC.PERMISSION_ACTION_OVERRULE )
        
        return self._GetServices()
        
    def _GetSessions( self, service_key = None ):
        
        now = HydrusData.GetNow()
        
        self._c.execute( 'DELETE FROM sessions WHERE ? > expires;', ( now, ) )
        
        sessions = []
        
        if service_key is None:
            
            results = self._c.execute( 'SELECT session_key, service_id, account_id, expires FROM sessions;' ).fetchall()
            
        else:
            
            service_id = self._GetServiceId( service_key )
            
            results = self._c.execute( 'SELECT session_key, service_id, account_id, expires FROM sessions WHERE service_id = ?;', ( service_id, ) ).fetchall()
            
        
        service_ids_to_service_keys = {}
        
        account_ids_to_accounts = {}
        
        for ( session_key, service_id, account_id, expires ) in results:
            
            if service_id not in service_ids_to_service_keys:
                
                service_ids_to_service_keys[ service_id ] = self._GetServiceKey( service_id )
                
            
            service_key = service_ids_to_service_keys[ service_id ]
            
            if account_id not in account_ids_to_accounts:
                
                account = self._GetAccount( service_id, account_id )
                
                account_ids_to_accounts[ account_id ] = account
                
            
            account = account_ids_to_accounts[ account_id ]
            
            sessions.append( ( session_key, service_key, account, expires ) )
            
        
        return sessions
        
    def _GetTag( self, master_tag_id ):
        
        result = self._c.execute( 'SELECT tag FROM tags WHERE master_tag_id = ?;', ( master_tag_id, ) ).fetchone()
        
        if result is None:
            
            raise Exception( 'Tag error in database' )
            
        
        ( tag, ) = result
        
        return tag
        
    def _GetThumbnail( self, hash ):
        
        path = ServerFiles.GetThumbnailPath( hash )
        
        with open( path, 'rb' ) as f:
            
            thumbnail = f.read()
            
        
        return thumbnail
        
    def _InitCaches( self ):
        
        self._over_monthly_data = False
        self._services_over_monthly_data = set()
        
    def _InitExternalDatabases( self ):
        
        self._db_filenames[ 'external_mappings' ] = 'server.mappings.db'
        self._db_filenames[ 'external_master' ] = 'server.master.db'
        
    def _ManageDBError( self, job, e ):
        
        if isinstance( e, HydrusExceptions.NetworkException ):
            
            job.PutResult( e )
            
        else:
            
            ( exception_type, value, tb ) = sys.exc_info()
            
            new_e = type( e )( os.linesep.join( traceback.format_exception( exception_type, value, tb ) ) )
            
            job.PutResult( new_e )
            
        
    def _MasterHashExists( self, hash ):
        
        result = self._c.execute( 'SELECT master_hash_id FROM hashes WHERE hash = ?;', ( sqlite3.Binary( hash ), ) ).fetchone()
        
        if result is None:
            
            return False
            
        else:
            
            return True
            
        
    def _MasterTagExists( self, tag ):
        
        result = self._c.execute( 'SELECT master_tag_id FROM tags WHERE tag = ?;', ( tag, ) ).fetchone()
        
        if result is None:
            
            return False
            
        else:
            
            return True
            
        
    def _ModifyAccounts( self, service_key, account, subject_accounts ):
        
        account.CheckPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_OVERRULE )
        
        service_id = self._GetServiceId( service_key )
        
        self._SaveAccounts( service_id, subject_accounts )
        
        subject_account_keys = [ subject_account.GetAccountKey() for subject_account in subject_accounts ]
        
        self.pub_after_job( 'update_session_accounts', service_key, subject_account_keys )
        
    def _ModifyAccountTypes( self, service_key, account, account_types, deletee_account_type_keys_to_new_account_type_keys ):
        
        account.CheckPermission( HC.CONTENT_TYPE_ACCOUNT_TYPES, HC.PERMISSION_ACTION_OVERRULE )
        
        service_id = self._GetServiceId( service_key )
        
        current_account_type_keys = { account_type_key for ( account_type_key, ) in self._c.execute( 'SELECT account_type_key FROM account_types WHERE service_id = ?;', ( service_id, ) ) }
        
        future_account_type_keys = { account_type.GetAccountTypeKey() for account_type in account_types }
        
        for account_type in account_types:
            
            account_type_key = account_type.GetAccountTypeKey()
            
            if account_type_key not in current_account_type_keys:
                
                self._AddAccountType( service_id, account_type )
                
            else:
                
                ( account_type_key, title, dictionary ) = account_type.ToDictionaryTuple()
                
                dictionary_string = dictionary.DumpToString()
                
                account_type_id = self._GetAccountTypeId( service_id, account_type_key )
                
                self._c.execute( 'UPDATE account_types SET title = ?, dictionary_string = ? WHERE service_id = ? AND account_type_id = ?;', ( title, dictionary_string, service_id, account_type_id ) )
                
            
        
        for account_type_key in current_account_type_keys:
            
            if account_type_key not in future_account_type_keys:
                
                account_type_id = self._GetAccountTypeId( service_id, account_type_key )
                
                if account_type_key not in deletee_account_type_keys_to_new_account_type_keys:
                    
                    raise HydrusExceptions.NotFoundException( 'Was missing a replacement account_type_key.' )
                    
                
                new_account_type_key = deletee_account_type_keys_to_new_account_type_keys[ account_type_key ]
                
                new_account_type_id = self._GetAccountTypeId( service_id, new_account_type_key )
                
                self._c.execute( 'UPDATE accounts SET account_type_id = ? WHERE service_id = ? AND account_type_id = ?;', ( new_account_type_id, service_id, account_type_id ) )
                self._c.execute( 'UPDATE registration_keys SET account_type_id = ? WHERE service_id = ? AND account_type_id = ?;', ( new_account_type_id, service_id, account_type_id ) )
                
                self._c.execute( 'DELETE FROM account_types WHERE service_id = ? AND account_type_id = ?;', ( service_id, account_type_id ) )
                
            
        
        self._RefreshAccountTypeCache( service_id )
        
        self.pub_after_job( 'update_all_session_accounts', service_key )
        
    def _ModifyServices( self, account, services ):
        
        account.CheckPermission( HC.CONTENT_TYPE_SERVICES, HC.PERMISSION_ACTION_OVERRULE )
        
        self._Commit()
        
        self._c.execute( 'PRAGMA foreign_keys = ON;' )
        
        self._BeginImmediate()
        
        current_service_keys = { service_key for ( service_key, ) in self._c.execute( 'SELECT service_key FROM services;' ) }
        
        future_service_keys = { service.GetServiceKey() for service in services }
        
        for service_key in current_service_keys:
            
            if service_key not in future_service_keys:
                
                self._DeleteService( service_key )
                
            
        
        service_keys_to_access_keys = {}
        
        for service in services:
            
            service_key = service.GetServiceKey()
            
            if service_key in current_service_keys:
                
                ( service_key, service_type, name, port, dictionary ) = service.ToTuple()
                
                service_id = self._GetServiceId( service_key )
                
                dictionary_string = dictionary.DumpToString()
                
                self._c.execute( 'UPDATE services SET name = ?, port = ?, dictionary_string = ? WHERE service_id = ?;', ( name, port, dictionary_string, service_id ) )
                
            else:
                
                access_key = self._AddService( service )
                
                service_keys_to_access_keys[ service_key ] = access_key
                
            
        
        self._CloseDBCursor()
        
        self._InitDBCursor()
        
        return service_keys_to_access_keys
        
    def _Read( self, action, *args, **kwargs ):
        
        if action == 'access_key': result = self._GetAccessKey( *args, **kwargs )
        elif action == 'account': result = self._GetAccountFromAccountKey( *args, **kwargs )
        elif action == 'account_info': result = self._GetAccountInfo( *args, **kwargs )
        elif action == 'account_key_from_access_key': result = self._GetAccountKeyFromAccessKey( *args, **kwargs )
        elif action == 'account_types': result = self._GetAccountTypes( *args, **kwargs )
        elif action == 'immediate_update': result = self._RepositoryGenerateImmediateUpdate( *args, **kwargs )
        elif action == 'ip': result = self._RepositoryGetIPTimestamp( *args, **kwargs )
        elif action == 'num_petitions': result = self._RepositoryGetNumPetitions( *args, **kwargs )
        elif action == 'petition': result = self._RepositoryGetPetition( *args, **kwargs )
        elif action == 'registration_keys': result = self._GenerateRegistrationKeysFromAccount( *args, **kwargs )
        elif action == 'service_has_file': result = self._RepositoryHasFile( *args, **kwargs )
        elif action == 'service_keys': result = self._GetServiceKeys( *args, **kwargs )
        elif action == 'services': result = self._GetServices( *args, **kwargs )
        elif action == 'services_from_account': result = self._GetServicesFromAccount( *args, **kwargs )
        elif action == 'sessions': result = self._GetSessions( *args, **kwargs )
        elif action == 'verify_access_key': result = self._VerifyAccessKey( *args, **kwargs )
        else: raise Exception( 'db received an unknown read command: ' + action )
        
        return result
        
    def _RefreshAccountTypeCache( self, service_id ):
        
        self._account_type_cache[ service_id ] = {}
        
        account_type_tuples = self._c.execute( 'SELECT account_type_id, account_type_key, title, dictionary_string FROM account_types WHERE service_id = ?;', ( service_id, ) ).fetchall()
        
        for ( account_type_id, account_type_key, title, dictionary_string ) in account_type_tuples:
            
            dictionary = HydrusSerialisable.CreateFromString( dictionary_string )
            
            account_type = HydrusNetwork.AccountType( account_type_key, title, dictionary )
            
            self._account_type_cache[ service_id ][ account_type_id ] = account_type
            
        
    def _RepositoryAddFile( self, service_id, account_id, file_dict, overwrite_deleted ):
        
        master_hash_id = self._AddFile( file_dict )
        
        service_hash_id = self._RepositoryGetServiceHashId( service_id, master_hash_id )
        
        ( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        now = HydrusData.GetNow()
        
        if 'ip' in file_dict:
            
            ip = file_dict[ 'ip' ]
            
            self._c.execute( 'INSERT INTO ' + ip_addresses_table_name + ' ( master_hash_id, ip, ip_timestamp ) VALUES ( ?, ?, ? );', ( master_hash_id, ip, now ) )
            
        
        result = self._c.execute( 'SELECT 1 FROM ' + current_files_table_name + ' WHERE service_hash_id = ?;', ( service_hash_id, ) ).fetchone()
        
        if result is not None:
            
            return
            
        
        if overwrite_deleted:
            
            #self._RepositoryRewardFilePenders( service_id, service_hash_id, 1 )
            
            #self._c.execute( 'DELETE FROM ' + pending_files_table_name + ' WHERE service_hash_id = ?;', ( service_hash_id, ) )
            self._c.execute( 'DELETE FROM ' + deleted_files_table_name + ' WHERE service_hash_id = ?;', ( service_hash_id, ) )
            
        else:
            
            result = self._c.execute( 'SELECT 1 FROM ' + deleted_files_table_name + ' WHERE service_hash_id = ?;', ( service_hash_id, ) ).fetchone()
            
            if result is not None:
                
                return
                
            
        
        self._c.execute( 'INSERT INTO ' + current_files_table_name + ' ( service_hash_id, account_id, file_timestamp ) VALUES ( ?, ?, ? );', ( service_hash_id, account_id, now ) )
        
2013-02-19 00:11:43 +00:00
2017-03-02 02:14:56 +00:00
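    # The 'IN %s' select statements used below are not value interpolation:
    # self._SelectFromList (inherited from HydrusDB, assumed here) is understood to swap
    # the %s for batched '( ?, ?, ... )' groups and run the query in chunks, with _STI /
    # _STL flattening the single-column results into a set or list.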
    def _RepositoryAddMappings( self, service_id, account_id, master_tag_id, master_hash_ids, overwrite_deleted ):
        
        service_tag_id = self._RepositoryGetServiceTagId( service_id, master_tag_id )
        service_hash_ids = self._RepositoryGetServiceHashIds( service_id, master_hash_ids )
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        if overwrite_deleted:
            
            #self._RepositoryRewardMappingPenders( service_id, service_tag_id, service_hash_ids, 1 )
            
            #self._c.executemany( 'DELETE FROM ' + pending_mappings_table_name + ' WHERE master_tag_id = ? AND master_hash_id = ?;', ( ( master_tag_id, master_hash_id ) for master_hash_id in master_hash_ids ) )
            
            self._c.executemany( 'DELETE FROM ' + deleted_mappings_table_name + ' WHERE service_tag_id = ? AND service_hash_id = ?;', ( ( service_tag_id, service_hash_id ) for service_hash_id in service_hash_ids ) )
            
        else:
            
            select_statement = 'SELECT service_hash_id FROM ' + deleted_mappings_table_name + ' WHERE service_tag_id = ' + str( service_tag_id ) + ' AND service_hash_id IN %s;'
            
            deleted_service_hash_ids = self._STI( self._SelectFromList( select_statement, service_hash_ids ) )
            
            service_hash_ids = set( service_hash_ids ).difference( deleted_service_hash_ids )
            
        
        # in future, delete from pending with the master ids here
        
        now = HydrusData.GetNow()
        
        self._c.executemany( 'INSERT OR IGNORE INTO ' + current_mappings_table_name + ' ( service_tag_id, service_hash_id, account_id, mapping_timestamp ) VALUES ( ?, ?, ?, ? );', [ ( service_tag_id, service_hash_id, account_id, now ) for service_hash_id in service_hash_ids ] )
    
    def _RepositoryAddTagParent( self, service_id, account_id, child_master_tag_id, parent_master_tag_id, overwrite_deleted ):
        
        child_service_tag_id = self._RepositoryGetServiceTagId( service_id, child_master_tag_id )
        parent_service_tag_id = self._RepositoryGetServiceTagId( service_id, parent_master_tag_id )
        
        ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
        
        if overwrite_deleted:
            
            self._RepositoryRewardTagParentPenders( service_id, child_master_tag_id, parent_master_tag_id, 1 )
            
            self._c.execute( 'DELETE FROM ' + pending_tag_parents_table_name + ' WHERE child_master_tag_id = ? AND parent_master_tag_id = ?;', ( child_master_tag_id, parent_master_tag_id ) )
            self._c.execute( 'DELETE FROM ' + deleted_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) )
            
        else:
            
            result = self._c.execute( 'SELECT 1 FROM ' + deleted_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) ).fetchone()
            
            if result is not None:
                
                return
                
            
        
        now = HydrusData.GetNow()
        
        self._c.execute( 'INSERT OR IGNORE INTO ' + current_tag_parents_table_name + ' ( child_service_tag_id, parent_service_tag_id, account_id, parent_timestamp ) VALUES ( ?, ?, ?, ? );', ( child_service_tag_id, parent_service_tag_id, account_id, now ) )
        
        # a new parent pair implies the parent tag for everything that currently has the child tag
        
        child_master_hash_ids = self._RepositoryGetCurrentMappingsMasterHashIds( service_id, child_service_tag_id )
        
        self._RepositoryAddMappings( service_id, account_id, parent_master_tag_id, child_master_hash_ids, False )
    
    def _RepositoryAddTagSibling( self, service_id, account_id, bad_master_tag_id, good_master_tag_id, overwrite_deleted ):
        
        bad_service_tag_id = self._RepositoryGetServiceTagId( service_id, bad_master_tag_id )
        good_service_tag_id = self._RepositoryGetServiceTagId( service_id, good_master_tag_id )
        
        ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
        
        if overwrite_deleted:
            
            self._RepositoryRewardTagSiblingPenders( service_id, bad_master_tag_id, good_master_tag_id, 1 )
            
            self._c.execute( 'DELETE FROM ' + pending_tag_siblings_table_name + ' WHERE bad_master_tag_id = ? AND good_master_tag_id = ?;', ( bad_master_tag_id, good_master_tag_id ) )
            self._c.execute( 'DELETE FROM ' + deleted_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) )
            
        else:
            
            result = self._c.execute( 'SELECT 1 FROM ' + deleted_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) ).fetchone()
            
            if result is not None:
                
                return
                
            
        
        now = HydrusData.GetNow()
        
        self._c.execute( 'INSERT OR IGNORE INTO ' + current_tag_siblings_table_name + ' ( bad_service_tag_id, good_service_tag_id, account_id, sibling_timestamp ) VALUES ( ?, ?, ?, ? );', ( bad_service_tag_id, good_service_tag_id, account_id, now ) )
    
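    # Banning leaves an account's committed current/deleted rows alone; it just throws
    # away everything the account still has outstanding in the pending and petitioned
    # queues, across every content type.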
    def _RepositoryBan( self, service_id, subject_account_ids ):
        
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        self._c.executemany( 'DELETE FROM ' + pending_files_table_name + ' WHERE account_id = ?;', ( ( subject_account_id, ) for subject_account_id in subject_account_ids ) )
        self._c.executemany( 'DELETE FROM ' + petitioned_files_table_name + ' WHERE account_id = ?;', ( ( subject_account_id, ) for subject_account_id in subject_account_ids ) )
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        self._c.executemany( 'DELETE FROM ' + pending_mappings_table_name + ' WHERE account_id = ?;', ( ( subject_account_id, ) for subject_account_id in subject_account_ids ) )
        self._c.executemany( 'DELETE FROM ' + petitioned_mappings_table_name + ' WHERE account_id = ?;', ( ( subject_account_id, ) for subject_account_id in subject_account_ids ) )
        
        ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
        
        self._c.executemany( 'DELETE FROM ' + pending_tag_parents_table_name + ' WHERE account_id = ?;', ( ( subject_account_id, ) for subject_account_id in subject_account_ids ) )
        self._c.executemany( 'DELETE FROM ' + petitioned_tag_parents_table_name + ' WHERE account_id = ?;', ( ( subject_account_id, ) for subject_account_id in subject_account_ids ) )
        
        ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
        
        self._c.executemany( 'DELETE FROM ' + pending_tag_siblings_table_name + ' WHERE account_id = ?;', ( ( subject_account_id, ) for subject_account_id in subject_account_ids ) )
        self._c.executemany( 'DELETE FROM ' + petitioned_tag_siblings_table_name + ' WHERE account_id = ?;', ( ( subject_account_id, ) for subject_account_id in subject_account_ids ) )
    
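    # Per-service schema, created when a repository service is added:
    #  - current/deleted tables are keyed by service ids and carry the uploading account and a timestamp
    #  - pending tables are keyed by master ids (service ids may not exist yet) and carry a reason_id
    #  - petitioned tables are keyed by service ids and carry a reason_id
    # The timestamp columns are what _RepositoryGenerateUpdates slices on later.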
    def _RepositoryCreate( self, service_id ):
        
        ( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        
        self._c.execute( 'CREATE TABLE ' + hash_id_map_table_name + ' ( service_hash_id INTEGER PRIMARY KEY, master_hash_id INTEGER UNIQUE, hash_id_timestamp INTEGER );' )
        self._CreateIndex( hash_id_map_table_name, [ 'hash_id_timestamp' ] )
        
        self._c.execute( 'CREATE TABLE ' + tag_id_map_table_name + ' ( service_tag_id INTEGER PRIMARY KEY, master_tag_id INTEGER UNIQUE, tag_id_timestamp INTEGER );' )
        self._CreateIndex( tag_id_map_table_name, [ 'tag_id_timestamp' ] )
        
        #
        
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        self._c.execute( 'CREATE TABLE ' + current_files_table_name + ' ( service_hash_id INTEGER PRIMARY KEY, account_id INTEGER, file_timestamp INTEGER );' )
        self._CreateIndex( current_files_table_name, [ 'account_id' ] )
        self._CreateIndex( current_files_table_name, [ 'file_timestamp' ] )
        
        self._c.execute( 'CREATE TABLE ' + deleted_files_table_name + ' ( service_hash_id INTEGER PRIMARY KEY, account_id INTEGER, file_timestamp INTEGER );' )
        self._CreateIndex( deleted_files_table_name, [ 'account_id' ] )
        self._CreateIndex( deleted_files_table_name, [ 'file_timestamp' ] )
        
        self._c.execute( 'CREATE TABLE ' + pending_files_table_name + ' ( master_hash_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( master_hash_id, account_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( pending_files_table_name, [ 'account_id', 'reason_id' ] )
        
        self._c.execute( 'CREATE TABLE ' + petitioned_files_table_name + ' ( service_hash_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( service_hash_id, account_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( petitioned_files_table_name, [ 'account_id', 'reason_id' ] )
        
        self._c.execute( 'CREATE TABLE ' + ip_addresses_table_name + ' ( master_hash_id INTEGER, ip TEXT, ip_timestamp INTEGER );' )
        
        #
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        self._c.execute( 'CREATE TABLE ' + current_mappings_table_name + ' ( service_tag_id INTEGER, service_hash_id INTEGER, account_id INTEGER, mapping_timestamp INTEGER, PRIMARY KEY ( service_tag_id, service_hash_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( current_mappings_table_name, [ 'account_id' ] )
        self._CreateIndex( current_mappings_table_name, [ 'mapping_timestamp' ] )
        
        self._c.execute( 'CREATE TABLE ' + deleted_mappings_table_name + ' ( service_tag_id INTEGER, service_hash_id INTEGER, account_id INTEGER, mapping_timestamp INTEGER, PRIMARY KEY ( service_tag_id, service_hash_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( deleted_mappings_table_name, [ 'account_id' ] )
        self._CreateIndex( deleted_mappings_table_name, [ 'mapping_timestamp' ] )
        
        self._c.execute( 'CREATE TABLE ' + pending_mappings_table_name + ' ( master_tag_id INTEGER, master_hash_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( master_tag_id, master_hash_id, account_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( pending_mappings_table_name, [ 'account_id', 'reason_id' ] )
        
        self._c.execute( 'CREATE TABLE ' + petitioned_mappings_table_name + ' ( service_tag_id INTEGER, service_hash_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( service_tag_id, service_hash_id, account_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( petitioned_mappings_table_name, [ 'account_id', 'reason_id' ] )
        
        #
        
        ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
        
        self._c.execute( 'CREATE TABLE ' + current_tag_parents_table_name + ' ( child_service_tag_id INTEGER, parent_service_tag_id INTEGER, account_id INTEGER, parent_timestamp INTEGER, PRIMARY KEY ( child_service_tag_id, parent_service_tag_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( current_tag_parents_table_name, [ 'account_id' ] )
        self._CreateIndex( current_tag_parents_table_name, [ 'parent_timestamp' ] )
        
        self._c.execute( 'CREATE TABLE ' + deleted_tag_parents_table_name + ' ( child_service_tag_id INTEGER, parent_service_tag_id INTEGER, account_id INTEGER, parent_timestamp INTEGER, PRIMARY KEY ( child_service_tag_id, parent_service_tag_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( deleted_tag_parents_table_name, [ 'account_id' ] )
        self._CreateIndex( deleted_tag_parents_table_name, [ 'parent_timestamp' ] )
        
        self._c.execute( 'CREATE TABLE ' + pending_tag_parents_table_name + ' ( child_master_tag_id INTEGER, parent_master_tag_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( child_master_tag_id, parent_master_tag_id, account_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( pending_tag_parents_table_name, [ 'account_id', 'reason_id' ] )
        
        self._c.execute( 'CREATE TABLE ' + petitioned_tag_parents_table_name + ' ( child_service_tag_id INTEGER, parent_service_tag_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( child_service_tag_id, parent_service_tag_id, account_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( petitioned_tag_parents_table_name, [ 'account_id', 'reason_id' ] )
        
        #
        
        ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
        
        self._c.execute( 'CREATE TABLE ' + current_tag_siblings_table_name + ' ( bad_service_tag_id INTEGER PRIMARY KEY, good_service_tag_id INTEGER, account_id INTEGER, sibling_timestamp INTEGER );' )
        self._CreateIndex( current_tag_siblings_table_name, [ 'account_id' ] )
        self._CreateIndex( current_tag_siblings_table_name, [ 'sibling_timestamp' ] )
        
        self._c.execute( 'CREATE TABLE ' + deleted_tag_siblings_table_name + ' ( bad_service_tag_id INTEGER PRIMARY KEY, good_service_tag_id INTEGER, account_id INTEGER, sibling_timestamp INTEGER );' )
        self._CreateIndex( deleted_tag_siblings_table_name, [ 'account_id' ] )
        self._CreateIndex( deleted_tag_siblings_table_name, [ 'sibling_timestamp' ] )
        
        self._c.execute( 'CREATE TABLE ' + pending_tag_siblings_table_name + ' ( bad_master_tag_id INTEGER, good_master_tag_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( bad_master_tag_id, account_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( pending_tag_siblings_table_name, [ 'account_id', 'reason_id' ] )
        
        self._c.execute( 'CREATE TABLE ' + petitioned_tag_siblings_table_name + ' ( bad_service_tag_id INTEGER, good_service_tag_id INTEGER, account_id INTEGER, reason_id INTEGER, PRIMARY KEY ( bad_service_tag_id, account_id ) ) WITHOUT ROWID;' )
        self._CreateIndex( petitioned_tag_siblings_table_name, [ 'account_id', 'reason_id' ] )
        
        #
        
        update_table_name = GenerateRepositoryUpdateTableName( service_id )
        
        self._c.execute( 'CREATE TABLE ' + update_table_name + ' ( master_hash_id INTEGER PRIMARY KEY );' )
    
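    # Update files are regular files on the server: each update object is dumped to its
    # network string, written to ServerFiles.GetExpectedFilePath( sha256 of that string ),
    # and its master_hash_id is recorded in the per-service update table so the service
    # can hand the update hashes out to clients.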
    def _RepositoryCreateUpdate( self, service_key, begin, end ):
        
        service_id = self._GetServiceId( service_key )
        
        ( name, ) = self._c.execute( 'SELECT name FROM services WHERE service_id = ?;', ( service_id, ) ).fetchone()
        
        HydrusData.Print( 'Creating update for ' + repr( name ) + ' from ' + HydrusData.ConvertTimestampToPrettyTime( begin, in_gmt = True ) + ' to ' + HydrusData.ConvertTimestampToPrettyTime( end, in_gmt = True ) )
        
        updates = self._RepositoryGenerateUpdates( service_id, begin, end )
        
        update_hashes = []
        
        total_definition_rows = 0
        total_content_rows = 0
        
        if len( updates ) > 0:
            
            for update in updates:
                
                num_rows = update.GetNumRows()
                
                if isinstance( update, HydrusNetwork.DefinitionsUpdate ):
                    
                    total_definition_rows += num_rows
                    
                elif isinstance( update, HydrusNetwork.ContentUpdate ):
                    
                    total_content_rows += num_rows
                    
                
                update_bytes = update.DumpToNetworkString()
                
                update_hash = hashlib.sha256( update_bytes ).digest()
                
                dest_path = ServerFiles.GetExpectedFilePath( update_hash )
                
                with open( dest_path, 'wb' ) as f:
                    
                    f.write( update_bytes )
                    
                
                update_hashes.append( update_hash )
                
            
            update_table_name = GenerateRepositoryUpdateTableName( service_id )
            
            master_hash_ids = self._GetMasterHashIds( update_hashes )
            
            self._c.executemany( 'INSERT OR IGNORE INTO ' + update_table_name + ' ( master_hash_id ) VALUES ( ? );', ( ( master_hash_id, ) for master_hash_id in master_hash_ids ) )
            
        
        HydrusData.Print( 'Update OK. ' + HydrusData.ToHumanInt( total_definition_rows ) + ' definition rows and ' + HydrusData.ToHumanInt( total_content_rows ) + ' content rows in ' + HydrusData.ToHumanInt( len( updates ) ) + ' update files.' )
        
        return update_hashes
    
    def _RepositoryDeleteFiles( self, service_id, account_id, service_hash_ids ):
        
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        select_statement = 'SELECT service_hash_id FROM ' + current_files_table_name + ' WHERE service_hash_id IN %s;'
        
        valid_service_hash_ids = self._STL( self._SelectFromList( select_statement, service_hash_ids ) )
        
        self._RepositoryRewardFilePetitioners( service_id, valid_service_hash_ids, 1 )
        
        self._c.executemany( 'DELETE FROM ' + current_files_table_name + ' WHERE service_hash_id = ?;', ( ( service_hash_id, ) for service_hash_id in valid_service_hash_ids ) )
        self._c.executemany( 'DELETE FROM ' + petitioned_files_table_name + ' WHERE service_hash_id = ?;', ( ( service_hash_id, ) for service_hash_id in valid_service_hash_ids ) )
        
        now = HydrusData.GetNow()
        
        self._c.executemany( 'INSERT OR IGNORE INTO ' + deleted_files_table_name + ' ( service_hash_id, account_id, file_timestamp ) VALUES ( ?, ?, ? );', ( ( service_hash_id, account_id, now ) for service_hash_id in valid_service_hash_ids ) )
    
    def _RepositoryDeleteMappings( self, service_id, account_id, service_tag_id, service_hash_ids ):
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        select_statement = 'SELECT service_hash_id FROM ' + current_mappings_table_name + ' WHERE service_tag_id = ' + str( service_tag_id ) + ' AND service_hash_id IN %s;'
        
        valid_service_hash_ids = self._STL( self._SelectFromList( select_statement, service_hash_ids ) )
        
        self._RepositoryRewardMappingPetitioners( service_id, service_tag_id, valid_service_hash_ids, 1 )
        
        self._c.executemany( 'DELETE FROM ' + current_mappings_table_name + ' WHERE service_tag_id = ? AND service_hash_id = ?;', ( ( service_tag_id, service_hash_id ) for service_hash_id in valid_service_hash_ids ) )
        self._c.executemany( 'DELETE FROM ' + petitioned_mappings_table_name + ' WHERE service_tag_id = ? AND service_hash_id = ?;', ( ( service_tag_id, service_hash_id ) for service_hash_id in valid_service_hash_ids ) )
        
        now = HydrusData.GetNow()
        
        self._c.executemany( 'INSERT OR IGNORE INTO ' + deleted_mappings_table_name + ' ( service_tag_id, service_hash_id, account_id, mapping_timestamp ) VALUES ( ?, ?, ?, ? );', ( ( service_tag_id, service_hash_id, account_id, now ) for service_hash_id in valid_service_hash_ids ) )
    
    def _RepositoryDeleteTagParent( self, service_id, account_id, child_service_tag_id, parent_service_tag_id ):
        
        ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
        
        self._RepositoryRewardTagParentPetitioners( service_id, child_service_tag_id, parent_service_tag_id, 1 )
        
        self._c.execute( 'DELETE FROM ' + current_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) )
        self._c.execute( 'DELETE FROM ' + petitioned_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) )
        
        now = HydrusData.GetNow()
        
        self._c.execute( 'INSERT OR IGNORE INTO ' + deleted_tag_parents_table_name + ' ( child_service_tag_id, parent_service_tag_id, account_id, parent_timestamp ) VALUES ( ?, ?, ?, ? );', ( child_service_tag_id, parent_service_tag_id, account_id, now ) )
    
    def _RepositoryDeleteTagSibling( self, service_id, account_id, bad_service_tag_id, good_service_tag_id ):
        
        ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
        
        self._RepositoryRewardTagSiblingPetitioners( service_id, bad_service_tag_id, good_service_tag_id, 1 )
        
        self._c.execute( 'DELETE FROM ' + current_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) )
        self._c.execute( 'DELETE FROM ' + petitioned_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) )
        
        now = HydrusData.GetNow()
        
        self._c.execute( 'INSERT OR IGNORE INTO ' + deleted_tag_siblings_table_name + ' ( bad_service_tag_id, good_service_tag_id, account_id, sibling_timestamp ) VALUES ( ?, ?, ?, ? );', ( bad_service_tag_id, good_service_tag_id, account_id, now ) )
    
    def _RepositoryDenyFilePetition( self, service_id, service_hash_ids ):
        
        self._RepositoryRewardFilePetitioners( service_id, service_hash_ids, -1 )
        
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        self._c.executemany( 'DELETE FROM ' + petitioned_files_table_name + ' WHERE service_hash_id = ?;', ( ( service_hash_id, ) for service_hash_id in service_hash_ids ) )
    
    def _RepositoryDenyMappingPetition( self, service_id, service_tag_id, service_hash_ids ):
        
        self._RepositoryRewardMappingPetitioners( service_id, service_tag_id, service_hash_ids, -1 )
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        self._c.executemany( 'DELETE FROM ' + petitioned_mappings_table_name + ' WHERE service_tag_id = ? AND service_hash_id = ?;', ( ( service_tag_id, service_hash_id ) for service_hash_id in service_hash_ids ) )
    
    def _RepositoryDenyTagParentPend( self, service_id, child_master_tag_id, parent_master_tag_id ):
        
        self._RepositoryRewardTagParentPenders( service_id, child_master_tag_id, parent_master_tag_id, -1 )
        
        ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
        
        self._c.execute( 'DELETE FROM ' + pending_tag_parents_table_name + ' WHERE child_master_tag_id = ? AND parent_master_tag_id = ?;', ( child_master_tag_id, parent_master_tag_id ) )
    
    def _RepositoryDenyTagParentPetition( self, service_id, child_service_tag_id, parent_service_tag_id ):
        
        self._RepositoryRewardTagParentPetitioners( service_id, child_service_tag_id, parent_service_tag_id, -1 )
        
        ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
        
        self._c.execute( 'DELETE FROM ' + petitioned_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) )
    
    def _RepositoryDenyTagSiblingPend( self, service_id, bad_master_tag_id, good_master_tag_id ):
        
        self._RepositoryRewardTagSiblingPenders( service_id, bad_master_tag_id, good_master_tag_id, -1 )
        
        ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
        
        self._c.execute( 'DELETE FROM ' + pending_tag_siblings_table_name + ' WHERE bad_master_tag_id = ? AND good_master_tag_id = ?;', ( bad_master_tag_id, good_master_tag_id ) )
    
    def _RepositoryDenyTagSiblingPetition( self, service_id, bad_service_tag_id, good_service_tag_id ):
        
        self._RepositoryRewardTagSiblingPetitioners( service_id, bad_service_tag_id, good_service_tag_id, -1 )
        
        ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
        
        self._c.execute( 'DELETE FROM ' + petitioned_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) )
    
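    # Dropping a service removes every per-service table created above, including the
    # update table, while the shared master hash/tag tables are left alone.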
    def _RepositoryDrop( self, service_id ):
        
        table_names = []
        
        table_names.extend( GenerateRepositoryMasterMapTableNames( service_id ) )
        table_names.extend( GenerateRepositoryFilesTableNames( service_id ) )
        table_names.extend( GenerateRepositoryMappingsTableNames( service_id ) )
        table_names.extend( GenerateRepositoryTagParentsTableNames( service_id ) )
        table_names.extend( GenerateRepositoryTagSiblingsTableNames( service_id ) )
        
        table_names.append( GenerateRepositoryUpdateTableName( service_id ) )
        
        for table_name in table_names:
            
            self._c.execute( 'DROP TABLE ' + table_name + ';' )
            
        
    
    def _RepositoryGenerateImmediateUpdate( self, service_key, account, begin, end ):
        
        if True not in ( account.HasPermission( content_type, HC.PERMISSION_ACTION_OVERRULE ) for content_type in HC.REPOSITORY_CONTENT_TYPES ):
            
            raise HydrusExceptions.PermissionException( 'You do not have permission to generate an immediate update!' )
            
        
        service_id = self._GetServiceId( service_key )
        
        updates = self._RepositoryGenerateUpdates( service_id, begin, end )
        
        return updates
    
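    # Update generation walks the per-service tables by their timestamp columns and packs
    # all rows in [begin, end] into update objects: definition updates first (new hash and
    # tag ids), then content updates (file adds/deletes, mapping adds/deletes, parent and
    # sibling adds/deletes). The builders cap each update file at 50k definition rows or
    # 250k content rows, and large mapping lists are split into 25k-hash chunks so a
    # single tag cannot blow one row past the cap.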
    def _RepositoryGenerateUpdates( self, service_id, begin, end ):
        
        MAX_DEFINITIONS_ROWS = 50000
        MAX_CONTENT_ROWS = 250000
        
        MAX_CONTENT_CHUNK = 25000
        
        updates = []
        
        definitions_update_builder = HydrusNetwork.UpdateBuilder( HydrusNetwork.DefinitionsUpdate, MAX_DEFINITIONS_ROWS )
        content_update_builder = HydrusNetwork.UpdateBuilder( HydrusNetwork.ContentUpdate, MAX_CONTENT_ROWS )
        
        ( service_hash_ids_table_name, service_tag_ids_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        
        for ( service_hash_id, hash ) in self._c.execute( 'SELECT service_hash_id, hash FROM ' + service_hash_ids_table_name + ' NATURAL JOIN hashes WHERE hash_id_timestamp BETWEEN ? AND ?;', ( begin, end ) ):
            
            row = ( HC.DEFINITIONS_TYPE_HASHES, service_hash_id, hash )
            
            definitions_update_builder.AddRow( row )
            
        
        for ( service_tag_id, tag ) in self._c.execute( 'SELECT service_tag_id, tag FROM ' + service_tag_ids_table_name + ' NATURAL JOIN tags WHERE tag_id_timestamp BETWEEN ? AND ?;', ( begin, end ) ):
            
            row = ( HC.DEFINITIONS_TYPE_TAGS, service_tag_id, tag )
            
            definitions_update_builder.AddRow( row )
            
        
        definitions_update_builder.Finish()
        
        updates.extend( definitions_update_builder.GetUpdates() )
        
        #
        
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        table_join = self._RepositoryGetFilesInfoFilesTableJoin( service_id, HC.CONTENT_STATUS_CURRENT )
        
        for ( service_hash_id, size, mime, timestamp, width, height, duration, num_frames, num_words ) in self._c.execute( 'SELECT service_hash_id, size, mime, file_timestamp, width, height, duration, num_frames, num_words FROM ' + table_join + ' WHERE file_timestamp BETWEEN ? AND ?;', ( begin, end ) ):
            
            file_row = ( service_hash_id, size, mime, timestamp, width, height, duration, num_frames, num_words )
            
            content_update_builder.AddRow( ( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_ADD, file_row ) )
            
        
        service_hash_ids = [ service_hash_id for ( service_hash_id, ) in self._c.execute( 'SELECT service_hash_id FROM ' + deleted_files_table_name + ' WHERE file_timestamp BETWEEN ? AND ?;', ( begin, end ) ) ]
        
        for service_hash_id in service_hash_ids:
            
            content_update_builder.AddRow( ( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DELETE, service_hash_id ) )
            
        
        #
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        service_tag_ids_to_service_hash_ids = HydrusData.BuildKeyToListDict( self._c.execute( 'SELECT service_tag_id, service_hash_id FROM ' + current_mappings_table_name + ' WHERE mapping_timestamp BETWEEN ? AND ?;', ( begin, end ) ) )
        
        for ( service_tag_id, service_hash_ids ) in service_tag_ids_to_service_hash_ids.items():
            
            for block_of_service_hash_ids in HydrusData.SplitListIntoChunks( service_hash_ids, MAX_CONTENT_CHUNK ):
                
                row_weight = len( block_of_service_hash_ids )
                
                content_update_builder.AddRow( ( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_ADD, ( service_tag_id, block_of_service_hash_ids ) ), row_weight )
                
            
        
        service_tag_ids_to_service_hash_ids = HydrusData.BuildKeyToListDict( self._c.execute( 'SELECT service_tag_id, service_hash_id FROM ' + deleted_mappings_table_name + ' WHERE mapping_timestamp BETWEEN ? AND ?;', ( begin, end ) ) )
        
        for ( service_tag_id, service_hash_ids ) in service_tag_ids_to_service_hash_ids.items():
            
            for block_of_service_hash_ids in HydrusData.SplitListIntoChunks( service_hash_ids, MAX_CONTENT_CHUNK ):
                
                row_weight = len( block_of_service_hash_ids )
                
                content_update_builder.AddRow( ( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_DELETE, ( service_tag_id, block_of_service_hash_ids ) ), row_weight )
                
            
        
        #
        
        ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
        
        pairs = self._c.execute( 'SELECT child_service_tag_id, parent_service_tag_id FROM ' + current_tag_parents_table_name + ' WHERE parent_timestamp BETWEEN ? AND ?;', ( begin, end ) ).fetchall()
        
        for pair in pairs:
            
            content_update_builder.AddRow( ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_ADD, pair ) )
            
        
        pairs = self._c.execute( 'SELECT child_service_tag_id, parent_service_tag_id FROM ' + deleted_tag_parents_table_name + ' WHERE parent_timestamp BETWEEN ? AND ?;', ( begin, end ) ).fetchall()
        
        for pair in pairs:
            
            content_update_builder.AddRow( ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_DELETE, pair ) )
            
        
        #
        
        ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
        
        pairs = self._c.execute( 'SELECT bad_service_tag_id, good_service_tag_id FROM ' + current_tag_siblings_table_name + ' WHERE sibling_timestamp BETWEEN ? AND ?;', ( begin, end ) ).fetchall()
        
        for pair in pairs:
            
            content_update_builder.AddRow( ( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_ADD, pair ) )
            
        
        pairs = self._c.execute( 'SELECT bad_service_tag_id, good_service_tag_id FROM ' + deleted_tag_siblings_table_name + ' WHERE sibling_timestamp BETWEEN ? AND ?;', ( begin, end ) ).fetchall()
        
        for pair in pairs:
            
            content_update_builder.AddRow( ( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_DELETE, pair ) )
            
        
        #
        
        content_update_builder.Finish()
        
        updates.extend( content_update_builder.GetUpdates() )
        
        return updates
    
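    # Account info is deliberately cheap to compute: the mapping count below is capped by
    # its LIMIT 5000, so 'num_mappings' really means 'up to 5000'.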
    def _RepositoryGetAccountInfo( self, service_id, account_id ):
        
        ( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        # the metadata table is 'files_info', matching _RepositoryGetFilesInfoFilesTableJoin below
        table_join = 'files_info NATURAL JOIN ' + hash_id_map_table_name + ' NATURAL JOIN ' + current_files_table_name
        
        ( num_files, num_files_bytes ) = self._c.execute( 'SELECT COUNT( * ), SUM( size ) FROM ' + table_join + ' WHERE account_id = ?;', ( account_id, ) ).fetchone()
        
        if num_files_bytes is None:
            
            num_files_bytes = 0
            
        
        account_info = {}
        
        account_info[ 'num_files' ] = num_files
        account_info[ 'num_files_bytes' ] = num_files_bytes
        
        #
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        num_mappings = len( self._c.execute( 'SELECT 1 FROM ' + current_mappings_table_name + ' WHERE account_id = ? LIMIT 5000;', ( account_id, ) ).fetchall() )
        
        account_info[ 'num_mappings' ] = num_mappings
        
        #
        
        result = self._c.execute( 'SELECT score FROM account_scores WHERE service_id = ? AND account_id = ? AND score_type = ?;', ( service_id, account_id, HC.SCORE_PETITION ) ).fetchone()
        
        if result is None:
            
            petition_score = 0
            
        else:
            
            ( petition_score, ) = result
            
        
        account_info[ 'petition_score' ] = petition_score
        
        return account_info
    
    def _RepositoryGetCurrentMappingsCount( self, service_id, service_tag_id ):
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        ( count, ) = self._c.execute( 'SELECT COUNT( * ) FROM ' + current_mappings_table_name + ' WHERE service_tag_id = ?;', ( service_tag_id, ) ).fetchone()
        
        return count
    
    def _RepositoryGetCurrentMappingsMasterHashIds( self, service_id, service_tag_id ):
        
        ( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        master_hash_ids = [ master_hash_id for ( master_hash_id, ) in self._c.execute( 'SELECT master_hash_id FROM ' + hash_id_map_table_name + ' NATURAL JOIN ' + current_mappings_table_name + ' WHERE service_tag_id = ?;', ( service_tag_id, ) ) ]
        
        return master_hash_ids
    
    def _RepositoryGetFilesInfoFilesTableJoin( self, service_id, content_status ):
        
        ( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        if content_status == HC.CONTENT_STATUS_CURRENT:
            
            return 'files_info NATURAL JOIN ' + hash_id_map_table_name + ' NATURAL JOIN ' + current_files_table_name
            
        elif content_status == HC.CONTENT_STATUS_DELETED:
            
            return 'files_info NATURAL JOIN ' + hash_id_map_table_name + ' NATURAL JOIN ' + deleted_files_table_name
            
        elif content_status == HC.CONTENT_STATUS_PENDING:
            
            return 'files_info NATURAL JOIN ' + hash_id_map_table_name + ' NATURAL JOIN ' + pending_files_table_name
            
        elif content_status == HC.CONTENT_STATUS_PETITIONED:
            
            return 'files_info NATURAL JOIN ' + hash_id_map_table_name + ' NATURAL JOIN ' + petitioned_files_table_name
            
        
    
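    # For example, for a hypothetical service_id of 5 and CONTENT_STATUS_CURRENT, the join
    # string above comes out as:
    #
    #     files_info NATURAL JOIN external_master.repository_hash_id_map_5 NATURAL JOIN current_files_5
    #
    # i.e. file metadata joined to the service<->master id map joined to the current files list.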
    def _RepositoryGetFilePetition( self, service_id ):
        
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        result = self._c.execute( 'SELECT account_id, reason_id FROM ' + petitioned_files_table_name + ' ORDER BY RANDOM() LIMIT 1;' ).fetchone()
        
        if result is None:
            
            raise HydrusExceptions.NotFoundException( 'No petitions!' )
            
        
        ( petitioner_account_id, reason_id ) = result
        
        action = HC.CONTENT_UPDATE_PETITION
        
        petitioner_account = self._GetAccount( service_id, petitioner_account_id )
        reason = self._GetReason( reason_id )
        
        service_hash_ids = [ service_hash_id for ( service_hash_id, ) in self._c.execute( 'SELECT service_hash_id FROM ' + petitioned_files_table_name + ' WHERE account_id = ? AND reason_id = ?;', ( petitioner_account_id, reason_id ) ) ]
        
        master_hash_ids = self._RepositoryGetMasterHashIds( service_id, service_hash_ids )
        
        hashes = self._GetHashes( master_hash_ids )
        
        content_type = HC.CONTENT_TYPE_FILES
        
        contents = [ HydrusNetwork.Content( content_type, hashes ) ]
        
        return HydrusNetwork.Petition( action, petitioner_account, reason, contents )
    
    def _RepositoryGetIPTimestamp( self, service_key, account, hash ):
        
        account.CheckPermission( HC.CONTENT_TYPE_ACCOUNTS, HC.PERMISSION_ACTION_OVERRULE )
        
        service_id = self._GetServiceId( service_key )
        
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        master_hash_id = self._GetMasterHashId( hash )
        
        result = self._c.execute( 'SELECT ip, ip_timestamp FROM ' + ip_addresses_table_name + ' WHERE master_hash_id = ?;', ( master_hash_id, ) ).fetchone()
        
        if result is None:
            
            raise HydrusExceptions.ForbiddenException( 'Did not find ip information for that hash.' )
            
        
        return result
    
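    # Returns a list of ( content_type, content_status, count ) tuples: a petitioned count
    # for files and mappings, and pending + petitioned counts for tag parents and siblings,
    # each gated on the account holding the overrule permission for that content type.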
    def _RepositoryGetNumPetitions( self, service_key, account ):
        
        service_id = self._GetServiceId( service_key )
        
        petition_count_info = []
        
        ( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
        
        if account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_OVERRULE ):
            
            ( num_petitions, ) = self._c.execute( 'SELECT COUNT( * ) FROM ( SELECT DISTINCT account_id, reason_id FROM ' + petitioned_files_table_name + ' );' ).fetchone()
            
            petition_count_info.append( ( HC.CONTENT_TYPE_FILES, HC.CONTENT_STATUS_PETITIONED, num_petitions ) )
            
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        if account.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_OVERRULE ):
            
            ( num_petitions, ) = self._c.execute( 'SELECT COUNT( * ) FROM ( SELECT DISTINCT service_tag_id, account_id, reason_id FROM ' + petitioned_mappings_table_name + ' );' ).fetchone()
            
            petition_count_info.append( ( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_STATUS_PETITIONED, num_petitions ) )
            
        
        # each count is gated on the overrule permission for the same content type being counted
        
        ( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
        
        if account.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_OVERRULE ):
            
            ( num_petitions, ) = self._c.execute( 'SELECT COUNT( * ) FROM ' + pending_tag_parents_table_name + ';' ).fetchone()
            
            petition_count_info.append( ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_STATUS_PENDING, num_petitions ) )
            
            ( num_petitions, ) = self._c.execute( 'SELECT COUNT( * ) FROM ' + petitioned_tag_parents_table_name + ';' ).fetchone()
            
            petition_count_info.append( ( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_STATUS_PETITIONED, num_petitions ) )
            
        
        ( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
        
        if account.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_OVERRULE ):
            
            ( num_petitions, ) = self._c.execute( 'SELECT COUNT( * ) FROM ' + pending_tag_siblings_table_name + ';' ).fetchone()
            
            petition_count_info.append( ( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_STATUS_PENDING, num_petitions ) )
            
            ( num_petitions, ) = self._c.execute( 'SELECT COUNT( * ) FROM ' + petitioned_tag_siblings_table_name + ';' ).fetchone()
            
            petition_count_info.append( ( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_STATUS_PETITIONED, num_petitions ) )
            
        
        return petition_count_info
    
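    # Mapping petitions are served in weight buckets so a janitor sees petitions of a
    # similar size together: the first pair drawn fixes the permitted weight band (1, 2-9,
    # 10-49 or 50+) and pairs outside that band are skipped; the loop stops once more than
    # 20 pairs and 10000 total weight have been gathered. A per-band max_total_weight is
    # also computed, but the flat cap is what the break below currently uses.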
    def _RepositoryGetMappingPetition( self, service_id ):
        
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
        
        result = self._c.execute( 'SELECT account_id, reason_id FROM ' + petitioned_mappings_table_name + ' ORDER BY RANDOM() LIMIT 1;' ).fetchone()
        
        if result is None:
            
            raise HydrusExceptions.NotFoundException( 'No petitions!' )
            
        
        ( petitioner_account_id, reason_id ) = result
        
        action = HC.CONTENT_UPDATE_PETITION
        
        petitioner_account = self._GetAccount( service_id, petitioner_account_id )
        reason = self._GetReason( reason_id )
        
        tag_ids_to_hash_ids = HydrusData.BuildKeyToListDict( self._c.execute( 'SELECT service_tag_id, service_hash_id FROM ' + petitioned_mappings_table_name + ' WHERE account_id = ? AND reason_id = ?;', ( petitioner_account_id, reason_id ) ) )
        
        contents = []
        
        total_num_petitions = 0
        total_weight = 0
        
        min_weight_permitted = None
        max_weight_permitted = None
        
        max_total_weight = None
        
        petition_pairs = list( tag_ids_to_hash_ids.items() )
        
        random.shuffle( petition_pairs )
        
        for ( service_tag_id, service_hash_ids ) in petition_pairs:
            
            content_weight = len( service_hash_ids )
            
            if min_weight_permitted is None:
                
                # group petitions of similar weight together rather than mixing weight 5000 in with a hundred weight 1s
                
                if content_weight == 1:
                    
                    min_weight_permitted = 1
                    max_weight_permitted = 1
                    
                    max_total_weight = 20000
                    
                elif content_weight < 10:
                    
                    min_weight_permitted = 2
                    max_weight_permitted = 9
                    
                    max_total_weight = 5000
                    
                elif content_weight < 50:
                    
                    min_weight_permitted = 10
                    max_weight_permitted = 49
                    
                    max_total_weight = 2000
                    
                else:
                    
                    min_weight_permitted = 50
                    max_weight_permitted = None
                    
                    max_total_weight = 500
                    
                
            else:
                
                if content_weight < min_weight_permitted:
                    
                    continue
                    
                
                if max_weight_permitted is not None and content_weight > max_weight_permitted:
                    
                    continue
                    
                
            
            master_tag_id = self._RepositoryGetMasterTagId( service_id, service_tag_id )
            master_hash_ids = self._RepositoryGetMasterHashIds( service_id, service_hash_ids )
            
            tag = self._GetTag( master_tag_id )
            hashes = self._GetHashes( master_hash_ids )
            
            content = HydrusNetwork.Content( HC.CONTENT_TYPE_MAPPINGS, ( tag, hashes ) )
            
            contents.append( content )
            
            total_num_petitions += 1
            total_weight += content_weight
            
            if total_num_petitions > 20 and total_weight > 10000:
                
                break
                
            
        
        return HydrusNetwork.Petition( action, petitioner_account, reason, contents )
    
    def _RepositoryGetMasterHashIds( self, service_id, service_hash_ids ):
        
        ( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        
        select_statement = 'SELECT master_hash_id FROM ' + hash_id_map_table_name + ' WHERE service_hash_id IN %s;'
        
        master_hash_ids = [ master_hash_id for ( master_hash_id, ) in self._SelectFromList( select_statement, service_hash_ids ) ]
        
        if len( service_hash_ids ) != len( master_hash_ids ):
            
            raise HydrusExceptions.DataMissing( 'Missing master_hash_id map error!' )
            
        
        return master_hash_ids
    
    def _RepositoryGetMasterTagId( self, service_id, service_tag_id ):
        
        ( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        
        result = self._c.execute( 'SELECT master_tag_id FROM ' + tag_id_map_table_name + ' WHERE service_tag_id = ?;', ( service_tag_id, ) ).fetchone()
        
        if result is None:
            
            raise HydrusExceptions.DataMissing( 'Missing master_tag_id map error!' )
            
        
        ( master_tag_id, ) = result
        
        return master_tag_id
    
    def _RepositoryGetPetition( self, service_key, account, content_type, status ):
        
        service_id = self._GetServiceId( service_key )
        
        account.CheckPermission( content_type, HC.PERMISSION_ACTION_OVERRULE )
        
        if content_type == HC.CONTENT_TYPE_FILES:
            
            petition = self._RepositoryGetFilePetition( service_id )
            
        elif content_type == HC.CONTENT_TYPE_MAPPINGS:
            
            petition = self._RepositoryGetMappingPetition( service_id )
            
        elif content_type == HC.CONTENT_TYPE_TAG_PARENTS:
            
            if status == HC.CONTENT_STATUS_PENDING:
                
                petition = self._RepositoryGetTagParentPend( service_id )
                
            else:
                
                petition = self._RepositoryGetTagParentPetition( service_id )
                
            
        elif content_type == HC.CONTENT_TYPE_TAG_SIBLINGS:
            
            if status == HC.CONTENT_STATUS_PENDING:
                
                petition = self._RepositoryGetTagSiblingPend( service_id )
                
            else:
                
                petition = self._RepositoryGetTagSiblingPetition( service_id )
                
            
        
        return petition
    
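    # The service id getters below are get-or-create: looking up a master id the service
    # has not seen before inserts a new row, and the recorded hash_id_timestamp /
    # tag_id_timestamp is what places the definition in the right update slice in
    # _RepositoryGenerateUpdates.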
    def _RepositoryGetServiceHashId( self, service_id, master_hash_id ):
        
        ( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        
        result = self._c.execute( 'SELECT service_hash_id FROM ' + hash_id_map_table_name + ' WHERE master_hash_id = ?;', ( master_hash_id, ) ).fetchone()
        
        if result is None:
            
            now = HydrusData.GetNow()
            
            self._c.execute( 'INSERT INTO ' + hash_id_map_table_name + ' ( master_hash_id, hash_id_timestamp ) VALUES ( ?, ? );', ( master_hash_id, now ) )
            
            service_hash_id = self._c.lastrowid
            
            return service_hash_id
            
        else:
            
            ( service_hash_id, ) = result
            
            return service_hash_id
            
        
    
    def _RepositoryGetServiceHashIds( self, service_id, master_hash_ids ):
        
        ( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
        
        service_hash_ids = set()
        master_hash_ids_not_in_table = set()
        
        for master_hash_id in master_hash_ids:
            
            result = self._c.execute( 'SELECT service_hash_id FROM ' + hash_id_map_table_name + ' WHERE master_hash_id = ?;', ( master_hash_id, ) ).fetchone()
            
            if result is None:
                
                master_hash_ids_not_in_table.add( master_hash_id )
                
            else:
                
                ( service_hash_id, ) = result
                
                service_hash_ids.add( service_hash_id )
                
            
        
        if len( master_hash_ids_not_in_table ) > 0:
            
            now = HydrusData.GetNow()
            
            self._c.executemany( 'INSERT INTO ' + hash_id_map_table_name + ' ( master_hash_id, hash_id_timestamp ) VALUES ( ?, ? );', ( ( master_hash_id, now ) for master_hash_id in master_hash_ids_not_in_table ) )
            
            for master_hash_id in master_hash_ids_not_in_table:
                
                ( service_hash_id, ) = self._c.execute( 'SELECT service_hash_id FROM ' + hash_id_map_table_name + ' WHERE master_hash_id = ?;', ( master_hash_id, ) ).fetchone()
                
                service_hash_ids.add( service_hash_id )
                
            
        
        return service_hash_ids
    
def _RepositoryGetServiceTagId( self, service_id, master_tag_id ):
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
result = self._c.execute( 'SELECT service_tag_id FROM ' + tag_id_map_table_name + ' WHERE master_tag_id = ?;', ( master_tag_id, ) ).fetchone()
if result is None:
now = HydrusData.GetNow()
self._c.execute( 'INSERT INTO ' + tag_id_map_table_name + ' ( master_tag_id, tag_id_timestamp ) VALUES ( ?, ? );', ( master_tag_id, now ) )
service_tag_id = self._c.lastrowid
return service_tag_id
else:
( service_tag_id, ) = result
return service_tag_id
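# The four fetchers below build Petition objects for tag parents and tag siblings. Pending
# rows store master tag ids directly, while petitioned rows store service tag ids that are
# mapped back to master ids before the tag text is looked up.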
def _RepositoryGetTagParentPend( self, service_id ):
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
result = self._c.execute( 'SELECT account_id, reason_id FROM ' + pending_tag_parents_table_name + ' ORDER BY RANDOM() LIMIT 1;' ).fetchone()
if result is None:
raise HydrusExceptions.NotFoundException( 'No petitions!' )
( petitioner_account_id, reason_id ) = result
action = HC.CONTENT_UPDATE_PEND
petitioner_account = self._GetAccount( service_id, petitioner_account_id )
reason = self._GetReason( reason_id )
pairs = self._c.execute( 'SELECT child_master_tag_id, parent_master_tag_id FROM ' + pending_tag_parents_table_name + ' WHERE account_id = ? AND reason_id = ?;', ( petitioner_account_id, reason_id ) ).fetchall()
contents = []
for ( child_master_tag_id, parent_master_tag_id ) in pairs:
child_tag = self._GetTag( child_master_tag_id )
parent_tag = self._GetTag( parent_master_tag_id )
content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_PARENTS, ( child_tag, parent_tag ) )
contents.append( content )
return HydrusNetwork.Petition( action, petitioner_account, reason, contents )
def _RepositoryGetTagParentPetition( self, service_id ):
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
result = self._c.execute( 'SELECT account_id, reason_id FROM ' + petitioned_tag_parents_table_name + ' ORDER BY RANDOM() LIMIT 1;' ).fetchone()
if result is None:
raise HydrusExceptions.NotFoundException( 'No petitions!' )
( petitioner_account_id, reason_id ) = result
action = HC.CONTENT_UPDATE_PETITION
petitioner_account = self._GetAccount( service_id, petitioner_account_id )
reason = self._GetReason( reason_id )
pairs = self._c.execute( 'SELECT child_service_tag_id, parent_service_tag_id FROM ' + petitioned_tag_parents_table_name + ' WHERE account_id = ? AND reason_id = ?;', ( petitioner_account_id, reason_id ) ).fetchall()
contents = []
for ( child_service_tag_id, parent_service_tag_id ) in pairs:
child_master_tag_id = self._RepositoryGetMasterTagId( service_id, child_service_tag_id )
parent_master_tag_id = self._RepositoryGetMasterTagId( service_id, parent_service_tag_id )
child_tag = self._GetTag( child_master_tag_id )
parent_tag = self._GetTag( parent_master_tag_id )
content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_PARENTS, ( child_tag, parent_tag ) )
contents.append( content )
return HydrusNetwork.Petition( action, petitioner_account, reason, contents )
def _RepositoryGetTagSiblingPend( self, service_id ):
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
result = self._c.execute( 'SELECT account_id, reason_id FROM ' + pending_tag_siblings_table_name + ' ORDER BY RANDOM() LIMIT 1;' ).fetchone()
if result is None:
raise HydrusExceptions.NotFoundException( 'No petitions!' )
( petitioner_account_id, reason_id ) = result
action = HC.CONTENT_UPDATE_PEND
petitioner_account = self._GetAccount( service_id, petitioner_account_id )
reason = self._GetReason( reason_id )
pairs = self._c.execute( 'SELECT bad_master_tag_id, good_master_tag_id FROM ' + pending_tag_siblings_table_name + ' WHERE account_id = ? AND reason_id = ?;', ( petitioner_account_id, reason_id ) ).fetchall()
contents = []
for ( bad_master_tag_id, good_master_tag_id ) in pairs:
bad_tag = self._GetTag( bad_master_tag_id )
good_tag = self._GetTag( good_master_tag_id )
content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_SIBLINGS, ( bad_tag, good_tag ) )
contents.append( content )
return HydrusNetwork.Petition( action, petitioner_account, reason, contents )
def _RepositoryGetTagSiblingPetition( self, service_id ):
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
result = self._c.execute( 'SELECT account_id, reason_id FROM ' + petitioned_tag_siblings_table_name + ' ORDER BY RANDOM() LIMIT 1;' ).fetchone()
if result is None:
raise HydrusExceptions.NotFoundException( 'No petitions!' )
( petitioner_account_id, reason_id ) = result
action = HC.CONTENT_UPDATE_PETITION
petitioner_account = self._GetAccount( service_id, petitioner_account_id )
reason = self._GetReason( reason_id )
pairs = self._c.execute( 'SELECT bad_service_tag_id, good_service_tag_id FROM ' + petitioned_tag_siblings_table_name + ' WHERE account_id = ? AND reason_id = ?;', ( petitioner_account_id, reason_id ) ).fetchall()
contents = []
for ( bad_service_tag_id, good_service_tag_id ) in pairs:
bad_master_tag_id = self._RepositoryGetMasterTagId( service_id, bad_service_tag_id )
good_master_tag_id = self._RepositoryGetMasterTagId( service_id, good_service_tag_id )
bad_tag = self._GetTag( bad_master_tag_id )
good_tag = self._GetTag( good_master_tag_id )
content = HydrusNetwork.Content( HC.CONTENT_TYPE_TAG_SIBLINGS, ( bad_tag, good_tag ) )
contents.append( content )
return HydrusNetwork.Petition( action, petitioner_account, reason, contents )
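# _RepositoryHasFile below returns a ( has_file, mime ) pair; mime is None when the hash is
# unknown to the master table or the file is not current on this service.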
def _RepositoryHasFile( self, service_key, hash ):
if not self._MasterHashExists( hash ):
return ( False, None )
service_id = self._GetServiceId( service_key )
master_hash_id = self._GetMasterHashId( hash )
table_join = self._RepositoryGetFilesInfoFilesTableJoin( service_id, HC.CONTENT_STATUS_CURRENT )
result = self._c.execute( 'SELECT mime FROM ' + table_join + ' WHERE master_hash_id = ?;', ( master_hash_id, ) ).fetchone()
if result is None:
return ( False, None )
( mime, ) = result
return ( True, mime )
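# The two pend methods below only record a pend when both tags already exist on the service
# and the pair is not already current; otherwise the request is silently dropped.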
def _RepositoryPendTagParent( self, service_id, account_id, child_master_tag_id, parent_master_tag_id, reason_id ):
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
child_exists = self._RepositoryServiceTagIdExists( service_id, child_master_tag_id )
parent_exists = self._RepositoryServiceTagIdExists( service_id, parent_master_tag_id )
if child_exists and parent_exists:
child_service_tag_id = self._RepositoryGetServiceTagId( service_id, child_master_tag_id )
parent_service_tag_id = self._RepositoryGetServiceTagId( service_id, parent_master_tag_id )
result = self._c.execute( 'SELECT 1 FROM ' + current_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) ).fetchone()
if result is not None:
return
self._c.execute( 'REPLACE INTO ' + pending_tag_parents_table_name + ' ( child_master_tag_id, parent_master_tag_id, account_id, reason_id ) VALUES ( ?, ?, ?, ? );', ( child_master_tag_id, parent_master_tag_id, account_id, reason_id ) )
def _RepositoryPendTagSibling( self, service_id, account_id, bad_master_tag_id, good_master_tag_id, reason_id ):
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
bad_exists = self._RepositoryServiceTagIdExists( service_id, bad_master_tag_id )
good_exists = self._RepositoryServiceTagIdExists( service_id, good_master_tag_id )
if bad_exists and good_exists:
bad_service_tag_id = self._RepositoryGetServiceTagId( service_id, bad_master_tag_id )
good_service_tag_id = self._RepositoryGetServiceTagId( service_id, good_master_tag_id )
result = self._c.execute( 'SELECT 1 FROM ' + current_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) ).fetchone()
if result is not None:
return
self._c.execute( 'REPLACE INTO ' + pending_tag_siblings_table_name + ' ( bad_master_tag_id, good_master_tag_id, account_id, reason_id ) VALUES ( ?, ?, ?, ? );', ( bad_master_tag_id, good_master_tag_id, account_id, reason_id ) )
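# The petition methods below validate against the current tables first: file and mapping
# petitions are filtered down to rows that actually exist, and parent/sibling petitions are
# recorded only when the pair is currently in force.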
def _RepositoryPetitionFiles( self, service_id, account_id, service_hash_ids, reason_id ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
select_statement = 'SELECT service_hash_id FROM ' + current_files_table_name + ' WHERE service_hash_id IN %s;'
valid_service_hash_ids = [ service_hash_id for ( service_hash_id, ) in self._SelectFromList( select_statement, service_hash_ids ) ]
now = HydrusData.GetNow()
self._c.executemany( 'REPLACE INTO ' + petitioned_files_table_name + ' ( service_hash_id, account_id, reason_id ) VALUES ( ?, ?, ? );', ( ( service_hash_id, account_id, reason_id ) for service_hash_id in valid_service_hash_ids ) )
def _RepositoryPetitionMappings( self, service_id, account_id, service_tag_id, service_hash_ids, reason_id ):
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
select_statement = 'SELECT service_hash_id FROM ' + current_mappings_table_name + ' WHERE service_tag_id = ' + str( service_tag_id ) + ' AND service_hash_id IN %s;'
valid_service_hash_ids = [ service_hash_id for ( service_hash_id, ) in self._SelectFromList( select_statement, service_hash_ids ) ]
self._c.executemany( 'REPLACE INTO ' + petitioned_mappings_table_name + ' ( service_tag_id, service_hash_id, account_id, reason_id ) VALUES ( ?, ?, ?, ? );', [ ( service_tag_id, service_hash_id, account_id, reason_id ) for service_hash_id in valid_service_hash_ids ] )
def _RepositoryPetitionTagParent( self, service_id, account_id, child_service_tag_id, parent_service_tag_id, reason_id ):
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
result = self._c.execute( 'SELECT 1 FROM ' + current_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) ).fetchone()
if result is None:
return
self._c.execute( 'REPLACE INTO ' + petitioned_tag_parents_table_name + ' ( child_service_tag_id, parent_service_tag_id, account_id, reason_id ) VALUES ( ?, ?, ?, ? );', ( child_service_tag_id, parent_service_tag_id, account_id, reason_id ) )
def _RepositoryPetitionTagSibling( self, service_id, account_id, bad_service_tag_id, good_service_tag_id, reason_id ):
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
result = self._c.execute( 'SELECT 1 FROM ' + current_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) ).fetchone()
if result is None:
return
self._c.execute( 'REPLACE INTO ' + petitioned_tag_siblings_table_name + ' ( bad_service_tag_id, good_service_tag_id, account_id, reason_id ) VALUES ( ?, ?, ?, ? );', ( bad_service_tag_id, good_service_tag_id, account_id, reason_id ) )
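# _RepositoryProcessAddFile below is the write entry point for new files. Accounts that can
# only CREATE ( not OVERRULE ) are held to the service's max storage, measured as the sum of
# current and pending file sizes plus the size of the incoming file.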
def _RepositoryProcessAddFile( self, service, account, file_dict ):
service_key = service.GetServiceKey()
service_id = self._GetServiceId( service_key )
account_key = account.GetAccountKey()
account_id = self._GetAccountId( account_key )
can_petition_files = account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_PETITION )
can_create_files = account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_CREATE )
can_overrule_files = account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_OVERRULE )
# later: add proper 'pend file' handling here, in whatever way is neatest
if can_create_files or can_overrule_files:
if not can_overrule_files:
max_storage = service.GetMaxStorage()
if max_storage is not None:
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
table_join = self._RepositoryGetFilesInfoFilesTableJoin( service_id, HC.CONTENT_STATUS_CURRENT )
( total_current_storage, ) = self._c.execute( 'SELECT SUM( size ) FROM ' + table_join + ';' ).fetchone()
if total_current_storage is None:
total_current_storage = 0
table_join = self._RepositoryGetFilesInfoFilesTableJoin( service_id, HC.CONTENT_STATUS_PENDING )
( total_pending_storage, ) = self._c.execute( 'SELECT SUM( size ) FROM ' + table_join + ';' ).fetchone()
if total_pending_storage is None:
total_pending_storage = 0
if total_current_storage + total_pending_storage + file_dict[ 'size' ] > max_storage:
raise HydrusExceptions.PermissionException( 'This repository is full up and cannot take any more files!' )
overwrite_deleted = can_overrule_files
self._RepositoryAddFile( service_id, account_id, file_dict, overwrite_deleted )
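# _RepositoryProcessClientToServerUpdate below walks every content type in the client's
# update. For each action, the account's permissions decide whether the change is applied
# immediately ( CREATE / OVERRULE ) or only recorded as a pend or petition ( PETITION ).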
def _RepositoryProcessClientToServerUpdate( self, service_key, account, client_to_server_update ):
service_id = self._GetServiceId( service_key )
account_key = account.GetAccountKey()
account_id = self._GetAccountId( account_key )
can_petition_files = account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_PETITION )
can_overrule_files = account.HasPermission( HC.CONTENT_TYPE_FILES, HC.PERMISSION_ACTION_OVERRULE )
can_petition_mappings = account.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_PETITION )
can_create_mappings = account.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_CREATE )
can_overrule_mappings = account.HasPermission( HC.CONTENT_TYPE_MAPPINGS, HC.PERMISSION_ACTION_OVERRULE )
can_petition_tag_parents = account.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_PETITION )
can_create_tag_parents = account.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_CREATE )
can_overrule_tag_parents = account.HasPermission( HC.CONTENT_TYPE_TAG_PARENTS, HC.PERMISSION_ACTION_OVERRULE )
can_petition_tag_siblings = account.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_PETITION )
can_create_tag_siblings = account.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_CREATE )
can_overrule_tag_siblings = account.HasPermission( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.PERMISSION_ACTION_OVERRULE )
if can_overrule_files or can_petition_files:
for ( hashes, reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_PETITION ):
master_hash_ids = self._GetMasterHashIds( hashes )
service_hash_ids = self._RepositoryGetServiceHashIds( service_id, master_hash_ids )
if can_overrule_files:
self._RepositoryDeleteFiles( service_id, account_id, service_hash_ids )
elif can_petition_files:
reason_id = self._GetReasonId( reason )
self._RepositoryPetitionFiles( service_id, account_id, service_hash_ids, reason_id )
if can_overrule_files:
for ( hashes, reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_FILES, HC.CONTENT_UPDATE_DENY_PETITION ):
master_hash_ids = self._GetMasterHashIds( hashes )
service_hash_ids = self._RepositoryGetServiceHashIds( service_id, master_hash_ids )
self._RepositoryDenyFilePetition( service_id, service_hash_ids )
#
# later: add proper 'pend mappings' handling here, in whatever way is neatest
if can_create_mappings or can_overrule_mappings:
for ( ( tag, hashes ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_PEND ):
master_tag_id = self._GetMasterTagId( tag )
master_hash_ids = self._GetMasterHashIds( hashes )
overwrite_deleted = can_overrule_mappings
self._RepositoryAddMappings( service_id, account_id, master_tag_id, master_hash_ids, overwrite_deleted )
if can_overrule_mappings or can_petition_mappings:
for ( ( tag, hashes ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_PETITION ):
master_tag_id = self._GetMasterTagId( tag )
service_tag_id = self._RepositoryGetServiceTagId( service_id, master_tag_id )
master_hash_ids = self._GetMasterHashIds( hashes )
service_hash_ids = self._RepositoryGetServiceHashIds( service_id, master_hash_ids )
if can_overrule_mappings:
self._RepositoryDeleteMappings( service_id, account_id, service_tag_id, service_hash_ids )
elif can_petition_mappings:
reason_id = self._GetReasonId( reason )
self._RepositoryPetitionMappings( service_id, account_id, service_tag_id, service_hash_ids, reason_id )
if can_overrule_mappings:
for ( ( tag, hashes ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_DENY_PETITION ):
master_tag_id = self._GetMasterTagId( tag )
service_tag_id = self._RepositoryGetServiceTagId( service_id, master_tag_id )
master_hash_ids = self._GetMasterHashIds( hashes )
service_hash_ids = self._RepositoryGetServiceHashIds( service_id, master_hash_ids )
self._RepositoryDenyMappingPetition( service_id, service_tag_id, service_hash_ids )
#
if can_create_tag_parents or can_overrule_tag_parents or can_petition_tag_parents:
for ( ( child_tag, parent_tag ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_PEND ):
child_master_tag_id = self._GetMasterTagId( child_tag )
parent_master_tag_id = self._GetMasterTagId( parent_tag )
if can_create_tag_parents or can_overrule_tag_parents:
overwrite_deleted = can_overrule_tag_parents
self._RepositoryAddTagParent( service_id, account_id, child_master_tag_id, parent_master_tag_id, overwrite_deleted )
elif can_petition_tag_parents:
reason_id = self._GetReasonId( reason )
self._RepositoryPendTagParent( service_id, account_id, child_master_tag_id, parent_master_tag_id, reason_id )
for ( ( child_tag, parent_tag ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_PETITION ):
child_master_tag_id = self._GetMasterTagId( child_tag )
parent_master_tag_id = self._GetMasterTagId( parent_tag )
child_service_tag_id = self._RepositoryGetServiceTagId( service_id, child_master_tag_id )
parent_service_tag_id = self._RepositoryGetServiceTagId( service_id, parent_master_tag_id )
if can_overrule_tag_parents:
self._RepositoryDeleteTagParent( service_id, account_id, child_service_tag_id, parent_service_tag_id )
elif can_petition_tag_parents:
reason_id = self._GetReasonId( reason )
self._RepositoryPetitionTagParent( service_id, account_id, child_service_tag_id, parent_service_tag_id, reason_id )
if can_overrule_tag_parents:
for ( ( child_tag, parent_tag ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_DENY_PEND ):
child_master_tag_id = self._GetMasterTagId( child_tag )
parent_master_tag_id = self._GetMasterTagId( parent_tag )
self._RepositoryDenyTagParentPend( service_id, child_master_tag_id, parent_master_tag_id )
for ( ( child_tag, parent_tag ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_TAG_PARENTS, HC.CONTENT_UPDATE_DENY_PETITION ):
child_master_tag_id = self._GetMasterTagId( child_tag )
parent_master_tag_id = self._GetMasterTagId( parent_tag )
child_service_tag_id = self._RepositoryGetServiceTagId( service_id, child_master_tag_id )
parent_service_tag_id = self._RepositoryGetServiceTagId( service_id, parent_master_tag_id )
self._RepositoryDenyTagParentPetition( service_id, child_service_tag_id, parent_service_tag_id )
#
if can_create_tag_siblings or can_overrule_tag_siblings or can_petition_tag_siblings:
for ( ( bad_tag, good_tag ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_PEND ):
bad_master_tag_id = self._GetMasterTagId( bad_tag )
good_master_tag_id = self._GetMasterTagId( good_tag )
if can_create_tag_siblings or can_overrule_tag_siblings:
overwrite_deleted = can_overrule_tag_siblings
self._RepositoryAddTagSibling( service_id, account_id, bad_master_tag_id, good_master_tag_id, overwrite_deleted )
elif can_petition_tag_siblings:
reason_id = self._GetReasonId( reason )
self._RepositoryPendTagSibling( service_id, account_id, bad_master_tag_id, good_master_tag_id, reason_id )
for ( ( bad_tag, good_tag ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_PETITION ):
bad_master_tag_id = self._GetMasterTagId( bad_tag )
good_master_tag_id = self._GetMasterTagId( good_tag )
bad_service_tag_id = self._RepositoryGetServiceTagId( service_id, bad_master_tag_id )
good_service_tag_id = self._RepositoryGetServiceTagId( service_id, good_master_tag_id )
if can_overrule_tag_siblings:
self._RepositoryDeleteTagSibling( service_id, account_id, bad_service_tag_id, good_service_tag_id )
elif can_petition_tag_siblings:
reason_id = self._GetReasonId( reason )
self._RepositoryPetitionTagSibling( service_id, account_id, bad_service_tag_id, good_service_tag_id, reason_id )
if can_overrule_tag_siblings:
for ( ( bad_tag, good_tag ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_DENY_PEND ):
bad_master_tag_id = self._GetMasterTagId( bad_tag )
good_master_tag_id = self._GetMasterTagId( good_tag )
self._RepositoryDenyTagSiblingPend( service_id, bad_master_tag_id, good_master_tag_id )
for ( ( bad_tag, good_tag ), reason ) in client_to_server_update.GetContentDataIterator( HC.CONTENT_TYPE_TAG_SIBLINGS, HC.CONTENT_UPDATE_DENY_PETITION ):
bad_master_tag_id = self._GetMasterTagId( bad_tag )
good_master_tag_id = self._GetMasterTagId( good_tag )
bad_service_tag_id = self._RepositoryGetServiceTagId( service_id, bad_master_tag_id )
good_service_tag_id = self._RepositoryGetServiceTagId( service_id, good_master_tag_id )
self._RepositoryDenyTagSiblingPetition( service_id, bad_service_tag_id, good_service_tag_id )
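# The reward methods below grant petition scores: file and mapping petitioners are scored by
# row count, while tag parent/sibling penders and petitioners are weighted by the current
# mappings count of the affected tag, with a minimum weight of 1.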
def _RepositoryRewardFilePetitioners( self, service_id, service_hash_ids, multiplier ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
select_statement = 'SELECT account_id, COUNT( * ) FROM ' + petitioned_files_table_name + ' WHERE service_hash_id IN %s GROUP BY account_id;'
scores = [ ( account_id, count * multiplier ) for ( account_id, count ) in self._SelectFromList( select_statement, service_hash_ids ) ]
self._RewardAccounts( service_id, HC.SCORE_PETITION, scores )
def _RepositoryRewardMappingPetitioners( self, service_id, service_tag_id, service_hash_ids, multiplier ):
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
select_statement = 'SELECT account_id, COUNT( * ) FROM ' + petitioned_mappings_table_name + ' WHERE service_tag_id = ' + str( service_tag_id ) + ' AND service_hash_id IN %s GROUP BY account_id;'
scores = [ ( account_id, count * multiplier ) for ( account_id, count ) in self._SelectFromList( select_statement, service_hash_ids ) ]
self._RewardAccounts( service_id, HC.SCORE_PETITION, scores )
def _RepositoryRewardTagParentPenders( self, service_id, child_master_tag_id, parent_master_tag_id, multiplier ):
child_service_tag_id = self._RepositoryGetServiceTagId( service_id, child_master_tag_id )
score = self._RepositoryGetCurrentMappingsCount( service_id, child_service_tag_id )
score = max( score, 1 )
weighted_score = score * multiplier
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
account_ids = [ account_id for ( account_id, ) in self._c.execute( 'SELECT account_id FROM ' + pending_tag_parents_table_name + ' WHERE child_master_tag_id = ? AND parent_master_tag_id = ?;', ( child_master_tag_id, parent_master_tag_id ) ) ]
scores = [ ( account_id, weighted_score ) for account_id in account_ids ]
self._RewardAccounts( service_id, HC.SCORE_PETITION, scores )
def _RepositoryRewardTagParentPetitioners( self, service_id, child_service_tag_id, parent_service_tag_id, multiplier ):
score = self._RepositoryGetCurrentMappingsCount( service_id, child_service_tag_id )
score = max( score, 1 )
weighted_score = score * multiplier
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
account_ids = [ account_id for ( account_id, ) in self._c.execute( 'SELECT account_id FROM ' + petitioned_tag_parents_table_name + ' WHERE child_service_tag_id = ? AND parent_service_tag_id = ?;', ( child_service_tag_id, parent_service_tag_id ) ) ]
scores = [ ( account_id, weighted_score ) for account_id in account_ids ]
self._RewardAccounts( service_id, HC.SCORE_PETITION, scores )
def _RepositoryRewardTagSiblingPenders( self, service_id, bad_master_tag_id, good_master_tag_id, multiplier ):
bad_service_tag_id = self._RepositoryGetServiceTagId( service_id, bad_master_tag_id )
score = self._RepositoryGetCurrentMappingsCount( service_id, bad_service_tag_id )
score = max( score, 1 )
weighted_score = score * multiplier
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
account_ids = [ account_id for ( account_id, ) in self._c.execute( 'SELECT account_id FROM ' + pending_tag_siblings_table_name + ' WHERE bad_master_tag_id = ? AND good_master_tag_id = ?;', ( bad_master_tag_id, good_master_tag_id ) ) ]
scores = [ ( account_id, weighted_score ) for account_id in account_ids ]
self._RewardAccounts( service_id, HC.SCORE_PETITION, scores )
def _RepositoryRewardTagSiblingPetitioners( self, service_id, bad_service_tag_id, good_service_tag_id, multiplier ):
score = self._RepositoryGetCurrentMappingsCount( service_id, bad_service_tag_id )
score = max( score, 1 )
weighted_score = score * multiplier
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
account_ids = [ account_id for ( account_id, ) in self._c.execute( 'SELECT account_id FROM ' + petitioned_tag_siblings_table_name + ' WHERE bad_service_tag_id = ? AND good_service_tag_id = ?;', ( bad_service_tag_id, good_service_tag_id ) ) ]
scores = [ ( account_id, weighted_score ) for account_id in account_ids ]
self._RewardAccounts( service_id, HC.SCORE_PETITION, scores )
def _RepositoryServiceHashIdExists( self, service_id, master_hash_id ):
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
result = self._c.execute( 'SELECT 1 FROM ' + hash_id_map_table_name + ' WHERE master_hash_id = ?;', ( master_hash_id, ) ).fetchone()
if result is None:
return False
else:
return True
def _RepositoryServiceTagIdExists( self, service_id, master_tag_id ):
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryMasterMapTableNames( service_id )
result = self._c.execute( 'SELECT 1 FROM ' + tag_id_map_table_name + ' WHERE master_tag_id = ?;', ( master_tag_id, ) ).fetchone()
if result is None:
return False
else:
return True
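# _RepositorySuperBan below deletes everything the subject accounts still have current on
# the service: files, mappings, tag parents and tag siblings, all on the admin's authority.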
def _RepositorySuperBan( self, service_id, admin_account_id, subject_account_ids ):
( current_files_table_name, deleted_files_table_name, pending_files_table_name, petitioned_files_table_name, ip_addresses_table_name ) = GenerateRepositoryFilesTableNames( service_id )
select_statement = 'SELECT service_hash_id FROM ' + current_files_table_name + ' WHERE account_id IN %s;'
service_hash_ids = [ service_hash_id for ( service_hash_id, ) in self._SelectFromList( select_statement, subject_account_ids ) ]
if len( service_hash_ids ) > 0:
self._RepositoryDeleteFiles( service_id, admin_account_id, service_hash_ids )
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateRepositoryMappingsTableNames( service_id )
select_statement = 'SELECT service_tag_id, service_hash_id FROM ' + current_mappings_table_name + ' WHERE account_id IN %s;'
mappings_dict = HydrusData.BuildKeyToListDict( self._SelectFromList( select_statement, subject_account_ids ) )
if len( mappings_dict ) > 0:
for ( service_tag_id, service_hash_ids ) in mappings_dict.items():
self._RepositoryDeleteMappings( service_id, admin_account_id, service_tag_id, service_hash_ids )
( current_tag_parents_table_name, deleted_tag_parents_table_name, pending_tag_parents_table_name, petitioned_tag_parents_table_name ) = GenerateRepositoryTagParentsTableNames( service_id )
select_statement = 'SELECT child_service_tag_id, parent_service_tag_id FROM ' + current_tag_parents_table_name + ' WHERE account_id IN %s;'
pairs = self._SelectFromListFetchAll( select_statement, subject_account_ids )
if len( pairs ) > 0:
for ( child_service_tag_id, parent_service_tag_id ) in pairs:
self._RepositoryDeleteTagParent( service_id, admin_account_id, child_service_tag_id, parent_service_tag_id )
( current_tag_siblings_table_name, deleted_tag_siblings_table_name, pending_tag_siblings_table_name, petitioned_tag_siblings_table_name ) = GenerateRepositoryTagSiblingsTableNames( service_id )
select_statement = 'SELECT bad_service_tag_id, good_service_tag_id FROM ' + current_tag_siblings_table_name + ' WHERE account_id IN %s;'
pairs = self._SelectFromListFetchAll( select_statement, subject_account_ids )
if len( pairs ) > 0:
for ( bad_service_tag_id, good_service_tag_id ) in pairs:
self._RepositoryDeleteTagSibling( service_id, admin_account_id, bad_service_tag_id, good_service_tag_id )
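# _RewardAccounts below uses INSERT OR IGNORE to create a zeroed account_scores row if one
# is missing, then applies the score delta with an UPDATE.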
def _RewardAccounts( self, service_id, score_type, scores ):
self._c.executemany( 'INSERT OR IGNORE INTO account_scores ( service_id, account_id, score_type, score ) VALUES ( ?, ?, ?, ? );', [ ( service_id, account_id, score_type, 0 ) for ( account_id, score ) in scores ] )
self._c.executemany( 'UPDATE account_scores SET score = score + ? WHERE service_id = ? AND account_id = ? and score_type = ?;', [ ( score, service_id, account_id, score_type ) for ( account_id, score ) in scores ] )
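# The save methods below serialise each object's dictionary back to its row and call
# SetClean() so the in-memory copy is no longer considered dirty.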
def _SaveAccounts( self, service_id, accounts ):
for account in accounts:
( account_key, account_type, created, expires, dictionary ) = HydrusNetwork.Account.GenerateTupleFromAccount( account )
account_type_key = account_type.GetAccountTypeKey()
account_type_id = self._GetAccountTypeId( service_id, account_type_key )
dictionary_string = dictionary.DumpToString()
self._c.execute( 'UPDATE accounts SET account_type_id = ?, expires = ?, dictionary_string = ? WHERE account_key = ?;', ( account_type_id, expires, dictionary_string, sqlite3.Binary( account_key ) ) )
account.SetClean()
def _SaveDirtyAccounts( self, service_keys_to_dirty_accounts ):
for ( service_key, dirty_accounts ) in service_keys_to_dirty_accounts.items():
service_id = self._GetServiceId( service_key )
self._SaveAccounts( service_id, dirty_accounts )
def _SaveDirtyServices( self, dirty_services ):
self._SaveServices( dirty_services )
def _SaveServices( self, services ):
for service in services:
( service_key, service_type, name, port, dictionary ) = service.ToTuple()
dictionary_string = dictionary.DumpToString()
self._c.execute( 'UPDATE services SET dictionary_string = ? WHERE service_key = ?;', ( dictionary_string, sqlite3.Binary( service_key ) ) )
service.SetClean()
def _UnbanKey( self, service_key, account_key ):
service_id = self._GetServiceId( service_key )
account_id = self._GetAccountId( account_key )
account = self._GetAccount( service_id, account_id )
account.Unban()
self._SaveAccounts( service_id, [ account ] )
def _UpdateDB( self, version ):
HydrusData.Print( 'The server is updating to version ' + str( version + 1 ) )
# all updates timed out, 244->245 was the last
HydrusData.Print( 'The server has updated to version ' + str( version + 1 ) )
self._c.execute( 'UPDATE version SET version = ?;', ( version + 1, ) )
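# _VerifyAccessKey below accepts an access key whose sha256 hash matches an existing
# account, or failing that one that matches a row in registration_keys, so a freshly issued
# key can authenticate too.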
def _VerifyAccessKey( self, service_key, access_key ):
service_id = self._GetServiceId( service_key )
result = self._c.execute( 'SELECT 1 FROM accounts WHERE service_id = ? AND hashed_access_key = ?;', ( service_id, sqlite3.Binary( hashlib.sha256( access_key ).digest() ) ) ).fetchone()
if result is None:
result = self._c.execute( 'SELECT 1 FROM registration_keys WHERE service_id = ? AND access_key = ?;', ( service_id, sqlite3.Binary( access_key ) ) ).fetchone()
if result is None:
return False
return True
def _Write( self, action, *args, **kwargs ):
if action == 'accounts': result = self._ModifyAccounts( *args, **kwargs )
elif action == 'account_types': result = self._ModifyAccountTypes( *args, **kwargs )
elif action == 'analyze': result = self._Analyze( *args, **kwargs )
elif action == 'backup': result = self._Backup( *args, **kwargs )
elif action == 'create_update': result = self._RepositoryCreateUpdate( *args, **kwargs )
elif action == 'delete_orphans': result = self._DeleteOrphans( *args, **kwargs )
elif action == 'dirty_accounts': result = self._SaveDirtyAccounts( *args, **kwargs )
elif action == 'dirty_services': result = self._SaveDirtyServices( *args, **kwargs )
elif action == 'file': result = self._RepositoryProcessAddFile( *args, **kwargs )
elif action == 'services': result = self._ModifyServices( *args, **kwargs )
elif action == 'session': result = self._AddSession( *args, **kwargs )
elif action == 'update': result = self._RepositoryProcessClientToServerUpdate( *args, **kwargs )
else: raise Exception( 'db received an unknown write command: ' + action )
return result
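# A minimal usage sketch for the write dispatcher above, assuming a constructed ServerDB
# instance named db ( the name is illustrative only ); argument shapes follow the handler
# signatures defined earlier in this file:
#
# db._Write( 'file', service, account, file_dict ) # -> _RepositoryProcessAddFile
# db._Write( 'update', service_key, account, client_to_server_update ) # -> _RepositoryProcessClientToServerUpdate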
def GetFilesDir( self ):
return self._files_dir
def GetSSLPaths( self ):
return ( self._ssl_cert_path, self._ssl_key_path )