import hashlib
import io
import os
import typing
import numpy
import numpy.core.multiarray # important this comes before cv!
import warnings

try:
    # more hidden imports for pyinstaller
    import numpy.random.common # pylint: disable=E0401
    import numpy.random.bounded_integers # pylint: disable=E0401
    import numpy.random.entropy # pylint: disable=E0401
except:
    pass # old version of numpy, screw it

from PIL import ImageFile as PILImageFile
from PIL import Image as PILImage
from PIL import ImageCms as PILImageCms

try:
    from pillow_heif import register_heif_opener
    from pillow_heif import register_avif_opener
    
    register_heif_opener(thumbnails=False)
    register_avif_opener(thumbnails=False)
    
    HEIF_OK = True
except:
    HEIF_OK = False

from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusPaths
from hydrus.core import HydrusTemp
from hydrus.core import HydrusPSDHandling
from hydrus.external import blurhash

PIL_SRGB_PROFILE = PILImageCms.createProfile( 'sRGB' )

def EnableLoadTruncatedImages():
    
    if hasattr( PILImageFile, 'LOAD_TRUNCATED_IMAGES' ):
        
        # this can now cause load hangs due to the trunc load code adding infinite fake EOFs to the file stream, wew lad
        # hence debug only
        PILImageFile.LOAD_TRUNCATED_IMAGES = True
        
        return True
        
    else:
        
        return False
        

if not hasattr( PILImage, 'DecompressionBombError' ):
    
    # super old versions don't have this, so let's just make a stub, wew
    
    class DBEStub( Exception ):
        pass
    
    PILImage.DecompressionBombError = DBEStub

if not hasattr( PILImage, 'DecompressionBombWarning' ):
    
    # super old versions don't have this, so let's just make a stub, wew
    
    class DBWStub( Exception ):
        pass
    
    PILImage.DecompressionBombWarning = DBWStub

warnings.simplefilter( 'ignore', PILImage.DecompressionBombWarning )
warnings.simplefilter( 'ignore', PILImage.DecompressionBombError )

# PIL moaning about weirdo TIFFs
warnings.filterwarnings( "ignore", "(Possibly )?corrupt EXIF data", UserWarning )
warnings.filterwarnings( "ignore", "Metadata Warning", UserWarning )

OLD_PIL_MAX_IMAGE_PIXELS = PILImage.MAX_IMAGE_PIXELS
PILImage.MAX_IMAGE_PIXELS = None # this turns off decomp check entirely, wew

PIL_ONLY_MIMETYPES = { HC.ANIMATION_GIF, HC.IMAGE_ICON, HC.IMAGE_WEBP, HC.IMAGE_QOI, HC.IMAGE_BMP }.union( HC.PIL_HEIF_MIMES )

try:
    
    import cv2
    
    if cv2.__version__.startswith( '2' ):
        
        CV_IMREAD_FLAGS_PNG = cv2.CV_LOAD_IMAGE_UNCHANGED
        CV_IMREAD_FLAGS_JPEG = CV_IMREAD_FLAGS_PNG
        CV_IMREAD_FLAGS_WEIRD = CV_IMREAD_FLAGS_PNG
        
        CV_JPEG_THUMBNAIL_ENCODE_PARAMS = []
        CV_PNG_THUMBNAIL_ENCODE_PARAMS = []
        
    else:
        
        # allows alpha channel
        CV_IMREAD_FLAGS_PNG = cv2.IMREAD_UNCHANGED
        
        # this preserves colour info but does EXIF reorientation and flipping
        CV_IMREAD_FLAGS_JPEG = cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR
        
        # this seems to allow weirdass tiffs to load as non greyscale, although the LAB conversion 'whitepoint' or whatever can be wrong
        CV_IMREAD_FLAGS_WEIRD = CV_IMREAD_FLAGS_PNG
        
        CV_JPEG_THUMBNAIL_ENCODE_PARAMS = [ cv2.IMWRITE_JPEG_QUALITY, 92 ]
        CV_PNG_THUMBNAIL_ENCODE_PARAMS = [ cv2.IMWRITE_PNG_COMPRESSION, 9 ]
        
    
    OPENCV_OK = True
    
except:
    
    OPENCV_OK = False
    

def MakeClipRectFit( image_resolution, clip_rect ):
    
    ( im_width, im_height ) = image_resolution
    ( x, y, clip_width, clip_height ) = clip_rect
    
    x = max( 0, x )
    y = max( 0, y )
    
    clip_width = min( clip_width, im_width )
    clip_height = min( clip_height, im_height )
    
    if x + clip_width > im_width:
        
        x = im_width - clip_width
        
    
    if y + clip_height > im_height:
        
        y = im_height - clip_height
        
    
    return ( x, y, clip_width, clip_height )
    

def ClipNumPyImage( numpy_image: numpy.array, clip_rect ):
    
    if len( numpy_image.shape ) == 3:
        
        ( im_height, im_width, depth ) = numpy_image.shape
        
    else:
        
        ( im_height, im_width ) = numpy_image.shape
        
    
    ( x, y, clip_width, clip_height ) = MakeClipRectFit( ( im_width, im_height ), clip_rect )
    
    return numpy_image[ y : y + clip_height, x : x + clip_width ]
    

def ClipPILImage( pil_image: PILImage.Image, clip_rect ):
    
    ( x, y, clip_width, clip_height ) = MakeClipRectFit( pil_image.size, clip_rect )
    
    return pil_image.crop( box = ( x, y, x + clip_width, y + clip_height ) )
    

def DequantizeNumPyImage( numpy_image: numpy.array ) -> numpy.array:
    
    # OpenCV loads images in BGR, and we want to normalise to RGB in general
    
    if numpy_image.dtype == 'uint16':
        
        numpy_image = numpy.array( numpy_image // 256, dtype = 'uint8' )
        
    
    shape = numpy_image.shape
    
    if len( shape ) == 2:
        
        # monochrome image
        convert = cv2.COLOR_GRAY2RGB
        
    else:
        
        ( im_y, im_x, depth ) = shape
        
        if depth == 4:
            
            convert = cv2.COLOR_BGRA2RGBA
            
        else:
            
            convert = cv2.COLOR_BGR2RGB
            
        
    
    numpy_image = cv2.cvtColor( numpy_image, convert )
    
    return numpy_image
    

def DequantizePILImage( pil_image: PILImage.Image ) -> PILImage.Image:
    
    if HasICCProfile( pil_image ):
        
        try:
            
            pil_image = NormaliseICCProfilePILImageToSRGB( pil_image )
            
        except Exception as e:
            
            HydrusData.ShowException( e )
            
            HydrusData.ShowText( 'Failed to normalise image ICC profile.' )
            
        
    
    pil_image = NormalisePILImageToRGB( pil_image )
    
    return pil_image
    

def GenerateNumPyImage( path, mime, force_pil = False ) -> numpy.array:
    
    if HG.media_load_report_mode:
        
        HydrusData.ShowText( 'Loading media: ' + path )
        
    
    if mime == HC.APPLICATION_PSD:
        
        if HG.media_load_report_mode:
            
            HydrusData.ShowText( 'Loading PSD' )
            
        
        pil_image = HydrusPSDHandling.MergedPILImageFromPSD( path )
        
        pil_image = DequantizePILImage( pil_image )
        
        numpy_image = GenerateNumPyImageFromPILImage( pil_image )
        
        return StripOutAnyUselessAlphaChannel( numpy_image )
        
    
    if not OPENCV_OK:
        
        force_pil = True
        
    
    if not force_pil:
        
        try:
            
            pil_image = RawOpenPILImage( path )
            
            try:
                
                pil_image.verify()
                
            except:
                
                raise HydrusExceptions.UnsupportedFileException()
                
            
            # I and F are some sort of 32-bit monochrome or whatever, doesn't seem to work in PIL well, with or without ICC
            if pil_image.mode not in ( 'I', 'F' ):
                
                if pil_image.mode == 'LAB':
                    
                    force_pil = True
                    
                
                if HasICCProfile( pil_image ):
                    
                    if HG.media_load_report_mode:
                        
                        HydrusData.ShowText( 'Image has ICC, so switching to PIL' )
                        
                    
                    force_pil = True
                    
                
            
        except HydrusExceptions.UnsupportedFileException:
            
            # pil had trouble, let's cross our fingers cv can do it
            pass
            
        
    
    if mime in PIL_ONLY_MIMETYPES or force_pil:
        
        if HG.media_load_report_mode:
            
            HydrusData.ShowText( 'Loading with PIL' )
            
        
        pil_image = GeneratePILImage( path )
        
        numpy_image = GenerateNumPyImageFromPILImage( pil_image )
        
    else:
        
        if HG.media_load_report_mode:
            
            HydrusData.ShowText( 'Loading with OpenCV' )
            
        
        if mime in ( HC.IMAGE_JPEG, HC.IMAGE_TIFF ):
            
            flags = CV_IMREAD_FLAGS_JPEG
            
        elif mime == HC.IMAGE_PNG:
            
            flags = CV_IMREAD_FLAGS_PNG
            
        else:
            
            flags = CV_IMREAD_FLAGS_WEIRD
            
        
        numpy_image = cv2.imread( path, flags = flags )
        
        if numpy_image is None: # doesn't support some random stuff
            
            if HG.media_load_report_mode:
                
                HydrusData.ShowText( 'OpenCV Failed, loading with PIL' )
                
            
            pil_image = GeneratePILImage( path )
            
            numpy_image = GenerateNumPyImageFromPILImage( pil_image )
            
        else:
            
            numpy_image = DequantizeNumPyImage( numpy_image )
            
        
    
    numpy_image = StripOutAnyUselessAlphaChannel( numpy_image )
    
    return numpy_image
    
def GenerateNumPyImageFromPILImage( pil_image: PILImage.Image ) -> numpy.array:
    
    # this seems to magically work, I guess asarray either has a match for Image or Image provides some common shape/datatype properties that it can hook into
    return numpy.asarray( pil_image )
    
    # old method:
    '''
    ( w, h ) = pil_image.size
    
    try:
        
        s = pil_image.tobytes()
        
    except OSError as e: # e.g. OSError: unrecognized data stream contents when reading image file
        
        raise HydrusExceptions.UnsupportedFileException( str( e ) )
        
    
    depth = len( s ) // ( w * h )
    
    return numpy.fromstring( s, dtype = 'uint8' ).reshape( ( h, w, depth ) )
    '''
    

def GeneratePILImage( path, dequantize = True ) -> PILImage.Image:
    
    pil_image = RawOpenPILImage( path )
    
    if pil_image is None:
        
        raise Exception( 'The file at {} could not be rendered!'.format( path ) )
        
    
    pil_image = RotateEXIFPILImage( pil_image )
    
    if dequantize:
        
        # note this destroys animated gifs atm, it collapses down to one frame
        pil_image = DequantizePILImage( pil_image )
        
    
    return pil_image
    

def GeneratePILImageFromNumPyImage( numpy_image: numpy.array ) -> PILImage.Image:
    
    # I'll leave this here as a neat artifact, but I really shouldn't ever be making a PIL from a cv2 image. the only PIL benefits are the .info dict, which this won't generate
    
    if len( numpy_image.shape ) == 2:
        
        ( h, w ) = numpy_image.shape
        
        format = 'L'
        
    else:
        
        ( h, w, depth ) = numpy_image.shape
        
        if depth == 1:
            
            format = 'L'
            
        elif depth == 2:
            
            format = 'LA'
            
        elif depth == 3:
            
            format = 'RGB'
            
        elif depth == 4:
            
            format = 'RGBA'
            
        
    
    pil_image = PILImage.frombytes( format, ( w, h ), numpy_image.data.tobytes() )
    
    return pil_image
    

def GenerateThumbnailNumPyFromStaticImagePath( path, target_resolution, mime, clip_rect = None ):
    
    if OPENCV_OK:
        
        numpy_image = GenerateNumPyImage( path, mime )
        
        if clip_rect is not None:
            
            numpy_image = ClipNumPyImage( numpy_image, clip_rect )
            
        
        thumbnail_numpy_image = ResizeNumPyImage( numpy_image, target_resolution )
        
        return thumbnail_numpy_image
        
    
    pil_image = GeneratePILImage( path )
    
    if clip_rect is not None:
        
        pil_image = ClipPILImage( pil_image, clip_rect )
        
    
    thumbnail_pil_image = pil_image.resize( target_resolution, PILImage.LANCZOS )
    
    thumbnail_numpy_image = GenerateNumPyImageFromPILImage( thumbnail_pil_image )
    
    return thumbnail_numpy_image
    

def GenerateThumbnailBytesNumPy( numpy_image ) -> bytes:
    
    # strip any useless alpha first, then read the shape, so 'depth' reflects what we actually encode
    numpy_image = StripOutAnyUselessAlphaChannel( numpy_image )
    
    ( im_height, im_width, depth ) = numpy_image.shape
    
    if depth == 4:
        
        convert = cv2.COLOR_RGBA2BGRA
        
    else:
        
        convert = cv2.COLOR_RGB2BGR
        
    
    numpy_image = cv2.cvtColor( numpy_image, convert )
    
    ( im_height, im_width, depth ) = numpy_image.shape
    
    if depth == 4:
        
        ext = '.png'
        
        params = CV_PNG_THUMBNAIL_ENCODE_PARAMS
        
    else:
        
        ext = '.jpg'
        
        params = CV_JPEG_THUMBNAIL_ENCODE_PARAMS
        
    
    ( result_success, result_byte_array ) = cv2.imencode( ext, numpy_image, params )
    
    if result_success:
        
        thumbnail_bytes = result_byte_array.tobytes()
        
        return thumbnail_bytes
        
    else:
        
        raise HydrusExceptions.CantRenderWithCVException( 'Thumb failed to encode!' )
        
    

def GenerateThumbnailBytesPIL( pil_image: PILImage.Image ) -> bytes:
    
    f = io.BytesIO()
    
    if PILImageHasTransparency( pil_image ):
        
        pil_image.save( f, 'PNG' )
        
    else:
        
        pil_image.save( f, 'JPEG', quality = 92 )
        
    
    f.seek( 0 )
    
    thumbnail_bytes = f.read()
    
    f.close()
    
    return thumbnail_bytes
    

def GeneratePNGBytesNumPy( numpy_image ) -> bytes:
    
    ( im_height, im_width, depth ) = numpy_image.shape
    
    ext = '.png'
    
    if depth == 4:
        
        convert = cv2.COLOR_RGBA2BGRA
        
    else:
        
        convert = cv2.COLOR_RGB2BGR
        
    
    numpy_image = cv2.cvtColor( numpy_image, convert )
    
    ( result_success, result_byte_array ) = cv2.imencode( ext, numpy_image )
    
    if result_success:
        
        return result_byte_array.tobytes()
        
    else:
        
        raise HydrusExceptions.CantRenderWithCVException( 'Image failed to encode!' )
        
    

def GetEXIFDict( pil_image: PILImage.Image ) -> typing.Optional[ dict ]:
    
    if pil_image.format in ( 'JPEG', 'TIFF', 'PNG', 'WEBP', 'HEIF', 'AVIF' ):
        
        try:
            
            exif_dict = pil_image.getexif()._get_merged_dict()
            
            if len( exif_dict ) > 0:
                
                return exif_dict
                
            
        except:
            
            pass
            
        
    
    return None
    

def GetICCProfileBytes( pil_image: PILImage.Image ) -> bytes:
    
    if HasICCProfile( pil_image ):
        
        return pil_image.info[ 'icc_profile' ]
        
    
    raise HydrusExceptions.DataMissing( 'This image has no ICC profile!' )
    

def GetImagePixelHash( path, mime ) -> bytes:
    
    numpy_image = GenerateNumPyImage( path, mime )
    
    return GetImagePixelHashNumPy( numpy_image )
    

def GetImagePixelHashNumPy( numpy_image ):
    
    return hashlib.sha256( numpy_image.data.tobytes() ).digest()
    

def GetImageResolution( path, mime ):
    
    # PIL first here, rather than numpy, as it loads image headers real quick
    try:
        
        pil_image = GeneratePILImage( path, dequantize = False )
        
        ( width, height ) = pil_image.size
        
    except HydrusExceptions.DamagedOrUnusualFileException:
        
        # desperate situation
        
        numpy_image = GenerateNumPyImage( path, mime )
        
        if len( numpy_image.shape ) == 3:
            
            ( height, width, depth ) = numpy_image.shape
            
        else:
            
            ( height, width ) = numpy_image.shape
            
        
    
    width = max( width, 1 )
    height = max( height, 1 )
    
    return ( width, height )
    

# bigger number is worse quality
# this is very rough and misses some finesse
def GetJPEGQuantizationQualityEstimate( path ):
    
    try:
        
        pil_image = RawOpenPILImage( path )
        
    except HydrusExceptions.UnsupportedFileException:
        
        return ( 'unknown', None )
        
    
    if hasattr( pil_image, 'quantization' ):
        
        table_arrays = list( pil_image.quantization.values() )
        
        if len( table_arrays ) == 0:
            
            return ( 'unknown', None )
            
        
        quality = sum( ( sum( table_array ) for table_array in table_arrays ) )
        
        quality /= len( table_arrays )
        
        if quality >= 3400:
            
            label = 'very low'
            
        elif quality >= 2000:
            
            label = 'low'
            
        elif quality >= 1400:
            
            label = 'medium low'
            
        elif quality >= 1000:
            
            label = 'medium'
            
        elif quality >= 700:
            
            label = 'medium high'
            
        elif quality >= 400:
            
            label = 'high'
            
        elif quality >= 200:
            
            label = 'very high'
            
        else:
            
            label = 'extremely high'
            
        
        return ( label, quality )
        
    
    return ( 'unknown', None )
    

def GetJpegSubsampling( pil_image: PILImage.Image ) -> str:
    
    from PIL import JpegImagePlugin
    
    result = JpegImagePlugin.get_sampling( pil_image )
    
    subsampling_str_lookup = { 0 : '4:4:4', 1 : '4:2:2', 2 : '4:2:0' }
    
    return subsampling_str_lookup.get( result, 'unknown' )
    

def GetEmbeddedFileText( pil_image: PILImage.Image ) -> typing.Optional[ str ]:
    
    def render_dict( d, prefix ):
        
        texts = []
        
        keys = sorted( d.keys() )
        
        for key in keys:
            
            if key in ( 'exif', 'icc_profile' ):
                
                continue
                
            
            value = d[ key ]
            
            if isinstance( value, bytes ):
                
                continue
                
            
            if isinstance( value, dict ):
                
                value_string = render_dict( value, prefix = ' ' + prefix )
                
                if value_string is None:
                    
                    continue
                    
                
            else:
                
                value_string = ' {}{}'.format( prefix, value )
                
            
            row_text = '{}{}:'.format( prefix, key )
            row_text += os.linesep
            row_text += value_string
            
            texts.append( row_text )
            
        
        if len( texts ) > 0:
            
            return os.linesep.join( texts )
            
        else:
            
            return None
            
        
    
    if hasattr( pil_image, 'info' ):
        
        try:
            
            return render_dict( pil_image.info, '' )
            
        except:
            
            pass
            
        
    
    return None
    

def GetResolutionNumPy( numpy_image ):
    
    ( image_height, image_width, depth ) = numpy_image.shape
    
    return ( image_width, image_height )
    

THUMBNAIL_SCALE_DOWN_ONLY = 0
THUMBNAIL_SCALE_TO_FIT = 1
THUMBNAIL_SCALE_TO_FILL = 2

thumbnail_scale_str_lookup = {
    THUMBNAIL_SCALE_DOWN_ONLY : 'scale down only',
    THUMBNAIL_SCALE_TO_FIT : 'scale to fit',
    THUMBNAIL_SCALE_TO_FILL : 'scale to fill'
}

def GetThumbnailResolutionAndClipRegion( image_resolution: typing.Tuple[ int, int ], bounding_dimensions: typing.Tuple[ int, int ], thumbnail_scale_type: int, thumbnail_dpr_percent: int ):
    
    clip_rect = None
    
    ( im_width, im_height ) = image_resolution
    ( bounding_width, bounding_height ) = bounding_dimensions
    
    if thumbnail_dpr_percent != 100:
        
        thumbnail_dpr = thumbnail_dpr_percent / 100
        
        bounding_height = int( bounding_height * thumbnail_dpr )
        bounding_width = int( bounding_width * thumbnail_dpr )
        
    
    if im_width is None:
        
        im_width = bounding_width
        
    
    if im_height is None:
        
        im_height = bounding_height
        
    
    # TODO SVG thumbs should always scale up to the bounding dimensions
    
    if thumbnail_scale_type == THUMBNAIL_SCALE_DOWN_ONLY:
        
        if bounding_width >= im_width and bounding_height >= im_height:
            
            return ( clip_rect, ( im_width, im_height ) )
            
        
    
    width_ratio = im_width / bounding_width
    height_ratio = im_height / bounding_height
    
    thumbnail_width = bounding_width
    thumbnail_height = bounding_height
    
    if thumbnail_scale_type in ( THUMBNAIL_SCALE_DOWN_ONLY, THUMBNAIL_SCALE_TO_FIT ):
        
        if width_ratio > height_ratio:
            
            thumbnail_height = im_height / width_ratio
            
        elif height_ratio > width_ratio:
            
            thumbnail_width = im_width / height_ratio
            
        
    elif thumbnail_scale_type == THUMBNAIL_SCALE_TO_FILL:
        
        if width_ratio == height_ratio:
            
            # we have something that fits bounding region perfectly, no clip region required
            pass
            
        else:
            
            clip_x = 0
            clip_y = 0
            clip_width = im_width
            clip_height = im_height
            
            if width_ratio > height_ratio:
                
                clip_width = max( int( im_width * height_ratio / width_ratio ), 1 )
                clip_x = ( im_width - clip_width ) // 2
                
            elif height_ratio > width_ratio:
                
                clip_height = max( int( im_height * width_ratio / height_ratio ), 1 )
                clip_y = ( im_height - clip_height ) // 2
                
            
            clip_rect = ( clip_x, clip_y, clip_width, clip_height )
            
        
    
    thumbnail_width = max( int( thumbnail_width ), 1 )
    thumbnail_height = max( int( thumbnail_height ), 1 )
    
    return ( clip_rect, ( thumbnail_width, thumbnail_height ) )
    

def HasEXIF( path: str ) -> bool:
    
    try:
        
        pil_image = RawOpenPILImage( path )
        
    except:
        
        return False
        
    
    result = GetEXIFDict( pil_image )
    
    return result is not None
    

def HasHumanReadableEmbeddedMetadata( path: str ) -> bool:
    
    try:
        
        pil_image = RawOpenPILImage( path )
        
    except:
        
        return False
        
    
    result = GetEmbeddedFileText( pil_image )
    
    return result is not None
    

def HasICCProfile( pil_image: PILImage.Image ) -> bool:
    
    if 'icc_profile' in pil_image.info:
        
        icc_profile = pil_image.info[ 'icc_profile' ]
        
        if isinstance( icc_profile, bytes ) and len( icc_profile ) > 0:
            
            return True
            
        
    
    return False
    

def IsDecompressionBomb( path ) -> bool:
    
    # there are two errors here, the 'Warning' and the 'Error', which atm is just a test vs a test x 2 for number of pixels
    # 256MB bmp by default, ( 1024 ** 3 ) // 4 // 3
    # we'll set it at 512MB, and now catching error should be about 1GB
    
    PILImage.MAX_IMAGE_PIXELS = ( 512 * ( 1024 ** 2 ) ) // 3
    
    warnings.simplefilter( 'error', PILImage.DecompressionBombError )
    
    try:
        
        RawOpenPILImage( path )
        
    except PILImage.DecompressionBombError:
        
        return True
        
    except:
        
        # pil was unable to load it, which does not mean it was a decomp bomb
        return False
        
    finally:
        
        PILImage.MAX_IMAGE_PIXELS = None
        
        warnings.simplefilter( 'ignore', PILImage.DecompressionBombError )
        
    
    return False
    

def NormaliseICCProfilePILImageToSRGB( pil_image: PILImage.Image ) -> PILImage.Image:
    
    try:
        
        icc_profile_bytes = GetICCProfileBytes( pil_image )
        
    except HydrusExceptions.DataMissing:
        
        return pil_image
        
    
    try:
        
        f = io.BytesIO( icc_profile_bytes )
        
        src_profile = PILImageCms.ImageCmsProfile( f )
        
        if pil_image.mode in ( 'L', 'LA' ):
            
            # had a bunch of LA pngs that turned pure white on RGBA ICC conversion
            # but seem to work fine if keep colourspace the same for now
            # it is a mystery, I guess a PIL bug, but presumably L and LA are technically sRGB so it is still ok to this
            
            outputMode = pil_image.mode
            
        else:
            
            if PILImageHasTransparency( pil_image ):
                
                outputMode = 'RGBA'
                
            else:
                
                outputMode = 'RGB'
                
            
        
        pil_image = PILImageCms.profileToProfile( pil_image, src_profile, PIL_SRGB_PROFILE, outputMode = outputMode )
        
    except ( PILImageCms.PyCMSError, OSError ):
        
        # 'cannot build transform' and presumably some other fun errors
        # way more advanced than we can deal with, so we'll just no-op
        
        # OSError is due to a "OSError: cannot open profile from string" a user got
        # no idea, but that seems to be an ImageCms issue doing byte handling and ending up with an odd OSError?
        # or maybe somehow my PIL reader or bytesIO sending string for some reason?
        # in any case, nuke it for now
        
        pass
        
    
    pil_image = NormalisePILImageToRGB( pil_image )
    
    return pil_image
    

def NormalisePILImageToRGB( pil_image: PILImage.Image ) -> PILImage.Image:
    
    if PILImageHasTransparency( pil_image ):
        
        desired_mode = 'RGBA'
        
    else:
        
        desired_mode = 'RGB'
        
    
    if pil_image.mode != desired_mode:
        
        if pil_image.mode == 'LAB':
            
            pil_image = PILImageCms.profileToProfile( pil_image, PILImageCms.createProfile( 'LAB' ), PIL_SRGB_PROFILE, outputMode = 'RGB' )
            
        else:
            
            pil_image = pil_image.convert( desired_mode )
            
        
    
    return pil_image
    

def NumPyImageHasAllCellsTheSame( numpy_image: numpy.array, value: int ):
    
    # I looked around for ways to do this iteratively at the c++ level but didn't have huge luck.
    # unless some magic is going on, the '==' actually creates the bool array
    # its ok for now!
    
    return numpy.all( numpy_image == value )
    
    # old way, which makes a third array:
    # alpha_channel == numpy.full( ( shape[0], shape[1] ), 255, dtype = 'uint8' ) ).all()
    

def NumPyImageHasUselessAlphaChannel( numpy_image: numpy.array ) -> bool:
    
    if not NumPyImageHasAlphaChannel( numpy_image ):
        
        return False
        
    
    # RGBA image
    
    alpha_channel = numpy_image[:,:,3].copy()
    
    if NumPyImageHasAllCellsTheSame( alpha_channel, 255 ):
        
        # all opaque
        
        return True
        
    
    if NumPyImageHasAllCellsTheSame( alpha_channel, 0 ):
        
        # all transparent
        
        underlying_image_is_black = NumPyImageHasAllCellsTheSame( numpy_image, 0 )
        
        return not underlying_image_is_black
        
    
    return False
    

def NumPyImageHasOpaqueAlphaChannel( numpy_image: numpy.array ) -> bool:
    
    if not NumPyImageHasAlphaChannel( numpy_image ):
        
        return False
        
    
    # RGBA image
    # opaque means 255
    
    alpha_channel = numpy_image[:,:,3].copy()
    
    return NumPyImageHasAllCellsTheSame( alpha_channel, 255 )
    

def NumPyImageHasAlphaChannel( numpy_image: numpy.array ) -> bool:
    
    # note this does not test how useful the channel is, just if it exists
    
    shape = numpy_image.shape
    
    if len( shape ) <= 2:
        
        return False
        
    
    # 2 for LA? think this works
    return shape[2] in ( 2, 4 )
    

def NumPyImageHasTransparentAlphaChannel( numpy_image: numpy.array ) -> bool:
    
    if not NumPyImageHasAlphaChannel( numpy_image ):
        
        return False
        
    
    # RGBA image
    # transparent means 0
    
    alpha_channel = numpy_image[:,:,3].copy()
    
    return NumPyImageHasAllCellsTheSame( alpha_channel, 0 )
    

def PILImageHasTransparency( pil_image: PILImage.Image ) -> bool:
    
    return pil_image.mode in ( 'LA', 'RGBA' ) or ( pil_image.mode == 'P' and 'transparency' in pil_image.info )
    

def RawOpenPILImage( path ) -> PILImage.Image:
    
    try:
        
        pil_image = PILImage.open( path )
        
    except Exception as e:
        
        raise HydrusExceptions.DamagedOrUnusualFileException( 'Could not load the image--it was likely malformed!' )
        
    
    return pil_image
    

def ResizeNumPyImage( numpy_image: numpy.array, target_resolution ) -> numpy.array:
    
    ( target_width, target_height ) = target_resolution
    ( image_width, image_height ) = GetResolutionNumPy( numpy_image )
    
    if target_width == image_width and target_height == image_height:
        
        return numpy_image
        
    elif target_width > image_width or target_height > image_height:
        
        # upscaling, use lanczos
        interpolation = cv2.INTER_LANCZOS4
        
    else:
        
        # downscaling, use area
        interpolation = cv2.INTER_AREA
        
    
    return cv2.resize( numpy_image, ( target_width, target_height ), interpolation = interpolation )
    

def RotateEXIFPILImage( pil_image: PILImage.Image ) -> PILImage.Image:
    
    exif_dict = GetEXIFDict( pil_image )
    
    if exif_dict is not None:
        
        EXIF_ORIENTATION = 274
        
        if EXIF_ORIENTATION in exif_dict:
            
            orientation = exif_dict[ EXIF_ORIENTATION ]
            
            if orientation == 1:
                
                pass # normal
                
            elif orientation == 2:
                
                # mirrored horizontal
                pil_image = pil_image.transpose( PILImage.FLIP_LEFT_RIGHT )
                
            elif orientation == 3:
                
                # 180
                pil_image = pil_image.transpose( PILImage.ROTATE_180 )
                
            elif orientation == 4:
                
                # mirrored vertical
                pil_image = pil_image.transpose( PILImage.FLIP_TOP_BOTTOM )
                
            elif orientation == 5:
                
                # seems like these 90 degree rotations are wrong, but flipping them works for my posh example images, so I guess the PIL constants are odd
                # mirrored horizontal, then 90 CCW
                pil_image = pil_image.transpose( PILImage.FLIP_LEFT_RIGHT ).transpose( PILImage.ROTATE_90 )
                
            elif orientation == 6:
                
                # 90 CW
                pil_image = pil_image.transpose( PILImage.ROTATE_270 )
                
            elif orientation == 7:
                
                # mirrored horizontal, then 90 CCW
                pil_image = pil_image.transpose( PILImage.FLIP_LEFT_RIGHT ).transpose( PILImage.ROTATE_270 )
                
            elif orientation == 8:
                
                # 90 CCW
                pil_image = pil_image.transpose( PILImage.ROTATE_90 )
                
            
        
    
    return pil_image
    

def StripOutAnyUselessAlphaChannel( numpy_image: numpy.array ) -> numpy.array:
    
    if NumPyImageHasUselessAlphaChannel( numpy_image ):
        
        numpy_image = numpy_image[:,:,:3].copy()
        
        # old way, which doesn't actually remove the channel lmao lmao lmao
        '''
        convert = cv2.COLOR_RGBA2RGB
        
        numpy_image = cv2.cvtColor( numpy_image, convert )
        '''
        
    
    return numpy_image
    

def GetImageBlurHashNumPy( numpy_image, components_x = 4, components_y = 4 ):
    
    return blurhash.blurhash_encode( numpy_image, components_x, components_y )
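
# A minimal usage sketch, not part of the original module. It assumes hydrus is importable,
# OpenCV is available, and the caller passes a path to a static image on the command line;
# the 200x200 bounding box, 'scale to fit' mode, and the jpeg mime are illustrative choices only.
# It strings together the typical pipeline above: load to numpy, compute a bounded thumbnail
# resolution and clip region, render the thumbnail, and encode it to bytes.
if __name__ == '__main__':
    
    import sys
    
    example_path = sys.argv[1] # e.g. some jpeg on disk
    example_mime = HC.IMAGE_JPEG # assumption; hydrus normally detects the mime elsewhere
    
    numpy_image = GenerateNumPyImage( example_path, example_mime )
    
    ( clip_rect, target_resolution ) = GetThumbnailResolutionAndClipRegion( GetResolutionNumPy( numpy_image ), ( 200, 200 ), THUMBNAIL_SCALE_TO_FIT, 100 )
    
    thumbnail_numpy = GenerateThumbnailNumPyFromStaticImagePath( example_path, target_resolution, example_mime, clip_rect = clip_rect )
    
    thumbnail_bytes = GenerateThumbnailBytesNumPy( thumbnail_numpy )
    
    print( 'thumbnail: {} bytes at {}, blurhash {}'.format( len( thumbnail_bytes ), target_resolution, GetImageBlurHashNumPy( thumbnail_numpy ) ) )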