diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..9270bb72 --- /dev/null +++ b/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 6000 \ No newline at end of file diff --git a/.github/workflows/.flake8 b/.github/workflows/.flake8 new file mode 100644 index 00000000..9270bb72 --- /dev/null +++ b/.github/workflows/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 6000 \ No newline at end of file diff --git a/README.md b/README.md index 54f253de..0f6968fb 100644 --- a/README.md +++ b/README.md @@ -17,11 +17,13 @@ A simple tool to take the work out of uploading. - Can re-use existing torrents instead of hashing new - Generates proper name for your upload using Mediainfo/BDInfo and TMDb/IMDb conforming to site rules - Checks for existing releases already on site - - Uploads to OE/PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/AL/ULCX + - Uploads to OE/PTP/BLU/BHD/Aither/THR/STC/R4E(limited)/HP/ACM/LCD/LST/NBL/ANT/FL/HUNO/RF/SN/RTF/OTW/FNP/CBR/UTP/AL/ULCX/HDB - Adds to your client with fast resume, seeding instantly (rtorrent/qbittorrent/deluge/watch folder) - ALL WITH MINIMAL INPUT! - Currently works with .mkv/.mp4/Blu-ray/DVD/HD-DVDs + Built with updated BDInfoCLI from https://github.com/rokibhasansagar/BDInfoCLI-ng + ## Image Hosts: - OnlyImage - oeimg - ImgBB - imgbb diff --git a/bin/BDInfo/BDInfo.exe b/bin/BDInfo/BDInfo.exe index e2462867..82e0a6f8 100644 Binary files a/bin/BDInfo/BDInfo.exe and b/bin/BDInfo/BDInfo.exe differ diff --git a/bin/BDInfo/System.Resources.Extensions.dll b/bin/BDInfo/System.Resources.Extensions.dll new file mode 100644 index 00000000..939c9f58 Binary files /dev/null and b/bin/BDInfo/System.Resources.Extensions.dll differ diff --git a/cogs/commands.py b/cogs/commands.py index 5528d556..3ff68d17 100644 --- a/cogs/commands.py +++ b/cogs/commands.py @@ -1,4 +1,3 @@ -from discord.ext.commands.errors import CommandInvokeError from src.prep import Prep from src.args import Args from src.clients import Clients @@ -7,29 +6,27 @@ from src.trackers.BHD import BHD from src.trackers.AITHER import AITHER from src.trackers.STC import STC +from src.trackers.OE import OE from src.trackers.LCD import LCD from src.trackers.CBR import CBR -from data.config import config +from data.config import config # type: ignore -import discord -from discord.ext import commands +import discord # type: ignore +from discord.ext import commands # type: ignore import os from datetime import datetime import asyncio import json -import shutil import multiprocessing from pathlib import Path from glob import glob import argparse - class Commands(commands.Cog): def __init__(self, bot): self.bot = bot - @commands.Cog.listener() async def on_guild_join(self, guild): """ @@ -46,7 +43,7 @@ async def upload(self, ctx, path, *args, message_id=0, search_args=tuple()): return parser = Args(config) - if path == None: + if path is None: await ctx.send("Missing Path") return elif path.lower() == "-h": @@ -61,17 +58,17 @@ async def upload(self, ctx, path, *args, message_id=0, search_args=tuple()): try: args = (meta['path'],) + args + search_args meta, help, before_args = parser.parse(args, meta) - except SystemExit as error: + except SystemExit: await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args") return - if meta['imghost'] == None: + if meta['imghost'] is None: meta['imghost'] = config['DEFAULT']['img_host_1'] # if not meta['unattended']: # ua = config['DEFAULT'].get('auto_mode', 
False) # if str(ua).lower() == "true": # meta['unattended'] = True prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) - preparing_embed = discord.Embed(title=f"Preparing to upload:", description=f"```{path}```", color=0xffff00) + preparing_embed = discord.Embed(title="Preparing to upload:", description=f"```{path}```", color=0xffff00) if message_id == 0: message = await ctx.send(embed=preparing_embed) meta['embed_msg_id'] = message.id @@ -87,7 +84,6 @@ async def upload(self, ctx, path, *args, message_id=0, search_args=tuple()): else: await ctx.send("Invalid Path") - @commands.command() async def args(self, ctx): f""" @@ -103,56 +99,6 @@ async def args(self, ctx): await ctx.send(f"```{help[1991:]}```") else: await ctx.send(help.format_help()) - # await ctx.send(""" - # ```Optional arguments: - - # -s, --screens [SCREENS] - # Number of screenshots - # -c, --category [{movie,tv,fanres}] - # Category - # -t, --type [{disc,remux,encode,webdl,web-dl,webrip,hdtv}] - # Type - # -res, --resolution - # [{2160p,1080p,1080i,720p,576p,576i,480p,480i,8640p,4320p,other}] - # Resolution - # -tmdb, --tmdb [TMDB] - # TMDb ID - # -g, --tag [TAG] - # Group Tag - # -serv, --service [SERVICE] - # Streaming Service - # -edition, --edition [EDITION] - # Edition - # -d, --desc [DESC] - # Custom Description (string) - # -nfo, --nfo - # Use .nfo in directory for description - # -k, --keywords [KEYWORDS] - # Add comma seperated keywords e.g. 'keyword, keyword2, etc' - # -reg, --region [REGION] - # Region for discs - # -a, --anon Upload anonymously - # -st, --stream Stream Optimized Upload - # -debug, --debug Debug Mode```""") - - - # @commands.group(invoke_without_command=True) - # async def foo(self, ctx): - # """ - # check out my subcommands! - # """ - # await ctx.send('check out my subcommands!') - - # @foo.command(aliases=['an_alias']) - # async def bar(self, ctx): - # """ - # I have an alias!, I also belong to command 'foo' - # """ - # await ctx.send('foo bar!') - - - - @commands.command() async def edit(self, ctx, uuid=None, *args): @@ -161,7 +107,7 @@ async def edit(self, ctx, uuid=None, *args): """ if ctx.channel.id != int(config['DISCORD']['discord_channel_id']): return - if uuid == None: + if uuid is None: await ctx.send("Missing ID, please try again using the ID in the footer") parser = Args(config) base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) @@ -187,11 +133,6 @@ async def edit(self, ctx, uuid=None, *args): meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) await self.send_embed_and_upload(ctx, meta) - - - - - @commands.group(invoke_without_command=True) async def search(self, ctx, *, args=None): """ @@ -206,14 +147,14 @@ async def search(self, ctx, *, args=None): args = args.replace(search_terms, '') while args.startswith(" "): args = args[1:] - except SystemExit as error: + except SystemExit: await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args") return if ctx.channel.id != int(config['DISCORD']['discord_channel_id']): return search = Search(config=config) - if search_terms == None: + if search_terms is None: await ctx.send("Missing search term(s)") return files_total = await search.searchFile(search_terms) @@ -235,14 +176,12 @@ async def search(self, ctx, *, args=None): await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) channel = message.channel - def check(reaction, user): if reaction.message.id == message.id: 
if str(user.id) == config['DISCORD']['admin_id']: if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']: return reaction - try: await self.bot.wait_for("reaction_add", timeout=120, check=check) except asyncio.TimeoutError: @@ -250,8 +189,6 @@ def check(reaction, user): else: await self.upload(ctx, files_total[0], search_args=tuple(args.split(" ")), message_id=message.id) - - @search.command() async def dir(self, ctx, *, args=None): """ @@ -266,14 +203,14 @@ async def dir(self, ctx, *, args=None): args = args.replace(search_terms, '') while args.startswith(" "): args = args[1:] - except SystemExit as error: + except SystemExit: await ctx.send(f"Invalid argument detected, use `{config['DISCORD']['command_prefix']}args` for list of valid args") return if ctx.channel.id != int(config['DISCORD']['discord_channel_id']): return search = Search(config=config) - if search_terms == None: + if search_terms is None: await ctx.send("Missing search term(s)") return folders_total = await search.searchFolder(search_terms) @@ -295,14 +232,12 @@ async def dir(self, ctx, *, args=None): await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) channel = message.channel - def check(reaction, user): if reaction.message.id == message.id: if str(user.id) == config['DISCORD']['admin_id']: if str(reaction.emoji) == config['DISCORD']['discord_emojis']['UPLOAD']: return reaction - try: await self.bot.wait_for("reaction_add", timeout=120, check=check) except asyncio.TimeoutError: @@ -312,18 +247,11 @@ def check(reaction, user): # await ctx.send(folders_total) return - - - - - - - - async def send_embed_and_upload(self,ctx,meta): + async def send_embed_and_upload(self, ctx, meta): prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=config) meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - if meta.get('uploaded_screens', False) == False: + if meta.get('uploaded_screens', False) is False: if meta.get('embed_msg_id', '0') != '0': message = await ctx.fetch_message(meta['embed_msg_id']) await message.edit(embed=discord.Embed(title="Uploading Screenshots", color=0xffff00)) @@ -333,17 +261,16 @@ async def send_embed_and_upload(self,ctx,meta): channel = message.channel.id return_dict = multiprocessing.Manager().dict() - u = multiprocessing.Process(target = prep.upload_screens, args=(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict)) + u = multiprocessing.Process(target=prep.upload_screens, args=(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict)) u.start() - while u.is_alive() == True: + while u.is_alive() is True: await asyncio.sleep(3) meta['image_list'] = return_dict['image_list'] if meta['debug']: print(meta['image_list']) meta['uploaded_screens'] = True - #Create base .torrent - + # Create base .torrent if len(glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) == 0: if meta.get('embed_msg_id', '0') != '0': message = await ctx.fetch_message(int(meta['embed_msg_id'])) @@ -352,15 +279,15 @@ async def send_embed_and_upload(self,ctx,meta): message = await ctx.send(embed=discord.Embed(title="Creating .torrent", color=0xffff00)) meta['embed_msg_id'] = message.id channel = message.channel - if meta['nohash'] == False: - if meta.get('torrenthash', None) != None: - reuse_torrent = await client.find_existing_torrent(meta) - if reuse_torrent != None: + if meta['nohash'] is False: + if meta.get('torrenthash', None) is not None: + reuse_torrent = await client.find_existing_torrent(meta) # 
noqa F821 + if reuse_torrent is not None: prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) - p = multiprocessing.Process(target = prep.create_torrent, args=(meta, Path(meta['path']))) + p = multiprocessing.Process(target=prep.create_torrent, args=(meta, Path(meta['path']))) p.start() - while p.is_alive() == True: + while p.is_alive() is True: await asyncio.sleep(5) if int(meta.get('randomized', 0)) >= 1: @@ -368,8 +295,7 @@ async def send_embed_and_upload(self,ctx,meta): else: meta['client'] = 'none' - - #Format for embed + # Format for embed if meta['tag'] == "": tag = "" else: @@ -388,19 +314,25 @@ async def send_embed_and_upload(self,ctx,meta): res = meta['resolution'] missing = await self.get_missing(meta) - embed=discord.Embed(title=f"Upload: {meta['title']}", url=f"https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}", description=meta['overview'], color=0x0080ff, timestamp=datetime.utcnow()) + embed = discord.Embed( + title=f"Upload: {meta['title']}", + url=f"https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}", + description=meta['overview'], + color=0x0080ff, + timestamp=datetime.utcnow() + ) embed.add_field(name="Links", value=f"[TMDB](https://www.themoviedb.org/{meta['category'].lower()}/{meta['tmdb']}){imdb}{tvdb}") embed.add_field(name=f"{res} / {meta['type']}{tag}", value=f"```{meta['name']}```", inline=False) if missing != []: - embed.add_field(name=f"POTENTIALLY MISSING INFORMATION:", value="\n".join(missing), inline=False) + embed.add_field(name="POTENTIALLY MISSING INFORMATION:", value="\n".join(missing), inline=False) embed.set_thumbnail(url=f"https://image.tmdb.org/t/p/original{meta['poster']}") embed.set_footer(text=meta['uuid']) - embed.set_author(name="L4G's Upload Assistant", url="https://github.com/L4GSP1KE/Upload-Assistant", icon_url="https://images2.imgbox.com/6e/da/dXfdgNYs_o.png") + embed.set_author(name="L4G's Upload Assistant", url="https://github.com/Audionut/Upload-Assistant", icon_url="https://images2.imgbox.com/6e/da/dXfdgNYs_o.png") message = await ctx.fetch_message(meta['embed_msg_id']) await message.edit(embed=embed) - if meta.get('trackers', None) != None: + if meta.get('trackers', None) is not None: trackers = meta['trackers'] else: trackers = config['TRACKERS']['default_trackers'] @@ -430,8 +362,8 @@ async def send_embed_and_upload(self,ctx,meta): await asyncio.sleep(0.3) await message.add_reaction(config['DISCORD']['discord_emojis']['UPLOAD']) - #Save meta to json - with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + # Save meta to json + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) f.close() @@ -455,7 +387,7 @@ def check(reaction, user): await msg.clear_reactions() await msg.edit(embed=timeout_embed) return - except: + except Exception: print("timeout after edit") pass except CancelException: @@ -464,20 +396,9 @@ def check(reaction, user): await msg.clear_reactions() await msg.edit(embed=cancel_embed) return - # except ManualException: - # msg = await ctx.fetch_message(meta['embed_msg_id']) - # await msg.clear_reactions() - # archive_url = await prep.package(meta) - # if archive_url == False: - # archive_fail_embed = discord.Embed(title="Unable to upload prep files", description=f"The files can be found at `tmp/{meta['title']}.tar`", color=0xff0000) - # await msg.edit(embed=archive_fail_embed) - # else: - # archive_embed = discord.Embed(title="Files can be found at:",description=f"{archive_url} or 
`tmp/{meta['title']}.tar`", color=0x00ff40)
-            #         await msg.edit(embed=archive_embed)
-            # return
         else:
-            #Check which are selected and upload to them
+            # Check which are selected and upload to them
             msg = await ctx.fetch_message(message.id)
             tracker_list = list()
             tracker_emojis = config['DISCORD']['discord_emojis']
@@ -494,9 +415,6 @@ def check(reaction, user):
             await msg.edit(embed=upload_embed)
             await msg.clear_reactions()
-
-
-
             client = Clients(config=config)
             if "MANUAL" in tracker_list:
                 for manual_tracker in tracker_list:
@@ -513,6 +431,9 @@ def check(reaction, user):
                     if manual_tracker.upper() == "STC":
                         stc = STC(config=config)
                         await stc.edit_desc(meta)
+                    if manual_tracker.upper() == "OE":
+                        oe = OE(config=config)
+                        await oe.edit_desc(meta)
                     if manual_tracker.upper() == "LCD":
                         lcd = LCD(config=config)
                         await lcd.edit_desc(meta)
@@ -521,19 +442,19 @@ def check(reaction, user):
                     await cbr.edit_desc(meta)
                 archive_url = await prep.package(meta)
                 upload_embed_description = upload_embed_description.replace('MANUAL', '~~MANUAL~~')
-                if archive_url == False:
+                if archive_url is False:
                     upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0xff0000)
                     upload_embed.add_field(name="Unable to upload prep files", value=f"The files can be found at `tmp/{meta['title']}.tar`")
                     await msg.edit(embed=upload_embed)
                 else:
                     upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40)
-                    upload_embed.add_field(name="Files can be found at:",value=f"{archive_url} or `tmp/{meta['uuid']}`")
+                    upload_embed.add_field(name="Files can be found at:", value=f"{archive_url} or `tmp/{meta['uuid']}`")
                     await msg.edit(embed=upload_embed)
             if "BLU" in tracker_list:
                 blu = BLU(config=config)
                 dupes = await blu.search_existing(meta)
                 meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel)
-                if meta['upload'] == True:
+                if meta['upload'] is True:
                     await blu.upload(meta)
                     await client.add_to_client(meta, "BLU")
                 upload_embed_description = upload_embed_description.replace('BLU', '~~BLU~~')
@@ -543,7 +464,7 @@ def check(reaction, user):
                 bhd = BHD(config=config)
                 dupes = await bhd.search_existing(meta)
                 meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel)
-                if meta['upload'] == True:
+                if meta['upload'] is True:
                     await bhd.upload(meta)
                     await client.add_to_client(meta, "BHD")
                 upload_embed_description = upload_embed_description.replace('BHD', '~~BHD~~')
@@ -553,7 +474,7 @@ def check(reaction, user):
                 aither = AITHER(config=config)
                 dupes = await aither.search_existing(meta)
                 meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel)
-                if meta['upload'] == True:
+                if meta['upload'] is True:
                     await aither.upload(meta)
                     await client.add_to_client(meta, "AITHER")
                 upload_embed_description = upload_embed_description.replace('AITHER', '~~AITHER~~')
@@ -563,27 +484,17 @@ def check(reaction, user):
                 stc = STC(config=config)
                 dupes = await stc.search_existing(meta)
                 meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel)
-                if meta['upload'] == True:
+                if meta['upload'] is True:
                     await stc.upload(meta)
                     await client.add_to_client(meta, "STC")
                 upload_embed_description = upload_embed_description.replace('STC', '~~STC~~')
                 upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40)
                 await msg.edit(embed=upload_embed)
-            if "OE" in tracker_list:
-                OE = OE(config=config)
-                dupes = await stc.search_existing(meta)
-                meta = await self.dupe_embed(dupes, meta, 
tracker_emojis, channel) - if meta['upload'] == True: - await oe.upload(meta) - await client.add_to_client(meta, "OE") - upload_embed_description = upload_embed_description.replace('OE', '~~OE~~') - upload_embed = discord.Embed(title=f"Uploaded `{meta['name']}` to:", description=upload_embed_description, color=0x00ff40) - await msg.edit(embed=upload_embed) if "LCD" in tracker_list: lcd = LCD(config=config) dupes = await lcd.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await lcd.upload(meta) await client.add_to_client(meta, "LCD") upload_embed_description = upload_embed_description.replace('LCD', '~~LCD~~') @@ -593,7 +504,7 @@ def check(reaction, user): cbr = CBR(config=config) dupes = await cbr.search_existing(meta) meta = await self.dupe_embed(dupes, meta, tracker_emojis, channel) - if meta['upload'] == True: + if meta['upload'] is True: await cbr.upload(meta) await client.add_to_client(meta, "CBR") upload_embed_description = upload_embed_description.replace('CBR', '~~CBR~~') @@ -601,8 +512,6 @@ def check(reaction, user): await msg.edit(embed=upload_embed) return None - - async def dupe_embed(self, dupes, meta, emojis, channel): if not dupes: print("No dupes found") @@ -634,7 +543,7 @@ def check(reaction, user): try: await channel.send(f"{meta['uuid']} timed out") meta['upload'] = False - except: + except Exception: return except CancelException: await channel.send(f"{meta['title']} cancelled") @@ -658,15 +567,14 @@ async def get_missing(self, meta): missing.append(f"--{each}") return missing + def setup(bot): bot.add_cog(Commands(bot)) - - - class CancelException(Exception): pass + class ManualException(Exception): - pass + pass \ No newline at end of file diff --git a/data/example-config.py b/data/example-config.py index a1e6f480..10f0a006 100644 --- a/data/example-config.py +++ b/data/example-config.py @@ -1,17 +1,17 @@ config = { - "DEFAULT" : { + "DEFAULT": { # ------ READ THIS ------ # Any lines starting with the # symbol are commented and will not be used. # If you change any of these options, remove the # # ----------------------- - "tmdb_api" : "tmdb_api key", - "oeimg_api" : "OnlyImg api key", - "imgbb_api" : "imgbb api key", - "ptscreens_api" : "ptscreens api key", - "ptpimg_api" : "ptpimg api key", - "lensdump_api" : "lensdump api key", + "tmdb_api": "tmdb_api key", + "oeimg_api": "OnlyImg api key", + "imgbb_api": "imgbb api key", + "ptscreens_api": "ptscreens api key", + "ptpimg_api": "ptpimg api key", + "lensdump_api": "lensdump api key", # Order of image hosts, and backup image hosts "img_host_1": "oeimg", @@ -23,201 +23,208 @@ "img_host_7": "ptscreens", - "screens" : "6", + "screens": "6", # Enable lossless PNG Compression (True/False) "optimize_images" : True, # The name of your default torrent client, set in the torrent client sections below - "default_torrent_client" : "Client1", + "default_torrent_client": "Client1", # Play the bell sound effect when asking for confirmation - "sfx_on_prompt" : True, + "sfx_on_prompt": True, }, - "TRACKERS" : { + "TRACKERS": { # Which trackers do you want to upload to? 
# Available tracker: BLU, BHD, AITHER, STC, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, HDT, OE, RF, OTW, FNP, UTP, AL
        # Remove the ones not used to save being asked everytime
-        "default_trackers" : "BLU, BHD, AITHER, STC, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, PTER, TL, HDT, OE, RF, OTW, FNP, UTP, AL",
+        "default_trackers": "BLU, BHD, AITHER, STC, SN, THR, R4E, HP, ACM, PTP, LCD, LST, PTER, NBL, ANT, MTV, CBR, RTF, HUNO, BHDTV, LT, TL, HDT, OE, RF, OTW, FNP, UTP, AL",

-        "BLU" : {
-            "useAPI" : False, # Set to True if using BLU
-            "api_key" : "BLU api key",
-            "announce_url" : "https://blutopia.cc/announce/customannounceurl",
+        "BLU": {
+            "useAPI": False,  # Set to True if using BLU
+            "api_key": "BLU api key",
+            "announce_url": "https://blutopia.cc/announce/customannounceurl",
             # "anon" : False
         },
         "BHD" : {
-            "api_key" : "BHD api key",
-            "announce_url" : "https://beyond-hd.me/announce/customannounceurl",
-            "draft_default" : "True",
+            "api_key": "BHD api key",
+            "announce_url": "https://beyond-hd.me/announce/customannounceurl",
+            "draft_default": "True",
             # "anon" : False
         },
         "BHDTV": {
             "api_key": "found under https://www.bit-hdtv.com/my.php",
             "announce_url": "https://trackerr.bit-hdtv.com/announce",
-            #passkey found under https://www.bit-hdtv.com/my.php
+            # passkey found under https://www.bit-hdtv.com/my.php
             "my_announce_url": "https://trackerr.bit-hdtv.com/passkey/announce",
             # "anon" : "False"
         },
-        "PTP" : {
-            "useAPI" : False, # Set to True if using PTP
-            "add_web_source_to_desc" : True,
-            "ApiUser" : "ptp api user",
-            "ApiKey" : 'ptp api key',
-            "username" : "",
-            "password" : "",
-            "announce_url" : ""
-        },
-        "AITHER" :{
-            "api_key" : "AITHER api key",
-            "announce_url" : "https://aither.cc/announce/customannounceurl",
+        "PTP": {
+            "useAPI": False,  # Set to True if using PTP
+            "add_web_source_to_desc": True,
+            "ApiUser": "ptp api user",
+            "ApiKey": 'ptp api key',
+            "username": "",
+            "password": "",
+            "announce_url": ""
+        },
+        "AITHER": {
+            "api_key": "AITHER api key",
+            "announce_url": "https://aither.cc/announce/customannounceurl",
             # "anon" : False
         },
-        "R4E" :{
-            "api_key" : "R4E api key",
-            "announce_url" : "https://racing4everyone.eu/announce/customannounceurl",
+        "R4E": {
+            "api_key": "R4E api key",
+            "announce_url": "https://racing4everyone.eu/announce/customannounceurl",
             # "anon" : False
         },
-        "HUNO" : {
-            "api_key" : "HUNO api key",
-            "announce_url" : "https://hawke.uno/announce/customannounceurl",
+        "HUNO": {
+            "api_key": "HUNO api key",
+            "announce_url": "https://hawke.uno/announce/customannounceurl",
             # "anon" : False
         },
         "MTV": {
-            'api_key' : 'get from security page',
-            'username' : '',
-            'password' : '',
-            'announce_url' : "get from https://www.morethantv.me/upload.php",
-            'anon' : False,
+            'api_key': 'get from security page',
+            'username': '',
+            'password': '',
+            'announce_url': "get from https://www.morethantv.me/upload.php",
+            'anon': False,
             # 'otp_uri' : 'OTP URI, read the following for more information https://github.com/google/google-authenticator/wiki/Key-Uri-Format'
         },
-        "STC" :{
-            "api_key" : "STC API Key",
-            "announce_url" : "https://skipthecommericals.xyz/announce/customannounceurl",
+        "STC": {
+            "api_key": "STC API Key",
+            "announce_url": "https://skipthecommericals.xyz/announce/customannounceurl",
             # "anon" : False
         },
         "SN": {
            "api_key": "SN API Key",
            "announce_url": "https://tracker.swarmazon.club:8443//announce",
        },
"https://hidden-palace.net/announce/customannounceurl", + "HP":{ + "api_key": "HP", + "announce_url": "https://hidden-palace.net/announce/customannounceurl", # "anon" : False }, - "ACM" :{ - "api_key" : "ACM api key", - "announce_url" : "https://asiancinema.me/announce/customannounceurl", + "ACM":{ + "api_key": "ACM api key", + "announce_url": "https://asiancinema.me/announce/customannounceurl", # "anon" : False, # FOR INTERNAL USE ONLY: # "internal" : True, # "internal_groups" : ["What", "Internal", "Groups", "Are", "You", "In"], }, - "NBL" : { - "api_key" : "NBL api key", - "announce_url" : "https://nebulance.io/customannounceurl", + "NBL": { + "api_key": "NBL api key", + "announce_url": "https://nebulance.io/customannounceurl", }, - "ANT" :{ - "api_key" : "ANT api key", - "announce_url" : "https://anthelion.me/announce/customannounceurl", + "ANT":{ + "api_key": "ANT api key", + "announce_url": "https://anthelion.me/announce/customannounceurl", # "anon" : False }, "THR" : { - "username" : "username", - "password" : "password", - "img_api" : "get this from the forum post", - "announce_url" : "http://www.torrenthr.org/announce.php?passkey=yourpasskeyhere", - "pronfo_api_key" : "pronfo api key", - "pronfo_theme" : "pronfo theme code", - "pronfo_rapi_id" : "pronfo remote api id", + "username": "username", + "password": "password", + "img_api": "get this from the forum post", + "announce_url": "http://www.torrenthr.org/announce.php?passkey=yourpasskeyhere", + "pronfo_api_key": "pronfo api key", + "pronfo_theme": "pronfo theme code", + "pronfo_rapi_id": "pronfo remote api id", # "anon" : False }, - "LCD" : { - "api_key" : "LCD api key", - "announce_url" : "https://locadora.cc/announce/customannounceurl", + "LCD": { + "api_key": "LCD api key", + "announce_url": "https://locadora.cc/announce/customannounceurl", # "anon" : False }, - "CBR" : { - "api_key" : "CBR api key", - "announce_url" : "https://capybarabr.com/announce/customannounceurl", + "CBR": { + "api_key": "CBR api key", + "announce_url": "https://capybarabr.com/announce/customannounceurl", # "anon" : False }, - "LST" : { - "api_key" : "LST api key", - "announce_url" : "https://lst.gg/announce/customannounceurl", + "LST": { + "api_key": "LST api key", + "announce_url": "https://lst.gg/announce/customannounceurl", # "anon" : False }, - "LT" : { - "api_key" : "LT api key", - "announce_url" : "https://lat-team.com/announce/customannounceurl", + "LT": { + "api_key": "LT api key", + "announce_url": "https://lat-team.com/announce/customannounceurl", # "anon" : False }, - "PTER" : { + "PTER": { "passkey":'passkey', - "img_rehost" : False, - "username" : "", - "password" : "", - "ptgen_api": "", + "img_rehost": False, + "username": "", + "password": "", + "ptgen_api" "", "anon": True, }, "TL": { "announce_key": "TL announce key", }, - "HDT" : { - "username" : "username", - "password" : "password", + "HDT": { + "username": "username", + "password": "password", "my_announce_url": "https://hdts-announce.ru/announce.php?pid=", # "anon" : "False" - "announce_url" : "https://hdts-announce.ru/announce.php", #DO NOT EDIT THIS LINE + "announce_url": "https://hdts-announce.ru/announce.php", #DO NOT EDIT THIS LINE }, - "OE" : { - "api_key" : "OE api key", - "announce_url" : "https://onlyencodes.cc/announce/customannounceurl", + "OE": { + "api_key": "OE api key", + "announce_url": "https://onlyencodes.cc/announce/customannounceurl", #"internal" : True, #"internal_groups" " ["group1"], # "anon" : False }, "RTF": { - "username" : "username", - "password" : 
"password", + "username": "username", + "password": "password", "api_key": 'get_it_by_running_/api/ login command from https://retroflix.club/api/doc', "announce_url": "get from upload page", # "tag": "RetroFlix, nd", "anon": True }, - "RF" : { + "RF": { "api_key" : "RF api key", "announce_url" : "https://reelflix.xyz/announce/customannounceurl", # "anon" : False }, - "OTW" : { - "api_key" : "OTW api key", - "announce_url" : "https://oldtoons.world/announce/customannounceurl", + "OTW": { + "api_key": "OTW api key", + "announce_url": "https://oldtoons.world/announce/customannounceurl", # "anon" : False }, - "FNP" :{ - "api_key" : "FNP api key", - "announce_url" : "https://fearnopeer.com/announce/customannounceurl", + "FNP":{ + "api_key": "FNP api key", + "announce_url": "https://fearnopeer.com/announce/customannounceurl", # "anon" : "False" }, - "ULCX" : { - "api_key" : "ULXC api key", - "announce_url" : "https://upload.cx/announce/customannounceurl", + "ULCX": { + "api_key": "ULXC api key", + "announce_url": "https://upload.cx/announce/customannounceurl", # "anon" : False }, - "UTP" : { - "api_key" : "UTP api key", - "announce_url" : "https://UTP/announce/customannounceurl", + "UTP": { + "api_key": "UTP api key", + "announce_url": "https://UTP/announce/customannounceurl", # "anon" : False }, - "AL" : { - "api_key" : "AL api key", - "announce_url" : "https://animelovers.club/announce/customannounceurl", + "AL": { + "api_key": "AL api key", + "announce_url": "https://animelovers.club/announce/customannounceurl", # "anon" : False }, + "HDB": { + "useAPI": True, + "username": "HDB username", + "passkey": "HDB passkey", + "announce_url": "https://hdbits.org/announce/Custom_Announce_URL", + "anon": False, + }, "MANUAL" : { # Uncomment and replace link with filebrowser (https://github.com/filebrowser/filebrowser) link to the Upload-Assistant directory, this will link to your filebrowser instead of uploading to uguu.se # "filebrowser" : "https://domain.tld/filebrowser/files/Upload-Assistant/" @@ -228,40 +235,40 @@ # Should use the qbit API, but will also use the torrent_storage_dir to find suitable hashes # If you find issue, use the "--debug" command option to print out some related details - "TORRENT_CLIENTS" : { + "TORRENT_CLIENTS": { # Name your torrent clients here, for example, this example is named "Client1" and is set as default_torrent_client above # All options relate to the webui, make sure you have the webui secured if it has WAN access # See https://github.com/edge20200/Only-Uploader/wiki - "Client1" : { - "torrent_client" : "qbit", - # "enable_search" : True, - "qbit_url" : "http://127.0.0.1", - "qbit_port" : "8080", - "qbit_user" : "username", - "qbit_pass" : "password", - # "torrent_storage_dir" : "path/to/BT_backup folder" + "Client1": { + "torrent_client": "qbit", + # "enable_search": True, + "qbit_url": "http://127.0.0.1", + "qbit_port": "8080", + "qbit_user": "username", + "qbit_pass": "password", + # "torrent_storage_dir": "path/to/BT_backup folder" # Remote path mapping (docker/etc.) 
CASE SENSITIVE - # "local_path" : "/LocalPath", - # "remote_path" : "/RemotePath" - }, - "qbit_sample" : { - "torrent_client" : "qbit", - "enable_search" : True, - "qbit_url" : "http://127.0.0.1", - "qbit_port" : "8080", - "qbit_user" : "username", - "qbit_pass" : "password", - # "torrent_storage_dir" : "path/to/BT_backup folder" - # "qbit_tag" : "tag", - # "qbit_cat" : "category" + # "local_path": "/LocalPath", + # "remote_path": "/RemotePath" + }, + "qbit_sample": { + "torrent_client": "qbit", + "enable_search": True, + "qbit_url": "http://127.0.0.1", + "qbit_port": "8080", + "qbit_user": "username", + "qbit_pass": "password", + # "torrent_storage_dir": "path/to/BT_backup folder" + # "qbit_tag": "tag", + # "qbit_cat": "category" # Content Layout for adding .torrents: "Original"(recommended)/"Subfolder"/"NoSubfolder" - "content_layout" : "Original" + "content_layout": "Original" # Enable automatic torrent management if listed path(s) are present in the path - # If using remote path mapping, use remote path - # For using multiple paths, use a list ["path1", "path2"] + # If using remote path mapping, use remote path + # For using multiple paths, use a list ["path1", "path2"] # "automatic_management_paths" : "" # Remote path mapping (docker/etc.) CASE SENSITIVE # "local_path" : "E:\\downloads\\tv", @@ -271,9 +278,9 @@ # "VERIFY_WEBUI_CERTIFICATE" : True }, - "rtorrent_sample" : { - "torrent_client" : "rtorrent", - "rtorrent_url" : "https://user:password@server.host.tld:443/username/rutorrent/plugins/httprpc/action.php", + "rtorrent_sample": { + "torrent_client": "rtorrent", + "rtorrent_url": "https://user:password@server.host.tld:443/username/rutorrent/plugins/httprpc/action.php", # "torrent_storage_dir" : "path/to/session folder", # "rtorrent_label" : "Add this label to all uploads" @@ -282,48 +289,47 @@ # "remote_path" : "/RemotePath" }, - "deluge_sample" : { - "torrent_client" : "deluge", - "deluge_url" : "localhost", - "deluge_port" : "8080", - "deluge_user" : "username", - "deluge_pass" : "password", + "deluge_sample": { + "torrent_client": "deluge", + "deluge_url": "localhost", + "deluge_port": "8080", + "deluge_user": "username", + "deluge_pass": "password", # "torrent_storage_dir" : "path/to/session folder", # Remote path mapping (docker/etc.) 
CASE SENSITIVE # "local_path" : "/LocalPath", # "remote_path" : "/RemotePath" }, - "watch_sample" : { - "torrent_client" : "watch", - "watch_folder" : "/Path/To/Watch/Folder" + "watch_sample": { + "torrent_client": "watch", + "watch_folder": "/Path/To/Watch/Folder" }, }, - "DISCORD" :{ - "discord_bot_token" : "discord bot token", - "discord_bot_description" : "L4G's Upload Assistant", - "command_prefix" : "!", - "discord_channel_id" : "discord channel id for use", - "admin_id" : "your discord user id", + "DISCORD":{ + "discord_bot_token": "discord bot token", + "discord_bot_description": "L4G's Upload Assistant", + "command_prefix": "!", + "discord_channel_id": "discord channel id for use", + "admin_id": "your discord user id", - "search_dir" : "Path/to/downloads/folder/ this is used for search", + "search_dir": "Path/to/downloads/folder/ this is used for search", # Alternatively, search multiple folders: # "search_dir" : [ # "/downloads/dir1", # "/data/dir2", # ] - "discord_emojis" : { - "BLU": "πŸ’™", - "BHD": "πŸŽ‰", - "AITHER": "πŸ›«", - "STC": "πŸ“Ί", - "ACM": "πŸ™", - "MANUAL" : "πŸ“©", - "UPLOAD" : "βœ…", - "CANCEL" : "🚫" + "discord_emojis": { + "BLU": "πŸ’™", + "BHD": "πŸŽ‰", + "AITHER": "πŸ›«", + "STC": "πŸ“Ί", + "ACM": "πŸ™", + "MANUAL": "πŸ“©", + "UPLOAD": "βœ…", + "CANCEL": "🚫" } } -} - +} \ No newline at end of file diff --git a/discordbot.py b/discordbot.py index 6f4a59eb..694d1bf5 100644 --- a/discordbot.py +++ b/discordbot.py @@ -1,21 +1,18 @@ import asyncio import datetime -import json import logging -import configparser from pathlib import Path import discord from discord.ext import commands - - def config_load(): # Python Config from data.config import config return config + async def run(): """ Where the bot gets started. If you wanted to create an database connection pool or other session for the bot to use, @@ -76,7 +73,6 @@ async def load_all_extensions(self): print(f'failed to load extension {error}') print('-' * 10) - async def on_ready(self): """ This event is called every time the bot connects or resumes connection. 
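
(Editor's note, not part of the diff: the startup retained in the final `discordbot.py` hunk below still uses the `asyncio.get_event_loop()` + `run_until_complete()` pair, a pattern deprecated for this use since Python 3.10. A minimal sketch of the modern equivalent, assuming the `run()` coroutine this file defines:)

```python
import asyncio
import logging

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # asyncio.run() owns event-loop creation and teardown, replacing the
    # explicit get_event_loop()/run_until_complete() pair kept below.
    asyncio.run(run())  # run() is the startup coroutine defined earlier in this file
```
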
@@ -102,12 +98,8 @@ async def on_message(self, message): await self.process_commands(message) - - - - if __name__ == '__main__': logging.basicConfig(level=logging.INFO) loop = asyncio.get_event_loop() - loop.run_until_complete(run()) + loop.run_until_complete(run()) \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 01c8c562..c63a8c33 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,3 +20,4 @@ rich Jinja2 pyotp str2bool +click \ No newline at end of file diff --git a/src/args.py b/src/args.py index b763ced8..d3be5c68 100644 --- a/src/args.py +++ b/src/args.py @@ -3,7 +3,6 @@ import urllib.parse import os import datetime -import traceback from src.console import console @@ -16,8 +15,6 @@ def __init__(self, config): self.config = config pass - - def parse(self, args, meta): input = args parser = argparse.ArgumentParser() @@ -64,7 +61,7 @@ def parse(self, args, meta): parser.add_argument('-webdv', '--webdv', action='store_true', required=False, help="Contains a Dolby Vision layer converted using dovi_tool") parser.add_argument('-hc', '--hardcoded-subs', action='store_true', required=False, help="Contains hardcoded subs", dest="hardcoded-subs") parser.add_argument('-pr', '--personalrelease', action='store_true', required=False, help="Personal Release") - parser.add_argument('-sdc','--skip-dupe-check', action='store_true', required=False, help="Pass if you know this is a dupe (Skips dupe check)", dest="dupe") + parser.add_argument('-sdc', '--skip-dupe-check', action='store_true', required=False, help="Pass if you know this is a dupe (Skips dupe check)", dest="dupe") parser.add_argument('-debug', '--debug', action='store_true', required=False, help="Debug Mode, will run through all the motions providing extra info, but will not upload to trackers.") parser.add_argument('-ffdebug', '--ffdebug', action='store_true', required=False, help="Will show info from ffmpeg while taking screenshots.") parser.add_argument('-m', '--manual', action='store_true', required=False, help="Manual Mode. 
Returns link to ddl screens/base.torrent") @@ -72,7 +69,7 @@ def parse(self, args, meta): parser.add_argument('-rh', '--rehash', action='store_true', required=False, help="DO hash .torrent") parser.add_argument('-ps', '--piece-size-max', dest='piece_size_max', nargs='*', required=False, help="Maximum piece size in MiB", choices=[1, 2, 4, 8, 16], type=int) parser.add_argument('-dr', '--draft', action='store_true', required=False, help="Send to drafts (BHD)") - parser.add_argument('-tc', '--torrent-creation', dest='torrent_creation', nargs='*', required=False, help="What tool should be used to create the base .torrent", choices=['torf', 'torrenttools', 'mktorrent']) + parser.add_argument('-mps', '--max-piece-size', nargs='*', required=False, help="Set max piece size allowed in MiB for default torrent creation (default 64 MiB)", choices=['2', '4', '8', '16', '32', '64', '128']) parser.add_argument('-client', '--client', nargs='*', required=False, help="Use this torrent client instead of default") parser.add_argument('-qbt', '--qbit-tag', dest='qbit_tag', nargs='*', required=False, help="Add to qbit with this tag") parser.add_argument('-qbc', '--qbit-cat', dest='qbit_cat', nargs='*', required=False, help="Add to qbit with this category") @@ -82,7 +79,6 @@ def parse(self, args, meta): parser.add_argument('-ua', '--unattended', action='store_true', required=False, help=argparse.SUPPRESS) parser.add_argument('-vs', '--vapoursynth', action='store_true', required=False, help="Use vapoursynth for screens (requires vs install)") parser.add_argument('-cleanup', '--cleanup', action='store_true', required=False, help="Clean up tmp directory") - parser.add_argument('-fl', '--freeleech', nargs='*', required=False, help="Freeleech Percentage", default=0, dest="freeleech") args, before_args = parser.parse_known_args(input) args = vars(args) @@ -97,7 +93,7 @@ def parse(self, args, meta): else: break - if meta.get('tmdb_manual') != None or meta.get('imdb') != None: + if meta.get('tmdb_manual') is not None or meta.get('imdb') is not None: meta['tmdb_manual'] = meta['imdb'] = None for key in args: value = args.get(key) @@ -105,7 +101,7 @@ def parse(self, args, meta): if isinstance(value, list): value2 = self.list_to_string(value) if key == 'type': - meta[key] = value2.upper().replace('-','') + meta[key] = value2.upper().replace('-', '') elif key == 'tag': meta[key] = f"-{value2}" elif key == 'screens': @@ -123,7 +119,7 @@ def parse(self, args, meta): parsed = urllib.parse.urlparse(value2) try: meta['ptp'] = urllib.parse.parse_qs(parsed.query)['torrentid'][0] - except: + except Exception: console.print('[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid') console.print('[red]Continuing without -ptp') else: @@ -136,7 +132,7 @@ def parse(self, args, meta): if blupath.endswith('/'): blupath = blupath[:-1] meta['blu'] = blupath.split('/')[-1] - except: + except Exception: console.print('[red]Unable to parse id from url') console.print('[red]Continuing without --blu') else: @@ -146,7 +142,7 @@ def parse(self, args, meta): parsed = urllib.parse.urlparse(value2) try: meta['hdb'] = urllib.parse.parse_qs(parsed.query)['id'][0] - except: + except Exception: console.print('[red]Your terminal ate part of the url, please surround in quotes next time, or pass only the torrentid') console.print('[red]Continuing without -hdb') else: @@ -170,17 +166,15 @@ def parse(self, args, meta): # parser.print_help() return meta, parser, before_args - def list_to_string(self, list): if 
len(list) == 1: return str(list[0]) try: result = " ".join(list) - except: + except Exception: result = "None" return result - def parse_tmdb_id(self, id, category): id = id.lower().lstrip() if id.startswith('tv'): @@ -191,4 +185,4 @@ def parse_tmdb_id(self, id, category): category = 'MOVIE' else: id = id - return category, id + return category, id \ No newline at end of file diff --git a/src/bbcode.py b/src/bbcode.py index 8f82969d..0665b638 100644 --- a/src/bbcode.py +++ b/src/bbcode.py @@ -1,6 +1,7 @@ import re import html import urllib.parse +from src.console import console # Bold - KEEP # Italic - KEEP @@ -36,63 +37,62 @@ def __init__(self): pass def clean_ptp_description(self, desc, is_disc): + # console.print(f"[yellow]Cleaning PTP description...") + # Convert Bullet Points to - desc = desc.replace("•", "-") # Unescape html desc = html.unescape(desc) - # End my suffering desc = desc.replace('\r\n', '\n') # Remove url tags with PTP/HDB links - url_tags = re.findall("(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) - url_tags = url_tags + re.findall("(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) - if url_tags != []: + url_tags = re.findall(r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) + url_tags += re.findall(r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+)([^\[]+)(\[\/url\])?", desc, flags=re.IGNORECASE) + if url_tags: for url_tag in url_tags: url_tag = ''.join(url_tag) - url_tag_removed = re.sub("(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+])", "", url_tag, flags=re.IGNORECASE) - url_tag_removed = re.sub("(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+])", "", url_tag_removed, flags=re.IGNORECASE) + url_tag_removed = re.sub(r"(\[url[\=\]]https?:\/\/passthepopcorn\.m[^\]]+])", "", url_tag, flags=re.IGNORECASE) + url_tag_removed = re.sub(r"(\[url[\=\]]https?:\/\/hdbits\.o[^\]]+])", "", url_tag_removed, flags=re.IGNORECASE) url_tag_removed = url_tag_removed.replace("[/url]", "") desc = desc.replace(url_tag, url_tag_removed) - # Remove links to PTP + # Remove links to PTP/HDB desc = desc.replace('http://passthepopcorn.me', 'PTP').replace('https://passthepopcorn.me', 'PTP') desc = desc.replace('http://hdbits.org', 'HDB').replace('https://hdbits.org', 'HDB') # Remove Mediainfo Tags / Attempt to regex out mediainfo - mediainfo_tags = re.findall("\[mediainfo\][\s\S]*?\[\/mediainfo\]", desc) - if len(mediainfo_tags) >= 1: - desc = re.sub("\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) + mediainfo_tags = re.findall(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", desc) + if mediainfo_tags: + desc = re.sub(r"\[mediainfo\][\s\S]*?\[\/mediainfo\]", "", desc) elif is_disc != "BDMV": - desc = re.sub("(^general\nunique)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^general\ncomplete)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^(Format[\s]{2,}:))(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) - desc = re.sub("(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^general\nunique)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^general\ncomplete)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^(Format[\s]{2,}:))(.*?)^$", "", 
desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^(video|audio|text)( #\d+)?\nid)(.*?)^$", "", desc, flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) + desc = re.sub(r"(^(menu)( #\d+)?\n)(.*?)^$", "", f"{desc}\n\n", flags=re.MULTILINE | re.IGNORECASE | re.DOTALL) elif any(x in is_disc for x in ["BDMV", "DVD"]): - return "" - + return "", [] # Convert Quote tags: - desc = re.sub("\[quote.*?\]", "[code]", desc) + desc = re.sub(r"\[quote.*?\]", "[code]", desc) desc = desc.replace("[/quote]", "[/code]") # Remove Alignments: - desc = re.sub("\[align=.*?\]", "", desc) + desc = re.sub(r"\[align=.*?\]", "", desc) desc = desc.replace("[/align]", "") # Remove size tags - desc = re.sub("\[size=.*?\]", "", desc) + desc = re.sub(r"\[size=.*?\]", "", desc) desc = desc.replace("[/size]", "") # Remove Videos - desc = re.sub("\[video\][\s\S]*?\[\/video\]", "", desc) + desc = re.sub(r"\[video\][\s\S]*?\[\/video\]", "", desc) # Remove Staff tags - desc = re.sub("\[staff[\s\S]*?\[\/staff\]", "", desc) - + desc = re.sub(r"\[staff[\s\S]*?\[\/staff\]", "", desc) - #Remove Movie/Person/User/hr/Indent + # Remove Movie/Person/User/hr/Indent remove_list = [ '[movie]', '[/movie]', '[artist]', '[/artist]', @@ -104,33 +104,40 @@ def clean_ptp_description(self, desc, is_disc): for each in remove_list: desc = desc.replace(each, '') - #Catch Stray Images - comps = re.findall("\[comparison=[\s\S]*?\[\/comparison\]", desc) - hides = re.findall("\[hide[\s\S]*?\[\/hide\]", desc) + # Catch Stray Images and Prepare Image List + imagelist = [] + comps = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) + hides = re.findall(r"\[hide[\s\S]*?\[\/hide\]", desc) comps.extend(hides) nocomp = desc comp_placeholders = [] # Replace comparison/hide tags with placeholder because sometimes uploaders use comp images as loose images - for i in range(len(comps)): - nocomp = nocomp.replace(comps[i], '') - desc = desc.replace(comps[i], f"COMPARISON_PLACEHOLDER-{i} ") - comp_placeholders.append(comps[i]) - + for i, comp in enumerate(comps): + nocomp = nocomp.replace(comp, '') + desc = desc.replace(comp, f"COMPARISON_PLACEHOLDER-{i} ") + comp_placeholders.append(comp) # Remove Images in IMG tags: - desc = re.sub("\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) - desc = re.sub("\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) - # Replace Images - loose_images = re.findall("(https?:\/\/.*\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) - if len(loose_images) >= 1: - for image in loose_images: - desc = desc.replace(image, '') + desc = re.sub(r"\[img\][\s\S]*?\[\/img\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[img=[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + + # Extract loose images and add to imagelist as dictionaries + loose_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", nocomp, flags=re.IGNORECASE) + if loose_images: + for img_url in loose_images: + image_dict = { + 'img_url': img_url, + 'raw_url': img_url, + 'web_url': img_url # Since there is no distinction here, use the same URL for all + } + imagelist.append(image_dict) + desc = desc.replace(img_url, '') + # Re-place comparisons - if comp_placeholders != []: - for i, comp in enumerate(comp_placeholders): - comp = re.sub("\[\/?img[\s\S]*?\]", "",comp, flags=re.IGNORECASE) - desc = desc.replace(f"COMPARISON_PLACEHOLDER-{i} ", comp) + for i, comp in enumerate(comp_placeholders): + comp = re.sub(r"\[\/?img[\s\S]*?\]", "", comp, flags=re.IGNORECASE) + desc = desc.replace(f"COMPARISON_PLACEHOLDER-{i} ", comp) # Convert hides with multiple 
images to comparison desc = self.convert_collapse_to_comparison(desc, "hide", hides) @@ -142,25 +149,26 @@ def clean_ptp_description(self, desc, is_disc): desc = desc.replace('\n', '', 1) desc = desc.strip('\n') - if desc.replace('\n', '') == '': - return "" - return desc + if desc.replace('\n', '').strip() == '': + console.print("[yellow]Description is empty after cleaning.") + return "", imagelist + return desc, imagelist def clean_unit3d_description(self, desc, site): - # Unescape html + # Unescape HTML desc = html.unescape(desc) - # End my suffering + # Replace carriage returns with newlines desc = desc.replace('\r\n', '\n') # Remove links to site site_netloc = urllib.parse.urlparse(site).netloc - site_regex = f"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" + site_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}/[^\]]+])([^\[]+)(\[\/url\])?" site_url_tags = re.findall(site_regex, desc) - if site_url_tags != []: + if site_url_tags: for site_url_tag in site_url_tags: site_url_tag = ''.join(site_url_tag) - url_tag_regex = f"(\[url[\=\]]https?:\/\/{site_netloc}[^\]]+])" + url_tag_regex = rf"(\[url[\=\]]https?:\/\/{site_netloc}[^\]]+])" url_tag_removed = re.sub(url_tag_regex, "", site_url_tag) url_tag_removed = url_tag_removed.replace("[/url]", "") desc = desc.replace(site_url_tag, url_tag_removed) @@ -168,7 +176,7 @@ def clean_unit3d_description(self, desc, site): desc = desc.replace(site_netloc, site_netloc.split('.')[0]) # Temporarily hide spoiler tags - spoilers = re.findall("\[spoiler[\s\S]*?\[\/spoiler\]", desc) + spoilers = re.findall(r"\[spoiler[\s\S]*?\[\/spoiler\]", desc) nospoil = desc spoiler_placeholders = [] for i in range(len(spoilers)): @@ -176,76 +184,67 @@ def clean_unit3d_description(self, desc, site): desc = desc.replace(spoilers[i], f"SPOILER_PLACEHOLDER-{i} ") spoiler_placeholders.append(spoilers[i]) - # Get Images from outside spoilers + # Get Images from [img] tags and remove them from the description imagelist = [] - url_tags = re.findall("\[url=[\s\S]*?\[\/url\]", desc) - if url_tags != []: - for tag in url_tags: - image = re.findall("\[img[\s\S]*?\[\/img\]", tag) - if len(image) == 1: - image_dict = {} - img_url = image[0].lower().replace('[img]', '').replace('[/img]', '') - image_dict['img_url'] = image_dict['raw_url'] = re.sub("\[img[\s\S]*\]", "", img_url) - url_tag = tag.replace(image[0], '') - image_dict['web_url'] = re.match("\[url=[\s\S]*?\]", url_tag, flags=re.IGNORECASE)[0].lower().replace('[url=', '')[:-1] - imagelist.append(image_dict) - desc = desc.replace(tag, '') - - # Remove bot signatures - desc = desc.replace("[img=35]https://blutopia/favicon.ico[/img] [b]Uploaded Using [url=https://github.com/HDInnovations/UNIT3D]UNIT3D[/url] Auto Uploader[/b] [img=35]https://blutopia/favicon.ico[/img]", '') - desc = re.sub("\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) + img_tags = re.findall(r"\[img[^\]]*\](.*?)\[/img\]", desc, re.IGNORECASE) + if img_tags: + for img_url in img_tags: + image_dict = { + 'img_url': img_url.strip(), + 'raw_url': img_url.strip(), + 'web_url': img_url.strip(), + } + imagelist.append(image_dict) + # Remove the [img] tag and its contents from the description + desc = re.sub(rf"\[img[^\]]*\]{re.escape(img_url)}\[/img\]", '', desc, flags=re.IGNORECASE) + + # Filter out bot images from imagelist + bot_image_urls = [ + "https://blutopia.xyz/favicon.ico", # Example bot image URL + # Add any other known bot image URLs here + ] + imagelist = [img for img in imagelist if 
img['img_url'] not in bot_image_urls] - # Replace spoiler tags - if spoiler_placeholders != []: + # Restore spoiler tags + if spoiler_placeholders: for i, spoiler in enumerate(spoiler_placeholders): desc = desc.replace(f"SPOILER_PLACEHOLDER-{i} ", spoiler) - # Check for empty [center] tags - centers = re.findall("\[center[\s\S]*?\[\/center\]", desc) - if centers != []: + # Check for and clean up empty [center] tags + centers = re.findall(r"\[center[\s\S]*?\[\/center\]", desc) + if centers: for center in centers: - full_center = center - replace = ['[center]', ' ', '\n', '[/center]'] - for each in replace: - center = center.replace(each, '') - if center == "": - desc = desc.replace(full_center, '') + # If [center] contains only whitespace or empty tags, remove the entire tag + cleaned_center = re.sub(r'\[center\]\s*\[\/center\]', '', center) + cleaned_center = re.sub(r'\[center\]\s+', '[center]', cleaned_center) + cleaned_center = re.sub(r'\s*\[\/center\]', '[/center]', cleaned_center) + if cleaned_center == '[center][/center]': + desc = desc.replace(center, '') + else: + desc = desc.replace(center, cleaned_center.strip()) - # Convert Comparison spoilers to [comparison=] - desc = self.convert_collapse_to_comparison(desc, "spoiler", spoilers) + # Remove bot signatures + bot_signature_regex = r"\[center\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[b\]Uploaded Using \[url=https:\/\/github\.com\/HDInnovations\/UNIT3D\]UNIT3D\[\/url\] Auto Uploader\[\/b\]\s*\[img=\d+\]https:\/\/blutopia\.xyz\/favicon\.ico\[\/img\]\s*\[\/center\]" + desc = re.sub(bot_signature_regex, "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[center\].*Created by L4G's Upload Assistant.*\[\/center\]", "", desc, flags=re.IGNORECASE) - # Strip blank lines: - desc = desc.strip('\n') - desc = re.sub("\n\n+", "\n\n", desc) - while desc.startswith('\n'): - desc = desc.replace('\n', '', 1) - desc = desc.strip('\n') + # Ensure no dangling tags and remove extra blank lines + desc = re.sub(r'\n\s*\n', '\n', desc) # Remove multiple consecutive blank lines + desc = re.sub(r'\n\n+', '\n\n', desc) # Ensure no excessive blank lines + desc = desc.strip() # Final cleanup of trailing newlines and spaces + + # Strip trailing whitespace and newlines: + desc = desc.rstrip() if desc.replace('\n', '') == '': return "", imagelist return desc, imagelist - - - - - - - - - - - - - - - def convert_pre_to_code(self, desc): desc = desc.replace('[pre]', '[code]') desc = desc.replace('[/pre]', '[/code]') return desc - def convert_hide_to_spoiler(self, desc): desc = desc.replace('[hide', '[spoiler') desc = desc.replace('[/hide]', '[/spoiler]') @@ -257,7 +256,7 @@ def convert_spoiler_to_hide(self, desc): return desc def remove_spoiler(self, desc): - desc = re.sub("\[\/?spoiler[\s\S]*?\]", "", desc, flags=re.IGNORECASE) + desc = re.sub(r"\[\/?spoiler[\s\S]*?\]", "", desc, flags=re.IGNORECASE) return desc def convert_spoiler_to_code(self, desc): @@ -271,13 +270,13 @@ def convert_code_to_quote(self, desc): return desc def convert_comparison_to_collapse(self, desc, max_width): - comparisons = re.findall("\[comparison=[\s\S]*?\[\/comparison\]", desc) + comparisons = re.findall(r"\[comparison=[\s\S]*?\[\/comparison\]", desc) for comp in comparisons: line = [] output = [] comp_sources = comp.split(']', 1)[0].replace('[comparison=', '').replace(' ', '').split(',') comp_images = comp.split(']', 1)[1].replace('[/comparison]', '').replace(',', '\n').replace(' ', '\n') - comp_images = re.findall("(https?:\/\/.*\.(?:png|jpg))", comp_images, 
flags=re.IGNORECASE)
+            comp_images = re.findall(r"(https?:\/\/.*\.(?:png|jpg))", comp_images, flags=re.IGNORECASE)
             screens_per_line = len(comp_sources)
             img_size = int(max_width / screens_per_line)
             if img_size > 350:
@@ -326,17 +324,17 @@ def convert_collapse_to_comparison(self, desc, spoiler_hide, collapses):
         if collapses != []:
             for i in range(len(collapses)):
                 tag = collapses[i]
-                images = re.findall("\[img[\s\S]*?\[\/img\]", tag, flags=re.IGNORECASE)
+                images = re.findall(r"\[img[\s\S]*?\[\/img\]", tag, flags=re.IGNORECASE)
                 if len(images) >= 6:
                     comp_images = []
                     final_sources = []
                     for image in images:
-                        image_url = re.sub("\[img[\s\S]*\]", "", image.replace('[/img]', ''), flags=re.IGNORECASE)
+                        image_url = re.sub(r"\[img[\s\S]*\]", "", image.replace('[/img]', ''), flags=re.IGNORECASE)
                         comp_images.append(image_url)
                     if spoiler_hide == "spoiler":
-                        sources = re.match("\[spoiler[\s\S]*?\]", tag)[0].replace('[spoiler=', '')[:-1]
+                        sources = re.match(r"\[spoiler[\s\S]*?\]", tag)[0].replace('[spoiler=', '')[:-1]
                     elif spoiler_hide == "hide":
-                        sources = re.match("\[hide[\s\S]*?\]", tag)[0].replace('[hide=', '')[:-1]
+                        sources = re.match(r"\[hide[\s\S]*?\]", tag)[0].replace('[hide=', '')[:-1]
                     sources = re.sub("comparison", "", sources, flags=re.IGNORECASE)
                     for each in ['vs', ',', '|']:
                         sources = sources.split(each)
@@ -348,4 +346,4 @@ def convert_collapse_to_comparison(self, desc, spoiler_hide, collapses):
                     final_sources = ', '.join(final_sources)
                     spoil2comp = f"[comparison={final_sources}]{comp_images}[/comparison]"
                     desc = desc.replace(tag, spoil2comp)
-        return desc
+        return desc
\ No newline at end of file
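
(Editor's note, not part of the diff: the `src/clients.py` hunks that follow gate torrent reuse on torf's piece statistics. A standalone sketch of that eligibility rule, with the thresholds copied from this diff and a hypothetical helper name; `Torrent.read()` is the same torf call the file itself uses:)

```python
from torf import Torrent  # same library src/clients.py imports


def torrent_is_reusable(torrent_path: str) -> bool:
    """Hypothetical helper mirroring the piece-count guard in is_valid_torrent.

    Thresholds from this diff: reject torrents with >= 7000 pieces at a
    piece size under 8 MiB, or >= 4000 pieces at a piece size under 4 MiB.
    """
    t = Torrent.read(torrent_path)
    too_many_pieces = (
        (t.pieces >= 7000 and t.piece_size < 8 * 1024 * 1024)
        or (t.pieces >= 4000 and t.piece_size < 4 * 1024 * 1024)
    )
    return not too_many_pieces
```
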
client") return if os.path.exists(torrent_path): torrent = Torrent.read(torrent_path) else: return - if meta.get('client', None) == None: + if meta.get('client', None) is None: default_torrent_client = self.config['DEFAULT']['default_torrent_client'] else: default_torrent_client = meta['client'] @@ -52,14 +53,14 @@ async def add_to_client(self, meta, tracker): await self.qbittorrent(meta['path'], torrent, local_path, remote_path, client, meta['is_disc'], meta['filelist'], meta) elif torrent_client.lower() == "deluge": if meta['type'] == "DISC": - path = os.path.dirname(meta['path']) + path = os.path.dirname(meta['path']) # noqa F841 self.deluge(meta['path'], torrent_path, torrent, local_path, remote_path, client, meta) elif torrent_client.lower() == "watch": shutil.copy(torrent_path, client['watch_folder']) return async def find_existing_torrent(self, meta): - if meta.get('client', None) == None: + if meta.get('client', None) is None: default_torrent_client = self.config['DEFAULT']['default_torrent_client'] else: default_torrent_client = meta['client'] @@ -68,22 +69,22 @@ async def find_existing_torrent(self, meta): client = self.config['TORRENT_CLIENTS'][default_torrent_client] torrent_storage_dir = client.get('torrent_storage_dir', None) torrent_client = client.get('torrent_client', None).lower() - if torrent_storage_dir == None and torrent_client != "watch": + if torrent_storage_dir is None and torrent_client != "watch": console.print(f'[bold red]Missing torrent_storage_dir for {default_torrent_client}') return None elif not os.path.exists(str(torrent_storage_dir)) and torrent_client != "watch": console.print(f"[bold red]Invalid torrent_storage_dir path: [bold yellow]{torrent_storage_dir}") torrenthash = None - if torrent_storage_dir != None and os.path.exists(torrent_storage_dir): - if meta.get('torrenthash', None) != None: + if torrent_storage_dir is not None and os.path.exists(torrent_storage_dir): + if meta.get('torrenthash', None) is not None: valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['torrenthash']}.torrent", meta['torrenthash'], torrent_client, print_err=True) if valid: torrenthash = meta['torrenthash'] - elif meta.get('ext_torrenthash', None) != None: + elif meta.get('ext_torrenthash', None) is not None: valid, torrent_path = await self.is_valid_torrent(meta, f"{torrent_storage_dir}/{meta['ext_torrenthash']}.torrent", meta['ext_torrenthash'], torrent_client, print_err=True) if valid: torrenthash = meta['ext_torrenthash'] - if torrent_client == 'qbit' and torrenthash == None and client.get('enable_search') == True: + if torrent_client == 'qbit' and torrenthash is None and client.get('enable_search') is True: torrenthash = await self.search_qbit_for_torrent(meta, client) if not torrenthash: console.print("[bold yellow]No Valid .torrent found") @@ -100,61 +101,89 @@ async def is_valid_torrent(self, meta, torrent_path, torrenthash, torrent_client valid = False wrong_file = False err_print = "" + + # Normalize the torrent hash based on the client if torrent_client in ('qbit', 'deluge'): torrenthash = torrenthash.lower().strip() torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash) elif torrent_client == 'rtorrent': torrenthash = torrenthash.upper().strip() torrent_path = torrent_path.replace(torrenthash.upper(), torrenthash) + if meta['debug']: - console.log(torrent_path) + console.log(f"[DEBUG] Torrent path after normalization: {torrent_path}") + + # Check if torrent file exists if os.path.exists(torrent_path): torrent = 
Torrent.read(torrent_path) + # Reuse if disc and basename matches or --keep-folder was specified - if meta.get('is_disc', None) != None or (meta['keep_folder'] and meta['isdir']): + if meta.get('is_disc', None) is not None or (meta['keep_folder'] and meta['isdir']): torrent_filepath = os.path.commonpath(torrent.files) if os.path.basename(meta['path']) in torrent_filepath: valid = True + if meta['debug']: + console.log(f"[DEBUG] Torrent is valid based on disc/basename or keep-folder: {valid}") + # If one file, check for folder if len(torrent.files) == len(meta['filelist']) == 1: if os.path.basename(torrent.files[0]) == os.path.basename(meta['filelist'][0]): if str(torrent.files[0]) == os.path.basename(torrent.files[0]): valid = True - else: - wrong_file = True + else: + wrong_file = True + if meta['debug']: + console.log(f"[DEBUG] Single file match status: valid={valid}, wrong_file={wrong_file}") + # Check if number of files matches number of videos elif len(torrent.files) == len(meta['filelist']): torrent_filepath = os.path.commonpath(torrent.files) actual_filepath = os.path.commonpath(meta['filelist']) local_path, remote_path = await self.remote_path_map(meta) + if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): actual_filepath = torrent_path.replace(local_path, remote_path) actual_filepath = torrent_path.replace(os.sep, '/') + if meta['debug']: - console.log(f"torrent_filepath: {torrent_filepath}") - console.log(f"actual_filepath: {actual_filepath}") + console.log(f"[DEBUG] torrent_filepath: {torrent_filepath}") + console.log(f"[DEBUG] actual_filepath: {actual_filepath}") + if torrent_filepath in actual_filepath: valid = True + if meta['debug']: + console.log(f"[DEBUG] Multiple file match status: valid={valid}") + else: console.print(f'[bold yellow]{torrent_path} was not found') + + # Additional checks if the torrent is valid so far if valid: if os.path.exists(torrent_path): reuse_torrent = Torrent.read(torrent_path) - if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304): # Allow up to 7k pieces at 8MiB or 4k pieces at 4MiB or less + if meta['debug']: + console.log(f"[DEBUG] Checking piece size and count: pieces={reuse_torrent.pieces}, piece_size={reuse_torrent.piece_size}") + + if (reuse_torrent.pieces >= 7000 and reuse_torrent.piece_size < 8388608) or (reuse_torrent.pieces >= 4000 and reuse_torrent.piece_size < 4194304): err_print = "[bold yellow]Too many pieces exist in current hash. 
REHASHING" valid = False elif reuse_torrent.piece_size < 32768: err_print = "[bold yellow]Piece size too small to reuse" valid = False - elif wrong_file == True: + elif wrong_file: err_print = "[bold red] Provided .torrent has files that were not expected" valid = False else: err_print = f'[bold green]REUSING .torrent with infohash: [bold yellow]{torrenthash}' + if meta['debug']: + console.log(f"[DEBUG] Final validity after piece checks: valid={valid}") else: err_print = '[bold yellow]Unwanted Files/Folders Identified' + + # Print the error message if needed if print_err: console.print(err_print) + return valid, torrent_path async def search_qbit_for_torrent(self, meta, client): @@ -165,7 +194,7 @@ async def search_qbit_for_torrent(self, meta, client): console.print(f"Torrent storage directory found: {torrent_storage_dir}") else: console.print("No torrent storage directory found.") - if torrent_storage_dir == None and client.get("torrent_client", None) != "watch": + if torrent_storage_dir is None and client.get("torrent_client", None) != "watch": console.print(f"[bold red]Missing torrent_storage_dir for {self.config['DEFAULT']['default_torrent_client']}") return None @@ -185,7 +214,7 @@ async def search_qbit_for_torrent(self, meta, client): if local_path.lower() in meta['path'].lower() and local_path.lower() != remote_path.lower(): remote_path_map = True if meta['debug']: - console.print(f"Remote path mapping found!") + console.print("Remote path mapping found!") console.print(f"Local path: {local_path}") console.print(f"Remote path: {remote_path}") @@ -233,7 +262,7 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c isdir = os.path.isdir(path) # if meta['type'] == "DISC": # path = os.path.dirname(path) - #Remote path mount + # Remote path mount modified_fr = False if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): path_dir = os.path.dirname(path) @@ -242,16 +271,16 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c shutil.copy(fr_file, f"{path_dir}/fr.torrent") fr_file = f"{os.path.dirname(path)}/fr.torrent" modified_fr = True - if isdir == False: + if isdir is False: path = os.path.dirname(path) console.print("[bold yellow]Adding and starting torrent") rtorrent.load.start_verbose('', fr_file, f"d.directory_base.set={path}") time.sleep(1) # Add labels - if client.get('rtorrent_label', None) != None: + if client.get('rtorrent_label', None) is not None: rtorrent.d.custom1.set(torrent.infohash, client['rtorrent_label']) - if meta.get('rtorrent_label') != None: + if meta.get('rtorrent_label') is not None: rtorrent.d.custom1.set(torrent.infohash, meta['rtorrent_label']) # Delete modified fr_file location @@ -263,7 +292,7 @@ def rtorrent(self, path, torrent_path, torrent, meta, local_path, remote_path, c async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_disc, filelist, meta): # infohash = torrent.infohash - #Remote path mount + # Remote path mount isdir = os.path.isdir(path) if not isdir and len(filelist) == 1: path = os.path.dirname(path) @@ -302,9 +331,9 @@ async def qbittorrent(self, path, torrent, local_path, remote_path, client, is_d break await asyncio.sleep(1) qbt_client.torrents_resume(torrent.infohash) - if client.get('qbit_tag', None) != None: + if client.get('qbit_tag', None) is not None: qbt_client.torrents_add_tags(tags=client.get('qbit_tag'), torrent_hashes=torrent.infohash) - if meta.get('qbit_tag') != None: + if meta.get('qbit_tag') is not None: 
qbt_client.torrents_add_tags(tags=meta.get('qbit_tag'), torrent_hashes=torrent.infohash) console.print(f"Added to: {path}") @@ -312,17 +341,17 @@ def deluge(self, path, torrent_path, torrent, local_path, remote_path, client, m client = DelugeRPCClient(client['deluge_url'], int(client['deluge_port']), client['deluge_user'], client['deluge_pass']) # client = LocalDelugeRPCClient() client.connect() - if client.connected == True: + if client.connected is True: console.print("Connected to Deluge") - isdir = os.path.isdir(path) - #Remote path mount + isdir = os.path.isdir(path) # noqa F841 + # Remote path mount if local_path.lower() in path.lower() and local_path.lower() != remote_path.lower(): path = path.replace(local_path, remote_path) path = path.replace(os.sep, '/') path = os.path.dirname(path) - client.call('core.add_torrent_file', torrent_path, base64.b64encode(torrent.dump()), {'download_location' : path, 'seed_mode' : True}) + client.call('core.add_torrent_file', torrent_path, base64.b64encode(torrent.dump()), {'download_location': path, 'seed_mode': True}) if meta['debug']: console.print(f"[cyan]Path: {path}") else: @@ -365,19 +394,21 @@ def add_fast_resume(self, metainfo, datapath, torrent): resume["files"].append(dict( priority=1, mtime=int(os.path.getmtime(filepath)), - completed=(offset+fileinfo["length"]+piece_length-1) // piece_length - - offset // piece_length, + completed=( + (offset + fileinfo["length"] + piece_length - 1) // piece_length - + offset // piece_length + ), )) offset += fileinfo["length"] return metainfo async def remote_path_map(self, meta): - if meta.get('client', None) == None: + if meta.get('client', None) is None: torrent_client = self.config['DEFAULT']['default_torrent_client'] else: torrent_client = meta['client'] - local_path = list_local_path = self.config['TORRENT_CLIENTS'][torrent_client].get('local_path','/LocalPath') + local_path = list_local_path = self.config['TORRENT_CLIENTS'][torrent_client].get('local_path', '/LocalPath') remote_path = list_remote_path = self.config['TORRENT_CLIENTS'][torrent_client].get('remote_path', '/RemotePath') if isinstance(local_path, list): for i in range(len(local_path)): @@ -390,4 +421,4 @@ async def remote_path_map(self, meta): if local_path.endswith(os.sep): remote_path = remote_path + os.sep - return local_path, remote_path + return local_path, remote_path \ No newline at end of file diff --git a/src/console.py b/src/console.py index 223c5118..745d8c8b 100644 --- a/src/console.py +++ b/src/console.py @@ -1,2 +1,2 @@ from rich.console import Console -console = Console() +console = Console() \ No newline at end of file diff --git a/src/discparse.py b/src/discparse.py index c5e6ee56..64422cca 100644 --- a/src/discparse.py +++ b/src/discparse.py @@ -28,7 +28,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): for file in os.listdir(save_dir): if file == f"BD_SUMMARY_{str(i).zfill(2)}.txt": bdinfo_text = save_dir + "/" + file - if bdinfo_text == None or meta_discs == []: + if bdinfo_text is None or meta_discs == []: if os.path.exists(f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt"): bdinfo_text = os.path.abspath(f"{save_dir}/BD_FULL_{str(i).zfill(2)}.txt") else: @@ -39,7 +39,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): console.print(f"[bold green]Scanning {path}") proc = await asyncio.create_subprocess_exec('mono', f"{base_dir}/bin/BDInfo/BDInfo.exe", '-w', path, save_dir) await proc.wait() - except: + except Exception: console.print('[bold red]mono not found, please install 
mono') elif sys.platform.startswith('win32'): @@ -54,7 +54,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): try: if bdinfo_text == "": for file in os.listdir(save_dir): - if file.startswith(f"BDINFO"): + if file.startswith("BDINFO"): bdinfo_text = save_dir + "/" + file with open(bdinfo_text, 'r') as f: text = f.read() @@ -64,7 +64,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): result = result2.split("********************", 1) bd_summary = result[0].rstrip(" \n") f.close() - with open(bdinfo_text, 'r') as f: # parse extended BDInfo + with open(bdinfo_text, 'r') as f: # parse extended BDInfo text = f.read() result = text.split("[code]", 3) result2 = result[2].rstrip(" \n") @@ -84,7 +84,7 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): with open(f"{save_dir}/BD_SUMMARY_{str(i).zfill(2)}.txt", 'w') as f: f.write(bd_summary.strip()) f.close() - with open(f"{save_dir}/BD_SUMMARY_EXT.txt", 'w') as f: # write extended BDInfo file + with open(f"{save_dir}/BD_SUMMARY_EXT.txt", 'w') as f: # write extended BDInfo file f.write(ext_bd_summary.strip()) f.close() @@ -98,8 +98,6 @@ async def get_bdinfo(self, discs, folder_id, base_dir, meta_discs): return discs, discs[0]['bdinfo'] - - def parse_bdinfo(self, bdinfo_input, files, path): bdinfo = dict() bdinfo['video'] = list() @@ -107,56 +105,56 @@ def parse_bdinfo(self, bdinfo_input, files, path): bdinfo['subtitles'] = list() bdinfo['path'] = path lines = bdinfo_input.splitlines() - for l in lines: + for l in lines: # noqa E741 line = l.strip().lower() if line.startswith("*"): line = l.replace("*", "").strip().lower() if line.startswith("playlist:"): playlist = l.split(':', 1)[1] - bdinfo['playlist'] = playlist.split('.',1)[0].strip() + bdinfo['playlist'] = playlist.split('.', 1)[0].strip() if line.startswith("disc size:"): size = l.split(':', 1)[1] - size = size.split('bytes', 1)[0].replace(',','') - size = float(size)/float(1<<30) + size = size.split('bytes', 1)[0].replace(',', '') + size = float(size) / float(1 << 30) bdinfo['size'] = size if line.startswith("length:"): length = l.split(':', 1)[1] - bdinfo['length'] = length.split('.',1)[0].strip() + bdinfo['length'] = length.split('.', 1)[0].strip() if line.startswith("video:"): split1 = l.split(':', 1)[1] split2 = split1.split('/', 12) while len(split2) != 9: split2.append("") - n=0 + n = 0 if "Eye" in split2[2].strip(): n = 1 three_dim = split2[2].strip() else: three_dim = "" try: - bit_depth = split2[n+6].strip() - hdr_dv = split2[n+7].strip() - color = split2[n+8].strip() - except: + bit_depth = split2[n + 6].strip() + hdr_dv = split2[n + 7].strip() + color = split2[n + 8].strip() + except Exception: bit_depth = "" hdr_dv = "" color = "" bdinfo['video'].append({ 'codec': split2[0].strip(), 'bitrate': split2[1].strip(), - 'res': split2[n+2].strip(), - 'fps': split2[n+3].strip(), - 'aspect_ratio' : split2[n+4].strip(), - 'profile': split2[n+5].strip(), - 'bit_depth' : bit_depth, - 'hdr_dv' : hdr_dv, - 'color' : color, - '3d' : three_dim, - }) + 'res': split2[n + 2].strip(), + 'fps': split2[n + 3].strip(), + 'aspect_ratio': split2[n + 4].strip(), + 'profile': split2[n + 5].strip(), + 'bit_depth': bit_depth, + 'hdr_dv': hdr_dv, + 'color': color, + '3d': three_dim, + }) elif line.startswith("audio:"): if "(" in l: - l = l.split("(")[0] - l = l.strip() + l = l.split("(")[0] # noqa E741 + l = l.strip() # noqa E741 split1 = l.split(':', 1)[1] split2 = split1.split('/') n = 0 @@ -166,18 +164,18 @@ def parse_bdinfo(self, bdinfo_input, 
files, path): else: fuckatmos = "" try: - bit_depth = split2[n+5].strip() - except: + bit_depth = split2[n + 5].strip() + except Exception: bit_depth = "" bdinfo['audio'].append({ - 'language' : split2[0].strip(), - 'codec' : split2[1].strip(), - 'channels' : split2[n+2].strip(), - 'sample_rate' : split2[n+3].strip(), - 'bitrate' : split2[n+4].strip(), - 'bit_depth' : bit_depth, # Also DialNorm, but is not in use anywhere yet + 'language': split2[0].strip(), + 'codec': split2[1].strip(), + 'channels': split2[n + 2].strip(), + 'sample_rate': split2[n + 3].strip(), + 'bitrate': split2[n + 4].strip(), + 'bit_depth': bit_depth, # Also DialNorm, but is not in use anywhere yet 'atmos_why_you_be_like_this': fuckatmos, - }) + }) elif line.startswith("disc title:"): title = l.split(':', 1)[1] bdinfo['title'] = title @@ -195,19 +193,17 @@ def parse_bdinfo(self, bdinfo_input, files, path): stripped = line.split() m2ts = {} bd_file = stripped[0] - time_in = stripped[1] + time_in = stripped[1] # noqa F841 bd_length = stripped[2] - bd_size = stripped[3] - bd_bitrate = stripped[4] + bd_size = stripped[3] # noqa F841 + bd_bitrate = stripped[4] # noqa F841 m2ts['file'] = bd_file m2ts['length'] = bd_length bdinfo['files'].append(m2ts) - except: + except Exception: pass return bdinfo - - """ Parse VIDEO_TS and get mediainfos """ @@ -215,7 +211,7 @@ async def get_dvdinfo(self, discs): for each in discs: path = each.get('path') os.chdir(path) - files = glob(f"VTS_*.VOB") + files = glob("VTS_*.VOB") files.sort() # Switch to ordered dictionary filesdict = OrderedDict() @@ -233,7 +229,6 @@ async def get_dvdinfo(self, discs): vob_set_mi = json.loads(vob_set_mi) vob_set_duration = vob_set_mi['media']['track'][1]['Duration'] - # If the duration of the new vob set > main set by more than 10% then it's our new main set # This should make it so TV shows pick the first episode if (float(vob_set_duration) * 1.00) > (float(main_set_duration) * 1.10) or len(main_set) < 1: @@ -243,13 +238,12 @@ async def get_dvdinfo(self, discs): set = main_set[0][:2] each['vob'] = vob = f"{path}/VTS_{set}_1.VOB" each['ifo'] = ifo = f"{path}/VTS_{set}_0.IFO" - each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') - each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False, mediainfo_options={'inform_version' : '1'}).replace('\r\n', '\n') + each['vob_mi'] = MediaInfo.parse(os.path.basename(vob), output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each['ifo_mi'] = MediaInfo.parse(os.path.basename(ifo), output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each['vob_mi_full'] = MediaInfo.parse(vob, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') + each['ifo_mi_full'] = MediaInfo.parse(ifo, output='STRING', full=False, mediainfo_options={'inform_version': '1'}).replace('\r\n', '\n') - - size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f))/float(1<<30) + size = sum(os.path.getsize(f) for f in os.listdir('.') if os.path.isfile(f)) / float(1 << 30) if size <= 7.95: dvd_size = "DVD9" if size <= 4.37: 
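As an aside, the DVD sizing decision at the end of the hunk above is easier to read outside the diff. Below is a minimal standalone sketch of the same logic; `classify_dvd_size` is a hypothetical name used only for illustration (the PR computes this inline in `get_dvdinfo` with `os.listdir('.')` after chdir'ing into the disc folder), and since the hunk ends before showing what happens above 7.95 GiB, the fallback label here is an assumption:

```python
import os


def classify_dvd_size(video_ts_dir):
    # Sum the files in the VIDEO_TS folder and convert bytes to GiB with
    # float(1 << 30), mirroring the hunk above.
    total_bytes = sum(
        os.path.getsize(os.path.join(video_ts_dir, name))
        for name in os.listdir(video_ts_dir)
        if os.path.isfile(os.path.join(video_ts_dir, name))
    )
    size = total_bytes / float(1 << 30)
    dvd_size = "DVD9"  # assumption: the fallback for oversized discs is not shown in this hunk
    if size <= 7.95:
        dvd_size = "DVD9"
    if size <= 4.37:
        dvd_size = "DVD5"
    return dvd_size
```

Because the two `if` statements cascade instead of chaining with `elif`, a disc under 4.37 GiB matches both checks and keeps the last assignment, DVD5.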
@@ -270,6 +264,6 @@ async def get_hddvd_info(self, discs): if file_size > size: largest = file size = file_size - each['evo_mi'] = MediaInfo.parse(os.path.basename(largest), output='STRING', full=False, mediainfo_options={'inform_version' : '1'}) + each['evo_mi'] = MediaInfo.parse(os.path.basename(largest), output='STRING', full=False, mediainfo_options={'inform_version': '1'}) each['largest_evo'] = os.path.abspath(f"{path}/{largest}") - return discs + return discs \ No newline at end of file diff --git a/src/exceptions.py b/src/exceptions.py index 282f64c5..5eece929 100644 --- a/src/exceptions.py +++ b/src/exceptions.py @@ -7,9 +7,10 @@ def __init__(self, *args, **kwargs): if args: # ... pass them to the super constructor super().__init__(*args, **kwargs) - else: # else, the exception was raised without arguments ... - # ... pass the default message to the super constructor - super().__init__(default_message, **kwargs) + else: # else, the exception was raised without arguments ... + # ... pass the default message to the super constructor + super().__init__(default_message, **kwargs) + class UploadException(Exception): def __init__(self, *args, **kwargs): @@ -20,14 +21,18 @@ def __init__(self, *args, **kwargs): if args: # ... pass them to the super constructor super().__init__(*args, **kwargs) - else: # else, the exception was raised without arguments ... - # ... pass the default message to the super constructor - super().__init__(default_message, **kwargs) + else: # else, the exception was raised without arguments ... + # ... pass the default message to the super constructor + super().__init__(default_message, **kwargs) class XEMNotFound(Exception): pass + + class WeirdSystem(Exception): pass + + class ManualDateException(Exception): - pass + pass \ No newline at end of file diff --git a/src/prep.py b/src/prep.py index 572fc629..a6308e94 100644 --- a/src/prep.py +++ b/src/prep.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- from src.args import Args from src.console import console -from src.exceptions import * +from src.exceptions import * # noqa: F403 from src.trackers.PTP import PTP from src.trackers.BLU import BLU from src.trackers.HDB import HDB @@ -9,14 +9,11 @@ try: import traceback - import nest_asyncio from src.discparse import DiscParse import multiprocessing import os - from os.path import basename import re import math - import sys from str2bool import str2bool import asyncio from guessit import guessit @@ -32,7 +29,7 @@ import pyimgbox from pymediainfo import MediaInfo import tmdbsimple as tmdb - from datetime import datetime, date + from datetime import datetime from difflib import SequenceMatcher import torf from torf import Torrent @@ -41,8 +38,6 @@ import anitopy import shutil from imdb import Cinemagoer - from subprocess import Popen - import subprocess import itertools import cli_ui from rich.progress import Progress, TextColumn, BarColumn, TimeRemainingColumn @@ -56,9 +51,6 @@ exit() - - - class Prep(): """ Prepare for upload: @@ -73,6 +65,209 @@ def __init__(self, screens, img_host, config): self.img_host = img_host.lower() tmdb.API_KEY = config['DEFAULT']['tmdb_api'] + async def prompt_user_for_id_selection(self, blu_tmdb=None, blu_imdb=None, blu_tvdb=None, blu_filename=None, imdb=None): + if imdb: + imdb = str(imdb).zfill(7) # Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros + console.print(f"[cyan]Found IMDb ID: https://www.imdb.com/title/tt{imdb}") + if blu_tmdb or blu_imdb or blu_tvdb: + if blu_imdb: + blu_imdb = str(blu_imdb).zfill(7) 
# Convert to string and ensure IMDb ID is 7 characters long by adding leading zeros + console.print("[cyan]Found the following IDs on BLU:") + console.print(f"TMDb ID: {blu_tmdb}") + console.print(f"IMDb ID: https://www.imdb.com/title/tt{blu_imdb}") + console.print(f"TVDb ID: {blu_tvdb}") + console.print(f"Filename: {blu_filename}") + + selection = input("Do you want to use this ID? (y/n): ").strip().lower() + return selection == 'y' + + async def prompt_user_for_confirmation(self, message): + selection = input(f"{message} (y/n): ").strip().lower() + return selection == 'y' + + async def update_metadata_from_tracker(self, tracker_name, tracker_instance, meta, search_term, search_file_folder): + tracker_key = tracker_name.lower() + manual_key = f"{tracker_key}_manual" + found_match = False + + # console.print(f"[cyan]Starting update_metadata_from_tracker for: {tracker_name}[/cyan]") + + # Handle each tracker separately + if tracker_name == "BLU": + # console.print(f"[blue]Handling BLU tracker[/blue]") + if meta.get(tracker_key) is not None: + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") + blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info( + "BLU", + tracker_instance.torrent_url, + tracker_instance.search_url, + id=meta[tracker_key] + ) + # console.print(f"[blue]BLU search by ID complete[/blue]") + if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']: + console.print(f"[green]Valid data found on {tracker_name}, setting meta values[/green]") + if await self.prompt_user_for_id_selection(blu_tmdb, blu_imdb, blu_tvdb, blu_filename): + if blu_tmdb not in [None, '0']: + meta['tmdb_manual'] = blu_tmdb + if blu_imdb not in [None, '0']: + meta['imdb'] = str(blu_imdb).zfill(7) # Pad IMDb ID with leading zeros + if blu_tvdb not in [None, '0']: + meta['tvdb_id'] = blu_tvdb + if blu_mal not in [None, '0']: + meta['mal'] = blu_mal + if blu_desc not in [None, '0', '']: + meta['blu_desc'] = blu_desc + if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: + meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() + if not meta.get('image_list'): # Only handle images if image_list is not already populated + if blu_imagelist: # Ensure blu_imagelist is not empty before setting + meta['image_list'] = blu_imagelist + if meta.get('image_list'): # Double-check if image_list is set before handling it + await self.handle_image_list(meta, tracker_name) + if blu_filename: + meta['blu_filename'] = blu_filename # Store the filename in meta for later use + found_match = True + console.print("[green]BLU data successfully updated in meta[/green]") + else: + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") + else: + console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") + else: + console.print("[yellow]No ID found in meta for BLU, searching by file name[/yellow]") + blu_tmdb, blu_imdb, blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist, blu_filename = await COMMON(self.config).unit3d_torrent_info( + "BLU", + tracker_instance.torrent_url, + tracker_instance.search_url, + file_name=search_term + ) + # console.print(f"[blue]BLU search by file name complete[/blue]") + if blu_tmdb not in [None, '0'] or blu_imdb not in [None, '0'] or blu_tvdb not in [None, '0']: + console.print(f"[green]Valid data found 
on {tracker_name} using file name, setting meta values[/green]") + if await self.prompt_user_for_id_selection(blu_tmdb, blu_imdb, blu_tvdb, blu_filename): + if blu_tmdb not in [None, '0']: + meta['tmdb_manual'] = blu_tmdb + if blu_imdb not in [None, '0']: + meta['imdb'] = str(blu_imdb).zfill(7) + if blu_tvdb not in [None, '0']: + meta['tvdb_id'] = blu_tvdb + if blu_mal not in [None, '0']: + meta['mal'] = blu_mal + if blu_desc not in [None, '0', '']: + meta['blu_desc'] = blu_desc + if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: + meta['category'] = 'TV' if blu_category.upper() == 'TV SHOW' else blu_category.upper() + if not meta.get('image_list'): # Only handle images if image_list is not already populated + if blu_imagelist: # Ensure blu_imagelist is not empty before setting + meta['image_list'] = blu_imagelist + if meta.get('image_list'): # Double-check if image_list is set before handling it + await self.handle_image_list(meta, tracker_name) + if blu_filename: + meta['blu_filename'] = blu_filename + found_match = True + console.print("[green]BLU data successfully updated in meta[/green]") + else: + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") + else: + console.print(f"[yellow]No valid data found on {tracker_name}[/yellow]") + + elif tracker_name == "PTP": + # console.print(f"[blue]Handling PTP tracker[/blue]") + + if meta.get('ptp') is None: + # console.print(f"[yellow]No PTP ID in meta, searching by search term[/yellow]") + imdb, ptp_torrent_id, meta['ext_torrenthash'] = await tracker_instance.get_ptp_id_imdb(search_term, search_file_folder) + if ptp_torrent_id: + meta['ptp'] = ptp_torrent_id + meta['imdb'] = str(imdb).zfill(7) if imdb else None + else: + ptp_torrent_id = meta['ptp'] + console.print(f"[cyan]PTP ID found in meta: {ptp_torrent_id}, using it to get IMDb ID[/cyan]") + imdb, _, meta['ext_torrenthash'] = await tracker_instance.get_imdb_from_torrent_id(ptp_torrent_id) + if imdb: + meta['imdb'] = str(imdb).zfill(7) + console.print(f"[green]IMDb ID found: tt{meta['imdb']}[/green]") + else: + console.print(f"[yellow]Could not find IMDb ID using PTP ID: {ptp_torrent_id}[/yellow]") + + if meta.get('imdb') and await self.prompt_user_for_id_selection(imdb=meta['imdb']): + console.print(f"[green]{tracker_name} IMDb ID found: tt{meta['imdb']}[/green]") + found_match = True + + ptp_desc, ptp_imagelist = await tracker_instance.get_ptp_description(meta['ptp'], meta.get('is_disc', False)) + if ptp_desc.strip(): + meta['description'] = ptp_desc + if not meta.get('image_list'): # Only handle images if image_list is not already populated + meta['image_list'] = ptp_imagelist + if meta.get('image_list'): + await self.handle_image_list(meta, tracker_name) + meta['skip_gen_desc'] = True + console.print("[green]PTP description and images added to metadata.[/green]") + + if await self.prompt_user_for_confirmation("Do you want to keep the description from PTP?"): + meta['skip_gen_desc'] = True + found_match = True + else: + console.print("[yellow]Description discarded from PTP[/yellow]") + meta['skip_gen_desc'] = True + meta['description'] = None + else: + console.print(f"[yellow]Skipped {tracker_name}, moving to the next site.[/yellow]") + meta['skip_gen_desc'] = True + return meta, found_match + + elif tracker_name == "HDB": + if meta.get('hdb') is not None: + meta[manual_key] = meta[tracker_key] + console.print(f"[cyan]{tracker_name} ID found in meta, reusing existing ID: {meta[tracker_key]}[/cyan]") + + # Use get_info_from_torrent_id function if 
ID is found in meta + imdb, tvdb_id, hdb_name, meta['ext_torrenthash'] = await tracker_instance.get_info_from_torrent_id(meta[tracker_key]) + + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') + meta['hdb_name'] = hdb_name + found_match = True + + else: + console.print("[yellow]No ID found in meta for HDB, searching by file name[/yellow]") + + # Use search_filename function if ID is not found in meta + imdb, tvdb_id, hdb_name, meta['ext_torrenthash'], tracker_id = await tracker_instance.search_filename(search_term, search_file_folder) + + meta['tvdb_id'] = str(tvdb_id) if tvdb_id else meta.get('tvdb_id') + meta['hdb_name'] = hdb_name + if tracker_id: + meta[tracker_key] = tracker_id + found_match = True + + if found_match: + if imdb or tvdb_id or hdb_name: + console.print(f"[green]{tracker_name} data found: IMDb ID: {imdb}, TVDb ID: {meta['tvdb_id']}, HDB Name: {meta['hdb_name']}[/green]") + if await self.prompt_user_for_confirmation(f"Do you want to keep the data found on {tracker_name}?"): + console.print(f"[green]{tracker_name} data retained.[/green]") + else: + console.print(f"[yellow]{tracker_name} data discarded.[/yellow]") + meta[tracker_key] = None + meta['tvdb_id'] = None + meta['hdb_name'] = None + found_match = False + else: + # console.print(f"[yellow]Could not find a matching release on {tracker_name}.[/yellow]") + found_match = False + + # console.print(f"[cyan]Finished processing tracker: {tracker_name} with found_match: {found_match}[/cyan]") + return meta, found_match + + async def handle_image_list(self, meta, tracker_name): + if meta.get('image_list'): + console.print(f"[cyan]Found the following images from {tracker_name}:") + for img in meta['image_list']: + console.print(f"[blue]{img}[/blue]") + keep_images = await self.prompt_user_for_confirmation(f"Do you want to keep the images found on {tracker_name}?") + if not keep_images: + meta['image_list'] = [] + console.print(f"[yellow]Images discarded from {tracker_name}") + else: + console.print(f"[green]Images retained from {tracker_name}") async def gather_prep(self, meta, mode): meta['mode'] = mode @@ -80,258 +275,227 @@ async def gather_prep(self, meta, mode): meta['isdir'] = os.path.isdir(meta['path']) base_dir = meta['base_dir'] - if meta.get('uuid', None) == None: + if meta.get('uuid', None) is None: folder_id = os.path.basename(meta['path']) - meta['uuid'] = folder_id + meta['uuid'] = folder_id if not os.path.exists(f"{base_dir}/tmp/{meta['uuid']}"): Path(f"{base_dir}/tmp/{meta['uuid']}").mkdir(parents=True, exist_ok=True) - + if meta['debug']: console.print(f"[cyan]ID: {meta['uuid']}") - meta['is_disc'], videoloc, bdinfo, meta['discs'] = await self.get_disc(meta) - - # If BD: + + # Debugging information + # console.print(f"Debug: meta['filelist'] before population: {meta.get('filelist', 'Not Set')}") + if meta['is_disc'] == "BDMV": video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) - meta['filelist'] = [] + meta['filelist'] = [] # No filelist for discs, use path + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' try: - guess_name = bdinfo['title'].replace('-',' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + guess_name = bdinfo['title'].replace('-', ' ') + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] untouched_filename = bdinfo['title'] try: meta['search_year'] = 
guessit(bdinfo['title'])['year'] except Exception: meta['search_year'] = "" except Exception: - guess_name = bdinfo['label'].replace('-',' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes" : ["country", "language"]})['title'] + guess_name = bdinfo['label'].replace('-', ' ') + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]})['title'] untouched_filename = bdinfo['label'] try: meta['search_year'] = guessit(bdinfo['label'])['year'] except Exception: meta['search_year'] = "" - if meta.get('resolution', None) == None: + if meta.get('resolution', None) is None: meta['resolution'] = self.mi_resolution(bdinfo['video'][0]['res'], guessit(video), width="OTHER", scan="p", height="OTHER", actual_height=0) - # if meta.get('sd', None) == None: meta['sd'] = self.is_sd(meta['resolution']) mi = None - mi_dump = None - #IF DVD + elif meta['is_disc'] == "DVD": video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) meta['filelist'] = [] - guess_name = meta['discs'][0]['path'].replace('-',' ') - # filename = guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name))['title'] - filename = guessit(guess_name, {"excludes" : ["country", "language"]})['title'] + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' + guess_name = meta['discs'][0]['path'].replace('-', ' ') + filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] untouched_filename = os.path.basename(os.path.dirname(meta['discs'][0]['path'])) try: meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] except Exception: meta['search_year'] = "" - if meta.get('edit', False) == False: + if not meta.get('edit', False): mi = self.exportInfo(f"{meta['discs'][0]['path']}/VTS_{meta['discs'][0]['main_set'][0][:2]}_1.VOB", False, meta['uuid'], meta['base_dir'], export_text=False) meta['mediainfo'] = mi else: mi = meta['mediainfo'] - - #NTSC/PAL + meta['dvd_size'] = await self.get_dvd_size(meta['discs']) meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) meta['sd'] = self.is_sd(meta['resolution']) + elif meta['is_disc'] == "HDDVD": video, meta['scene'], meta['imdb'] = self.is_scene(meta['path'], meta.get('imdb', None)) meta['filelist'] = [] - guess_name = meta['discs'][0]['path'].replace('-','') - filename = guessit(guess_name, {"excludes" : ["country", "language"]})['title'] + search_term = os.path.basename(meta['path']) + search_file_folder = 'folder' + guess_name = meta['discs'][0]['path'].replace('-', '') + filename = guessit(guess_name, {"excludes": ["country", "language"]})['title'] untouched_filename = os.path.basename(meta['discs'][0]['path']) videopath = meta['discs'][0]['largest_evo'] try: meta['search_year'] = guessit(meta['discs'][0]['path'])['year'] except Exception: meta['search_year'] = "" - if meta.get('edit', False) == False: + if not meta.get('edit', False): mi = self.exportInfo(meta['discs'][0]['largest_evo'], False, meta['uuid'], meta['base_dir'], export_text=False) meta['mediainfo'] = mi else: mi = meta['mediainfo'] meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) meta['sd'] = self.is_sd(meta['resolution']) - #If NOT BD/DVD/HDDVD + else: - videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) + videopath, meta['filelist'] = self.get_video(videoloc, meta.get('mode', 'discord')) + search_term = os.path.basename(meta['filelist'][0]) if meta['filelist'] else None + search_file_folder = 
'file' video, meta['scene'], meta['imdb'] = self.is_scene(videopath, meta.get('imdb', None)) - guess_name = ntpath.basename(video).replace('-',' ') - filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes" : ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes" : ["country", "language"]})["title"]) + guess_name = ntpath.basename(video).replace('-', ' ') + filename = guessit(re.sub(r"[^0-9a-zA-Z\[\\]]+", " ", guess_name), {"excludes": ["country", "language"]}).get("title", guessit(re.sub("[^0-9a-zA-Z]+", " ", guess_name), {"excludes": ["country", "language"]})["title"]) untouched_filename = os.path.basename(video) try: meta['search_year'] = guessit(video)['year'] except Exception: meta['search_year'] = "" - - if meta.get('edit', False) == False: + + if not meta.get('edit', False): mi = self.exportInfo(videopath, meta['isdir'], meta['uuid'], base_dir, export_text=True) meta['mediainfo'] = mi else: mi = meta['mediainfo'] - if meta.get('resolution', None) == None: + if meta.get('resolution', None) is None: meta['resolution'] = self.get_resolution(guessit(video), meta['uuid'], base_dir) - # if meta.get('sd', None) == None: meta['sd'] = self.is_sd(meta['resolution']) - - - if " AKA " in filename.replace('.',' '): + if " AKA " in filename.replace('.', ' '): filename = filename.split('AKA')[0] meta['filename'] = filename meta['bdinfo'] = bdinfo - + # Debugging information after population + # console.print(f"Debug: meta['filelist'] after population: {meta.get('filelist', 'Not Set')}") + # Reuse information from trackers with fallback + found_match = False + if search_term: + # console.print(f"[blue]Starting search with search_term: {search_term}[/blue]") + default_trackers = self.config['TRACKERS'].get('default_trackers', "").split(", ") - # Reuse information from other trackers - if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": - ptp = PTP(config=self.config) - if meta.get('ptp', None) != None: - meta['ptp_manual'] = meta['ptp'] - meta['imdb'], meta['ext_torrenthash'] = await ptp.get_imdb_from_torrent_id(meta['ptp']) - else: - if meta['is_disc'] in [None, ""]: - ptp_search_term = os.path.basename(meta['filelist'][0]) - search_file_folder = 'file' - else: - search_file_folder = 'folder' - ptp_search_term = os.path.basename(meta['path']) - ptp_imdb, ptp_id, meta['ext_torrenthash'] = await ptp.get_ptp_id_imdb(ptp_search_term, search_file_folder) - if ptp_imdb != None: - meta['imdb'] = ptp_imdb - if ptp_id != None: - meta['ptp'] = ptp_id - - if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": - hdb = HDB(config=self.config) - if meta.get('ptp', None) == None or meta.get('hdb', None) != None: - hdb_imdb = hdb_tvdb = hdb_id = None - hdb_id = meta.get('hdb') - if hdb_id != None: - meta['hdb_manual'] = hdb_id - hdb_imdb, hdb_tvdb, meta['hdb_name'], meta['ext_torrenthash'] = await hdb.get_info_from_torrent_id(hdb_id) - else: - if meta['is_disc'] in [None, ""]: - hdb_imdb, hdb_tvdb, meta['hdb_name'], meta['ext_torrenthash'], hdb_id = await hdb.search_filename(meta['filelist']) - else: - # Somehow search for disc - pass - if hdb_imdb != None: - meta['imdb'] = str(hdb_imdb) - if hdb_tvdb != None: - meta['tvdb_id'] = str(hdb_tvdb) - if hdb_id != None: - meta['hdb'] = hdb_id - - if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": - blu = BLU(config=self.config) - if meta.get('blu', None) != None: - meta['blu_manual'] = meta['blu'] - blu_tmdb, blu_imdb, 
blu_tvdb, blu_mal, blu_desc, blu_category, meta['ext_torrenthash'], blu_imagelist = await COMMON(self.config).unit3d_torrent_info("BLU", blu.torrent_url, meta['blu']) - if blu_tmdb not in [None, '0']: - meta['tmdb_manual'] = blu_tmdb - if blu_imdb not in [None, '0']: - meta['imdb'] = str(blu_imdb) - if blu_tvdb not in [None, '0']: - meta['tvdb_id'] = blu_tvdb - if blu_mal not in [None, '0']: - meta['mal'] = blu_mal - if blu_desc not in [None, '0', '']: - meta['blu_desc'] = blu_desc - if blu_category.upper() in ['MOVIE', 'TV SHOW', 'FANRES']: - if blu_category.upper() == 'TV SHOW': - meta['category'] = 'TV' - else: - meta['category'] = blu_category.upper() - if meta.get('image_list', []) == []: - meta['image_list'] = blu_imagelist + if "PTP" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching PTP for: {search_term}[/blue]") + ptp = PTP(config=self.config) + meta, match = await self.update_metadata_from_tracker('PTP', ptp, meta, search_term, search_file_folder) + if match: + found_match = True + # console.print(f"[blue]PTP search complete, found_match: {found_match}[/blue]") + + if "HDB" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('HDB', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching HDB for: {search_term}[/blue]") + hdb = HDB(config=self.config) + meta, match = await self.update_metadata_from_tracker('HDB', hdb, meta, search_term, search_file_folder) + if match: + found_match = True + # console.print(f"[blue]HDB search complete, found_match: {found_match}[/blue]") + + if "BLU" in default_trackers and not found_match: + if str(self.config['TRACKERS'].get('BLU', {}).get('useAPI')).lower() == "true": + # console.print(f"[blue]Searching BLU for: {search_term}[/blue]") + blu = BLU(config=self.config) + meta, match = await self.update_metadata_from_tracker('BLU', blu, meta, search_term, search_file_folder) + if match: + found_match = True + # console.print(f"[blue]BLU search complete, found_match: {found_match}[/blue]") + + if not found_match: + console.print("[yellow]No matches found on any trackers.[/yellow]") else: - # Seach automatically - pass - - - - + console.print(f"[green]Match found: {found_match}[/green]") + else: + console.print("[yellow]Warning: No valid search term available, skipping tracker updates.[/yellow]") # Take Screenshots if meta['is_disc'] == "BDMV": - if meta.get('edit', False) == False: - if meta.get('vapoursynth', False) == True: + if meta.get('edit', False) is False: + if meta.get('vapoursynth', False) is True: use_vs = True else: use_vs = False try: ds = multiprocessing.Process(target=self.disc_screenshots, args=(filename, bdinfo, meta['uuid'], base_dir, use_vs, meta.get('image_list', []), meta.get('ffdebug', False), None)) ds.start() - while ds.is_alive() == True: + while ds.is_alive() is True: await asyncio.sleep(1) except KeyboardInterrupt: - ds.terminate() + ds.terminate() elif meta['is_disc'] == "DVD": - if meta.get('edit', False) == False: + if meta.get('edit', False) is False: try: ds = multiprocessing.Process(target=self.dvd_screenshots, args=(meta, 0, None)) ds.start() - while ds.is_alive() == True: + while ds.is_alive() is True: await asyncio.sleep(1) except KeyboardInterrupt: ds.terminate() else: - if meta.get('edit', False) == False: + if meta.get('edit', False) is False: try: s = multiprocessing.Process(target=self.screenshots, args=(videopath, filename, meta['uuid'], base_dir, meta)) 
s.start() - while s.is_alive() == True: + while s.is_alive() is True: await asyncio.sleep(3) except KeyboardInterrupt: s.terminate() - - - meta['tmdb'] = meta.get('tmdb_manual', None) - if meta.get('type', None) == None: + if meta.get('type', None) is None: meta['type'] = self.get_type(video, meta['scene'], meta['is_disc']) - if meta.get('category', None) == None: + if meta.get('category', None) is None: meta['category'] = self.get_cat(video) else: meta['category'] = meta['category'].upper() - if meta.get('tmdb', None) == None and meta.get('imdb', None) == None: - meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) - if meta.get('tmdb', None) == None and meta.get('imdb', None) == None: + if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: + meta['category'], meta['tmdb'], meta['imdb'] = self.get_tmdb_imdb_from_mediainfo(mi, meta['category'], meta['is_disc'], meta['tmdb'], meta['imdb']) + if meta.get('tmdb', None) is None and meta.get('imdb', None) is None: meta = await self.get_tmdb_id(filename, meta['search_year'], meta, meta['category'], untouched_filename) - elif meta.get('imdb', None) != None and meta.get('tmdb_manual', None) == None: + elif meta.get('imdb', None) is not None and meta.get('tmdb_manual', None) is None: meta['imdb_id'] = str(meta['imdb']).replace('tt', '') meta = await self.get_tmdb_from_imdb(meta, filename) else: meta['tmdb_manual'] = meta.get('tmdb', None) - # If no tmdb, use imdb for meta if int(meta['tmdb']) == 0: meta = await self.imdb_other_meta(meta) else: meta = await self.tmdb_other_meta(meta) # Search tvmaze - meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id','0'), meta.get('tvdb_id', 0)) + meta['tvmaze_id'], meta['imdb_id'], meta['tvdb_id'] = await self.search_tvmaze(filename, meta['search_year'], meta.get('imdb_id', '0'), meta.get('tvdb_id', 0)) # If no imdb, search for it - if meta.get('imdb_id', None) == None: + if meta.get('imdb_id', None) is None: meta['imdb_id'] = await self.search_imdb(filename, meta['search_year']) - if meta.get('imdb_info', None) == None and int(meta['imdb_id']) != 0: + if meta.get('imdb_info', None) is None and int(meta['imdb_id']) != 0: meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta) - if meta.get('tag', None) == None: + if meta.get('tag', None) is None: meta['tag'] = self.get_tag(video, meta) else: if not meta['tag'].startswith('-') and meta['tag'] != "": @@ -352,33 +516,25 @@ async def gather_prep(self, meta, mode): meta['uhd'] = self.get_uhd(meta['type'], guessit(meta['path']), meta['resolution'], meta['path']) meta['hdr'] = self.get_hdr(mi, bdinfo) meta['distributor'] = self.get_distributor(meta['distributor']) - if meta.get('is_disc', None) == "BDMV": #Blu-ray Specific + if meta.get('is_disc', None) == "BDMV": # Blu-ray Specific meta['region'] = self.get_region(bdinfo, meta.get('region', None)) meta['video_codec'] = self.get_video_codec(bdinfo) else: meta['video_encode'], meta['video_codec'], meta['has_encode_settings'], meta['bit_depth'] = self.get_video_encode(mi, meta['type'], bdinfo) - + meta['edition'], meta['repack'] = self.get_edition(meta['path'], bdinfo, meta['filelist'], meta.get('manual_edition')) if "REPACK" in meta.get('edition', ""): meta['repack'] = re.search(r"REPACK[\d]?", meta['edition'])[0] meta['edition'] = re.sub(r"REPACK[\d]?", "", meta['edition']).strip().replace(' ', ' ') - - - - #WORK ON THIS + 
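For reference, the REPACK split a few lines up (`meta['repack']` / `meta['edition']`) reduces to the pair of regex calls sketched below. `split_repack` and the sample strings are invented for illustration, and the trailing `.replace('  ', ' ')` double-space cleanup is an assumption, since the whitespace in the hunk does not survive flattening:

```python
import re


def split_repack(edition):
    # Same guard as the diff: only touch the string when a REPACK marker
    # exists, so re.search below can never return None.
    repack = ""
    if "REPACK" in edition:
        repack = re.search(r"REPACK[\d]?", edition)[0]
        # Strip the marker, then tidy the leftover whitespace.
        edition = re.sub(r"REPACK[\d]?", "", edition).strip().replace('  ', ' ')
    return edition, repack


print(split_repack("Directors Cut REPACK2"))  # ('Directors Cut', 'REPACK2')
print(split_repack("Extended"))               # ('Extended', '')
```

The optional `[\d]` in the pattern is what lets numbered repacks like REPACK2 travel into `meta['repack']` as a unit instead of leaving the digit behind in the edition string.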
+ # WORK ON THIS meta.get('stream', False) meta['stream'] = self.stream_optimized(meta['stream']) meta.get('anon', False) meta['anon'] = self.is_anon(meta['anon']) - - - meta = await self.gen_desc(meta) return meta - - - """ Determine if disc and if so, get bdinfo """ @@ -386,45 +542,45 @@ async def get_disc(self, meta): is_disc = None videoloc = meta['path'] bdinfo = None - bd_summary = None + bd_summary = None # noqa: F841 discs = [] parse = DiscParse() for path, directories, files in os. walk(meta['path']): for each in directories: - if each.upper() == "BDMV": #BDMVs + if each.upper() == "BDMV": # BDMVs is_disc = "BDMV" disc = { - 'path' : f"{path}/{each}", - 'name' : os.path.basename(path), - 'type' : 'BDMV', - 'summary' : "", - 'bdinfo' : "" + 'path': f"{path}/{each}", + 'name': os.path.basename(path), + 'type': 'BDMV', + 'summary': "", + 'bdinfo': "" } discs.append(disc) - elif each == "VIDEO_TS": #DVDs + elif each == "VIDEO_TS": # DVDs is_disc = "DVD" disc = { - 'path' : f"{path}/{each}", - 'name' : os.path.basename(path), - 'type' : 'DVD', - 'vob_mi' : '', - 'ifo_mi' : '', - 'main_set' : [], - 'size' : "" + 'path': f"{path}/{each}", + 'name': os.path.basename(path), + 'type': 'DVD', + 'vob_mi': '', + 'ifo_mi': '', + 'main_set': [], + 'size': "" } discs.append(disc) elif each == "HVDVD_TS": is_disc = "HDDVD" disc = { - 'path' : f"{path}/{each}", - 'name' : os.path.basename(path), - 'type' : 'HDDVD', - 'evo_mi' : '', - 'largest_evo' : "" + 'path': f"{path}/{each}", + 'name': os.path.basename(path), + 'type': 'HDDVD', + 'evo_mi': '', + 'largest_evo': "" } discs.append(disc) if is_disc == "BDMV": - if meta.get('edit', False) == False: + if meta.get('edit', False) is False: discs, bdinfo = await parse.get_bdinfo(discs, meta['uuid'], meta['base_dir'], meta.get('discs', [])) else: discs, bdinfo = await parse.get_bdinfo(meta['discs'], meta['uuid'], meta['base_dir'], meta['discs']) @@ -441,9 +597,6 @@ async def get_disc(self, meta): discs = sorted(discs, key=lambda d: d['name']) return is_disc, videoloc, bdinfo, discs - - - """ Get video files @@ -457,7 +610,7 @@ def get_video(self, videoloc, mode): if not file.lower().endswith('sample.mkv') or "!sample" in file.lower(): filelist.append(os.path.abspath(f"{videoloc}{os.sep}{file}")) try: - video = sorted(filelist)[0] + video = sorted(filelist)[0] except IndexError: console.print("[bold red]No Video files found") if mode == 'cli': @@ -468,8 +621,6 @@ def get_video(self, videoloc, mode): filelist = sorted(filelist) return video, filelist - - """ Get and parse mediainfo """ @@ -482,7 +633,7 @@ def filter_mediainfo(data): "track": [] } } - + for track in data["media"]["track"]: if track["@type"] == "General": filtered["media"]["track"].append({ @@ -628,7 +779,7 @@ def filter_mediainfo(data): "@type": track["@type"], "extra": track.get("extra"), }) - + return filtered if not os.path.exists(f"{base_dir}/tmp/{folder_id}/MEDIAINFO.txt") and export_text: @@ -651,9 +802,8 @@ def filter_mediainfo(data): with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", 'r', encoding='utf-8') as f: mi = json.load(f) - - return mi + return mi """ Get Resolution @@ -665,13 +815,13 @@ def get_resolution(self, guess, folder_id, base_dir): try: width = mi['media']['track'][1]['Width'] height = mi['media']['track'][1]['Height'] - except: + except Exception: width = 0 height = 0 framerate = mi['media']['track'][1].get('FrameRate', '') try: scan = mi['media']['track'][1]['ScanType'] - except: + except Exception: scan = "Progressive" if scan == "Progressive": scan = "p" @@ 
-700,56 +850,54 @@ def closest(self, lst, K): res = each break return res - + # return lst[min(range(len(lst)), key = lambda i: abs(lst[i]-K))] def mi_resolution(self, res, guess, width, scan, height, actual_height): res_map = { - "3840x2160p" : "2160p", "2160p" : "2160p", - "2560x1440p" : "1440p", "1440p" : "1440p", - "1920x1080p" : "1080p", "1080p" : "1080p", - "1920x1080i" : "1080i", "1080i" : "1080i", - "1280x720p" : "720p", "720p" : "720p", - "1280x540p" : "720p", "1280x576p" : "720p", - "1024x576p" : "576p", "576p" : "576p", - "1024x576i" : "576i", "576i" : "576i", - "854x480p" : "480p", "480p" : "480p", - "854x480i" : "480i", "480i" : "480i", - "720x576p" : "576p", "576p" : "576p", - "720x576i" : "576i", "576i" : "576i", - "720x480p" : "480p", "480p" : "480p", - "720x480i" : "480i", "480i" : "480i", - "15360x8640p" : "8640p", "8640p" : "8640p", - "7680x4320p" : "4320p", "4320p" : "4320p", - "OTHER" : "OTHER"} + "3840x2160p": "2160p", "2160p": "2160p", + "2560x1440p": "1440p", "1440p": "1440p", + "1920x1080p": "1080p", "1080p": "1080p", + "1920x1080i": "1080i", "1080i": "1080i", + "1280x720p": "720p", "720p": "720p", + "1280x540p": "720p", "1280x576p": "720p", + "1024x576p": "576p", "576p": "576p", + "1024x576i": "576i", "576i": "576i", + "854x480p": "480p", "480p": "480p", + "854x480i": "480i", "480i": "480i", + "720x576p": "576p", "576p": "576p", + "720x576i": "576i", "576i": "576i", + "720x480p": "480p", "480p": "480p", + "720x480i": "480i", "480i": "480i", + "15360x8640p": "8640p", "8640p": "8640p", + "7680x4320p": "4320p", "4320p": "4320p", + "OTHER": "OTHER"} resolution = res_map.get(res, None) if actual_height == 540: resolution = "OTHER" - if resolution == None: - try: + if resolution is None: + try: resolution = guess['screen_size'] - except: + except Exception: width_map = { - '3840p' : '2160p', - '2560p' : '1550p', - '1920p' : '1080p', - '1920i' : '1080i', - '1280p' : '720p', - '1024p' : '576p', - '1024i' : '576i', - '854p' : '480p', - '854i' : '480i', - '720p' : '576p', - '720i' : '576i', - '15360p' : '4320p', - 'OTHERp' : 'OTHER' + '3840p': '2160p', + '2560p': '1550p', + '1920p': '1080p', + '1920i': '1080i', + '1280p': '720p', + '1024p': '576p', + '1024i': '576i', + '854p': '480p', + '854i': '480i', + '720p': '576p', + '720i': '576i', + '15360p': '4320p', + 'OTHERp': 'OTHER' } resolution = width_map.get(f"{width}{scan}", "OTHER") resolution = self.mi_resolution(resolution, guess, width, scan, height, actual_height) - + return resolution - - def is_sd(self, resolution): if resolution in ("480i", "480p", "576i", "576p", "540p"): @@ -775,7 +923,7 @@ def is_scene(self, video, imdb=None): scene = True r = requests.get(f"https://api.srrdb.com/v1/imdb/{base}") r = r.json() - if r['releases'] != [] and imdb == None: + if r['releases'] != [] and imdb is None: imdb = r['releases'][0].get('imdb', imdb) if r['releases'][0].get('imdb') is not None else imdb console.print(f"[green]SRRDB: Matched to {response['results'][0]['release']}") except Exception: @@ -784,24 +932,17 @@ def is_scene(self, video, imdb=None): console.print("[yellow]SRRDB: No match found, or request has timed out") return video, scene, imdb - - - - - - - """ Generate Screenshots """ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_list, ffdebug, num_screens=None): - if num_screens == None: + if num_screens is None: num_screens = self.screens if num_screens == 0 or len(image_list) >= num_screens: return - #Get longest m2ts - length = 0 + # Get longest m2ts + length = 0 for each in 
bdinfo['files']: int_length = sum(int(float(x)) * 60 ** i for i, x in enumerate(reversed(each['length'].split(':')))) if int_length > length: @@ -810,89 +951,88 @@ def disc_screenshots(self, filename, bdinfo, folder_id, base_dir, use_vs, image_ for name in files: if name.lower() == each['file'].lower(): file = f"{root}/{name}" - - + if "VC-1" in bdinfo['video'][0]['codec'] or bdinfo['video'][0]['hdr_dv'] != "": keyframe = 'nokey' else: keyframe = 'none' - os.chdir(f"{base_dir}/tmp/{folder_id}") - i = len(glob.glob(f"{filename}-*.png")) + os.chdir(f"{base_dir}/tmp/{folder_id}") + i = len(glob.glob(f"{filename}-*.png")) if i >= num_screens: i = num_screens console.print('[bold green]Reusing screenshots') else: console.print("[bold yellow]Saving Screens...") - if use_vs == True: + if use_vs is True: from src.vs import vs_screengn vs_screengn(source=file, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") else: - if bool(ffdebug) == True: + if bool(ffdebug) is True: loglevel = 'verbose' debug = False else: loglevel = 'quiet' debug = True - with Progress( + with Progress( TextColumn("[bold green]Saving Screens..."), BarColumn(), "[cyan]{task.completed}/{task.total}", TimeRemainingColumn() ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - ss_times = [] - for i in range(num_screens + 1): - image = f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png" - try: - ss_times = self.valid_ss_time(ss_times, num_screens+1, length) - ( - ffmpeg - .input(file, ss=ss_times[-1], skip_frame=keyframe) - .output(image, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run(quiet=debug) - ) - except Exception: - console.print(traceback.format_exc()) - - self.optimize_images(image) - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": - i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: - i += 1 - elif os.path.getsize(Path(image)) <= 75000: - console.print("[bold yellow]Image is incredibly small, retaking") - time.sleep(1) - elif self.img_host == "ptpimg": - i += 1 - elif self.img_host == "lensdump": - i += 1 - else: - console.print("[red]Image too large for your image host, retaking") - time.sleep(1) - progress.advance(screen_task) - #remove smallest image + screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) + ss_times = [] + for i in range(num_screens + 1): + image = f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png" + try: + ss_times = self.valid_ss_time(ss_times, num_screens + 1, length) + ( + ffmpeg + .input(file, ss=ss_times[-1], skip_frame=keyframe) + .output(image, vframes=1, pix_fmt="rgb24") + .overwrite_output() + .global_args('-loglevel', loglevel) + .run(quiet=debug) + ) + except Exception: + console.print(traceback.format_exc()) + + self.optimize_images(image) + if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": + i += 1 + elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: + i += 1 + elif os.path.getsize(Path(image)) <= 75000: + console.print("[bold yellow]Image is incredibly small, retaking") + time.sleep(1) + elif self.img_host == "ptpimg": + i += 1 + elif self.img_host == "lensdump": + i += 1 + else: + console.print("[red]Image too large for your image host, retaking") + time.sleep(1) + progress.advance(screen_task) + # remove smallest image smallest = "" smallestsize = 99 ** 99 - for screens in 
glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"): + for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"): screensize = os.path.getsize(screens) if screensize < smallestsize: smallestsize = screensize smallest = screens - os.remove(smallest) - + os.remove(smallest) + def dvd_screenshots(self, meta, disc_num, num_screens=None): - if num_screens == None: + if num_screens is None: num_screens = self.screens if num_screens == 0 or (len(meta.get('image_list', [])) >= num_screens and disc_num == 0): return - ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version' : '1'}) + ifo_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{meta['discs'][disc_num]['main_set'][0][:2]}_0.IFO", mediainfo_options={'inform_version': '1'}) sar = 1 for track in ifo_mi.tracks: if track.track_type == "Video": - length = float(track.duration)/1000 + length = float(track.duration)/1000 # noqa F841 par = float(track.pixel_aspect_ratio) dar = float(track.display_aspect_ratio) width = float(track.width) @@ -907,7 +1047,7 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): sar = par w_sar = sar h_sar = 1 - + main_set_length = len(meta['discs'][disc_num]['main_set']) if main_set_length >= 3: main_set = meta['discs'][disc_num]['main_set'][1:-1] @@ -917,108 +1057,109 @@ def dvd_screenshots(self, meta, disc_num, num_screens=None): main_set = meta['discs'][disc_num]['main_set'] n = 0 os.chdir(f"{meta['base_dir']}/tmp/{meta['uuid']}") - i = 0 + i = 0 if len(glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-*.png")) >= num_screens: i = num_screens console.print('[bold green]Reusing screenshots') else: - if bool(meta.get('ffdebug', False)) == True: + if bool(meta.get('ffdebug', False)) is True: loglevel = 'verbose' debug = False looped = 0 retake = False with Progress( - TextColumn("[bold green]Saving Screens..."), - BarColumn(), - "[cyan]{task.completed}/{task.total}", - TimeRemainingColumn() - ) as progress: - screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) - ss_times = [] - for i in range(num_screens + 1): - if n >= len(main_set): - n = 0 - if n >= num_screens: - n -= num_screens - image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" - if not os.path.exists(image) or retake != False: - retake = False - loglevel = 'quiet' - debug = True - if bool(meta.get('debug', False)): - loglevel = 'error' - debug = False - def _is_vob_good(n, loops, num_screens): - voblength = 300 - vob_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", output='JSON') - vob_mi = json.loads(vob_mi) + TextColumn("[bold green]Saving Screens..."), + BarColumn(), + "[cyan]{task.completed}/{task.total}", + TimeRemainingColumn() + ) as progress: + screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) + ss_times = [] + for i in range(num_screens + 1): + if n >= len(main_set): + n = 0 + if n >= num_screens: + n -= num_screens + image = f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['discs'][disc_num]['name']}-{i}.png" + if not os.path.exists(image) or retake is not False: + retake = False + loglevel = 'quiet' + debug = True + if bool(meta.get('debug', False)): + loglevel = 'error' + debug = False + + def _is_vob_good(n, loops, num_screens): + voblength = 300 + vob_mi = MediaInfo.parse(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", output='JSON') + 
vob_mi = json.loads(vob_mi) + try: + voblength = float(vob_mi['media']['track'][1]['Duration']) + return voblength, n + except Exception: try: - voblength = float(vob_mi['media']['track'][1]['Duration']) + voblength = float(vob_mi['media']['track'][2]['Duration']) return voblength, n except Exception: - try: - voblength = float(vob_mi['media']['track'][2]['Duration']) + n += 1 + if n >= len(main_set): + n = 0 + if n >= num_screens: + n -= num_screens + if loops < 6: + loops = loops + 1 + voblength, n = _is_vob_good(n, loops, num_screens) return voblength, n - except Exception: - n += 1 - if n >= len(main_set): - n = 0 - if n >= num_screens: - n -= num_screens - if loops < 6: - loops = loops + 1 - voblength, n = _is_vob_good(n, loops, num_screens) - return voblength, n - else: - return 300, n - try: - voblength, n = _is_vob_good(n, 0, num_screens) - img_time = random.randint(round(voblength/5) , round(voblength - voblength/5)) - ss_times = self.valid_ss_time(ss_times, num_screens+1, voblength) - ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[-1]) - if w_sar != 1 or h_sar != 1: - ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) - ( - ff - .output(image, vframes=1, pix_fmt="rgb24") - .overwrite_output() - .global_args('-loglevel', loglevel) - .run(quiet=debug) - ) - except Exception: - console.print(traceback.format_exc()) - self.optimize_images(image) - n += 1 - try: - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": - i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost', 'oeimg']: - i += 1 - elif os.path.getsize(Path(image)) <= 75000: - console.print("[yellow]Image is incredibly small (and is most likely to be a single color), retaking") - retake = True - time.sleep(1) - elif self.img_host == "ptpimg": - i += 1 - elif self.img_host == "lensdump": - i += 1 - elif self.img_host == "ptscreens": - i += 1 - else: - console.print("[red]Image too large for your image host, retaking") - retake = True - time.sleep(1) - looped = 0 - except Exception: - if looped >= 25: - console.print('[red]Failed to take screenshots') - exit() - looped += 1 - progress.advance(screen_task) - #remove smallest image + else: + return 300, n + try: + voblength, n = _is_vob_good(n, 0, num_screens) + # img_time = random.randint(round(voblength/5), round(voblength - voblength/5)) + ss_times = self.valid_ss_time(ss_times, num_screens + 1, voblength) + ff = ffmpeg.input(f"{meta['discs'][disc_num]['path']}/VTS_{main_set[n]}", ss=ss_times[-1]) + if w_sar != 1 or h_sar != 1: + ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) + ( + ff + .output(image, vframes=1, pix_fmt="rgb24") + .overwrite_output() + .global_args('-loglevel', loglevel) + .run(quiet=debug) + ) + except Exception: + console.print(traceback.format_exc()) + self.optimize_images(image) + n += 1 + try: + if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb": + i += 1 + elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost']: + i += 1 + elif os.path.getsize(Path(image)) <= 75000: + console.print("[yellow]Image is incredibly small (and is most likely to be a single color), retaking") + retake = True + time.sleep(1) + elif self.img_host == "ptpimg": + i += 1 + elif self.img_host == "lensdump": + i += 1 + elif self.img_host == "ptscreens": + i += 1 + else: + console.print("[red]Image too large for your image host, retaking") + retake = True + 
time.sleep(1) + looped = 0 + except Exception: + if looped >= 25: + console.print('[red]Failed to take screenshots') + exit() + looped += 1 + progress.advance(screen_task) + # remove smallest image smallest = "" smallestsize = 99**99 - for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): + for screens in glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}/", f"{meta['discs'][disc_num]['name']}-*"): screensize = os.path.getsize(screens) if screensize < smallestsize: smallestsize = screensize @@ -1026,11 +1167,22 @@ def _is_vob_good(n, loops, num_screens): os.remove(smallest) def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=None): - if num_screens == None: - num_screens = self.screens - len(meta.get('image_list', [])) - if num_screens == 0: - # or len(meta.get('image_list', [])) >= num_screens: + # Ensure the image list is initialized and preserve existing images + if 'image_list' not in meta: + meta['image_list'] = [] + + # Check if there are already at least 3 image links in the image list + existing_images = [img for img in meta['image_list'] if isinstance(img, dict) and img.get('img_url', '').startswith('http')] + if len(existing_images) >= 3: + console.print("[yellow]There are already at least 3 images in the image list. Skipping additional screenshots.") + return + + # Determine the number of screenshots to take + if num_screens is None: + num_screens = self.screens - len(existing_images) + if num_screens <= 0: return + with open(f"{base_dir}/tmp/{folder_id}/MediaInfo.json", encoding='utf-8') as f: mi = json.load(f) video_track = mi['media']['track'][1] @@ -1048,7 +1200,7 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non w_sar = 1 h_sar = sar else: - sar = w_sar = par + sar = w_sar = par h_sar = 1 length = round(float(length)) os.chdir(f"{base_dir}/tmp/{folder_id}") @@ -1059,10 +1211,10 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non else: loglevel = 'quiet' debug = True - if bool(meta.get('ffdebug', False)) == True: + if bool(meta.get('ffdebug', False)) is True: loglevel = 'verbose' debug = False - if meta.get('vapoursynth', False) == True: + if meta.get('vapoursynth', False) is True: from src.vs import vs_screengn vs_screengn(source=path, encode=None, filter_b_frames=False, num=num_screens, dir=f"{base_dir}/tmp/{folder_id}/") else: @@ -1076,87 +1228,95 @@ def screenshots(self, path, filename, folder_id, base_dir, meta, num_screens=Non ss_times = [] screen_task = progress.add_task("[green]Saving Screens...", total=num_screens + 1) for i in range(num_screens + 1): - image = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") - if not os.path.exists(image) or retake != False: + image_path = os.path.abspath(f"{base_dir}/tmp/{folder_id}/{filename}-{i}.png") + if not os.path.exists(image_path) or retake is not False: retake = False try: - ss_times = self.valid_ss_time(ss_times, num_screens+1, length) + ss_times = self.valid_ss_time(ss_times, num_screens + 1, length) ff = ffmpeg.input(path, ss=ss_times[-1]) if w_sar != 1 or h_sar != 1: ff = ff.filter('scale', int(round(width * w_sar)), int(round(height * h_sar))) ( ff - .output(image, vframes=1, pix_fmt="rgb24") + .output(image_path, vframes=1, pix_fmt="rgb24") .overwrite_output() .global_args('-loglevel', loglevel) .run(quiet=debug) ) except Exception: console.print(traceback.format_exc()) - - self.optimize_images(image) - if os.path.getsize(Path(image)) <= 75000: + + 
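The retake loops in these hunks accept or reject each screenshot purely on file size per image host. A hedged sketch of that gate, with the byte thresholds copied from the branches above; the helper name is invented, and hosts without an explicit cap (ptpimg, lensdump, ptscreens) are assumed to accept any size:

```python
import os

# Per-host size caps in bytes, mirroring the if/elif chains above.
SIZE_LIMITS = {"imgbb": 31_000_000, "imgbox": 10_000_000, "pixhost": 10_000_000}

def fits_host(path: str, host: str, floor: int = 75_000) -> bool:
    """False means the caller should retake: the file is either too small
    (likely a single-color frame) or over the host's cap."""
    size = os.path.getsize(path)
    if size <= floor:
        return False
    return size <= SIZE_LIMITS.get(host, float("inf"))
```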
self.optimize_images(image_path) + if os.path.getsize(Path(image_path)) <= 75000: console.print("[yellow]Image is incredibly small, retaking") retake = True time.sleep(1) - if os.path.getsize(Path(image)) <= 31000000 and self.img_host == "imgbb" and retake == False: + if os.path.getsize(Path(image_path)) <= 31000000 and self.img_host == "imgbb" and retake is False: i += 1 - elif os.path.getsize(Path(image)) <= 10000000 and self.img_host in ["imgbox", 'pixhost', 'oeimg'] and retake == False: + elif os.path.getsize(Path(image_path)) <= 10000000 and self.img_host in ["imgbox", 'pixhost', 'oeimg'] and retake is False: i += 1 - elif self.img_host in ["ptpimg", "lensdump", "ptscreens"] and retake == False: + elif self.img_host in ["ptpimg", "lensdump", "ptscreens"] and retake is False: i += 1 elif self.img_host == "freeimage.host": console.print("[bold red]Support for freeimage.host has been removed. Please remove from your config") exit() - elif retake == True: + elif retake is True: pass else: console.print("[red]Image too large for your image host, retaking") retake = True - time.sleep(1) + time.sleep(1) else: i += 1 progress.advance(screen_task) - #remove smallest image - smallest = "" - smallestsize = 99 ** 99 - for screens in glob.glob1(f"{base_dir}/tmp/{folder_id}/", f"{filename}-*"): - screensize = os.path.getsize(screens) - if screensize < smallestsize: - smallestsize = screensize - smallest = screens - os.remove(smallest) + + # Add new images to the meta['image_list'] as dictionaries + new_images = glob.glob(f"{filename}-*.png") + for image in new_images: + img_dict = { + 'img_url': image, + 'raw_url': image, + 'web_url': image # Assuming local path, but you might need to update this if uploading + } + meta['image_list'].append(img_dict) + + # Remove the smallest image if there are more than needed + if len(meta['image_list']) > self.screens: + smallest = min(meta['image_list'], key=lambda x: os.path.getsize(x['img_url'])) + os.remove(smallest['img_url']) + meta['image_list'].remove(smallest) def valid_ss_time(self, ss_times, num_screens, length): valid_time = False - while valid_time != True: + while valid_time is not True: valid_time = True if ss_times != []: - sst = random.randint(round(length/5), round(length/2)) + sst = random.randint(round(length / 5), round(length / 2)) for each in ss_times: tolerance = length / 10 / num_screens if abs(sst - each) <= tolerance: valid_time = False - if valid_time == True: + if valid_time is True: ss_times.append(sst) else: - ss_times.append(random.randint(round(length/5), round(length/2))) + ss_times.append(random.randint(round(length / 5), round(length / 2))) return ss_times def optimize_images(self, image): - if self.config['DEFAULT'].get('optimize_images', True) == True: + if self.config['DEFAULT'].get('optimize_images', True) is True: if os.path.exists(image): try: pyver = platform.python_version_tuple() if int(pyver[0]) == 3 and int(pyver[1]) >= 7: - import oxipng + import oxipng if os.path.getsize(image) >= 16000000: oxipng.optimize(image, level=6) else: oxipng.optimize(image, level=3) - except: + except Exception: pass return + """ Get type and category """ @@ -1173,7 +1333,7 @@ def get_type(self, video, scene, is_disc): # type = "ENCODE" elif "hdtv" in filename: type = "HDTV" - elif is_disc != None: + elif is_disc is not None: type = "DISC" elif "dvdrip" in filename: console.print("[bold red]DVDRip Detected, exiting") @@ -1186,15 +1346,15 @@ def get_cat(self, video): # if category is None: category = guessit(video.replace('1.0', 
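`valid_ss_time` above spreads screenshot timestamps by rejection sampling: draw from the 20-50% window of the runtime and discard any draw that lands within a tolerance of an already accepted one. Restated as a self-contained sketch (the function name differs from the patch):

```python
import random

def pick_ss_times(num_screens: int, length: int) -> list:
    """Pick screenshot timestamps (seconds) between 20% and 50% of the
    runtime, each at least length/10/num_screens away from the others."""
    times = []
    tolerance = length / 10 / num_screens
    while len(times) < num_screens:
        sst = random.randint(round(length / 5), round(length / 2))
        if all(abs(sst - t) > tolerance for t in times):
            times.append(sst)
    return times

print(pick_ss_times(6, 5400))  # six spaced times for a 90-minute file
```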
''))['type'] if category.lower() == "movie": - category = "MOVIE" #1 + category = "MOVIE" # 1 elif category.lower() in ("tv", "episode"): - category = "TV" #2 + category = "TV" # 2 else: category = "MOVIE" return category async def get_tmdb_from_imdb(self, meta, filename): - if meta.get('tmdb_manual') != None: + if meta.get('tmdb_manual') is not None: meta['tmdb'] = meta['tmdb_manual'] return meta imdb_id = meta['imdb'] @@ -1204,17 +1364,17 @@ async def get_tmdb_from_imdb(self, meta, filename): info = find.info(external_source="imdb_id") if len(info['movie_results']) >= 1: meta['category'] = "MOVIE" - meta['tmdb'] = info['movie_results'][0]['id'] + meta['tmdb'] = info['movie_results'][0]['id'] elif len(info['tv_results']) >= 1: meta['category'] = "TV" - meta['tmdb'] = info['tv_results'][0]['id'] + meta['tmdb'] = info['tv_results'][0]['id'] else: imdb_info = await self.get_imdb_info(imdb_id.replace('tt', ''), meta) title = imdb_info.get("title") - if title == None: + if title is None: title = filename year = imdb_info.get('year') - if year == None: + if year is None: year = meta['search_year'] console.print(f"[yellow]TMDb was unable to find anything with that IMDb, searching TMDb for {title}") meta = await self.get_tmdb_id(title, year, meta, meta['category'], imdb_info.get('original title', imdb_info.get('localized title', meta['uuid']))) @@ -1234,11 +1394,11 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil search.movie(query=filename, year=search_year) elif category == "TV": search.tv(query=filename, first_air_date_year=search_year) - if meta.get('tmdb_manual') != None: + if meta.get('tmdb_manual') is not None: meta['tmdb'] = meta['tmdb_manual'] else: meta['tmdb'] = search.results[0]['id'] - meta['category'] = category + meta['category'] = category except IndexError: try: if category == "MOVIE": @@ -1257,7 +1417,7 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil meta = await self.get_tmdb_id(filename, search_year, meta, category, untouched_filename, attempted) elif attempted == 2: attempted += 1 - meta = await self.get_tmdb_id(anitopy.parse(guessit(untouched_filename, {"excludes" : ["country", "language"]})['title'])['anime_title'], search_year, meta, meta['category'], untouched_filename, attempted) + meta = await self.get_tmdb_id(anitopy.parse(guessit(untouched_filename, {"excludes": ["country", "language"]})['title'])['anime_title'], search_year, meta, meta['category'], untouched_filename, attempted) if meta['tmdb'] in (None, ""): console.print(f"[red]Unable to find TMDb match for {filename}") if meta.get('mode', 'discord') == 'cli': @@ -1268,17 +1428,17 @@ async def get_tmdb_id(self, filename, search_year, meta, category, untouched_fil return meta return meta - + async def tmdb_other_meta(self, meta): - + if meta['tmdb'] == "0": try: - title = guessit(meta['path'], {"excludes" : ["country", "language"]})['title'].lower() + title = guessit(meta['path'], {"excludes": ["country", "language"]})['title'].lower() title = title.split('aka')[0] - meta = await self.get_tmdb_id(guessit(title, {"excludes" : ["country", "language"]})['title'], meta['search_year'], meta) + meta = await self.get_tmdb_id(guessit(title, {"excludes": ["country", "language"]})['title'], meta['search_year'], meta) if meta['tmdb'] == "0": meta = await self.get_tmdb_id(title, "", meta, meta['category']) - except: + except Exception: if meta.get('mode', 'discord') == 'cli': console.print("[bold red]Unable to find tmdb entry. 
Exiting.") exit() @@ -1290,14 +1450,14 @@ async def tmdb_other_meta(self, meta): response = movie.info() meta['title'] = response['title'] if response['release_date']: - meta['year'] = datetime.strptime(response['release_date'],'%Y-%m-%d').year + meta['year'] = datetime.strptime(response['release_date'], '%Y-%m-%d').year else: console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') meta['year'] = meta['search_year'] external = movie.external_ids() - if meta.get('imdb', None) == None: + if meta.get('imdb', None) is None: imdb_id = external.get('imdb_id', "0") - if imdb_id == "" or imdb_id == None: + if imdb_id == "" or imdb_id is None: meta['imdb_id'] = '0' else: meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) @@ -1315,9 +1475,9 @@ async def tmdb_other_meta(self, meta): break except Exception: console.print('[yellow]Unable to grab videos from TMDb.') - + meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) - if original_language != None: + if original_language is not None: meta['original_language'] = original_language else: meta['original_language'] = response['original_language'] @@ -1326,7 +1486,7 @@ async def tmdb_other_meta(self, meta): meta['keywords'] = self.get_keywords(movie) meta['genres'] = self.get_genres(response) meta['tmdb_directors'] = self.get_directors(movie) - if meta.get('anime', False) == False: + if meta.get('anime', False) is False: meta['mal_id'], meta['aka'], meta['anime'] = self.get_anime(response, meta) meta['poster'] = response.get('poster_path', "") meta['overview'] = response['overview'] @@ -1337,14 +1497,14 @@ async def tmdb_other_meta(self, meta): response = tv.info() meta['title'] = response['name'] if response['first_air_date']: - meta['year'] = datetime.strptime(response['first_air_date'],'%Y-%m-%d').year + meta['year'] = datetime.strptime(response['first_air_date'], '%Y-%m-%d').year else: console.print('[yellow]TMDB does not have a release date, using year from filename instead (if it exists)') meta['year'] = meta['search_year'] external = tv.external_ids() - if meta.get('imdb', None) == None: + if meta.get('imdb', None) is None: imdb_id = external.get('imdb_id', "0") - if imdb_id == "" or imdb_id == None: + if imdb_id == "" or imdb_id is None: meta['imdb_id'] = '0' else: meta['imdb_id'] = str(int(imdb_id.replace('tt', ''))).zfill(7) @@ -1365,7 +1525,7 @@ async def tmdb_other_meta(self, meta): # meta['aka'] = f" AKA {response['original_name']}" meta['aka'], original_language = await self.get_imdb_aka(meta['imdb_id']) - if original_language != None: + if original_language is not None: meta['original_language'] = original_language else: meta['original_language'] = response['original_language'] @@ -1390,20 +1550,17 @@ async def tmdb_other_meta(self, meta): meta['aka'] = "" if f"({meta['year']})" in meta['aka']: meta['aka'] = meta['aka'].replace(f"({meta['year']})", "").strip() - - - return meta - + return meta def get_keywords(self, tmdb_info): if tmdb_info is not None: tmdb_keywords = tmdb_info.keywords() if tmdb_keywords.get('keywords') is not None: - keywords=[f"{keyword['name'].replace(',',' ')}" for keyword in tmdb_keywords.get('keywords')] + keywords = [f"{keyword['name'].replace(',', ' ')}" for keyword in tmdb_keywords.get('keywords')] elif tmdb_keywords.get('results') is not None: - keywords=[f"{keyword['name'].replace(',',' ')}" for keyword in tmdb_keywords.get('results')] - return(', '.join(keywords)) + keywords = [f"{keyword['name'].replace(',', ' ')}" for 
keyword in tmdb_keywords.get('results')] + return (', '.join(keywords)) else: return '' @@ -1411,8 +1568,8 @@ def get_genres(self, tmdb_info): if tmdb_info is not None: tmdb_genres = tmdb_info.get('genres', []) if tmdb_genres is not []: - genres=[f"{genre['name'].replace(',',' ')}" for genre in tmdb_genres] - return(', '.join(genres)) + genres = [f"{genre['name'].replace(',', ' ')}" for genre in tmdb_genres] + return (', '.join(genres)) else: return '' @@ -1439,10 +1596,10 @@ def get_anime(self, response, meta): for each in response['genres']: if each['id'] == 16: animation = True - if response['original_language'] == 'ja' and animation == True: + if response['original_language'] == 'ja' and animation is True: romaji, mal_id, eng_title, season_year, episodes = self.get_romaji(tmdb_name, meta.get('mal', None)) alt_name = f" AKA {romaji}" - + anime = True # mal = AnimeSearch(romaji) # mal_id = mal.results[0].mal_id @@ -1455,7 +1612,7 @@ def get_anime(self, response, meta): return mal_id, alt_name, anime def get_romaji(self, tmdb_name, mal): - if mal == None: + if mal is None: mal = 0 tmdb_name = tmdb_name.replace('-', "").replace("The Movie", "") tmdb_name = ' '.join(tmdb_name.split()) @@ -1515,16 +1672,16 @@ def get_romaji(self, tmdb_name, mal): response = requests.post(url, json={'query': query, 'variables': variables}) json = response.json() media = json['data']['Page']['media'] - except: + except Exception: console.print('[red]Failed to get anime specific info from anilist. Continuing without it...') media = [] if media not in (None, []): - result = {'title' : {}} + result = {'title': {}} difference = 0 for anime in media: search_name = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", tmdb_name.lower().replace(' ', '')) for title in anime['title'].values(): - if title != None: + if title is not None: title = re.sub(u'[\u3000-\u303f\u3040-\u309f\u30a0-\u30ff\uff00-\uff9f\u4e00-\u9faf\u3400-\u4dbf]+ (?=[A-Za-z ]+–)', "", title.lower().replace(' ', ''), re.U) diff = SequenceMatcher(None, title, search_name).ratio() if diff >= difference: @@ -1537,7 +1694,7 @@ def get_romaji(self, tmdb_name, mal): season_year = result.get('season_year', "") episodes = result.get('episodes', 0) else: - romaji = eng_title = season_year = "" + romaji = eng_title = season_year = "" episodes = mal_id = 0 if mal_id in [None, 0]: mal_id = mal @@ -1545,20 +1702,13 @@ def get_romaji(self, tmdb_name, mal): episodes = 0 return romaji, mal_id, eng_title, season_year, episodes - - - - - - - """ Mediainfo/Bdinfo > meta """ def get_audio_v2(self, mi, meta, bdinfo): extra = dual = "" has_commentary = False - + # Get formats if bdinfo is not None: # Disks format_settings = "" @@ -1566,9 +1716,9 @@ def get_audio_v2(self, mi, meta, bdinfo): commercial = format additional = bdinfo.get('audio', [{}])[0].get('atmos_why_you_be_like_this', '') - # Channels + # Channels chan = bdinfo.get('audio', [{}])[0].get('channels', '') - else: + else: track_num = 2 tracks = mi.get('media', {}).get('track', []) @@ -1585,24 +1735,24 @@ def get_audio_v2(self, mi, meta, bdinfo): if track.get('Language', '') == "zxx": meta['silent'] = True - + additional = track.get('Format_AdditionalFeatures', '') format_settings = track.get('Format_Settings', '') if format_settings in ['Explicit']: format_settings = "" - #Channels + # Channels channels = mi['media']['track'][track_num].get('Channels_Original', mi['media']['track'][track_num]['Channels']) if not str(channels).isnumeric(): channels = mi['media']['track'][track_num]['Channels'] try: channel_layout = 
mi['media']['track'][track_num]['ChannelLayout'] - except: + except Exception: try: channel_layout = mi['media']['track'][track_num]['ChannelLayout_Original'] - except: + except Exception: channel_layout = "" - + # Ensure channel_layout is not None or an empty string before iterating if channel_layout and "LFE" in channel_layout: chan = f"{int(channels) - 1}.1" @@ -1613,7 +1763,7 @@ def get_audio_v2(self, mi, meta, bdinfo): chan = f"{int(channels) - 1}.1" else: chan = f"{channels}.0" - + if meta.get('original_language', '') != 'en': eng, orig = False, False try: @@ -1647,19 +1797,20 @@ def get_audio_v2(self, mi, meta, bdinfo): dual = "Dual-Audio" elif eng and not orig and meta['original_language'] not in ['zxx', 'xx', None] and not meta.get('no_dub', False): dual = "Dubbed" - except Exception as e: + except Exception: console.print(traceback.format_exc()) pass + for t in mi.get('media', {}).get('track', []): if t.get('@type') != "Audio": continue if "commentary" in t.get('Title', '').lower(): - has_commentary = True - - #Convert commercial name to naming conventions + has_commentary = True + + # Convert commercial name to naming conventions audio = { - #Format + # Format "DTS": "DTS", "AAC": "AAC", "AAC LC": "AAC", @@ -1671,16 +1822,16 @@ def get_audio_v2(self, mi, meta, bdinfo): "Vorbis": "VORBIS", "PCM": "LPCM", - #BDINFO AUDIOS - "LPCM Audio" : "LPCM", - "Dolby Digital Audio" : "DD", - "Dolby Digital Plus Audio" : "DD+", + # BDINFO AUDIOS + "LPCM Audio": "LPCM", + "Dolby Digital Audio": "DD", + "Dolby Digital Plus Audio": "DD+", # "Dolby TrueHD" : "TrueHD", - "Dolby TrueHD Audio" : "TrueHD", - "DTS Audio" : "DTS", - "DTS-HD Master Audio" : "DTS-HD MA", - "DTS-HD High-Res Audio" : "DTS-HD HRA", - "DTS:X Master Audio" : "DTS:X" + "Dolby TrueHD Audio": "TrueHD", + "DTS Audio": "DTS", + "DTS-HD Master Audio": "DTS-HD MA", + "DTS-HD High-Res Audio": "DTS-HD HRA", + "DTS:X Master Audio": "DTS:X" } audio_extra = { "XLL": "-HD MA", @@ -1693,22 +1844,21 @@ def get_audio_v2(self, mi, meta, bdinfo): "Atmos Audio": " Atmos", } format_settings_extra = { - "Dolby Surround EX" : "EX" + "Dolby Surround EX": "EX" } commercial_names = { - "Dolby Digital" : "DD", - "Dolby Digital Plus" : "DD+", - "Dolby TrueHD" : "TrueHD", - "DTS-ES" : "DTS-ES", - "DTS-HD High" : "DTS-HD HRA", - "Free Lossless Audio Codec" : "FLAC", - "DTS-HD Master Audio" : "DTS-HD MA" - } + "Dolby Digital": "DD", + "Dolby Digital Plus": "DD+", + "Dolby TrueHD": "TrueHD", + "DTS-ES": "DTS-ES", + "DTS-HD High": "DTS-HD HRA", + "Free Lossless Audio Codec": "FLAC", + "DTS-HD Master Audio": "DTS-HD MA" + } - search_format = True - # Ensure commercial and additional are not None before iterating + # Ensure commercial and additional are not None before iterating if commercial: for key, value in commercial_names.items(): if key in commercial: @@ -1731,7 +1881,7 @@ def get_audio_v2(self, mi, meta, bdinfo): # Ensure codec is not left empty if codec == "": codec = format - + # Ensure additional and channels are not None before using them if format.startswith("DTS"): if additional and additional.endswith("X"): @@ -1745,9 +1895,8 @@ def get_audio_v2(self, mi, meta, bdinfo): audio = ' '.join(audio.split()) return audio, chan, has_commentary - def is_3d(self, mi, bdinfo): - if bdinfo != None: + if bdinfo is not None: if bdinfo['video'][0]['3d'] != "": return "3D" else: @@ -1759,7 +1908,7 @@ def get_tag(self, video, meta): try: tag = guessit(video)['release_group'] tag = f"-{tag}" - except: + except Exception: tag = "" if tag == "-": tag = "" @@ 
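The channel logic just above derives the release-name channel string from MediaInfo's track data: a layout containing `LFE` yields `n.1`, otherwise `n.0`. A compact sketch of that rule (it skips the patch's extra fallbacks for missing or odd layouts):

```python
def channel_string(channels: int, channel_layout: str = "") -> str:
    """Render a channel count as '5.1' (layout has an LFE) or '2.0'."""
    if channel_layout and "LFE" in channel_layout:
        return f"{channels - 1}.1"
    return f"{channels}.0"

assert channel_string(6, "L R C LFE Ls Rs") == "5.1"
assert channel_string(2, "L R") == "2.0"
```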
-1767,15 +1916,14 @@ def get_tag(self, video, meta): tag = "" return tag - def get_source(self, type, video, path, is_disc, meta): try: try: source = guessit(video)['source'] - except: + except Exception: try: source = guessit(path)['source'] - except: + except Exception: source = "BluRay" if meta.get('manual_source', None): source = meta['manual_source'] @@ -1794,19 +1942,19 @@ def get_source(self, type, video, path, is_disc, meta): if track.track_type == "Video": system = track.standard if system not in ("PAL", "NTSC"): - raise WeirdSystem - except: + raise WeirdSystem # noqa: F405 + except Exception: try: other = guessit(video)['other'] if "PAL" in other: system = "PAL" elif "NTSC" in other: system = "NTSC" - except: + except Exception: system = "" finally: - if system == None: - system = "" + if system is None: + system = "" if type == "REMUX": system = f"{system} DVD".strip() source = system @@ -1832,7 +1980,7 @@ def get_uhd(self, type, guess, resolution, path): try: source = guess['Source'] other = guess['Other'] - except: + except Exception: source = "" other = "" uhd = "" @@ -1842,7 +1990,7 @@ def get_uhd(self, type, guess, resolution, path): uhd = "UHD" elif type in ("DISC", "REMUX", "ENCODE", "WEBRIP"): uhd = "" - + if type in ("DISC", "REMUX", "ENCODE") and resolution == "2160p": uhd = "UHD" @@ -1851,7 +1999,7 @@ def get_uhd(self, type, guess, resolution, path): def get_hdr(self, mi, bdinfo): hdr = "" dv = "" - if bdinfo != None: #Disks + if bdinfo is not None: # Disks hdr_mi = bdinfo['video'][0]['hdr_dv'] if "HDR10+" in hdr_mi: hdr = "HDR10+" @@ -1860,9 +2008,9 @@ def get_hdr(self, mi, bdinfo): try: if bdinfo['video'][1]['hdr_dv'] == "Dolby Vision": dv = "DV" - except: + except Exception: pass - else: + else: video_track = mi['media']['track'][1] try: hdr_mi = video_track['colour_primaries'] @@ -1880,13 +2028,13 @@ def get_hdr(self, mi, bdinfo): hdr = "HLG" if hdr != "HLG" and "BT.2020 (10-bit)" in transfer_characteristics: hdr = "WCG" - except: + except Exception: pass try: if "Dolby Vision" in video_track.get('HDR_Format', '') or "Dolby Vision" in video_track.get('HDR_Format_String', ''): dv = "DV" - except: + except Exception: pass hdr = f"{dv} {hdr}".strip() @@ -1894,68 +2042,68 @@ def get_hdr(self, mi, bdinfo): def get_region(self, bdinfo, region=None): label = bdinfo.get('label', bdinfo.get('title', bdinfo.get('path', ''))).replace('.', ' ') - if region != None: + if region is not None: region = region.upper() - else: + else: regions = { - 'AFG': 'AFG', 'AIA': 'AIA', 'ALA': 'ALA', 'ALG': 'ALG', 'AND': 'AND', 'ANG': 'ANG', 'ARG': 'ARG', - 'ARM': 'ARM', 'ARU': 'ARU', 'ASA': 'ASA', 'ATA': 'ATA', 'ATF': 'ATF', 'ATG': 'ATG', 'AUS': 'AUS', - 'AUT': 'AUT', 'AZE': 'AZE', 'BAH': 'BAH', 'BAN': 'BAN', 'BDI': 'BDI', 'BEL': 'BEL', 'BEN': 'BEN', - 'BER': 'BER', 'BES': 'BES', 'BFA': 'BFA', 'BHR': 'BHR', 'BHU': 'BHU', 'BIH': 'BIH', 'BLM': 'BLM', - 'BLR': 'BLR', 'BLZ': 'BLZ', 'BOL': 'BOL', 'BOT': 'BOT', 'BRA': 'BRA', 'BRB': 'BRB', 'BRU': 'BRU', - 'BVT': 'BVT', 'CAM': 'CAM', 'CAN': 'CAN', 'CAY': 'CAY', 'CCK': 'CCK', 'CEE': 'CEE', 'CGO': 'CGO', - 'CHA': 'CHA', 'CHI': 'CHI', 'CHN': 'CHN', 'CIV': 'CIV', 'CMR': 'CMR', 'COD': 'COD', 'COK': 'COK', - 'COL': 'COL', 'COM': 'COM', 'CPV': 'CPV', 'CRC': 'CRC', 'CRO': 'CRO', 'CTA': 'CTA', 'CUB': 'CUB', - 'CUW': 'CUW', 'CXR': 'CXR', 'CYP': 'CYP', 'DJI': 'DJI', 'DMA': 'DMA', 'DOM': 'DOM', 'ECU': 'ECU', - 'EGY': 'EGY', 'ENG': 'ENG', 'EQG': 'EQG', 'ERI': 'ERI', 'ESH': 'ESH', 'ESP': 'ESP', 'ETH': 'ETH', - 'FIJ': 'FIJ', 'FLK': 'FLK', 'FRA': 'FRA', 'FRO': 
'FRO', 'FSM': 'FSM', 'GAB': 'GAB', 'GAM': 'GAM', - 'GBR': 'GBR', 'GEO': 'GEO', 'GER': 'GER', 'GGY': 'GGY', 'GHA': 'GHA', 'GIB': 'GIB', 'GLP': 'GLP', - 'GNB': 'GNB', 'GRE': 'GRE', 'GRL': 'GRL', 'GRN': 'GRN', 'GUA': 'GUA', 'GUF': 'GUF', 'GUI': 'GUI', - 'GUM': 'GUM', 'GUY': 'GUY', 'HAI': 'HAI', 'HKG': 'HKG', 'HMD': 'HMD', 'HON': 'HON', 'HUN': 'HUN', - 'IDN': 'IDN', 'IMN': 'IMN', 'IND': 'IND', 'IOT': 'IOT', 'IRL': 'IRL', 'IRN': 'IRN', 'IRQ': 'IRQ', - 'ISL': 'ISL', 'ISR': 'ISR', 'ITA': 'ITA', 'JAM': 'JAM', 'JEY': 'JEY', 'JOR': 'JOR', 'JPN': 'JPN', - 'KAZ': 'KAZ', 'KEN': 'KEN', 'KGZ': 'KGZ', 'KIR': 'KIR', 'KNA': 'KNA', 'KOR': 'KOR', 'KSA': 'KSA', - 'KUW': 'KUW', 'KVX': 'KVX', 'LAO': 'LAO', 'LBN': 'LBN', 'LBR': 'LBR', 'LBY': 'LBY', 'LCA': 'LCA', - 'LES': 'LES', 'LIE': 'LIE', 'LKA': 'LKA', 'LUX': 'LUX', 'MAC': 'MAC', 'MAD': 'MAD', 'MAF': 'MAF', - 'MAR': 'MAR', 'MAS': 'MAS', 'MDA': 'MDA', 'MDV': 'MDV', 'MEX': 'MEX', 'MHL': 'MHL', 'MKD': 'MKD', - 'MLI': 'MLI', 'MLT': 'MLT', 'MNG': 'MNG', 'MNP': 'MNP', 'MON': 'MON', 'MOZ': 'MOZ', 'MRI': 'MRI', - 'MSR': 'MSR', 'MTN': 'MTN', 'MTQ': 'MTQ', 'MWI': 'MWI', 'MYA': 'MYA', 'MYT': 'MYT', 'NAM': 'NAM', - 'NCA': 'NCA', 'NCL': 'NCL', 'NEP': 'NEP', 'NFK': 'NFK', 'NIG': 'NIG', 'NIR': 'NIR', 'NIU': 'NIU', - 'NLD': 'NLD', 'NOR': 'NOR', 'NRU': 'NRU', 'NZL': 'NZL', 'OMA': 'OMA', 'PAK': 'PAK', 'PAN': 'PAN', - 'PAR': 'PAR', 'PCN': 'PCN', 'PER': 'PER', 'PHI': 'PHI', 'PLE': 'PLE', 'PLW': 'PLW', 'PNG': 'PNG', - 'POL': 'POL', 'POR': 'POR', 'PRK': 'PRK', 'PUR': 'PUR', 'QAT': 'QAT', 'REU': 'REU', 'ROU': 'ROU', - 'RSA': 'RSA', 'RUS': 'RUS', 'RWA': 'RWA', 'SAM': 'SAM', 'SCO': 'SCO', 'SDN': 'SDN', 'SEN': 'SEN', - 'SEY': 'SEY', 'SGS': 'SGS', 'SHN': 'SHN', 'SIN': 'SIN', 'SJM': 'SJM', 'SLE': 'SLE', 'SLV': 'SLV', - 'SMR': 'SMR', 'SOL': 'SOL', 'SOM': 'SOM', 'SPM': 'SPM', 'SRB': 'SRB', 'SSD': 'SSD', 'STP': 'STP', - 'SUI': 'SUI', 'SUR': 'SUR', 'SWZ': 'SWZ', 'SXM': 'SXM', 'SYR': 'SYR', 'TAH': 'TAH', 'TAN': 'TAN', - 'TCA': 'TCA', 'TGA': 'TGA', 'THA': 'THA', 'TJK': 'TJK', 'TKL': 'TKL', 'TKM': 'TKM', 'TLS': 'TLS', - 'TOG': 'TOG', 'TRI': 'TRI', 'TUN': 'TUN', 'TUR': 'TUR', 'TUV': 'TUV', 'TWN': 'TWN', 'UAE': 'UAE', - 'UGA': 'UGA', 'UKR': 'UKR', 'UMI': 'UMI', 'URU': 'URU', 'USA': 'USA', 'UZB': 'UZB', 'VAN': 'VAN', - 'VAT': 'VAT', 'VEN': 'VEN', 'VGB': 'VGB', 'VIE': 'VIE', 'VIN': 'VIN', 'VIR': 'VIR', 'WAL': 'WAL', - 'WLF': 'WLF', 'YEM': 'YEM', 'ZAM': 'ZAM', 'ZIM': 'ZIM', "EUR" : "EUR" + 'AFG': 'AFG', 'AIA': 'AIA', 'ALA': 'ALA', 'ALG': 'ALG', 'AND': 'AND', 'ANG': 'ANG', 'ARG': 'ARG', + 'ARM': 'ARM', 'ARU': 'ARU', 'ASA': 'ASA', 'ATA': 'ATA', 'ATF': 'ATF', 'ATG': 'ATG', 'AUS': 'AUS', + 'AUT': 'AUT', 'AZE': 'AZE', 'BAH': 'BAH', 'BAN': 'BAN', 'BDI': 'BDI', 'BEL': 'BEL', 'BEN': 'BEN', + 'BER': 'BER', 'BES': 'BES', 'BFA': 'BFA', 'BHR': 'BHR', 'BHU': 'BHU', 'BIH': 'BIH', 'BLM': 'BLM', + 'BLR': 'BLR', 'BLZ': 'BLZ', 'BOL': 'BOL', 'BOT': 'BOT', 'BRA': 'BRA', 'BRB': 'BRB', 'BRU': 'BRU', + 'BVT': 'BVT', 'CAM': 'CAM', 'CAN': 'CAN', 'CAY': 'CAY', 'CCK': 'CCK', 'CEE': 'CEE', 'CGO': 'CGO', + 'CHA': 'CHA', 'CHI': 'CHI', 'CHN': 'CHN', 'CIV': 'CIV', 'CMR': 'CMR', 'COD': 'COD', 'COK': 'COK', + 'COL': 'COL', 'COM': 'COM', 'CPV': 'CPV', 'CRC': 'CRC', 'CRO': 'CRO', 'CTA': 'CTA', 'CUB': 'CUB', + 'CUW': 'CUW', 'CXR': 'CXR', 'CYP': 'CYP', 'DJI': 'DJI', 'DMA': 'DMA', 'DOM': 'DOM', 'ECU': 'ECU', + 'EGY': 'EGY', 'ENG': 'ENG', 'EQG': 'EQG', 'ERI': 'ERI', 'ESH': 'ESH', 'ESP': 'ESP', 'ETH': 'ETH', + 'FIJ': 'FIJ', 'FLK': 'FLK', 'FRA': 'FRA', 'FRO': 'FRO', 'FSM': 'FSM', 'GAB': 'GAB', 'GAM': 'GAM', + 'GBR': 'GBR', 'GEO': 'GEO', 
'GER': 'GER', 'GGY': 'GGY', 'GHA': 'GHA', 'GIB': 'GIB', 'GLP': 'GLP', + 'GNB': 'GNB', 'GRE': 'GRE', 'GRL': 'GRL', 'GRN': 'GRN', 'GUA': 'GUA', 'GUF': 'GUF', 'GUI': 'GUI', + 'GUM': 'GUM', 'GUY': 'GUY', 'HAI': 'HAI', 'HKG': 'HKG', 'HMD': 'HMD', 'HON': 'HON', 'HUN': 'HUN', + 'IDN': 'IDN', 'IMN': 'IMN', 'IND': 'IND', 'IOT': 'IOT', 'IRL': 'IRL', 'IRN': 'IRN', 'IRQ': 'IRQ', + 'ISL': 'ISL', 'ISR': 'ISR', 'ITA': 'ITA', 'JAM': 'JAM', 'JEY': 'JEY', 'JOR': 'JOR', 'JPN': 'JPN', + 'KAZ': 'KAZ', 'KEN': 'KEN', 'KGZ': 'KGZ', 'KIR': 'KIR', 'KNA': 'KNA', 'KOR': 'KOR', 'KSA': 'KSA', + 'KUW': 'KUW', 'KVX': 'KVX', 'LAO': 'LAO', 'LBN': 'LBN', 'LBR': 'LBR', 'LBY': 'LBY', 'LCA': 'LCA', + 'LES': 'LES', 'LIE': 'LIE', 'LKA': 'LKA', 'LUX': 'LUX', 'MAC': 'MAC', 'MAD': 'MAD', 'MAF': 'MAF', + 'MAR': 'MAR', 'MAS': 'MAS', 'MDA': 'MDA', 'MDV': 'MDV', 'MEX': 'MEX', 'MHL': 'MHL', 'MKD': 'MKD', + 'MLI': 'MLI', 'MLT': 'MLT', 'MNG': 'MNG', 'MNP': 'MNP', 'MON': 'MON', 'MOZ': 'MOZ', 'MRI': 'MRI', + 'MSR': 'MSR', 'MTN': 'MTN', 'MTQ': 'MTQ', 'MWI': 'MWI', 'MYA': 'MYA', 'MYT': 'MYT', 'NAM': 'NAM', + 'NCA': 'NCA', 'NCL': 'NCL', 'NEP': 'NEP', 'NFK': 'NFK', 'NIG': 'NIG', 'NIR': 'NIR', 'NIU': 'NIU', + 'NLD': 'NLD', 'NOR': 'NOR', 'NRU': 'NRU', 'NZL': 'NZL', 'OMA': 'OMA', 'PAK': 'PAK', 'PAN': 'PAN', + 'PAR': 'PAR', 'PCN': 'PCN', 'PER': 'PER', 'PHI': 'PHI', 'PLE': 'PLE', 'PLW': 'PLW', 'PNG': 'PNG', + 'POL': 'POL', 'POR': 'POR', 'PRK': 'PRK', 'PUR': 'PUR', 'QAT': 'QAT', 'REU': 'REU', 'ROU': 'ROU', + 'RSA': 'RSA', 'RUS': 'RUS', 'RWA': 'RWA', 'SAM': 'SAM', 'SCO': 'SCO', 'SDN': 'SDN', 'SEN': 'SEN', + 'SEY': 'SEY', 'SGS': 'SGS', 'SHN': 'SHN', 'SIN': 'SIN', 'SJM': 'SJM', 'SLE': 'SLE', 'SLV': 'SLV', + 'SMR': 'SMR', 'SOL': 'SOL', 'SOM': 'SOM', 'SPM': 'SPM', 'SRB': 'SRB', 'SSD': 'SSD', 'STP': 'STP', + 'SUI': 'SUI', 'SUR': 'SUR', 'SWZ': 'SWZ', 'SXM': 'SXM', 'SYR': 'SYR', 'TAH': 'TAH', 'TAN': 'TAN', + 'TCA': 'TCA', 'TGA': 'TGA', 'THA': 'THA', 'TJK': 'TJK', 'TKL': 'TKL', 'TKM': 'TKM', 'TLS': 'TLS', + 'TOG': 'TOG', 'TRI': 'TRI', 'TUN': 'TUN', 'TUR': 'TUR', 'TUV': 'TUV', 'TWN': 'TWN', 'UAE': 'UAE', + 'UGA': 'UGA', 'UKR': 'UKR', 'UMI': 'UMI', 'URU': 'URU', 'USA': 'USA', 'UZB': 'UZB', 'VAN': 'VAN', + 'VAT': 'VAT', 'VEN': 'VEN', 'VGB': 'VGB', 'VIE': 'VIE', 'VIN': 'VIN', 'VIR': 'VIR', 'WAL': 'WAL', + 'WLF': 'WLF', 'YEM': 'YEM', 'ZAM': 'ZAM', 'ZIM': 'ZIM', "EUR": "EUR" } for key, value in regions.items(): if f" {key} " in label: region = value - - if region == None: + + if region is None: region = "" return region def get_distributor(self, distributor_in): distributor_list = [ - '01 DISTRIBUTION', '100 DESTINATIONS TRAVEL FILM', '101 FILMS', '1FILMS', '2 ENTERTAIN VIDEO', '20TH CENTURY FOX', '2L', '3D CONTENT HUB', '3D MEDIA', '3L FILM', '4DIGITAL', '4DVD', '4K ULTRA HD MOVIES', '4K UHD', '8-FILMS', '84 ENTERTAINMENT', '88 FILMS', '@ANIME', 'ANIME', 'A CONTRACORRIENTE', 'A CONTRACORRIENTE FILMS', 'A&E HOME VIDEO', 'A&E', 'A&M RECORDS', 'A+E NETWORKS', 'A+R', 'A-FILM', 'AAA', 'AB VIDΓ‰O', 'AB VIDEO', 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)', 'ABC', 'ABKCO', 'ABSOLUT MEDIEN', 'ABSOLUTE', 'ACCENT FILM ENTERTAINMENT', 'ACCENTUS', 'ACORN MEDIA', 'AD VITAM', 'ADA', 'ADITYA VIDEOS', 'ADSO FILMS', 'AFM RECORDS', 'AGFA', 'AIX RECORDS', - 'ALAMODE FILM', 'ALBA RECORDS', 'ALBANY RECORDS', 'ALBATROS', 'ALCHEMY', 'ALIVE', 'ALL ANIME', 'ALL INTERACTIVE ENTERTAINMENT', 'ALLEGRO', 'ALLIANCE', 'ALPHA MUSIC', 'ALTERDYSTRYBUCJA', 'ALTERED INNOCENCE', 'ALTITUDE FILM DISTRIBUTION', 'ALUCARD RECORDS', 'AMAZING D.C.', 'AMAZING DC', 'AMMO CONTENT', 'AMUSE SOFT ENTERTAINMENT', 
'ANCONNECT', 'ANEC', 'ANIMATSU', 'ANIME HOUSE', 'ANIME LTD', 'ANIME WORKS', 'ANIMEIGO', 'ANIPLEX', 'ANOLIS ENTERTAINMENT', 'ANOTHER WORLD ENTERTAINMENT', 'AP INTERNATIONAL', 'APPLE', 'ARA MEDIA', 'ARBELOS', 'ARC ENTERTAINMENT', 'ARP SΓ‰LECTION', 'ARP SELECTION', 'ARROW', 'ART SERVICE', 'ART VISION', 'ARTE Γ‰DITIONS', 'ARTE EDITIONS', 'ARTE VIDΓ‰O', - 'ARTE VIDEO', 'ARTHAUS MUSIK', 'ARTIFICIAL EYE', 'ARTSPLOITATION FILMS', 'ARTUS FILMS', 'ASCOT ELITE HOME ENTERTAINMENT', 'ASIA VIDEO', 'ASMIK ACE', 'ASTRO RECORDS & FILMWORKS', 'ASYLUM', 'ATLANTIC FILM', 'ATLANTIC RECORDS', 'ATLAS FILM', 'AUDIO VISUAL ENTERTAINMENT', 'AURO-3D CREATIVE LABEL', 'AURUM', 'AV VISIONEN', 'AV-JET', 'AVALON', 'AVENTI', 'AVEX TRAX', 'AXIOM', 'AXIS RECORDS', 'AYNGARAN', 'BAC FILMS', 'BACH FILMS', 'BANDAI VISUAL', 'BARCLAY', 'BBC', 'BRITISH BROADCASTING CORPORATION', 'BBI FILMS', 'BBI', 'BCI HOME ENTERTAINMENT', 'BEGGARS BANQUET', 'BEL AIR CLASSIQUES', 'BELGA FILMS', 'BELVEDERE', 'BENELUX FILM DISTRIBUTORS', 'BENNETT-WATT MEDIA', 'BERLIN CLASSICS', 'BERLINER PHILHARMONIKER RECORDINGS', 'BEST ENTERTAINMENT', 'BEYOND HOME ENTERTAINMENT', 'BFI VIDEO', 'BFI', 'BRITISH FILM INSTITUTE', 'BFS ENTERTAINMENT', 'BFS', 'BHAVANI', 'BIBER RECORDS', 'BIG HOME VIDEO', 'BILDSTΓ–RUNG', - 'BILDSTORUNG', 'BILL ZEBUB', 'BIRNENBLATT', 'BIT WEL', 'BLACK BOX', 'BLACK HILL PICTURES', 'BLACK HILL', 'BLACK HOLE RECORDINGS', 'BLACK HOLE', 'BLAQOUT', 'BLAUFIELD MUSIC', 'BLAUFIELD', 'BLOCKBUSTER ENTERTAINMENT', 'BLOCKBUSTER', 'BLU PHASE MEDIA', 'BLU-RAY ONLY', 'BLU-RAY', 'BLURAY ONLY', 'BLURAY', 'BLUE GENTIAN RECORDS', 'BLUE KINO', 'BLUE UNDERGROUND', 'BMG/ARISTA', 'BMG', 'BMGARISTA', 'BMG ARISTA', 'ARISTA', 'ARISTA/BMG', 'ARISTABMG', 'ARISTA BMG', 'BONTON FILM', 'BONTON', 'BOOMERANG PICTURES', 'BOOMERANG', 'BQHL Γ‰DITIONS', 'BQHL EDITIONS', 'BQHL', 'BREAKING GLASS', 'BRIDGESTONE', 'BRINK', 'BROAD GREEN PICTURES', 'BROAD GREEN', 'BUSCH MEDIA GROUP', 'BUSCH', 'C MAJOR', 'C.B.S.', 'CAICHANG', 'CALIFΓ“RNIA FILMES', 'CALIFORNIA FILMES', 'CALIFORNIA', 'CAMEO', 'CAMERA OBSCURA', 'CAMERATA', 'CAMP MOTION PICTURES', 'CAMP MOTION', 'CAPELIGHT PICTURES', 'CAPELIGHT', 'CAPITOL', 'CAPITOL RECORDS', 'CAPRICCI', 'CARGO RECORDS', 'CARLOTTA FILMS', 'CARLOTTA', 'CARLOTA', 'CARMEN FILM', 'CASCADE', 'CATCHPLAY', 'CAULDRON FILMS', 'CAULDRON', 'CBS TELEVISION STUDIOS', 'CBS', 'CCTV', 'CCV ENTERTAINMENT', 'CCV', 'CD BABY', 'CD LAND', 'CECCHI GORI', 'CENTURY MEDIA', 'CHUAN XUN SHI DAI MULTIMEDIA', 'CINE-ASIA', 'CINΓ‰ART', 'CINEART', 'CINEDIGM', 'CINEFIL IMAGICA', 'CINEMA EPOCH', 'CINEMA GUILD', 'CINEMA LIBRE STUDIOS', 'CINEMA MONDO', 'CINEMATIC VISION', 'CINEPLOIT RECORDS', 'CINESTRANGE EXTREME', 'CITEL VIDEO', 'CITEL', 'CJ ENTERTAINMENT', 'CJ', 'CLASSIC MEDIA', 'CLASSICFLIX', 'CLASSICLINE', 'CLAUDIO RECORDS', 'CLEAR VISION', 'CLEOPATRA', 'CLOSE UP', 'CMS MEDIA LIMITED', 'CMV LASERVISION', 'CN ENTERTAINMENT', 'CODE RED', 'COHEN MEDIA GROUP', 'COHEN', 'COIN DE MIRE CINΓ‰MA', 'COIN DE MIRE CINEMA', 'COLOSSEO FILM', 'COLUMBIA', 'COLUMBIA PICTURES', 'COLUMBIA/TRI-STAR', 'TRI-STAR', 'COMMERCIAL MARKETING', 'CONCORD MUSIC GROUP', 'CONCORDE VIDEO', 'CONDOR', 'CONSTANTIN FILM', 'CONSTANTIN', 'CONSTANTINO FILMES', 'CONSTANTINO', 'CONSTRUCTIVE MEDIA SERVICE', 'CONSTRUCTIVE', 'CONTENT ZONE', 'CONTENTS GATE', 'COQUEIRO VERDE', 'CORNERSTONE MEDIA', 'CORNERSTONE', 'CP DIGITAL', 'CREST MOVIES', 'CRITERION', 'CRITERION COLLECTION', 'CC', 'CRYSTAL CLASSICS', 'CULT EPICS', 'CULT FILMS', 'CULT VIDEO', 'CURZON FILM WORLD', 'D FILMS', "D'AILLY COMPANY", 'DAILLY COMPANY', 'D AILLY COMPANY', 
"D'AILLY", 'DAILLY', 'D AILLY', 'DA CAPO', 'DA MUSIC', "DALL'ANGELO PICTURES", 'DALLANGELO PICTURES', "DALL'ANGELO", 'DALL ANGELO PICTURES', 'DALL ANGELO', 'DAREDO', 'DARK FORCE ENTERTAINMENT', 'DARK FORCE', 'DARK SIDE RELEASING', 'DARK SIDE', 'DAZZLER MEDIA', 'DAZZLER', 'DCM PICTURES', 'DCM', 'DEAPLANETA', 'DECCA', 'DEEPJOY', 'DEFIANT SCREEN ENTERTAINMENT', 'DEFIANT SCREEN', 'DEFIANT', 'DELOS', 'DELPHIAN RECORDS', 'DELPHIAN', 'DELTA MUSIC & ENTERTAINMENT', 'DELTA MUSIC AND ENTERTAINMENT', 'DELTA MUSIC ENTERTAINMENT', 'DELTA MUSIC', 'DELTAMAC CO. LTD.', 'DELTAMAC CO LTD', 'DELTAMAC CO', 'DELTAMAC', 'DEMAND MEDIA', 'DEMAND', 'DEP', 'DEUTSCHE GRAMMOPHON', 'DFW', 'DGM', 'DIAPHANA', 'DIGIDREAMS STUDIOS', 'DIGIDREAMS', 'DIGITAL ENVIRONMENTS', 'DIGITAL', 'DISCOTEK MEDIA', 'DISCOVERY CHANNEL', 'DISCOVERY', 'DISK KINO', 'DISNEY / BUENA VISTA', 'DISNEY', 'BUENA VISTA', 'DISNEY BUENA VISTA', 'DISTRIBUTION SELECT', 'DIVISA', 'DNC ENTERTAINMENT', 'DNC', 'DOGWOOF', 'DOLMEN HOME VIDEO', 'DOLMEN', 'DONAU FILM', 'DONAU', 'DORADO FILMS', 'DORADO', 'DRAFTHOUSE FILMS', 'DRAFTHOUSE', 'DRAGON FILM ENTERTAINMENT', 'DRAGON ENTERTAINMENT', 'DRAGON FILM', 'DRAGON', 'DREAMWORKS', 'DRIVE ON RECORDS', 'DRIVE ON', 'DRIVE-ON', 'DRIVEON', 'DS MEDIA', 'DTP ENTERTAINMENT AG', 'DTP ENTERTAINMENT', 'DTP AG', 'DTP', 'DTS ENTERTAINMENT', 'DTS', 'DUKE MARKETING', 'DUKE VIDEO DISTRIBUTION', 'DUKE', 'DUTCH FILMWORKS', 'DUTCH', 'DVD INTERNATIONAL', 'DVD', 'DYBEX', 'DYNAMIC', 'DYNIT', 'E1 ENTERTAINMENT', 'E1', 'EAGLE ENTERTAINMENT', 'EAGLE HOME ENTERTAINMENT PVT.LTD.', 'EAGLE HOME ENTERTAINMENT PVTLTD', 'EAGLE HOME ENTERTAINMENT PVT LTD', 'EAGLE HOME ENTERTAINMENT', 'EAGLE PICTURES', 'EAGLE ROCK ENTERTAINMENT', 'EAGLE ROCK', 'EAGLE VISION MEDIA', 'EAGLE VISION', 'EARMUSIC', 'EARTH ENTERTAINMENT', 'EARTH', 'ECHO BRIDGE ENTERTAINMENT', 'ECHO BRIDGE', 'EDEL GERMANY GMBH', 'EDEL GERMANY', 'EDEL RECORDS', 'EDITION TONFILM', 'EDITIONS MONTPARNASSE', 'EDKO FILMS LTD.', 'EDKO FILMS LTD', 'EDKO FILMS', - 'EDKO', "EIN'S M&M CO", 'EINS M&M CO', "EIN'S M&M", 'EINS M&M', 'ELEA-MEDIA', 'ELEA MEDIA', 'ELEA', 'ELECTRIC PICTURE', 'ELECTRIC', 'ELEPHANT FILMS', 'ELEPHANT', 'ELEVATION', 'EMI', 'EMON', 'EMS', 'EMYLIA', 'ENE MEDIA', 'ENE', 'ENTERTAINMENT IN VIDEO', 'ENTERTAINMENT IN', 'ENTERTAINMENT ONE', 'ENTERTAINMENT ONE FILMS CANADA INC.', 'ENTERTAINMENT ONE FILMS CANADA INC', 'ENTERTAINMENT ONE FILMS CANADA', 'ENTERTAINMENT ONE CANADA INC', 'ENTERTAINMENT ONE CANADA', 'ENTERTAINMENTONE', 'EONE', 'EOS', 'EPIC PICTURES', 'EPIC', 'EPIC RECORDS', 'ERATO', 'EROS', 'ESC EDITIONS', 'ESCAPI MEDIA BV', 'ESOTERIC RECORDINGS', 'ESPN FILMS', 'EUREKA ENTERTAINMENT', 'EUREKA', 'EURO PICTURES', 'EURO VIDEO', 'EUROARTS', 'EUROPA FILMES', 'EUROPA', 'EUROPACORP', 'EUROZOOM', 'EXCEL', 'EXPLOSIVE MEDIA', 'EXPLOSIVE', 'EXTRALUCID FILMS', 'EXTRALUCID', 'EYE SEE MOVIES', 'EYE SEE', 'EYK MEDIA', 'EYK', 'FABULOUS FILMS', 'FABULOUS', 'FACTORIS FILMS', 'FACTORIS', 'FARAO RECORDS', 'FARBFILM HOME ENTERTAINMENT', 'FARBFILM ENTERTAINMENT', 'FARBFILM HOME', 'FARBFILM', 'FEELGOOD ENTERTAINMENT', 'FEELGOOD', 'FERNSEHJUWELEN', 'FILM CHEST', 'FILM MEDIA', 'FILM MOVEMENT', 'FILM4', 'FILMART', 'FILMAURO', 'FILMAX', 'FILMCONFECT HOME ENTERTAINMENT', 'FILMCONFECT ENTERTAINMENT', 'FILMCONFECT HOME', 'FILMCONFECT', 'FILMEDIA', 'FILMJUWELEN', 'FILMOTEKA NARODAWA', 'FILMRISE', 'FINAL CUT ENTERTAINMENT', 'FINAL CUT', 'FIREHOUSE 12 RECORDS', 'FIREHOUSE 12', 'FIRST INTERNATIONAL PRODUCTION', 'FIRST INTERNATIONAL', 'FIRST LOOK STUDIOS', 'FIRST LOOK', 'FLAGMAN TRADE', 'FLASHSTAR FILMES', 
'FLASHSTAR', 'FLICKER ALLEY', 'FNC ADD CULTURE', 'FOCUS FILMES', 'FOCUS', 'FOKUS MEDIA', 'FOKUSA', 'FOX PATHE EUROPA', 'FOX PATHE', 'FOX EUROPA', 'FOX/MGM', 'FOX MGM', 'MGM', 'MGM/FOX', 'FOX', 'FPE', 'FRANCE TΓ‰LΓ‰VISIONS DISTRIBUTION', 'FRANCE TELEVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS', 'FRANCE', 'FREE DOLPHIN ENTERTAINMENT', 'FREE DOLPHIN', 'FREESTYLE DIGITAL MEDIA', 'FREESTYLE DIGITAL', 'FREESTYLE', 'FREMANTLE HOME ENTERTAINMENT', 'FREMANTLE ENTERTAINMENT', 'FREMANTLE HOME', 'FREMANTL', 'FRENETIC FILMS', 'FRENETIC', 'FRONTIER WORKS', 'FRONTIER', 'FRONTIERS MUSIC', 'FRONTIERS RECORDS', 'FS FILM OY', 'FS FILM', 'FULL MOON FEATURES', 'FULL MOON', 'FUN CITY EDITIONS', 'FUN CITY', - 'FUNIMATION ENTERTAINMENT', 'FUNIMATION', 'FUSION', 'FUTUREFILM', 'G2 PICTURES', 'G2', 'GAGA COMMUNICATIONS', 'GAGA', 'GAIAM', 'GALAPAGOS', 'GAMMA HOME ENTERTAINMENT', 'GAMMA ENTERTAINMENT', 'GAMMA HOME', 'GAMMA', 'GARAGEHOUSE PICTURES', 'GARAGEHOUSE', 'GARAGEPLAY (θ»ŠεΊ«ε¨›ζ¨‚)', 'θ»ŠεΊ«ε¨›ζ¨‚', 'GARAGEPLAY (Che Ku Yu Le )', 'GARAGEPLAY', 'Che Ku Yu Le', 'GAUMONT', 'GEFFEN', 'GENEON ENTERTAINMENT', 'GENEON', 'GENEON UNIVERSAL ENTERTAINMENT', 'GENERAL VIDEO RECORDING', 'GLASS DOLL FILMS', 'GLASS DOLL', 'GLOBE MUSIC MEDIA', 'GLOBE MUSIC', 'GLOBE MEDIA', 'GLOBE', 'GO ENTERTAIN', 'GO', 'GOLDEN HARVEST', 'GOOD!MOVIES', 'GOOD! MOVIES', 'GOOD MOVIES', 'GRAPEVINE VIDEO', 'GRAPEVINE', 'GRASSHOPPER FILM', 'GRASSHOPPER FILMS', 'GRASSHOPPER', 'GRAVITAS VENTURES', 'GRAVITAS', 'GREAT MOVIES', 'GREAT', 'GREEN APPLE ENTERTAINMENT', 'GREEN ENTERTAINMENT', 'GREEN APPLE', 'GREEN', 'GREENNARAE MEDIA', 'GREENNARAE', 'GRINDHOUSE RELEASING', 'GRINDHOUSE', 'GRIND HOUSE', 'GRYPHON ENTERTAINMENT', 'GRYPHON', 'GUNPOWDER & SKY', 'GUNPOWDER AND SKY', 'GUNPOWDER SKY', 'GUNPOWDER + SKY', 'GUNPOWDER', 'HANABEE ENTERTAINMENT', 'HANABEE', 'HANNOVER HOUSE', 'HANNOVER', 'HANSESOUND', 'HANSE SOUND', 'HANSE', 'HAPPINET', 'HARMONIA MUNDI', 'HARMONIA', 'HBO', 'HDC', 'HEC', 'HELL & BACK RECORDINGS', 'HELL AND BACK RECORDINGS', 'HELL & BACK', 'HELL AND BACK', "HEN'S TOOTH VIDEO", 'HENS TOOTH VIDEO', "HEN'S TOOTH", 'HENS TOOTH', 'HIGH FLIERS', 'HIGHLIGHT', 'HILLSONG', 'HISTORY CHANNEL', 'HISTORY', 'HK VIDΓ‰O', 'HK VIDEO', 'HK', 'HMH HAMBURGER MEDIEN HAUS', 'HAMBURGER MEDIEN HAUS', 'HMH HAMBURGER MEDIEN', 'HMH HAMBURGER', 'HMH', 'HOLLYWOOD CLASSIC ENTERTAINMENT', 'HOLLYWOOD CLASSIC', 'HOLLYWOOD PICTURES', 'HOLLYWOOD', 'HOPSCOTCH ENTERTAINMENT', 'HOPSCOTCH', 'HPM', 'HΓ„NNSLER CLASSIC', 'HANNSLER CLASSIC', 'HANNSLER', 'I-CATCHER', 'I CATCHER', 'ICATCHER', 'I-ON NEW MEDIA', 'I ON NEW MEDIA', 'ION NEW MEDIA', 'ION MEDIA', 'I-ON', 'ION', 'IAN PRODUCTIONS', 'IAN', 'ICESTORM', 'ICON FILM DISTRIBUTION', 'ICON DISTRIBUTION', 'ICON FILM', 'ICON', 'IDEALE AUDIENCE', 'IDEALE', 'IFC FILMS', 'IFC', 'IFILM', 'ILLUSIONS UNLTD.', 'ILLUSIONS UNLTD', 'ILLUSIONS', 'IMAGE ENTERTAINMENT', 'IMAGE', 'IMAGEM FILMES', 'IMAGEM', 'IMOVISION', 'IMPERIAL CINEPIX', 'IMPRINT', 'IMPULS HOME ENTERTAINMENT', 'IMPULS ENTERTAINMENT', 'IMPULS HOME', 'IMPULS', 'IN-AKUSTIK', 'IN AKUSTIK', 'INAKUSTIK', 'INCEPTION MEDIA GROUP', 'INCEPTION MEDIA', 'INCEPTION GROUP', 'INCEPTION', 'INDEPENDENT', 'INDICAN', 'INDIE RIGHTS', 'INDIE', 'INDIGO', 'INFO', 'INJOINGAN', 'INKED PICTURES', 'INKED', 'INSIDE OUT MUSIC', 'INSIDE MUSIC', 'INSIDE OUT', 'INSIDE', 'INTERCOM', 'INTERCONTINENTAL VIDEO', 'INTERCONTINENTAL', 'INTERGROOVE', 'INTERSCOPE', 'INVINCIBLE PICTURES', 'INVINCIBLE', 'ISLAND/MERCURY', 'ISLAND MERCURY', 'ISLANDMERCURY', 'ISLAND & MERCURY', 'ISLAND AND MERCURY', 'ISLAND', 'ITN', 'ITV DVD', 
'ITV', 'IVC', 'IVE ENTERTAINMENT', 'IVE', 'J&R ADVENTURES', 'J&R', 'JR', 'JAKOB', 'JONU MEDIA', 'JONU', 'JRB PRODUCTIONS', 'JRB', 'JUST BRIDGE ENTERTAINMENT', 'JUST BRIDGE', 'JUST ENTERTAINMENT', 'JUST', 'KABOOM ENTERTAINMENT', 'KABOOM', 'KADOKAWA ENTERTAINMENT', 'KADOKAWA', 'KAIROS', 'KALEIDOSCOPE ENTERTAINMENT', 'KALEIDOSCOPE', 'KAM & RONSON ENTERPRISES', 'KAM & RONSON', 'KAM&RONSON ENTERPRISES', 'KAM&RONSON', 'KAM AND RONSON ENTERPRISES', 'KAM AND RONSON', 'KANA HOME VIDEO', 'KARMA FILMS', 'KARMA', 'KATZENBERGER', 'KAZE', - 'KBS MEDIA', 'KBS', 'KD MEDIA', 'KD', 'KING MEDIA', 'KING', 'KING RECORDS', 'KINO LORBER', 'KINO', 'KINO SWIAT', 'KINOKUNIYA', 'KINOWELT HOME ENTERTAINMENT/DVD', 'KINOWELT HOME ENTERTAINMENT', 'KINOWELT ENTERTAINMENT', 'KINOWELT HOME DVD', 'KINOWELT ENTERTAINMENT/DVD', 'KINOWELT DVD', 'KINOWELT', 'KIT PARKER FILMS', 'KIT PARKER', 'KITTY MEDIA', 'KNM HOME ENTERTAINMENT', 'KNM ENTERTAINMENT', 'KNM HOME', 'KNM', 'KOBA FILMS', 'KOBA', 'KOCH ENTERTAINMENT', 'KOCH MEDIA', 'KOCH', 'KRAKEN RELEASING', 'KRAKEN', 'KSCOPE', 'KSM', 'KULTUR', "L'ATELIER D'IMAGES", "LATELIER D'IMAGES", "L'ATELIER DIMAGES", 'LATELIER DIMAGES', "L ATELIER D'IMAGES", "L'ATELIER D IMAGES", - 'L ATELIER D IMAGES', "L'ATELIER", 'L ATELIER', 'LATELIER', 'LA AVENTURA AUDIOVISUAL', 'LA AVENTURA', 'LACE GROUP', 'LACE', 'LASER PARADISE', 'LAYONS', 'LCJ EDITIONS', 'LCJ', 'LE CHAT QUI FUME', 'LE PACTE', 'LEDICK FILMHANDEL', 'LEGEND', 'LEOMARK STUDIOS', 'LEOMARK', 'LEONINE FILMS', 'LEONINE', 'LICHTUNG MEDIA LTD', 'LICHTUNG LTD', 'LICHTUNG MEDIA LTD.', 'LICHTUNG LTD.', 'LICHTUNG MEDIA', 'LICHTUNG', 'LIGHTHOUSE HOME ENTERTAINMENT', 'LIGHTHOUSE ENTERTAINMENT', 'LIGHTHOUSE HOME', 'LIGHTHOUSE', 'LIGHTYEAR', 'LIONSGATE FILMS', 'LIONSGATE', 'LIZARD CINEMA TRADE', 'LLAMENTOL', 'LOBSTER FILMS', 'LOBSTER', 'LOGON', 'LORBER FILMS', 'LORBER', 'LOS BANDITOS FILMS', 'LOS BANDITOS', 'LOUD & PROUD RECORDS', 'LOUD AND PROUD RECORDS', 'LOUD & PROUD', 'LOUD AND PROUD', 'LSO LIVE', 'LUCASFILM', 'LUCKY RED', 'LUMIÈRE HOME ENTERTAINMENT', 'LUMIERE HOME ENTERTAINMENT', 'LUMIERE ENTERTAINMENT', 'LUMIERE HOME', 'LUMIERE', 'M6 VIDEO', 'M6', 'MAD DIMENSION', 'MADMAN ENTERTAINMENT', 'MADMAN', 'MAGIC BOX', 'MAGIC PLAY', 'MAGNA HOME ENTERTAINMENT', 'MAGNA ENTERTAINMENT', 'MAGNA HOME', 'MAGNA', 'MAGNOLIA PICTURES', 'MAGNOLIA', 'MAIDEN JAPAN', 'MAIDEN', 'MAJENG MEDIA', 'MAJENG', 'MAJESTIC HOME ENTERTAINMENT', 'MAJESTIC ENTERTAINMENT', 'MAJESTIC HOME', 'MAJESTIC', 'MANGA HOME ENTERTAINMENT', 'MANGA ENTERTAINMENT', 'MANGA HOME', 'MANGA', 'MANTA LAB', 'MAPLE STUDIOS', 'MAPLE', 'MARCO POLO PRODUCTION', 'MARCO POLO', 'MARIINSKY', 'MARVEL STUDIOS', 'MARVEL', 'MASCOT RECORDS', 'MASCOT', 'MASSACRE VIDEO', 'MASSACRE', 'MATCHBOX', 'MATRIX D', 'MAXAM', 'MAYA HOME ENTERTAINMENT', 'MAYA ENTERTAINMENT', 'MAYA HOME', 'MAYAT', 'MDG', 'MEDIA BLASTERS', 'MEDIA FACTORY', 'MEDIA TARGET DISTRIBUTION', 'MEDIA TARGET', 'MEDIAINVISION', 'MEDIATOON', 'MEDIATRES ESTUDIO', 'MEDIATRES STUDIO', 'MEDIATRES', 'MEDICI ARTS', 'MEDICI CLASSICS', 'MEDIUMRARE ENTERTAINMENT', 'MEDIUMRARE', 'MEDUSA', 'MEGASTAR', 'MEI AH', 'MELI MΓ‰DIAS', 'MELI MEDIAS', 'MEMENTO FILMS', 'MEMENTO', 'MENEMSHA FILMS', 'MENEMSHA', 'MERCURY', 'MERCURY STUDIOS', 'MERGE SOFT PRODUCTIONS', 'MERGE PRODUCTIONS', 'MERGE SOFT', 'MERGE', 'METAL BLADE RECORDS', 'METAL BLADE', 'METEOR', 'METRO-GOLDWYN-MAYER', 'METRO GOLDWYN MAYER', 'METROGOLDWYNMAYER', 'METRODOME VIDEO', 'METRODOME', 'METROPOLITAN', 'MFA+', 'MFA', 'MIG FILMGROUP', 'MIG', 'MILESTONE', 'MILL CREEK ENTERTAINMENT', 'MILL CREEK', 'MILLENNIUM MEDIA', 
'MILLENNIUM', 'MIRAGE ENTERTAINMENT', 'MIRAGE', 'MIRAMAX', 'MISTERIYA ZVUKA', 'MK2', 'MODE RECORDS', 'MODE', 'MOMENTUM PICTURES', 'MONDO HOME ENTERTAINMENT', 'MONDO ENTERTAINMENT', 'MONDO HOME', 'MONDO MACABRO', 'MONGREL MEDIA', 'MONOLIT', 'MONOLITH VIDEO', 'MONOLITH', 'MONSTER PICTURES', 'MONSTER', 'MONTEREY VIDEO', 'MONTEREY', 'MONUMENT RELEASING', 'MONUMENT', 'MORNINGSTAR', 'MORNING STAR', 'MOSERBAER', 'MOVIEMAX', 'MOVINSIDE', 'MPI MEDIA GROUP', 'MPI MEDIA', 'MPI', 'MR. BONGO FILMS', 'MR BONGO FILMS', 'MR BONGO', 'MRG (MERIDIAN)', 'MRG MERIDIAN', 'MRG', 'MERIDIAN', 'MUBI', 'MUG SHOT PRODUCTIONS', 'MUG SHOT', 'MULTIMUSIC', 'MULTI-MUSIC', 'MULTI MUSIC', 'MUSE', 'MUSIC BOX FILMS', 'MUSIC BOX', 'MUSICBOX', 'MUSIC BROKERS', 'MUSIC THEORIES', 'MUSIC VIDEO DISTRIBUTORS', 'MUSIC VIDEO', 'MUSTANG ENTERTAINMENT', 'MUSTANG', 'MVD VISUAL', 'MVD', 'MVD/VSC', 'MVL', 'MVM ENTERTAINMENT', 'MVM', 'MYNDFORM', 'MYSTIC NIGHT PICTURES', 'MYSTIC NIGHT', 'NAMELESS MEDIA', 'NAMELESS', 'NAPALM RECORDS', 'NAPALM', 'NATIONAL ENTERTAINMENT MEDIA', 'NATIONAL ENTERTAINMENT', 'NATIONAL MEDIA', 'NATIONAL FILM ARCHIVE', 'NATIONAL ARCHIVE', 'NATIONAL FILM', 'NATIONAL GEOGRAPHIC', 'NAT GEO TV', 'NAT GEO', 'NGO', 'NAXOS', 'NBCUNIVERSAL ENTERTAINMENT JAPAN', 'NBC UNIVERSAL ENTERTAINMENT JAPAN', 'NBCUNIVERSAL JAPAN', 'NBC UNIVERSAL JAPAN', 'NBC JAPAN', 'NBO ENTERTAINMENT', 'NBO', 'NEOS', 'NETFLIX', 'NETWORK', 'NEW BLOOD', 'NEW DISC', 'NEW KSM', 'NEW LINE CINEMA', 'NEW LINE', 'NEW MOVIE TRADING CO. LTD', 'NEW MOVIE TRADING CO LTD', 'NEW MOVIE TRADING CO', 'NEW MOVIE TRADING', 'NEW WAVE FILMS', 'NEW WAVE', 'NFI', 'NHK', 'NIPPONART', 'NIS AMERICA', 'NJUTAFILMS', 'NOBLE ENTERTAINMENT', 'NOBLE', 'NORDISK FILM', 'NORDISK', 'NORSK FILM', 'NORSK', 'NORTH AMERICAN MOTION PICTURES', 'NOS AUDIOVISUAIS', 'NOTORIOUS PICTURES', 'NOTORIOUS', 'NOVA MEDIA', 'NOVA', 'NOVA SALES AND DISTRIBUTION', 'NOVA SALES & DISTRIBUTION', 'NSM', 'NSM RECORDS', 'NUCLEAR BLAST', 'NUCLEUS FILMS', 'NUCLEUS', 'OBERLIN MUSIC', 'OBERLIN', 'OBRAS-PRIMAS DO CINEMA', 'OBRAS PRIMAS DO CINEMA', 'OBRASPRIMAS DO CINEMA', 'OBRAS-PRIMAS CINEMA', 'OBRAS PRIMAS CINEMA', 'OBRASPRIMAS CINEMA', 'OBRAS-PRIMAS', 'OBRAS PRIMAS', 'OBRASPRIMAS', 'ODEON', 'OFDB FILMWORKS', 'OFDB', 'OLIVE FILMS', 'OLIVE', 'ONDINE', 'ONSCREEN FILMS', 'ONSCREEN', 'OPENING DISTRIBUTION', 'OPERA AUSTRALIA', 'OPTIMUM HOME ENTERTAINMENT', 'OPTIMUM ENTERTAINMENT', 'OPTIMUM HOME', 'OPTIMUM', 'OPUS ARTE', 'ORANGE STUDIO', 'ORANGE', 'ORLANDO EASTWOOD FILMS', 'ORLANDO FILMS', 'ORLANDO EASTWOOD', 'ORLANDO', 'ORUSTAK PICTURES', 'ORUSTAK', 'OSCILLOSCOPE PICTURES', 'OSCILLOSCOPE', 'OUTPLAY', 'PALISADES TARTAN', 'PAN VISION', 'PANVISION', 'PANAMINT CINEMA', 'PANAMINT', 'PANDASTORM ENTERTAINMENT', 'PANDA STORM ENTERTAINMENT', 'PANDASTORM', 'PANDA STORM', 'PANDORA FILM', 'PANDORA', 'PANEGYRIC', 'PANORAMA', 'PARADE DECK FILMS', 'PARADE DECK', 'PARADISE', 'PARADISO FILMS', 'PARADOX', 'PARAMOUNT PICTURES', 'PARAMOUNT', 'PARIS FILMES', 'PARIS FILMS', 'PARIS', 'PARK CIRCUS', 'PARLOPHONE', 'PASSION RIVER', 'PATHE DISTRIBUTION', 'PATHE', 'PBS', 'PEACE ARCH TRINITY', 'PECCADILLO PICTURES', 'PEPPERMINT', 'PHASE 4 FILMS', 'PHASE 4', 'PHILHARMONIA BAROQUE', 'PICTURE HOUSE ENTERTAINMENT', 'PICTURE ENTERTAINMENT', 'PICTURE HOUSE', 'PICTURE', 'PIDAX', - 'PINK FLOYD RECORDS', 'PINK FLOYD', 'PINNACLE FILMS', 'PINNACLE', 'PLAIN', 'PLATFORM ENTERTAINMENT LIMITED', 'PLATFORM ENTERTAINMENT LTD', 'PLATFORM ENTERTAINMENT LTD.', 'PLATFORM ENTERTAINMENT', 'PLATFORM', 'PLAYARTE', 'PLG UK CLASSICS', 'PLG UK', 'PLG', 'POLYBAND & TOPPIC 
VIDEO/WVG', 'POLYBAND AND TOPPIC VIDEO/WVG', 'POLYBAND & TOPPIC VIDEO WVG', 'POLYBAND & TOPPIC VIDEO AND WVG', 'POLYBAND & TOPPIC VIDEO & WVG', 'POLYBAND AND TOPPIC VIDEO WVG', 'POLYBAND AND TOPPIC VIDEO AND WVG', 'POLYBAND AND TOPPIC VIDEO & WVG', 'POLYBAND & TOPPIC VIDEO', 'POLYBAND AND TOPPIC VIDEO', 'POLYBAND & TOPPIC', 'POLYBAND AND TOPPIC', 'POLYBAND', 'WVG', 'POLYDOR', 'PONY', 'PONY CANYON', 'POTEMKINE', 'POWERHOUSE FILMS', 'POWERHOUSE', 'POWERSTATIOM', 'PRIDE & JOY', 'PRIDE AND JOY', 'PRINZ MEDIA', 'PRINZ', 'PRIS AUDIOVISUAIS', 'PRO VIDEO', 'PRO-VIDEO', 'PRO-MOTION', 'PRO MOTION', 'PROD. JRB', 'PROD JRB', 'PRODISC', 'PROKINO', 'PROVOGUE RECORDS', 'PROVOGUE', 'PROWARE', 'PULP VIDEO', 'PULP', 'PULSE VIDEO', 'PULSE', 'PURE AUDIO RECORDINGS', 'PURE AUDIO', 'PURE FLIX ENTERTAINMENT', 'PURE FLIX', 'PURE ENTERTAINMENT', 'PYRAMIDE VIDEO', 'PYRAMIDE', 'QUALITY FILMS', 'QUALITY', 'QUARTO VALLEY RECORDS', 'QUARTO VALLEY', 'QUESTAR', 'R SQUARED FILMS', 'R SQUARED', 'RAPID EYE MOVIES', 'RAPID EYE', 'RARO VIDEO', 'RARO', 'RAROVIDEO U.S.', 'RAROVIDEO US', 'RARO VIDEO US', 'RARO VIDEO U.S.', 'RARO U.S.', 'RARO US', 'RAVEN BANNER RELEASING', 'RAVEN BANNER', 'RAVEN', 'RAZOR DIGITAL ENTERTAINMENT', 'RAZOR DIGITAL', 'RCA', 'RCO LIVE', 'RCO', 'RCV', 'REAL GONE MUSIC', 'REAL GONE', 'REANIMEDIA', 'REANI MEDIA', 'REDEMPTION', 'REEL', 'RELIANCE HOME VIDEO & GAMES', 'RELIANCE HOME VIDEO AND GAMES', 'RELIANCE HOME VIDEO', 'RELIANCE VIDEO', 'RELIANCE HOME', 'RELIANCE', 'REM CULTURE', 'REMAIN IN LIGHT', 'REPRISE', 'RESEN', 'RETROMEDIA', 'REVELATION FILMS LTD.', 'REVELATION FILMS LTD', 'REVELATION FILMS', 'REVELATION LTD.', 'REVELATION LTD', 'REVELATION', 'REVOLVER ENTERTAINMENT', 'REVOLVER', 'RHINO MUSIC', 'RHINO', 'RHV', 'RIGHT STUF', 'RIMINI EDITIONS', 'RISING SUN MEDIA', 'RLJ ENTERTAINMENT', 'RLJ', 'ROADRUNNER RECORDS', 'ROADSHOW ENTERTAINMENT', 'ROADSHOW', 'RONE', 'RONIN FLIX', 'ROTANA HOME ENTERTAINMENT', 'ROTANA ENTERTAINMENT', 'ROTANA HOME', 'ROTANA', 'ROUGH TRADE', - 'ROUNDER', 'SAFFRON HILL FILMS', 'SAFFRON HILL', 'SAFFRON', 'SAMUEL GOLDWYN FILMS', 'SAMUEL GOLDWYN', 'SAN FRANCISCO SYMPHONY', 'SANDREW METRONOME', 'SAPHRANE', 'SAVOR', 'SCANBOX ENTERTAINMENT', 'SCANBOX', 'SCENIC LABS', 'SCHRΓ–DERMEDIA', 'SCHRODERMEDIA', 'SCHRODER MEDIA', 'SCORPION RELEASING', 'SCORPION', 'SCREAM TEAM RELEASING', 'SCREAM TEAM', 'SCREEN MEDIA', 'SCREEN', 'SCREENBOUND PICTURES', 'SCREENBOUND', 'SCREENWAVE MEDIA', 'SCREENWAVE', 'SECOND RUN', 'SECOND SIGHT', 'SEEDSMAN GROUP', 'SELECT VIDEO', 'SELECTA VISION', 'SENATOR', 'SENTAI FILMWORKS', 'SENTAI', 'SEVEN7', 'SEVERIN FILMS', 'SEVERIN', 'SEVILLE', 'SEYONS ENTERTAINMENT', 'SEYONS', 'SF STUDIOS', 'SGL ENTERTAINMENT', 'SGL', 'SHAMELESS', 'SHAMROCK MEDIA', 'SHAMROCK', 'SHANGHAI EPIC MUSIC ENTERTAINMENT', 'SHANGHAI EPIC ENTERTAINMENT', 'SHANGHAI EPIC MUSIC', 'SHANGHAI MUSIC ENTERTAINMENT', 'SHANGHAI ENTERTAINMENT', 'SHANGHAI MUSIC', 'SHANGHAI', 'SHEMAROO', 'SHOCHIKU', 'SHOCK', 'SHOGAKU KAN', 'SHOUT FACTORY', 'SHOUT! 
FACTORY', 'SHOUT', 'SHOUT!', 'SHOWBOX', 'SHOWTIME ENTERTAINMENT', 'SHOWTIME', 'SHRIEK SHOW', 'SHUDDER', 'SIDONIS', 'SIDONIS CALYSTA', 'SIGNAL ONE ENTERTAINMENT', 'SIGNAL ONE', 'SIGNATURE ENTERTAINMENT', 'SIGNATURE', 'SILVER VISION', 'SINISTER FILM', 'SINISTER', 'SIREN VISUAL ENTERTAINMENT', 'SIREN VISUAL', 'SIREN ENTERTAINMENT', 'SIREN', 'SKANI', 'SKY DIGI', - 'SLASHER // VIDEO', 'SLASHER / VIDEO', 'SLASHER VIDEO', 'SLASHER', 'SLOVAK FILM INSTITUTE', 'SLOVAK FILM', 'SFI', 'SM LIFE DESIGN GROUP', 'SMOOTH PICTURES', 'SMOOTH', 'SNAPPER MUSIC', 'SNAPPER', 'SODA PICTURES', 'SODA', 'SONO LUMINUS', 'SONY MUSIC', 'SONY PICTURES', 'SONY', 'SONY PICTURES CLASSICS', 'SONY CLASSICS', 'SOUL MEDIA', 'SOUL', 'SOULFOOD MUSIC DISTRIBUTION', 'SOULFOOD DISTRIBUTION', 'SOULFOOD MUSIC', 'SOULFOOD', 'SOYUZ', 'SPECTRUM', 'SPENTZOS FILM', 'SPENTZOS', 'SPIRIT ENTERTAINMENT', 'SPIRIT', 'SPIRIT MEDIA GMBH', 'SPIRIT MEDIA', 'SPLENDID ENTERTAINMENT', 'SPLENDID FILM', 'SPO', 'SQUARE ENIX', 'SRI BALAJI VIDEO', 'SRI BALAJI', 'SRI', 'SRI VIDEO', 'SRS CINEMA', 'SRS', 'SSO RECORDINGS', 'SSO', 'ST2 MUSIC', 'ST2', 'STAR MEDIA ENTERTAINMENT', 'STAR ENTERTAINMENT', 'STAR MEDIA', 'STAR', 'STARLIGHT', 'STARZ / ANCHOR BAY', 'STARZ ANCHOR BAY', 'STARZ', 'ANCHOR BAY', 'STER KINEKOR', 'STERLING ENTERTAINMENT', 'STERLING', 'STINGRAY', 'STOCKFISCH RECORDS', 'STOCKFISCH', 'STRAND RELEASING', 'STRAND', 'STUDIO 4K', 'STUDIO CANAL', 'STUDIO GHIBLI', 'GHIBLI', 'STUDIO HAMBURG ENTERPRISES', 'HAMBURG ENTERPRISES', 'STUDIO HAMBURG', 'HAMBURG', 'STUDIO S', 'SUBKULTUR ENTERTAINMENT', 'SUBKULTUR', 'SUEVIA FILMS', 'SUEVIA', 'SUMMIT ENTERTAINMENT', 'SUMMIT', 'SUNFILM ENTERTAINMENT', 'SUNFILM', 'SURROUND RECORDS', 'SURROUND', 'SVENSK FILMINDUSTRI', 'SVENSK', 'SWEN FILMES', 'SWEN FILMS', 'SWEN', 'SYNAPSE FILMS', 'SYNAPSE', 'SYNDICADO', 'SYNERGETIC', 'T- SERIES', 'T-SERIES', 'T SERIES', 'TSERIES', 'T.V.P.', 'TVP', 'TACET RECORDS', 'TACET', 'TAI SENG', 'TAI SHENG', 'TAKEONE', 'TAKESHOBO', 'TAMASA DIFFUSION', 'TC ENTERTAINMENT', 'TC', 'TDK', 'TEAM MARKETING', 'TEATRO REAL', 'TEMA DISTRIBUCIONES', 'TEMPE DIGITAL', 'TF1 VIDΓ‰O', 'TF1 VIDEO', 'TF1', 'THE BLU', 'BLU', 'THE ECSTASY OF FILMS', 'THE FILM DETECTIVE', 'FILM DETECTIVE', 'THE JOKERS', 'JOKERS', 'THE ON', 'ON', 'THIMFILM', 'THIM FILM', 'THIM', 'THIRD WINDOW FILMS', 'THIRD WINDOW', '3RD WINDOW FILMS', '3RD WINDOW', 'THUNDERBEAN ANIMATION', 'THUNDERBEAN', 'THUNDERBIRD RELEASING', 'THUNDERBIRD', 'TIBERIUS FILM', 'TIME LIFE', 'TIMELESS MEDIA GROUP', 'TIMELESS MEDIA', 'TIMELESS GROUP', 'TIMELESS', 'TLA RELEASING', 'TLA', 'TOBIS FILM', 'TOBIS', 'TOEI', 'TOHO', 'TOKYO SHOCK', 'TOKYO', 'TONPOOL MEDIEN GMBH', 'TONPOOL MEDIEN', 'TOPICS ENTERTAINMENT', 'TOPICS', 'TOUCHSTONE PICTURES', 'TOUCHSTONE', 'TRANSMISSION FILMS', 'TRANSMISSION', 'TRAVEL VIDEO STORE', 'TRIART', 'TRIGON FILM', 'TRIGON', 'TRINITY HOME ENTERTAINMENT', 'TRINITY ENTERTAINMENT', 'TRINITY HOME', 'TRINITY', 'TRIPICTURES', 'TRI-PICTURES', 'TRI PICTURES', 'TROMA', 'TURBINE MEDIEN', 'TURTLE RECORDS', 'TURTLE', 'TVA FILMS', 'TVA', 'TWILIGHT TIME', 'TWILIGHT', 'TT', 'TWIN CO., LTD.', 'TWIN CO, LTD.', 'TWIN CO., LTD', 'TWIN CO, LTD', 'TWIN CO LTD', 'TWIN LTD', 'TWIN CO.', 'TWIN CO', 'TWIN', 'UCA', 'UDR', 'UEK', 'UFA/DVD', 'UFA DVD', 'UFADVD', 'UGC PH', 'ULTIMATE3DHEAVEN', 'ULTRA', 'UMBRELLA ENTERTAINMENT', 'UMBRELLA', 'UMC', "UNCORK'D ENTERTAINMENT", 'UNCORKD ENTERTAINMENT', 'UNCORK D ENTERTAINMENT', "UNCORK'D", 'UNCORK D', 'UNCORKD', 'UNEARTHED FILMS', 'UNEARTHED', 'UNI DISC', 'UNIMUNDOS', 'UNITEL', 'UNIVERSAL MUSIC', 'UNIVERSAL SONY PICTURES HOME 
ENTERTAINMENT', 'UNIVERSAL SONY PICTURES ENTERTAINMENT', 'UNIVERSAL SONY PICTURES HOME', 'UNIVERSAL SONY PICTURES', 'UNIVERSAL HOME ENTERTAINMENT', 'UNIVERSAL ENTERTAINMENT', - 'UNIVERSAL HOME', 'UNIVERSAL STUDIOS', 'UNIVERSAL', 'UNIVERSE LASER & VIDEO CO.', 'UNIVERSE LASER AND VIDEO CO.', 'UNIVERSE LASER & VIDEO CO', 'UNIVERSE LASER AND VIDEO CO', 'UNIVERSE LASER CO.', 'UNIVERSE LASER CO', 'UNIVERSE LASER', 'UNIVERSUM FILM', 'UNIVERSUM', 'UTV', 'VAP', 'VCI', 'VENDETTA FILMS', 'VENDETTA', 'VERSÁTIL HOME VIDEO', 'VERSÁTIL VIDEO', 'VERSÁTIL HOME', 'VERSÁTIL', 'VERSATIL HOME VIDEO', 'VERSATIL VIDEO', 'VERSATIL HOME', 'VERSATIL', 'VERTICAL ENTERTAINMENT', 'VERTICAL', 'VΓ‰RTICE 360ΒΊ', 'VΓ‰RTICE 360', 'VERTICE 360o', 'VERTICE 360', 'VERTIGO BERLIN', 'VΓ‰RTIGO FILMS', 'VΓ‰RTIGO', 'VERTIGO FILMS', 'VERTIGO', 'VERVE PICTURES', 'VIA VISION ENTERTAINMENT', 'VIA VISION', 'VICOL ENTERTAINMENT', 'VICOL', 'VICOM', 'VICTOR ENTERTAINMENT', 'VICTOR', 'VIDEA CDE', 'VIDEO FILM EXPRESS', 'VIDEO FILM', 'VIDEO EXPRESS', 'VIDEO MUSIC, INC.', 'VIDEO MUSIC, INC', 'VIDEO MUSIC INC.', 'VIDEO MUSIC INC', 'VIDEO MUSIC', 'VIDEO SERVICE CORP.', 'VIDEO SERVICE CORP', 'VIDEO SERVICE', 'VIDEO TRAVEL', 'VIDEOMAX', 'VIDEO MAX', 'VII PILLARS ENTERTAINMENT', 'VII PILLARS', 'VILLAGE FILMS', 'VINEGAR SYNDROME', 'VINEGAR', 'VS', 'VINNY MOVIES', 'VINNY', 'VIRGIL FILMS & ENTERTAINMENT', 'VIRGIL FILMS AND ENTERTAINMENT', 'VIRGIL ENTERTAINMENT', 'VIRGIL FILMS', 'VIRGIL', 'VIRGIN RECORDS', 'VIRGIN', 'VISION FILMS', 'VISION', 'VISUAL ENTERTAINMENT GROUP', + '01 DISTRIBUTION', '100 DESTINATIONS TRAVEL FILM', '101 FILMS', '1FILMS', '2 ENTERTAIN VIDEO', '20TH CENTURY FOX', '2L', '3D CONTENT HUB', '3D MEDIA', '3L FILM', '4DIGITAL', '4DVD', '4K ULTRA HD MOVIES', '4K UHD', '8-FILMS', '84 ENTERTAINMENT', '88 FILMS', '@ANIME', 'ANIME', 'A CONTRACORRIENTE', 'A CONTRACORRIENTE FILMS', 'A&E HOME VIDEO', 'A&E', 'A&M RECORDS', 'A+E NETWORKS', 'A+R', 'A-FILM', 'AAA', 'AB VIDΓ‰O', 'AB VIDEO', 'ABC - (AUSTRALIAN BROADCASTING CORPORATION)', 'ABC', 'ABKCO', 'ABSOLUT MEDIEN', 'ABSOLUTE', 'ACCENT FILM ENTERTAINMENT', 'ACCENTUS', 'ACORN MEDIA', 'AD VITAM', 'ADA', 'ADITYA VIDEOS', 'ADSO FILMS', 'AFM RECORDS', 'AGFA', 'AIX RECORDS', + 'ALAMODE FILM', 'ALBA RECORDS', 'ALBANY RECORDS', 'ALBATROS', 'ALCHEMY', 'ALIVE', 'ALL ANIME', 'ALL INTERACTIVE ENTERTAINMENT', 'ALLEGRO', 'ALLIANCE', 'ALPHA MUSIC', 'ALTERDYSTRYBUCJA', 'ALTERED INNOCENCE', 'ALTITUDE FILM DISTRIBUTION', 'ALUCARD RECORDS', 'AMAZING D.C.', 'AMAZING DC', 'AMMO CONTENT', 'AMUSE SOFT ENTERTAINMENT', 'ANCONNECT', 'ANEC', 'ANIMATSU', 'ANIME HOUSE', 'ANIME LTD', 'ANIME WORKS', 'ANIMEIGO', 'ANIPLEX', 'ANOLIS ENTERTAINMENT', 'ANOTHER WORLD ENTERTAINMENT', 'AP INTERNATIONAL', 'APPLE', 'ARA MEDIA', 'ARBELOS', 'ARC ENTERTAINMENT', 'ARP SΓ‰LECTION', 'ARP SELECTION', 'ARROW', 'ART SERVICE', 'ART VISION', 'ARTE Γ‰DITIONS', 'ARTE EDITIONS', 'ARTE VIDΓ‰O', + 'ARTE VIDEO', 'ARTHAUS MUSIK', 'ARTIFICIAL EYE', 'ARTSPLOITATION FILMS', 'ARTUS FILMS', 'ASCOT ELITE HOME ENTERTAINMENT', 'ASIA VIDEO', 'ASMIK ACE', 'ASTRO RECORDS & FILMWORKS', 'ASYLUM', 'ATLANTIC FILM', 'ATLANTIC RECORDS', 'ATLAS FILM', 'AUDIO VISUAL ENTERTAINMENT', 'AURO-3D CREATIVE LABEL', 'AURUM', 'AV VISIONEN', 'AV-JET', 'AVALON', 'AVENTI', 'AVEX TRAX', 'AXIOM', 'AXIS RECORDS', 'AYNGARAN', 'BAC FILMS', 'BACH FILMS', 'BANDAI VISUAL', 'BARCLAY', 'BBC', 'BRITISH BROADCASTING CORPORATION', 'BBI FILMS', 'BBI', 'BCI HOME ENTERTAINMENT', 'BEGGARS BANQUET', 'BEL AIR CLASSIQUES', 'BELGA FILMS', 'BELVEDERE', 'BENELUX FILM DISTRIBUTORS', 'BENNETT-WATT MEDIA', 
'BERLIN CLASSICS', 'BERLINER PHILHARMONIKER RECORDINGS', 'BEST ENTERTAINMENT', 'BEYOND HOME ENTERTAINMENT', 'BFI VIDEO', 'BFI', 'BRITISH FILM INSTITUTE', 'BFS ENTERTAINMENT', 'BFS', 'BHAVANI', 'BIBER RECORDS', 'BIG HOME VIDEO', 'BILDSTΓ–RUNG', + 'BILDSTORUNG', 'BILL ZEBUB', 'BIRNENBLATT', 'BIT WEL', 'BLACK BOX', 'BLACK HILL PICTURES', 'BLACK HILL', 'BLACK HOLE RECORDINGS', 'BLACK HOLE', 'BLAQOUT', 'BLAUFIELD MUSIC', 'BLAUFIELD', 'BLOCKBUSTER ENTERTAINMENT', 'BLOCKBUSTER', 'BLU PHASE MEDIA', 'BLU-RAY ONLY', 'BLU-RAY', 'BLURAY ONLY', 'BLURAY', 'BLUE GENTIAN RECORDS', 'BLUE KINO', 'BLUE UNDERGROUND', 'BMG/ARISTA', 'BMG', 'BMGARISTA', 'BMG ARISTA', 'ARISTA', 'ARISTA/BMG', 'ARISTABMG', 'ARISTA BMG', 'BONTON FILM', 'BONTON', 'BOOMERANG PICTURES', 'BOOMERANG', 'BQHL Γ‰DITIONS', 'BQHL EDITIONS', 'BQHL', 'BREAKING GLASS', 'BRIDGESTONE', 'BRINK', 'BROAD GREEN PICTURES', 'BROAD GREEN', 'BUSCH MEDIA GROUP', 'BUSCH', 'C MAJOR', 'C.B.S.', 'CAICHANG', 'CALIFΓ“RNIA FILMES', 'CALIFORNIA FILMES', 'CALIFORNIA', 'CAMEO', 'CAMERA OBSCURA', 'CAMERATA', 'CAMP MOTION PICTURES', 'CAMP MOTION', 'CAPELIGHT PICTURES', 'CAPELIGHT', 'CAPITOL', 'CAPITOL RECORDS', 'CAPRICCI', 'CARGO RECORDS', 'CARLOTTA FILMS', 'CARLOTTA', 'CARLOTA', 'CARMEN FILM', 'CASCADE', 'CATCHPLAY', 'CAULDRON FILMS', 'CAULDRON', 'CBS TELEVISION STUDIOS', 'CBS', 'CCTV', 'CCV ENTERTAINMENT', 'CCV', 'CD BABY', 'CD LAND', 'CECCHI GORI', 'CENTURY MEDIA', 'CHUAN XUN SHI DAI MULTIMEDIA', 'CINE-ASIA', 'CINΓ‰ART', 'CINEART', 'CINEDIGM', 'CINEFIL IMAGICA', 'CINEMA EPOCH', 'CINEMA GUILD', 'CINEMA LIBRE STUDIOS', 'CINEMA MONDO', 'CINEMATIC VISION', 'CINEPLOIT RECORDS', 'CINESTRANGE EXTREME', 'CITEL VIDEO', 'CITEL', 'CJ ENTERTAINMENT', 'CJ', 'CLASSIC MEDIA', 'CLASSICFLIX', 'CLASSICLINE', 'CLAUDIO RECORDS', 'CLEAR VISION', 'CLEOPATRA', 'CLOSE UP', 'CMS MEDIA LIMITED', 'CMV LASERVISION', 'CN ENTERTAINMENT', 'CODE RED', 'COHEN MEDIA GROUP', 'COHEN', 'COIN DE MIRE CINΓ‰MA', 'COIN DE MIRE CINEMA', 'COLOSSEO FILM', 'COLUMBIA', 'COLUMBIA PICTURES', 'COLUMBIA/TRI-STAR', 'TRI-STAR', 'COMMERCIAL MARKETING', 'CONCORD MUSIC GROUP', 'CONCORDE VIDEO', 'CONDOR', 'CONSTANTIN FILM', 'CONSTANTIN', 'CONSTANTINO FILMES', 'CONSTANTINO', 'CONSTRUCTIVE MEDIA SERVICE', 'CONSTRUCTIVE', 'CONTENT ZONE', 'CONTENTS GATE', 'COQUEIRO VERDE', 'CORNERSTONE MEDIA', 'CORNERSTONE', 'CP DIGITAL', 'CREST MOVIES', 'CRITERION', 'CRITERION COLLECTION', 'CC', 'CRYSTAL CLASSICS', 'CULT EPICS', 'CULT FILMS', 'CULT VIDEO', 'CURZON FILM WORLD', 'D FILMS', "D'AILLY COMPANY", 'DAILLY COMPANY', 'D AILLY COMPANY', "D'AILLY", 'DAILLY', 'D AILLY', 'DA CAPO', 'DA MUSIC', "DALL'ANGELO PICTURES", 'DALLANGELO PICTURES', "DALL'ANGELO", 'DALL ANGELO PICTURES', 'DALL ANGELO', 'DAREDO', 'DARK FORCE ENTERTAINMENT', 'DARK FORCE', 'DARK SIDE RELEASING', 'DARK SIDE', 'DAZZLER MEDIA', 'DAZZLER', 'DCM PICTURES', 'DCM', 'DEAPLANETA', 'DECCA', 'DEEPJOY', 'DEFIANT SCREEN ENTERTAINMENT', 'DEFIANT SCREEN', 'DEFIANT', 'DELOS', 'DELPHIAN RECORDS', 'DELPHIAN', 'DELTA MUSIC & ENTERTAINMENT', 'DELTA MUSIC AND ENTERTAINMENT', 'DELTA MUSIC ENTERTAINMENT', 'DELTA MUSIC', 'DELTAMAC CO. 
LTD.', 'DELTAMAC CO LTD', 'DELTAMAC CO', 'DELTAMAC', 'DEMAND MEDIA', 'DEMAND', 'DEP', 'DEUTSCHE GRAMMOPHON', 'DFW', 'DGM', 'DIAPHANA', 'DIGIDREAMS STUDIOS', 'DIGIDREAMS', 'DIGITAL ENVIRONMENTS', 'DIGITAL', 'DISCOTEK MEDIA', 'DISCOVERY CHANNEL', 'DISCOVERY', 'DISK KINO', 'DISNEY / BUENA VISTA', 'DISNEY', 'BUENA VISTA', 'DISNEY BUENA VISTA', 'DISTRIBUTION SELECT', 'DIVISA', 'DNC ENTERTAINMENT', 'DNC', 'DOGWOOF', 'DOLMEN HOME VIDEO', 'DOLMEN', 'DONAU FILM', 'DONAU', 'DORADO FILMS', 'DORADO', 'DRAFTHOUSE FILMS', 'DRAFTHOUSE', 'DRAGON FILM ENTERTAINMENT', 'DRAGON ENTERTAINMENT', 'DRAGON FILM', 'DRAGON', 'DREAMWORKS', 'DRIVE ON RECORDS', 'DRIVE ON', 'DRIVE-ON', 'DRIVEON', 'DS MEDIA', 'DTP ENTERTAINMENT AG', 'DTP ENTERTAINMENT', 'DTP AG', 'DTP', 'DTS ENTERTAINMENT', 'DTS', 'DUKE MARKETING', 'DUKE VIDEO DISTRIBUTION', 'DUKE', 'DUTCH FILMWORKS', 'DUTCH', 'DVD INTERNATIONAL', 'DVD', 'DYBEX', 'DYNAMIC', 'DYNIT', 'E1 ENTERTAINMENT', 'E1', 'EAGLE ENTERTAINMENT', 'EAGLE HOME ENTERTAINMENT PVT.LTD.', 'EAGLE HOME ENTERTAINMENT PVTLTD', 'EAGLE HOME ENTERTAINMENT PVT LTD', 'EAGLE HOME ENTERTAINMENT', 'EAGLE PICTURES', 'EAGLE ROCK ENTERTAINMENT', 'EAGLE ROCK', 'EAGLE VISION MEDIA', 'EAGLE VISION', 'EARMUSIC', 'EARTH ENTERTAINMENT', 'EARTH', 'ECHO BRIDGE ENTERTAINMENT', 'ECHO BRIDGE', 'EDEL GERMANY GMBH', 'EDEL GERMANY', 'EDEL RECORDS', 'EDITION TONFILM', 'EDITIONS MONTPARNASSE', 'EDKO FILMS LTD.', 'EDKO FILMS LTD', 'EDKO FILMS', + 'EDKO', "EIN'S M&M CO", 'EINS M&M CO', "EIN'S M&M", 'EINS M&M', 'ELEA-MEDIA', 'ELEA MEDIA', 'ELEA', 'ELECTRIC PICTURE', 'ELECTRIC', 'ELEPHANT FILMS', 'ELEPHANT', 'ELEVATION', 'EMI', 'EMON', 'EMS', 'EMYLIA', 'ENE MEDIA', 'ENE', 'ENTERTAINMENT IN VIDEO', 'ENTERTAINMENT IN', 'ENTERTAINMENT ONE', 'ENTERTAINMENT ONE FILMS CANADA INC.', 'ENTERTAINMENT ONE FILMS CANADA INC', 'ENTERTAINMENT ONE FILMS CANADA', 'ENTERTAINMENT ONE CANADA INC', 'ENTERTAINMENT ONE CANADA', 'ENTERTAINMENTONE', 'EONE', 'EOS', 'EPIC PICTURES', 'EPIC', 'EPIC RECORDS', 'ERATO', 'EROS', 'ESC EDITIONS', 'ESCAPI MEDIA BV', 'ESOTERIC RECORDINGS', 'ESPN FILMS', 'EUREKA ENTERTAINMENT', 'EUREKA', 'EURO PICTURES', 'EURO VIDEO', 'EUROARTS', 'EUROPA FILMES', 'EUROPA', 'EUROPACORP', 'EUROZOOM', 'EXCEL', 'EXPLOSIVE MEDIA', 'EXPLOSIVE', 'EXTRALUCID FILMS', 'EXTRALUCID', 'EYE SEE MOVIES', 'EYE SEE', 'EYK MEDIA', 'EYK', 'FABULOUS FILMS', 'FABULOUS', 'FACTORIS FILMS', 'FACTORIS', 'FARAO RECORDS', 'FARBFILM HOME ENTERTAINMENT', 'FARBFILM ENTERTAINMENT', 'FARBFILM HOME', 'FARBFILM', 'FEELGOOD ENTERTAINMENT', 'FEELGOOD', 'FERNSEHJUWELEN', 'FILM CHEST', 'FILM MEDIA', 'FILM MOVEMENT', 'FILM4', 'FILMART', 'FILMAURO', 'FILMAX', 'FILMCONFECT HOME ENTERTAINMENT', 'FILMCONFECT ENTERTAINMENT', 'FILMCONFECT HOME', 'FILMCONFECT', 'FILMEDIA', 'FILMJUWELEN', 'FILMOTEKA NARODAWA', 'FILMRISE', 'FINAL CUT ENTERTAINMENT', 'FINAL CUT', 'FIREHOUSE 12 RECORDS', 'FIREHOUSE 12', 'FIRST INTERNATIONAL PRODUCTION', 'FIRST INTERNATIONAL', 'FIRST LOOK STUDIOS', 'FIRST LOOK', 'FLAGMAN TRADE', 'FLASHSTAR FILMES', 'FLASHSTAR', 'FLICKER ALLEY', 'FNC ADD CULTURE', 'FOCUS FILMES', 'FOCUS', 'FOKUS MEDIA', 'FOKUSA', 'FOX PATHE EUROPA', 'FOX PATHE', 'FOX EUROPA', 'FOX/MGM', 'FOX MGM', 'MGM', 'MGM/FOX', 'FOX', 'FPE', 'FRANCE TΓ‰LΓ‰VISIONS DISTRIBUTION', 'FRANCE TELEVISIONS DISTRIBUTION', 'FRANCE TELEVISIONS', 'FRANCE', 'FREE DOLPHIN ENTERTAINMENT', 'FREE DOLPHIN', 'FREESTYLE DIGITAL MEDIA', 'FREESTYLE DIGITAL', 'FREESTYLE', 'FREMANTLE HOME ENTERTAINMENT', 'FREMANTLE ENTERTAINMENT', 'FREMANTLE HOME', 'FREMANTL', 'FRENETIC FILMS', 'FRENETIC', 'FRONTIER WORKS', 'FRONTIER', 
'FRONTIERS MUSIC', 'FRONTIERS RECORDS', 'FS FILM OY', 'FS FILM', 'FULL MOON FEATURES', 'FULL MOON', 'FUN CITY EDITIONS', 'FUN CITY', + 'FUNIMATION ENTERTAINMENT', 'FUNIMATION', 'FUSION', 'FUTUREFILM', 'G2 PICTURES', 'G2', 'GAGA COMMUNICATIONS', 'GAGA', 'GAIAM', 'GALAPAGOS', 'GAMMA HOME ENTERTAINMENT', 'GAMMA ENTERTAINMENT', 'GAMMA HOME', 'GAMMA', 'GARAGEHOUSE PICTURES', 'GARAGEHOUSE', 'GARAGEPLAY (θ»ŠεΊ«ε¨›ζ¨‚)', 'θ»ŠεΊ«ε¨›ζ¨‚', 'GARAGEPLAY (Che Ku Yu Le )', 'GARAGEPLAY', 'Che Ku Yu Le', 'GAUMONT', 'GEFFEN', 'GENEON ENTERTAINMENT', 'GENEON', 'GENEON UNIVERSAL ENTERTAINMENT', 'GENERAL VIDEO RECORDING', 'GLASS DOLL FILMS', 'GLASS DOLL', 'GLOBE MUSIC MEDIA', 'GLOBE MUSIC', 'GLOBE MEDIA', 'GLOBE', 'GO ENTERTAIN', 'GO', 'GOLDEN HARVEST', 'GOOD!MOVIES', 'GOOD! MOVIES', 'GOOD MOVIES', 'GRAPEVINE VIDEO', 'GRAPEVINE', 'GRASSHOPPER FILM', 'GRASSHOPPER FILMS', 'GRASSHOPPER', 'GRAVITAS VENTURES', 'GRAVITAS', 'GREAT MOVIES', 'GREAT', 'GREEN APPLE ENTERTAINMENT', 'GREEN ENTERTAINMENT', 'GREEN APPLE', 'GREEN', 'GREENNARAE MEDIA', 'GREENNARAE', 'GRINDHOUSE RELEASING', 'GRINDHOUSE', 'GRIND HOUSE', 'GRYPHON ENTERTAINMENT', 'GRYPHON', 'GUNPOWDER & SKY', 'GUNPOWDER AND SKY', 'GUNPOWDER SKY', 'GUNPOWDER + SKY', 'GUNPOWDER', 'HANABEE ENTERTAINMENT', 'HANABEE', 'HANNOVER HOUSE', 'HANNOVER', 'HANSESOUND', 'HANSE SOUND', 'HANSE', 'HAPPINET', 'HARMONIA MUNDI', 'HARMONIA', 'HBO', 'HDC', 'HEC', 'HELL & BACK RECORDINGS', 'HELL AND BACK RECORDINGS', 'HELL & BACK', 'HELL AND BACK', "HEN'S TOOTH VIDEO", 'HENS TOOTH VIDEO', "HEN'S TOOTH", 'HENS TOOTH', 'HIGH FLIERS', 'HIGHLIGHT', 'HILLSONG', 'HISTORY CHANNEL', 'HISTORY', 'HK VIDΓ‰O', 'HK VIDEO', 'HK', 'HMH HAMBURGER MEDIEN HAUS', 'HAMBURGER MEDIEN HAUS', 'HMH HAMBURGER MEDIEN', 'HMH HAMBURGER', 'HMH', 'HOLLYWOOD CLASSIC ENTERTAINMENT', 'HOLLYWOOD CLASSIC', 'HOLLYWOOD PICTURES', 'HOLLYWOOD', 'HOPSCOTCH ENTERTAINMENT', 'HOPSCOTCH', 'HPM', 'HΓ„NNSLER CLASSIC', 'HANNSLER CLASSIC', 'HANNSLER', 'I-CATCHER', 'I CATCHER', 'ICATCHER', 'I-ON NEW MEDIA', 'I ON NEW MEDIA', 'ION NEW MEDIA', 'ION MEDIA', 'I-ON', 'ION', 'IAN PRODUCTIONS', 'IAN', 'ICESTORM', 'ICON FILM DISTRIBUTION', 'ICON DISTRIBUTION', 'ICON FILM', 'ICON', 'IDEALE AUDIENCE', 'IDEALE', 'IFC FILMS', 'IFC', 'IFILM', 'ILLUSIONS UNLTD.', 'ILLUSIONS UNLTD', 'ILLUSIONS', 'IMAGE ENTERTAINMENT', 'IMAGE', 'IMAGEM FILMES', 'IMAGEM', 'IMOVISION', 'IMPERIAL CINEPIX', 'IMPRINT', 'IMPULS HOME ENTERTAINMENT', 'IMPULS ENTERTAINMENT', 'IMPULS HOME', 'IMPULS', 'IN-AKUSTIK', 'IN AKUSTIK', 'INAKUSTIK', 'INCEPTION MEDIA GROUP', 'INCEPTION MEDIA', 'INCEPTION GROUP', 'INCEPTION', 'INDEPENDENT', 'INDICAN', 'INDIE RIGHTS', 'INDIE', 'INDIGO', 'INFO', 'INJOINGAN', 'INKED PICTURES', 'INKED', 'INSIDE OUT MUSIC', 'INSIDE MUSIC', 'INSIDE OUT', 'INSIDE', 'INTERCOM', 'INTERCONTINENTAL VIDEO', 'INTERCONTINENTAL', 'INTERGROOVE', 'INTERSCOPE', 'INVINCIBLE PICTURES', 'INVINCIBLE', 'ISLAND/MERCURY', 'ISLAND MERCURY', 'ISLANDMERCURY', 'ISLAND & MERCURY', 'ISLAND AND MERCURY', 'ISLAND', 'ITN', 'ITV DVD', 'ITV', 'IVC', 'IVE ENTERTAINMENT', 'IVE', 'J&R ADVENTURES', 'J&R', 'JR', 'JAKOB', 'JONU MEDIA', 'JONU', 'JRB PRODUCTIONS', 'JRB', 'JUST BRIDGE ENTERTAINMENT', 'JUST BRIDGE', 'JUST ENTERTAINMENT', 'JUST', 'KABOOM ENTERTAINMENT', 'KABOOM', 'KADOKAWA ENTERTAINMENT', 'KADOKAWA', 'KAIROS', 'KALEIDOSCOPE ENTERTAINMENT', 'KALEIDOSCOPE', 'KAM & RONSON ENTERPRISES', 'KAM & RONSON', 'KAM&RONSON ENTERPRISES', 'KAM&RONSON', 'KAM AND RONSON ENTERPRISES', 'KAM AND RONSON', 'KANA HOME VIDEO', 'KARMA FILMS', 'KARMA', 'KATZENBERGER', 'KAZE', + 'KBS MEDIA', 'KBS', 'KD MEDIA', 
'KD', 'KING MEDIA', 'KING', 'KING RECORDS', 'KINO LORBER', 'KINO', 'KINO SWIAT', 'KINOKUNIYA', 'KINOWELT HOME ENTERTAINMENT/DVD', 'KINOWELT HOME ENTERTAINMENT', 'KINOWELT ENTERTAINMENT', 'KINOWELT HOME DVD', 'KINOWELT ENTERTAINMENT/DVD', 'KINOWELT DVD', 'KINOWELT', 'KIT PARKER FILMS', 'KIT PARKER', 'KITTY MEDIA', 'KNM HOME ENTERTAINMENT', 'KNM ENTERTAINMENT', 'KNM HOME', 'KNM', 'KOBA FILMS', 'KOBA', 'KOCH ENTERTAINMENT', 'KOCH MEDIA', 'KOCH', 'KRAKEN RELEASING', 'KRAKEN', 'KSCOPE', 'KSM', 'KULTUR', "L'ATELIER D'IMAGES", "LATELIER D'IMAGES", "L'ATELIER DIMAGES", 'LATELIER DIMAGES', "L ATELIER D'IMAGES", "L'ATELIER D IMAGES", + 'L ATELIER D IMAGES', "L'ATELIER", 'L ATELIER', 'LATELIER', 'LA AVENTURA AUDIOVISUAL', 'LA AVENTURA', 'LACE GROUP', 'LACE', 'LASER PARADISE', 'LAYONS', 'LCJ EDITIONS', 'LCJ', 'LE CHAT QUI FUME', 'LE PACTE', 'LEDICK FILMHANDEL', 'LEGEND', 'LEOMARK STUDIOS', 'LEOMARK', 'LEONINE FILMS', 'LEONINE', 'LICHTUNG MEDIA LTD', 'LICHTUNG LTD', 'LICHTUNG MEDIA LTD.', 'LICHTUNG LTD.', 'LICHTUNG MEDIA', 'LICHTUNG', 'LIGHTHOUSE HOME ENTERTAINMENT', 'LIGHTHOUSE ENTERTAINMENT', 'LIGHTHOUSE HOME', 'LIGHTHOUSE', 'LIGHTYEAR', 'LIONSGATE FILMS', 'LIONSGATE', 'LIZARD CINEMA TRADE', 'LLAMENTOL', 'LOBSTER FILMS', 'LOBSTER', 'LOGON', 'LORBER FILMS', 'LORBER', 'LOS BANDITOS FILMS', 'LOS BANDITOS', 'LOUD & PROUD RECORDS', 'LOUD AND PROUD RECORDS', 'LOUD & PROUD', 'LOUD AND PROUD', 'LSO LIVE', 'LUCASFILM', 'LUCKY RED', 'LUMIÈRE HOME ENTERTAINMENT', 'LUMIERE HOME ENTERTAINMENT', 'LUMIERE ENTERTAINMENT', 'LUMIERE HOME', 'LUMIERE', 'M6 VIDEO', 'M6', 'MAD DIMENSION', 'MADMAN ENTERTAINMENT', 'MADMAN', 'MAGIC BOX', 'MAGIC PLAY', 'MAGNA HOME ENTERTAINMENT', 'MAGNA ENTERTAINMENT', 'MAGNA HOME', 'MAGNA', 'MAGNOLIA PICTURES', 'MAGNOLIA', 'MAIDEN JAPAN', 'MAIDEN', 'MAJENG MEDIA', 'MAJENG', 'MAJESTIC HOME ENTERTAINMENT', 'MAJESTIC ENTERTAINMENT', 'MAJESTIC HOME', 'MAJESTIC', 'MANGA HOME ENTERTAINMENT', 'MANGA ENTERTAINMENT', 'MANGA HOME', 'MANGA', 'MANTA LAB', 'MAPLE STUDIOS', 'MAPLE', 'MARCO POLO PRODUCTION', 'MARCO POLO', 'MARIINSKY', 'MARVEL STUDIOS', 'MARVEL', 'MASCOT RECORDS', 'MASCOT', 'MASSACRE VIDEO', 'MASSACRE', 'MATCHBOX', 'MATRIX D', 'MAXAM', 'MAYA HOME ENTERTAINMENT', 'MAYA ENTERTAINMENT', 'MAYA HOME', 'MAYAT', 'MDG', 'MEDIA BLASTERS', 'MEDIA FACTORY', 'MEDIA TARGET DISTRIBUTION', 'MEDIA TARGET', 'MEDIAINVISION', 'MEDIATOON', 'MEDIATRES ESTUDIO', 'MEDIATRES STUDIO', 'MEDIATRES', 'MEDICI ARTS', 'MEDICI CLASSICS', 'MEDIUMRARE ENTERTAINMENT', 'MEDIUMRARE', 'MEDUSA', 'MEGASTAR', 'MEI AH', 'MELI MΓ‰DIAS', 'MELI MEDIAS', 'MEMENTO FILMS', 'MEMENTO', 'MENEMSHA FILMS', 'MENEMSHA', 'MERCURY', 'MERCURY STUDIOS', 'MERGE SOFT PRODUCTIONS', 'MERGE PRODUCTIONS', 'MERGE SOFT', 'MERGE', 'METAL BLADE RECORDS', 'METAL BLADE', 'METEOR', 'METRO-GOLDWYN-MAYER', 'METRO GOLDWYN MAYER', 'METROGOLDWYNMAYER', 'METRODOME VIDEO', 'METRODOME', 'METROPOLITAN', 'MFA+', 'MFA', 'MIG FILMGROUP', 'MIG', 'MILESTONE', 'MILL CREEK ENTERTAINMENT', 'MILL CREEK', 'MILLENNIUM MEDIA', 'MILLENNIUM', 'MIRAGE ENTERTAINMENT', 'MIRAGE', 'MIRAMAX', 'MISTERIYA ZVUKA', 'MK2', 'MODE RECORDS', 'MODE', 'MOMENTUM PICTURES', 'MONDO HOME ENTERTAINMENT', 'MONDO ENTERTAINMENT', 'MONDO HOME', 'MONDO MACABRO', 'MONGREL MEDIA', 'MONOLIT', 'MONOLITH VIDEO', 'MONOLITH', 'MONSTER PICTURES', 'MONSTER', 'MONTEREY VIDEO', 'MONTEREY', 'MONUMENT RELEASING', 'MONUMENT', 'MORNINGSTAR', 'MORNING STAR', 'MOSERBAER', 'MOVIEMAX', 'MOVINSIDE', 'MPI MEDIA GROUP', 'MPI MEDIA', 'MPI', 'MR. 
BONGO FILMS', 'MR BONGO FILMS', 'MR BONGO', 'MRG (MERIDIAN)', 'MRG MERIDIAN', 'MRG', 'MERIDIAN', 'MUBI', 'MUG SHOT PRODUCTIONS', 'MUG SHOT', 'MULTIMUSIC', 'MULTI-MUSIC', 'MULTI MUSIC', 'MUSE', 'MUSIC BOX FILMS', 'MUSIC BOX', 'MUSICBOX', 'MUSIC BROKERS', 'MUSIC THEORIES', 'MUSIC VIDEO DISTRIBUTORS', 'MUSIC VIDEO', 'MUSTANG ENTERTAINMENT', 'MUSTANG', 'MVD VISUAL', 'MVD', 'MVD/VSC', 'MVL', 'MVM ENTERTAINMENT', 'MVM', 'MYNDFORM', 'MYSTIC NIGHT PICTURES', 'MYSTIC NIGHT', 'NAMELESS MEDIA', 'NAMELESS', 'NAPALM RECORDS', 'NAPALM', 'NATIONAL ENTERTAINMENT MEDIA', 'NATIONAL ENTERTAINMENT', 'NATIONAL MEDIA', 'NATIONAL FILM ARCHIVE', 'NATIONAL ARCHIVE', 'NATIONAL FILM', 'NATIONAL GEOGRAPHIC', 'NAT GEO TV', 'NAT GEO', 'NGO', 'NAXOS', 'NBCUNIVERSAL ENTERTAINMENT JAPAN', 'NBC UNIVERSAL ENTERTAINMENT JAPAN', 'NBCUNIVERSAL JAPAN', 'NBC UNIVERSAL JAPAN', 'NBC JAPAN', 'NBO ENTERTAINMENT', 'NBO', 'NEOS', 'NETFLIX', 'NETWORK', 'NEW BLOOD', 'NEW DISC', 'NEW KSM', 'NEW LINE CINEMA', 'NEW LINE', 'NEW MOVIE TRADING CO. LTD', 'NEW MOVIE TRADING CO LTD', 'NEW MOVIE TRADING CO', 'NEW MOVIE TRADING', 'NEW WAVE FILMS', 'NEW WAVE', 'NFI', 'NHK', 'NIPPONART', 'NIS AMERICA', 'NJUTAFILMS', 'NOBLE ENTERTAINMENT', 'NOBLE', 'NORDISK FILM', 'NORDISK', 'NORSK FILM', 'NORSK', 'NORTH AMERICAN MOTION PICTURES', 'NOS AUDIOVISUAIS', 'NOTORIOUS PICTURES', 'NOTORIOUS', 'NOVA MEDIA', 'NOVA', 'NOVA SALES AND DISTRIBUTION', 'NOVA SALES & DISTRIBUTION', 'NSM', 'NSM RECORDS', 'NUCLEAR BLAST', 'NUCLEUS FILMS', 'NUCLEUS', 'OBERLIN MUSIC', 'OBERLIN', 'OBRAS-PRIMAS DO CINEMA', 'OBRAS PRIMAS DO CINEMA', 'OBRASPRIMAS DO CINEMA', 'OBRAS-PRIMAS CINEMA', 'OBRAS PRIMAS CINEMA', 'OBRASPRIMAS CINEMA', 'OBRAS-PRIMAS', 'OBRAS PRIMAS', 'OBRASPRIMAS', 'ODEON', 'OFDB FILMWORKS', 'OFDB', 'OLIVE FILMS', 'OLIVE', 'ONDINE', 'ONSCREEN FILMS', 'ONSCREEN', 'OPENING DISTRIBUTION', 'OPERA AUSTRALIA', 'OPTIMUM HOME ENTERTAINMENT', 'OPTIMUM ENTERTAINMENT', 'OPTIMUM HOME', 'OPTIMUM', 'OPUS ARTE', 'ORANGE STUDIO', 'ORANGE', 'ORLANDO EASTWOOD FILMS', 'ORLANDO FILMS', 'ORLANDO EASTWOOD', 'ORLANDO', 'ORUSTAK PICTURES', 'ORUSTAK', 'OSCILLOSCOPE PICTURES', 'OSCILLOSCOPE', 'OUTPLAY', 'PALISADES TARTAN', 'PAN VISION', 'PANVISION', 'PANAMINT CINEMA', 'PANAMINT', 'PANDASTORM ENTERTAINMENT', 'PANDA STORM ENTERTAINMENT', 'PANDASTORM', 'PANDA STORM', 'PANDORA FILM', 'PANDORA', 'PANEGYRIC', 'PANORAMA', 'PARADE DECK FILMS', 'PARADE DECK', 'PARADISE', 'PARADISO FILMS', 'PARADOX', 'PARAMOUNT PICTURES', 'PARAMOUNT', 'PARIS FILMES', 'PARIS FILMS', 'PARIS', 'PARK CIRCUS', 'PARLOPHONE', 'PASSION RIVER', 'PATHE DISTRIBUTION', 'PATHE', 'PBS', 'PEACE ARCH TRINITY', 'PECCADILLO PICTURES', 'PEPPERMINT', 'PHASE 4 FILMS', 'PHASE 4', 'PHILHARMONIA BAROQUE', 'PICTURE HOUSE ENTERTAINMENT', 'PICTURE ENTERTAINMENT', 'PICTURE HOUSE', 'PICTURE', 'PIDAX', + 'PINK FLOYD RECORDS', 'PINK FLOYD', 'PINNACLE FILMS', 'PINNACLE', 'PLAIN', 'PLATFORM ENTERTAINMENT LIMITED', 'PLATFORM ENTERTAINMENT LTD', 'PLATFORM ENTERTAINMENT LTD.', 'PLATFORM ENTERTAINMENT', 'PLATFORM', 'PLAYARTE', 'PLG UK CLASSICS', 'PLG UK', 'PLG', 'POLYBAND & TOPPIC VIDEO/WVG', 'POLYBAND AND TOPPIC VIDEO/WVG', 'POLYBAND & TOPPIC VIDEO WVG', 'POLYBAND & TOPPIC VIDEO AND WVG', 'POLYBAND & TOPPIC VIDEO & WVG', 'POLYBAND AND TOPPIC VIDEO WVG', 'POLYBAND AND TOPPIC VIDEO AND WVG', 'POLYBAND AND TOPPIC VIDEO & WVG', 'POLYBAND & TOPPIC VIDEO', 'POLYBAND AND TOPPIC VIDEO', 'POLYBAND & TOPPIC', 'POLYBAND AND TOPPIC', 'POLYBAND', 'WVG', 'POLYDOR', 'PONY', 'PONY CANYON', 'POTEMKINE', 'POWERHOUSE FILMS', 'POWERHOUSE', 'POWERSTATIOM', 'PRIDE & JOY', 
'PRIDE AND JOY', 'PRINZ MEDIA', 'PRINZ', 'PRIS AUDIOVISUAIS', 'PRO VIDEO', 'PRO-VIDEO', 'PRO-MOTION', 'PRO MOTION', 'PROD. JRB', 'PROD JRB', 'PRODISC', 'PROKINO', 'PROVOGUE RECORDS', 'PROVOGUE', 'PROWARE', 'PULP VIDEO', 'PULP', 'PULSE VIDEO', 'PULSE', 'PURE AUDIO RECORDINGS', 'PURE AUDIO', 'PURE FLIX ENTERTAINMENT', 'PURE FLIX', 'PURE ENTERTAINMENT', 'PYRAMIDE VIDEO', 'PYRAMIDE', 'QUALITY FILMS', 'QUALITY', 'QUARTO VALLEY RECORDS', 'QUARTO VALLEY', 'QUESTAR', 'R SQUARED FILMS', 'R SQUARED', 'RAPID EYE MOVIES', 'RAPID EYE', 'RARO VIDEO', 'RARO', 'RAROVIDEO U.S.', 'RAROVIDEO US', 'RARO VIDEO US', 'RARO VIDEO U.S.', 'RARO U.S.', 'RARO US', 'RAVEN BANNER RELEASING', 'RAVEN BANNER', 'RAVEN', 'RAZOR DIGITAL ENTERTAINMENT', 'RAZOR DIGITAL', 'RCA', 'RCO LIVE', 'RCO', 'RCV', 'REAL GONE MUSIC', 'REAL GONE', 'REANIMEDIA', 'REANI MEDIA', 'REDEMPTION', 'REEL', 'RELIANCE HOME VIDEO & GAMES', 'RELIANCE HOME VIDEO AND GAMES', 'RELIANCE HOME VIDEO', 'RELIANCE VIDEO', 'RELIANCE HOME', 'RELIANCE', 'REM CULTURE', 'REMAIN IN LIGHT', 'REPRISE', 'RESEN', 'RETROMEDIA', 'REVELATION FILMS LTD.', 'REVELATION FILMS LTD', 'REVELATION FILMS', 'REVELATION LTD.', 'REVELATION LTD', 'REVELATION', 'REVOLVER ENTERTAINMENT', 'REVOLVER', 'RHINO MUSIC', 'RHINO', 'RHV', 'RIGHT STUF', 'RIMINI EDITIONS', 'RISING SUN MEDIA', 'RLJ ENTERTAINMENT', 'RLJ', 'ROADRUNNER RECORDS', 'ROADSHOW ENTERTAINMENT', 'ROADSHOW', 'RONE', 'RONIN FLIX', 'ROTANA HOME ENTERTAINMENT', 'ROTANA ENTERTAINMENT', 'ROTANA HOME', 'ROTANA', 'ROUGH TRADE', + 'ROUNDER', 'SAFFRON HILL FILMS', 'SAFFRON HILL', 'SAFFRON', 'SAMUEL GOLDWYN FILMS', 'SAMUEL GOLDWYN', 'SAN FRANCISCO SYMPHONY', 'SANDREW METRONOME', 'SAPHRANE', 'SAVOR', 'SCANBOX ENTERTAINMENT', 'SCANBOX', 'SCENIC LABS', 'SCHRΓ–DERMEDIA', 'SCHRODERMEDIA', 'SCHRODER MEDIA', 'SCORPION RELEASING', 'SCORPION', 'SCREAM TEAM RELEASING', 'SCREAM TEAM', 'SCREEN MEDIA', 'SCREEN', 'SCREENBOUND PICTURES', 'SCREENBOUND', 'SCREENWAVE MEDIA', 'SCREENWAVE', 'SECOND RUN', 'SECOND SIGHT', 'SEEDSMAN GROUP', 'SELECT VIDEO', 'SELECTA VISION', 'SENATOR', 'SENTAI FILMWORKS', 'SENTAI', 'SEVEN7', 'SEVERIN FILMS', 'SEVERIN', 'SEVILLE', 'SEYONS ENTERTAINMENT', 'SEYONS', 'SF STUDIOS', 'SGL ENTERTAINMENT', 'SGL', 'SHAMELESS', 'SHAMROCK MEDIA', 'SHAMROCK', 'SHANGHAI EPIC MUSIC ENTERTAINMENT', 'SHANGHAI EPIC ENTERTAINMENT', 'SHANGHAI EPIC MUSIC', 'SHANGHAI MUSIC ENTERTAINMENT', 'SHANGHAI ENTERTAINMENT', 'SHANGHAI MUSIC', 'SHANGHAI', 'SHEMAROO', 'SHOCHIKU', 'SHOCK', 'SHOGAKU KAN', 'SHOUT FACTORY', 'SHOUT! 
FACTORY', 'SHOUT', 'SHOUT!', 'SHOWBOX', 'SHOWTIME ENTERTAINMENT', 'SHOWTIME', 'SHRIEK SHOW', 'SHUDDER', 'SIDONIS', 'SIDONIS CALYSTA', 'SIGNAL ONE ENTERTAINMENT', 'SIGNAL ONE', 'SIGNATURE ENTERTAINMENT', 'SIGNATURE', 'SILVER VISION', 'SINISTER FILM', 'SINISTER', 'SIREN VISUAL ENTERTAINMENT', 'SIREN VISUAL', 'SIREN ENTERTAINMENT', 'SIREN', 'SKANI', 'SKY DIGI', + 'SLASHER // VIDEO', 'SLASHER / VIDEO', 'SLASHER VIDEO', 'SLASHER', 'SLOVAK FILM INSTITUTE', 'SLOVAK FILM', 'SFI', 'SM LIFE DESIGN GROUP', 'SMOOTH PICTURES', 'SMOOTH', 'SNAPPER MUSIC', 'SNAPPER', 'SODA PICTURES', 'SODA', 'SONO LUMINUS', 'SONY MUSIC', 'SONY PICTURES', 'SONY', 'SONY PICTURES CLASSICS', 'SONY CLASSICS', 'SOUL MEDIA', 'SOUL', 'SOULFOOD MUSIC DISTRIBUTION', 'SOULFOOD DISTRIBUTION', 'SOULFOOD MUSIC', 'SOULFOOD', 'SOYUZ', 'SPECTRUM', 'SPENTZOS FILM', 'SPENTZOS', 'SPIRIT ENTERTAINMENT', 'SPIRIT', 'SPIRIT MEDIA GMBH', 'SPIRIT MEDIA', 'SPLENDID ENTERTAINMENT', 'SPLENDID FILM', 'SPO', 'SQUARE ENIX', 'SRI BALAJI VIDEO', 'SRI BALAJI', 'SRI', 'SRI VIDEO', 'SRS CINEMA', 'SRS', 'SSO RECORDINGS', 'SSO', 'ST2 MUSIC', 'ST2', 'STAR MEDIA ENTERTAINMENT', 'STAR ENTERTAINMENT', 'STAR MEDIA', 'STAR', 'STARLIGHT', 'STARZ / ANCHOR BAY', 'STARZ ANCHOR BAY', 'STARZ', 'ANCHOR BAY', 'STER KINEKOR', 'STERLING ENTERTAINMENT', 'STERLING', 'STINGRAY', 'STOCKFISCH RECORDS', 'STOCKFISCH', 'STRAND RELEASING', 'STRAND', 'STUDIO 4K', 'STUDIO CANAL', 'STUDIO GHIBLI', 'GHIBLI', 'STUDIO HAMBURG ENTERPRISES', 'HAMBURG ENTERPRISES', 'STUDIO HAMBURG', 'HAMBURG', 'STUDIO S', 'SUBKULTUR ENTERTAINMENT', 'SUBKULTUR', 'SUEVIA FILMS', 'SUEVIA', 'SUMMIT ENTERTAINMENT', 'SUMMIT', 'SUNFILM ENTERTAINMENT', 'SUNFILM', 'SURROUND RECORDS', 'SURROUND', 'SVENSK FILMINDUSTRI', 'SVENSK', 'SWEN FILMES', 'SWEN FILMS', 'SWEN', 'SYNAPSE FILMS', 'SYNAPSE', 'SYNDICADO', 'SYNERGETIC', 'T- SERIES', 'T-SERIES', 'T SERIES', 'TSERIES', 'T.V.P.', 'TVP', 'TACET RECORDS', 'TACET', 'TAI SENG', 'TAI SHENG', 'TAKEONE', 'TAKESHOBO', 'TAMASA DIFFUSION', 'TC ENTERTAINMENT', 'TC', 'TDK', 'TEAM MARKETING', 'TEATRO REAL', 'TEMA DISTRIBUCIONES', 'TEMPE DIGITAL', 'TF1 VIDΓ‰O', 'TF1 VIDEO', 'TF1', 'THE BLU', 'BLU', 'THE ECSTASY OF FILMS', 'THE FILM DETECTIVE', 'FILM DETECTIVE', 'THE JOKERS', 'JOKERS', 'THE ON', 'ON', 'THIMFILM', 'THIM FILM', 'THIM', 'THIRD WINDOW FILMS', 'THIRD WINDOW', '3RD WINDOW FILMS', '3RD WINDOW', 'THUNDERBEAN ANIMATION', 'THUNDERBEAN', 'THUNDERBIRD RELEASING', 'THUNDERBIRD', 'TIBERIUS FILM', 'TIME LIFE', 'TIMELESS MEDIA GROUP', 'TIMELESS MEDIA', 'TIMELESS GROUP', 'TIMELESS', 'TLA RELEASING', 'TLA', 'TOBIS FILM', 'TOBIS', 'TOEI', 'TOHO', 'TOKYO SHOCK', 'TOKYO', 'TONPOOL MEDIEN GMBH', 'TONPOOL MEDIEN', 'TOPICS ENTERTAINMENT', 'TOPICS', 'TOUCHSTONE PICTURES', 'TOUCHSTONE', 'TRANSMISSION FILMS', 'TRANSMISSION', 'TRAVEL VIDEO STORE', 'TRIART', 'TRIGON FILM', 'TRIGON', 'TRINITY HOME ENTERTAINMENT', 'TRINITY ENTERTAINMENT', 'TRINITY HOME', 'TRINITY', 'TRIPICTURES', 'TRI-PICTURES', 'TRI PICTURES', 'TROMA', 'TURBINE MEDIEN', 'TURTLE RECORDS', 'TURTLE', 'TVA FILMS', 'TVA', 'TWILIGHT TIME', 'TWILIGHT', 'TT', 'TWIN CO., LTD.', 'TWIN CO, LTD.', 'TWIN CO., LTD', 'TWIN CO, LTD', 'TWIN CO LTD', 'TWIN LTD', 'TWIN CO.', 'TWIN CO', 'TWIN', 'UCA', 'UDR', 'UEK', 'UFA/DVD', 'UFA DVD', 'UFADVD', 'UGC PH', 'ULTIMATE3DHEAVEN', 'ULTRA', 'UMBRELLA ENTERTAINMENT', 'UMBRELLA', 'UMC', "UNCORK'D ENTERTAINMENT", 'UNCORKD ENTERTAINMENT', 'UNCORK D ENTERTAINMENT', "UNCORK'D", 'UNCORK D', 'UNCORKD', 'UNEARTHED FILMS', 'UNEARTHED', 'UNI DISC', 'UNIMUNDOS', 'UNITEL', 'UNIVERSAL MUSIC', 'UNIVERSAL SONY PICTURES HOME 
ENTERTAINMENT', 'UNIVERSAL SONY PICTURES ENTERTAINMENT', 'UNIVERSAL SONY PICTURES HOME', 'UNIVERSAL SONY PICTURES', 'UNIVERSAL HOME ENTERTAINMENT', 'UNIVERSAL ENTERTAINMENT', + 'UNIVERSAL HOME', 'UNIVERSAL STUDIOS', 'UNIVERSAL', 'UNIVERSE LASER & VIDEO CO.', 'UNIVERSE LASER AND VIDEO CO.', 'UNIVERSE LASER & VIDEO CO', 'UNIVERSE LASER AND VIDEO CO', 'UNIVERSE LASER CO.', 'UNIVERSE LASER CO', 'UNIVERSE LASER', 'UNIVERSUM FILM', 'UNIVERSUM', 'UTV', 'VAP', 'VCI', 'VENDETTA FILMS', 'VENDETTA', 'VERSÁTIL HOME VIDEO', 'VERSÁTIL VIDEO', 'VERSÁTIL HOME', 'VERSÁTIL', 'VERSATIL HOME VIDEO', 'VERSATIL VIDEO', 'VERSATIL HOME', 'VERSATIL', 'VERTICAL ENTERTAINMENT', 'VERTICAL', 'VÉRTICE 360º', 'VÉRTICE 360', 'VERTICE 360o', 'VERTICE 360', 'VERTIGO BERLIN', 'VÉRTIGO FILMS', 'VÉRTIGO', 'VERTIGO FILMS', 'VERTIGO', 'VERVE PICTURES', 'VIA VISION ENTERTAINMENT', 'VIA VISION', 'VICOL ENTERTAINMENT', 'VICOL', 'VICOM', 'VICTOR ENTERTAINMENT', 'VICTOR', 'VIDEA CDE', 'VIDEO FILM EXPRESS', 'VIDEO FILM', 'VIDEO EXPRESS', 'VIDEO MUSIC, INC.', 'VIDEO MUSIC, INC', 'VIDEO MUSIC INC.', 'VIDEO MUSIC INC', 'VIDEO MUSIC', 'VIDEO SERVICE CORP.', 'VIDEO SERVICE CORP', 'VIDEO SERVICE', 'VIDEO TRAVEL', 'VIDEOMAX', 'VIDEO MAX', 'VII PILLARS ENTERTAINMENT', 'VII PILLARS', 'VILLAGE FILMS', 'VINEGAR SYNDROME', 'VINEGAR', 'VS', 'VINNY MOVIES', 'VINNY', 'VIRGIL FILMS & ENTERTAINMENT', 'VIRGIL FILMS AND ENTERTAINMENT', 'VIRGIL ENTERTAINMENT', 'VIRGIL FILMS', 'VIRGIL', 'VIRGIN RECORDS', 'VIRGIN', 'VISION FILMS', 'VISION', 'VISUAL ENTERTAINMENT GROUP', 'VISUAL GROUP', 'VISUAL ENTERTAINMENT', 'VISUAL', 'VIVENDI VISUAL ENTERTAINMENT', 'VIVENDI VISUAL', 'VIVENDI', 'VIZ PICTURES', 'VIZ', 'VLMEDIA', 'VL MEDIA', 'VL', 'VOLGA', 'VVS FILMS', 'VVS', 'VZ HANDELS GMBH', 'VZ HANDELS', 'WARD RECORDS', 'WARD', 'WARNER BROS.', 'WARNER BROS', 'WARNER ARCHIVE', 'WARNER ARCHIVE COLLECTION', 'WAC', 'WARNER', 'WARNER MUSIC', 'WEA', 'WEINSTEIN COMPANY', 'WEINSTEIN', 'WELL GO USA', 'WELL GO', 'WELTKINO FILMVERLEIH', 'WEST VIDEO', 'WEST', 'WHITE PEARL MOVIES', 'WHITE PEARL', 'WICKED-VISION MEDIA', 'WICKED VISION MEDIA', 'WICKEDVISION MEDIA', 'WICKED-VISION', 'WICKED VISION', 'WICKEDVISION', 'WIENERWORLD', 'WILD BUNCH', 'WILD EYE RELEASING', 'WILD EYE', 'WILD SIDE VIDEO', 'WILD SIDE', 'WME', 'WOLFE VIDEO', 'WOLFE', 'WORD ON FIRE', 'WORKS FILM GROUP', 'WORLD WRESTLING', 'WVG MEDIEN', 'WWE STUDIOS', 'WWE', 'X RATED KULT', 'X-RATED KULT', 'X RATED CULT', 'X-RATED CULT', 'X RATED', 'X-RATED', 'XCESS', 'XLRATOR', 'XT VIDEO', 'XT', 'YAMATO VIDEO', 'YAMATO', 'YASH RAJ FILMS', 'YASH RAJS', 'ZEITGEIST FILMS', 'ZEITGEIST', 'ZENITH PICTURES', 'ZENITH', 'ZIMA', 'ZYLO', 'ZYX MUSIC', 'ZYX', 'MASTERS OF CINEMA', 'MOC' ] @@ -1966,13 +2114,12 @@ def get_distributor(self, distributor_in): distributor_out = each return distributor_out - def get_video_codec(self, bdinfo): codecs = { - "MPEG-2 Video" : "MPEG-2", - "MPEG-4 AVC Video" : "AVC", - "MPEG-H HEVC Video" : "HEVC", - "VC-1 Video" : "VC-1" + "MPEG-2 Video": "MPEG-2", + "MPEG-4 AVC Video": "AVC", + "MPEG-H HEVC Video": "HEVC", + "VC-1 Video": "VC-1" } codec = codecs.get(bdinfo['video'][0]['codec'], "") return codec @@ -1988,21 +2135,21 @@ def get_video_encode(self, mi, type, bdinfo): if mi['media']['track'][1].get('Encoded_Library_Settings', None): has_encode_settings = True bit_depth = mi['media']['track'][1].get('BitDepth', '0') - except: + except Exception: format = bdinfo['video'][0]['codec'] format_profile = bdinfo['video'][0]['profile'] - if type in ("ENCODE", "WEBRIP"): #ENCODE or WEBRIP + if type in ("ENCODE",
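
Only the tail of get_distributor is visible in this hunk. For orientation, a minimal sketch of the lookup it implies, with the alias list above assumed bound to a distributor_list argument; the standalone signature and the .upper().strip() normalization are assumptions, since the real method takes self and its head is elided here:

    def get_distributor(distributor_in, distributor_list):
        # Compare the raw Mediainfo/BDInfo distributor string against every
        # known alias; the last exact match wins, otherwise return "".
        distributor_out = ""
        for each in distributor_list:
            if distributor_in.upper().strip() == each:  # normalization assumed
                distributor_out = each
        return distributor_out
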
"WEBRIP"): # ENCODE or WEBRIP if format == 'AVC': codec = 'x264' elif format == 'HEVC': codec = 'x265' - elif type in ('WEBDL', 'HDTV'): #WEB-DL + elif type in ('WEBDL', 'HDTV'): # WEB-DL if format == 'AVC': codec = 'H.264' elif format == 'HEVC': codec = 'H.265' - - if type == 'HDTV' and has_encode_settings == True: + + if type == 'HDTV' and has_encode_settings is True: codec = codec.replace('H.', 'x') elif format == "VP9": codec = "VP9" @@ -2018,16 +2165,15 @@ def get_video_encode(self, mi, type, bdinfo): video_codec = f"MPEG-{mi['media']['track'][1].get('Format_Version')}" return video_encode, video_codec, has_encode_settings, bit_depth - def get_edition(self, video, bdinfo, filelist, manual_edition): if video.lower().startswith('dc'): video = video.replace('dc', '', 1) - + guess = guessit(video) tag = guess.get('release_group', 'NOGROUP') repack = "" edition = "" - + if bdinfo is not None: try: edition = guessit(bdinfo['label'])['edition'] @@ -2040,10 +2186,10 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): except Exception as e: print(f"Video Edition Guess Error: {e}") edition = "" - + if isinstance(edition, list): edition = " ".join(edition) - + if len(filelist) == 1: video = os.path.basename(video) @@ -2054,9 +2200,9 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): if manual_edition: edition = str(manual_edition) - + print(f"Edition After Manual Edition: {edition}") - + if "REPACK" in edition.upper() or "V2" in video: repack = "REPACK" if "REPACK2" in edition.upper() or "V3" in video: @@ -2067,28 +2213,39 @@ def get_edition(self, video, bdinfo, filelist, manual_edition): repack = "PROPER" if "RERIP" in edition.upper(): repack = "RERIP" - + print(f"Repack after Checks: {repack}") - + # Only remove REPACK, RERIP, or PROPER from edition if they're not part of manual_edition edition = re.sub(r"(\bREPACK\d?\b|\bRERIP\b|\bPROPER\b)", "", edition, flags=re.IGNORECASE).strip() bad = ['internal', 'limited', 'retail'] if edition.lower() in bad: edition = "" - + return edition, repack """ Create Torrent """ class CustomTorrent(torf.Torrent): - # Ensure the piece size is within the desired limits + # Default piece size limits torf.Torrent.piece_size_min = 16384 # 16 KiB torf.Torrent.piece_size_max = 67108864 # 64 MiB - def __init__(self, *args, **kwargs): + def __init__(self, meta, *args, **kwargs): super().__init__(*args, **kwargs) + + # Override piece_size_max if meta['max_piece_size'] is specified + if 'max_piece_size' in meta and meta['max_piece_size']: + try: + max_piece_size_mib = int(meta['max_piece_size']) * 1024 * 1024 # Convert MiB to bytes + self.piece_size_max = min(max_piece_size_mib, torf.Torrent.piece_size_max) + except ValueError: + self.piece_size_max = torf.Torrent.piece_size_max # Fallback to default if conversion fails + else: + self.piece_size_max = torf.Torrent.piece_size_max + # Calculate and set the piece size total_size = self._calculate_total_size() piece_size = self.calculate_piece_size(total_size, self.piece_size_min, self.piece_size_max, self.files) @@ -2109,11 +2266,10 @@ def piece_size(self, value): @classmethod def calculate_piece_size(cls, total_size, min_size, max_size, files): our_min_size = 16384 - our_max_size = 67108864 - # Start with a piece size of 8 MiB - piece_size = 8388608 + our_max_size = max_size if max_size else 67108864 # Default to 64 MiB if max_size is None + piece_size = 8388608 # Start with 8 MiB num_pieces = math.ceil(total_size / piece_size) - torrent_file_size = 20 + (num_pieces * 20) + 
cls._calculate_pathname_bytes(files) # Approximate .torrent size + torrent_file_size = 20 + (num_pieces * 20) + cls._calculate_pathname_bytes(files) # Approximate .torrent size # Adjust the piece size to fit within the constraints while not (1000 <= num_pieces <= 2000 and torrent_file_size <= 102400): # 100 KiB .torrent size limit @@ -2125,7 +2281,7 @@ def calculate_piece_size(cls, total_size, min_size, max_size, files): elif num_pieces > 2000: piece_size *= 2 if piece_size > our_max_size: - cli_ui.warning(f"Warning: Piece size exceeded 2000 pieces! Using ({num_pieces}) pieces.") + cli_ui.warning(f"Warning: Piece size exceeded 2000 pieces and .torrent will be approximately {torrent_file_size / 1024:.2f} KiB! Using ({num_pieces}) pieces.") piece_size = our_max_size break elif torrent_file_size > 102400: @@ -2144,7 +2300,6 @@ def _calculate_total_size(self): @classmethod def _calculate_pathname_bytes(cls, files): - # Calculate the total bytes consumed by all the pathnames in the torrent total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) return total_pathname_bytes @@ -2170,11 +2325,12 @@ def create_torrent(self, meta, path, output_filename): if meta['is_disc']: include, exclude = "", "" else: - exclude = ["*.*", "*sample.mkv", "!sample*.*"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] include = ["*.mkv", "*.mp4", "*.ts"] - + # Create and write the new torrent using the CustomTorrent class torrent = self.CustomTorrent( + meta=meta, path=path, trackers=["https://fake.tracker"], source="L4G", @@ -2196,7 +2352,7 @@ def create_torrent(self, meta, path, output_filename): console.print("[bold green].torrent created", end="\r") return torrent - + def torf_cb(self, torrent, filepath, pieces_done, pieces_total): # print(f'{pieces_done/pieces_total*100:3.0f} % done') cli_ui.info_progress("Hashing...", pieces_done, pieces_total) @@ -2215,7 +2371,7 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): base_torrent.trackers = ['https://fake.tracker'] base_torrent.comment = "Created by L4G's Upload Assistant" base_torrent.created_by = "Created by L4G's Upload Assistant" - #Remove Un-whitelisted info from torrent + # Remove Un-whitelisted info from torrent for each in list(base_torrent.metainfo['info']): if each not in ('files', 'length', 'name', 'piece length', 'pieces', 'private', 'source'): base_torrent.metainfo['info'].pop(each, None) @@ -2226,7 +2382,6 @@ def create_base_from_existing_torrent(self, torrentpath, base_dir, uuid): base_torrent.private = True Torrent.copy(base_torrent).write(f"{base_dir}/tmp/{uuid}/BASE.torrent", overwrite=True) - """ Upload Screenshots """ @@ -2235,9 +2390,7 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i initial_img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] img_host = meta['imghost'] # Use the correctly updated image host from meta - image_list = [] - newhost_list = [] if custom_img_list: image_glob = custom_img_list @@ -2253,139 +2406,140 @@ def upload_screens(self, meta, screens, img_host_num, i, total_screens, custom_i console.print(f"[yellow]Skipping upload because images are already uploaded to {img_host}. 
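
calculate_piece_size above searches for a piece size that yields roughly 1000 to 2000 pieces while keeping the estimated .torrent under 100 KiB, doubling or halving from an 8 MiB start and clamping to the 16 KiB / 64 MiB bounds. A standalone sketch of that search (function name hypothetical; the real classmethod also warns through cli_ui when it hits the ceiling):

    import math

    def pick_piece_size(total_size, pathname_bytes, min_size=16384, max_size=67108864):
        piece_size = 8388608  # start at 8 MiB, as the hunk does
        num_pieces = math.ceil(total_size / piece_size)
        torrent_file_size = 20 + (num_pieces * 20) + pathname_bytes
        while not (1000 <= num_pieces <= 2000 and torrent_file_size <= 102400):
            if num_pieces < 1000:
                piece_size //= 2  # fewer, smaller pieces needed
                if piece_size <= min_size:
                    return min_size
            else:  # too many pieces, or the .torrent estimate is too large
                piece_size *= 2
                if piece_size >= max_size:
                    return max_size
            num_pieces = math.ceil(total_size / piece_size)
            torrent_file_size = 20 + (num_pieces * 20) + pathname_bytes
        return piece_size
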
Existing images: {len(existing_images)}, Required: {total_screens}") return existing_images, total_screens + # Initialize the progress bar outside of the retry loop with Progress( TextColumn("[bold green]Uploading Screens..."), BarColumn(), "[cyan]{task.completed}/{task.total}", TimeRemainingColumn() ) as progress: - upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:])) + while True: + upload_task = progress.add_task(f"[green]Uploading Screens to {img_host}...", total=len(image_glob[-screens:])) - for image in image_glob[-screens:]: - try: - timeout = 60 - if img_host == "ptpimg": - payload = { - 'format': 'json', - 'api_key': self.config['DEFAULT']['ptpimg_api'] - } - files = [('file-upload[0]', open(image, 'rb'))] - headers = {'referer': 'https://ptpimg.me/index.php'} - response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) - response = response.json() - ptpimg_code = response[0]['code'] - ptpimg_ext = response[0]['ext'] - img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - raw_url = img_url - web_url = img_url - elif img_host == "imgbb": - url = "https://api.imgbb.com/1/upload" - data = { - 'key': self.config['DEFAULT']['imgbb_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - response = requests.post(url, data=data, timeout=timeout) - response = response.json() - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "ptscreens": - url = "https://ptscreens.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - console.print("[yellow]PT Screens failed, trying next image host") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = img_url - elif img_host == "oeimg": - url = "https://imgoe.download/api/1/upload" - data = { - 'key': self.config['DEFAULT']['oeimg_api'], - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - try: - response = requests.post(url, data = data,timeout=timeout) + for image in image_glob[-screens:]: + try: + timeout = 60 + if img_host == "ptpimg": + payload = { + 'format': 'json', + 'api_key': self.config['DEFAULT']['ptpimg_api'] + } + files = [('file-upload[0]', open(image, 'rb'))] + headers = {'referer': 'https://ptpimg.me/index.php'} + response = requests.post("https://ptpimg.me/upload.php", headers=headers, data=payload, files=files) + response = response.json() + ptpimg_code = response[0]['code'] + ptpimg_ext = response[0]['ext'] + img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" + raw_url = img_url + web_url = img_url + elif img_host == "imgbb": + url = "https://api.imgbb.com/1/upload" + data = { + 'key': self.config['DEFAULT']['imgbb_api'], + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + response = requests.post(url, data=data, timeout=timeout) + response = response.json() + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "ptscreens": + url = "https://ptscreens.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, 
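
The imgbb branch above, reduced to a standalone helper: the endpoint and payload shape are taken from the hunk, the helper name is hypothetical, and in the real code the key comes from self.config['DEFAULT']['imgbb_api']. Using a context manager also closes the file handle, which the inline open(...) calls leave to the garbage collector:

    import base64
    import requests

    def upload_to_imgbb(image_path, api_key, timeout=60):
        # Read and base64-encode the screenshot, then post it to imgbb.
        with open(image_path, "rb") as f:
            data = {
                'key': api_key,
                'image': base64.b64encode(f.read()).decode('utf8')
            }
        response = requests.post("https://api.imgbb.com/1/upload", data=data, timeout=timeout)
        return response.json()['data']['image']['url']
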
"rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['ptscreens_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) response = response.json() if response.get('status_code') != 200: - progress.console.print(response) - img_url = response['data'].get('medium', response['data']['image'])['url'] + console.print("[yellow]PT Screens failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "oeimg": + url = "https://imgoe.download/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['oeimg_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]OnlyImage failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url + web_url = img_url + elif img_host == "pixhost": + url = "https://api.pixhost.to/images" + data = { + 'content_type': '0', + 'max_th_size': 350, + } + files = { + 'img': ('file-upload[0]', open(image, 'rb')), + } + response = requests.post(url, data=data, files=files, timeout=timeout) + if response.status_code != 200: + console.print("[yellow]Pixhost failed, trying next image host") + break + response = response.json() + raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') + img_url = response['th_url'] + web_url = response['show_url'] + elif img_host == "lensdump": + url = "https://lensdump.com/api/1/upload" + data = { + 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') + } + headers = { + 'X-API-Key': self.config['DEFAULT']['lensdump_api'], + } + response = requests.post(url, data=data, headers=headers, timeout=timeout) + response = response.json() + if response.get('status_code') != 200: + console.print("[yellow]Lensdump failed, trying next image host") + break + img_url = response['data']['image']['url'] + raw_url = img_url web_url = response['data']['url_viewer'] - raw_url = response['data']['image']['url'] - except Exception: - progress.console.print("[yellow]oeimg failed, trying next image host") - progress.stop() - newhost_list, i = self.upload_screens(meta, screens - i , img_host_num + 1, i, total_screens, [], return_dict) - elif img_host == "pixhost": - url = "https://api.pixhost.to/images" - data = { - 'content_type': '0', - 'max_th_size': 350, - } - files = { - 'img': ('file-upload[0]', open(image, 'rb')), - } - response = requests.post(url, data=data, files=files, timeout=timeout) - if response.status_code != 200: - console.print("[yellow]Pixhost failed, trying next image host") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) - response = response.json() - raw_url = response['th_url'].replace('https://t', 'https://img').replace('/thumbs/', '/images/') - img_url = response['th_url'] - web_url = response['show_url'] - elif img_host == "lensdump": - url = "https://lensdump.com/api/1/upload" - data = { - 'image': base64.b64encode(open(image, "rb").read()).decode('utf8') - } - headers = { - 'X-API-Key': self.config['DEFAULT']['lensdump_api'], - } - response = requests.post(url, data=data, headers=headers, timeout=timeout) - response = response.json() - if response.get('status_code') != 200: - 
console.print("[yellow]Lensdump failed, trying next image host") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) - img_url = response['data']['image']['url'] - raw_url = img_url - web_url = response['data']['url_viewer'] - else: - console.print(f"[red]Unsupported image host: {img_host}") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) + else: + console.print(f"[red]Unsupported image host: {img_host}") + break - # Update progress bar and print the result on the same line - progress.console.print(f"[cyan]Uploaded image {i+1}/{total_screens}: {raw_url}", end='\r') + # Update progress bar and print the result on the same line + progress.console.print(f"[cyan]Uploaded image {i+1}/{total_screens}: {raw_url}", end='\r') - # Add the image details to the list - image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} - image_list.append(image_dict) - progress.advance(upload_task) - i += 1 + # Add the image details to the list + image_dict = {'img_url': img_url, 'raw_url': raw_url, 'web_url': web_url} + image_list.append(image_dict) + progress.advance(upload_task) + i += 1 - except Exception as e: - console.print(f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}") - img_host_num += 1 - return self.upload_screens(meta, screens - i, img_host_num, i, total_screens, custom_img_list, return_dict, retry_mode=True) + except Exception as e: + console.print(f"[yellow]Failed to upload {image} to {img_host}. Exception: {str(e)}") + break - time.sleep(0.5) + time.sleep(0.5) - if i >= total_screens: - break + if i >= total_screens: + return_dict['image_list'] = image_list + console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") + return image_list, i - return_dict['image_list'] = image_list - console.print(f"\n[cyan]Completed uploading images. Total uploaded: {len(image_list)}") - return image_list, i + # If we broke out of the loop due to a failure, switch to the next host and retry + img_host_num += 1 + if img_host_num > len(self.config['DEFAULT']) - 1: + console.print("[red]All image hosts failed. 
Unable to complete uploads.") + return image_list, i # Or you could raise an exception if preferred + + img_host = self.config['DEFAULT'][f'img_host_{img_host_num}'] async def imgbox_upload(self, chdir, image_glob): os.chdir(chdir) @@ -2404,14 +2558,9 @@ async def imgbox_upload(self, chdir, image_glob): image_list.append(image_dict) return image_list - - - - - async def get_name(self, meta): type = meta.get('type', "") - title = meta.get('title',"") + title = meta.get('title', "") alt_title = meta.get('aka', "") year = meta.get('year', "") resolution = meta.get('resolution', "") @@ -2429,7 +2578,7 @@ async def get_name(self, meta): uhd = meta.get('uhd', "") hdr = meta.get('hdr', "") episode_title = meta.get('episode_title', '') - if meta.get('is_disc', "") == "BDMV": #Disk + if meta.get('is_disc', "") == "BDMV": # Disk video_codec = meta.get('video_codec', "") region = meta.get('region', "") elif meta.get('is_disc', "") == "DVD": @@ -2445,11 +2594,11 @@ async def get_name(self, meta): year = meta['year'] else: year = "" - if meta.get('no_season', False) == True: + if meta.get('no_season', False) is True: season = '' - if meta.get('no_year', False) == True: + if meta.get('no_year', False) is True: year = '' - if meta.get('no_aka', False) == True: + if meta.get('no_aka', False) is True: alt_title = '' if meta['debug']: console.log("[cyan]get_name cat/type") @@ -2458,38 +2607,38 @@ async def get_name(self, meta): console.log("[cyan]get_name meta:") console.log(meta) - #YAY NAMING FUN - if meta['category'] == "MOVIE": #MOVIE SPECIFIC - if type == "DISC": #Disk + # YAY NAMING FUN + if meta['category'] == "MOVIE": # MOVIE SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] - elif meta['is_disc'] == 'DVD': + elif meta['is_disc'] == 'DVD': name = f"{title} {alt_title} {year} {edition} {repack} {source} {dvd_size} {audio}" potential_missing = ['edition', 'distributor'] elif meta['is_disc'] == 'HDDVD': name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] - elif type == "REMUX" and source in ("BluRay", "HDDVD"): #BluRay/HDDVD Remux - name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" + elif type == "REMUX" and source in ("BluRay", "HDDVD"): # BluRay/HDDVD Remux + name = f"{title} {alt_title} {year} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" potential_missing = ['edition', 'description'] - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): #DVD Remux - name = f"{title} {alt_title} {year} {edition} {repack} {source} REMUX {audio}" + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD", "DVD"): # DVD Remux + name = f"{title} {alt_title} {year} {edition} {repack} {source} REMUX {audio}" potential_missing = ['edition', 'description'] - elif type == "ENCODE": #Encode - name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" + elif type == "ENCODE": # Encode + name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" potential_missing = ['edition', 'description'] - elif type == "WEBDL": #WEB-DL + elif type == "WEBDL": # WEB-DL name = f"{title} 
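
The restructured uploader replaces the old recursive retries with a loop: any failed host breaks the inner for loop, the outer loop advances img_host_num, and uploading resumes with the next configured host. A control-flow sketch under that reading; upload_all and try_upload are hypothetical stand-ins for upload_screens and its per-host branches. Note that the hunk's guard, img_host_num > len(self.config['DEFAULT']) - 1, counts every key in the DEFAULT section rather than only the img_host_N entries; probing with .get(), as below, sidesteps that:

    def try_upload(image, img_host):
        # Hypothetical stand-in for the ptpimg/imgbb/pixhost/... branches;
        # returns the raw URL on success or None on failure.
        ...

    def upload_all(images, config):
        img_host_num = 1
        uploaded = []
        while True:
            img_host = config['DEFAULT'].get(f'img_host_{img_host_num}')
            if img_host is None:
                return uploaded  # every configured host failed
            for image in images[len(uploaded):]:  # resume where we stopped
                url = try_upload(image, img_host)
                if url is None:
                    break  # failure: fall through to the next host
                uploaded.append(url)
            else:
                return uploaded  # inner loop finished without a failure
            img_host_num += 1
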
{alt_title} {year} {edition} {repack} {resolution} {uhd} {service} WEB-DL {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "WEBRIP": #WEBRip + elif type == "WEBRIP": # WEBRip name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {uhd} {service} WEBRip {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] - elif meta['category'] == "TV": #TV SPECIFIC - if type == "DISC": #Disk + elif meta['category'] == "TV": # TV SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} {year} {alt_title} {season}{episode} {three_d} {edition} {repack} {resolution} {region} {uhd} {source} {hdr} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] @@ -2499,29 +2648,28 @@ async def get_name(self, meta): elif meta['is_disc'] == 'HDDVD': name = f"{title} {alt_title} {year} {edition} {repack} {resolution} {source} {video_codec} {audio}" potential_missing = ['edition', 'region', 'distributor'] - elif type == "REMUX" and source in ("BluRay", "HDDVD"): #BluRay Remux - name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" #SOURCE + elif type == "REMUX" and source in ("BluRay", "HDDVD"): # BluRay Remux + name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {three_d} {edition} {repack} {resolution} {uhd} {source} REMUX {hdr} {video_codec} {audio}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): #DVD Remux - name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {source} REMUX {audio}" #SOURCE + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux + name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {source} REMUX {audio}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "ENCODE": #Encode - name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" #SOURCE + elif type == "ENCODE": # Encode + name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {source} {audio} {hdr} {video_encode}" # SOURCE potential_missing = ['edition', 'description'] - elif type == "WEBDL": #WEB-DL + elif type == "WEBDL": # WEB-DL name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {service} WEB-DL {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "WEBRIP": #WEBRip + elif type == "WEBRIP": # WEBRip name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {uhd} {service} WEBRip {audio} {hdr} {video_encode}" potential_missing = ['edition', 'service'] - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} {year} {alt_title} {season}{episode} {episode_title} {part} {edition} {repack} {resolution} {source} {audio} {video_encode}" potential_missing = [] - - try: + try: name = ' '.join(name.split()) - except: + except Exception: console.print("[bold red]Unable to generate name. 
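
Every naming template above joins optional components with single spaces, so absent parts leave runs of whitespace behind; the ' '.join(name.split()) in the try block below collapses them. A worked example with illustrative values:

    parts = {
        'title': "Example Movie", 'year': "2020", 'edition': "", 'repack': "",
        'resolution': "1080p", 'uhd': "", 'service': "AMZN",
        'audio': "DDP 5.1", 'hdr': "", 'video_encode': "H.264",
    }
    name = (f"{parts['title']} {parts['year']} {parts['edition']} {parts['repack']} "
            f"{parts['resolution']} {parts['uhd']} {parts['service']} WEB-DL "
            f"{parts['audio']} {parts['hdr']} {parts['video_encode']}")
    name = ' '.join(name.split())
    # -> "Example Movie 2020 1080p AMZN WEB-DL DDP 5.1 H.264"
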
    async def get_season_episode(self, video, meta):
        if meta['category'] == 'TV':
            filelist = meta['filelist']
            meta['tv_pack'] = 0
            is_daily = False
-            if meta['anime'] == False:
+            if meta['anime'] is False:
                try:
                    if meta.get('manual_date'):
-                        raise ManualDateException
+                        raise ManualDateException  # noqa: F405
                    try:
                        guess_year = guessit(video)['year']
                    except Exception:
                        guess_year = ""
                    if guessit(video)["season"] == guess_year:
                        if f"s{guessit(video)['season']}" in video.lower():
-                            season_int = str(guessit(video)["season"])
+                            season_int = str(guessit(video)["season"])
                            season = "S" + season_int.zfill(2)
                        else:
                            season_int = "1"
                            season = "S01"
                    else:
-                        season_int = str(guessit(video)["season"])
+                        season_int = str(guessit(video)["season"])
                        season = "S" + season_int.zfill(2)
                except Exception:
@@ -2574,11 +2719,11 @@ async def get_season_episode(self, video, meta):
                    season_int = "1"
                    season = "S01"
                try:
-                    if is_daily != True:
+                    if is_daily is not True:
                        episodes = ""
                        if len(filelist) == 1:
                            episodes = guessit(video)['episode']
-                            if type(episodes) == list:
+                            if isinstance(episodes, list):
                                episode = ""
                                for item in guessit(video)["episode"]:
                                    ep = (str(item).zfill(2))
                                    episode += f"E{ep}"
@@ -2596,25 +2741,18 @@ async def get_season_episode(self, video, meta):
                episode_int = "0"
                meta['tv_pack'] = 1
            else:
-                #If Anime
+                # If Anime
                parsed = anitopy.parse(Path(video).name)
-                # romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(guessit(parsed['anime_title'], {"excludes" : ["country", "language"]})['title'])
                romaji, mal_id, eng_title, seasonYear, anilist_episodes = self.get_romaji(parsed['anime_title'], meta.get('mal', None))
                if mal_id:
                    meta['mal_id'] = mal_id
-                if meta.get('tmdb_manual', None) == None:
+                if meta.get('tmdb_manual', None) is None:
                    year = parsed.get('anime_year', str(seasonYear))
-                    meta = await self.get_tmdb_id(guessit(parsed['anime_title'], {"excludes" : ["country", "language"]})['title'], year, meta, meta['category'])
+                    meta = await self.get_tmdb_id(guessit(parsed['anime_title'], {"excludes": ["country", "language"]})['title'], year, meta, meta['category'])
                meta = await self.tmdb_other_meta(meta)
                if meta['category'] != "TV":
                    return meta
-                # meta['title'] = eng_title
-                # difference = SequenceMatcher(None, eng_title, romaji.lower()).ratio()
-                # if difference >= 0.8:
-                #     meta['aka'] = ""
-                # else:
-                #     meta['aka'] = f" AKA {romaji}"
                tag = parsed.get('release_group', "")
                if tag != "":
                    meta['tag'] = f"-{tag}"
                try:
                    if len(filelist) == 1:
                        episodes = parsed.get('episode_number', guessit(video).get('episode', '1'))
                        if not isinstance(episodes, list) and not episodes.isnumeric():
                            episodes = guessit(video)['episode']
-                        if type(episodes) == list:
-                            episode = ""
-                            for item in episodes:
-                                ep = (str(item).zfill(2))
-                                episode += f"E{ep}"
-                            episode_int = episodes[0]
+                        if isinstance(episodes, list):
+                            episode_int = int(episodes[0])  # Always convert to integer
+                            episode = "".join([f"E{str(int(item)).zfill(2)}" for item in episodes])
                        else:
-                            episode_int = str(int(episodes))
-                            episode = f"E{str(int(episodes)).zfill(2)}"
+                            episode_int = int(episodes)  # Convert to integer
+                            episode = f"E{str(episode_int).zfill(2)}"
                except Exception:
                    episode = "E01"
-                    episode_int = 
"1" + episode_int = 1 # Ensure it's an integer console.print('[bold yellow]There was an error guessing the episode number. Guessing E01. Use [bold green]--episode #[/bold green] to correct if needed') await asyncio.sleep(1.5) else: episode = "" - episode_int = "0" + episode_int = 0 # Ensure it's an integer meta['tv_pack'] = 1 - + try: if meta.get('season_int'): - season = meta.get('season_int') + season_int = int(meta.get('season_int')) # Convert to integer else: - season = parsed.get('anime_season', guessit(video)['season']) - season_int = season - season = f"S{season.zfill(2)}" + season = parsed.get('anime_season', guessit(video).get('season', '1')) + season_int = int(season) # Convert to integer + season = f"S{str(season_int).zfill(2)}" except Exception: try: - if int(episode_int) >= anilist_episodes: + if episode_int >= anilist_episodes: params = { - 'id' : str(meta['tvdb_id']), - 'origin' : 'tvdb', - 'absolute' : str(episode_int), - # 'destination' : 'tvdb' + 'id': str(meta['tvdb_id']), + 'origin': 'tvdb', + 'absolute': str(episode_int), } url = "https://thexem.info/map/single" response = requests.post(url, params=params).json() if response['result'] == "failure": - raise XEMNotFound + raise XEMNotFound # noqa: F405 if meta['debug']: console.log(f"[cyan]TheXEM Absolute -> Standard[/cyan]\n{response}") - season_int = str(response['data']['scene']['season']) - season = f"S{str(response['data']['scene']['season']).zfill(2)}" + season_int = int(response['data']['scene']['season']) # Convert to integer + season = f"S{str(season_int).zfill(2)}" if len(filelist) == 1: - episode_int = str(response['data']['scene']['episode']) - episode = f"E{str(response['data']['scene']['episode']).zfill(2)}" + episode_int = int(response['data']['scene']['episode']) # Convert to integer + episode = f"E{str(episode_int).zfill(2)}" else: - #Get season from xem name map + season_int = 1 # Default to 1 if error occurs season = "S01" - season_int = "1" names_url = f"https://thexem.info/map/names?origin=tvdb&id={str(meta['tvdb_id'])}" names_response = requests.get(names_url).json() if meta['debug']: @@ -2686,76 +2819,55 @@ async def get_season_episode(self, video, meta): romaji_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", romaji.lower().replace(' ', '')) name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, romaji_check, name_check).ratio() - if romaji_check in name_check: - if diff >= difference: - if season_num != "all": - season_int = season_num - season = f"S{season_num.zfill(2)}" - else: - season_int = "1" - season = "S01" - difference = diff + if romaji_check in name_check and diff >= difference: + season_int = int(season_num) if season_num != "all" else 1 # Convert to integer + season = f"S{str(season_int).zfill(2)}" + difference = diff if lang == "us": for name in names: eng_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", eng_title.lower().replace(' ', '')) name_check = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", name.lower().replace(' ', '')) diff = SequenceMatcher(None, eng_check, name_check).ratio() - if eng_check in name_check: - if diff >= difference: - if season_num != "all": - season_int = season_num - season = f"S{season_num.zfill(2)}" - else: - season_int = "1" - season = "S01" - difference = diff + if eng_check in name_check and diff >= difference: + season_int = int(season_num) if season_num != "all" else 1 # Convert to integer + season = f"S{str(season_int).zfill(2)}" + difference = diff else: raise XEMNotFound except Exception: if meta['debug']: 
console.print_exception() try: - season = guessit(video)['season'] - season_int = season + season = guessit(video).get('season', '1') + season_int = int(season) # Convert to integer except Exception: - season_int = "1" + season_int = 1 # Default to 1 if error occurs season = "S01" console.print(f"[bold yellow]{meta['title']} does not exist on thexem, guessing {season}") console.print(f"[bold yellow]If [green]{season}[/green] is incorrect, use --season to correct") await asyncio.sleep(3) - # try: - # version = parsed['release_version'] - # if int(version) == 2: - # meta['repack'] = "REPACK" - # elif int(version) > 2: - # meta['repack'] = f"REPACK{int(version) - 1}" - # # version = f"v{version}" - # except Exception: - # # version = "" - # pass - - if meta.get('manual_season', None) == None: + + if meta.get('manual_season', None) is None: meta['season'] = season else: season_int = meta['manual_season'].lower().replace('s', '') meta['season'] = f"S{meta['manual_season'].lower().replace('s', '').zfill(2)}" - if meta.get('manual_episode', None) == None: + if meta.get('manual_episode', None) is None: meta['episode'] = episode else: episode_int = meta['manual_episode'].lower().replace('e', '') meta['episode'] = f"E{meta['manual_episode'].lower().replace('e', '').zfill(2)}" meta['tv_pack'] = 0 - + # if " COMPLETE " in Path(video).name.replace('.', ' '): # meta['season'] = "COMPLETE" meta['season_int'] = season_int meta['episode_int'] = episode_int - - meta['episode_title_storage'] = guessit(video,{"excludes" : "part"}).get('episode_title', '') + meta['episode_title_storage'] = guessit(video, {"excludes": "part"}).get('episode_title', '') if meta['season'] == "S00" or meta['episode'] == "E00": meta['episode_title'] = meta['episode_title_storage'] - + # Guess the part of the episode (if available) meta['part'] = "" if meta['tv_pack'] == 1: @@ -2764,62 +2876,60 @@ async def get_season_episode(self, video, meta): return meta - def get_service(self, video, tag, audio, guess_title): service = guessit(video).get('streaming_service', "") services = { - '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', - 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', - 'Amazon Prime': 'AMZN', 'ANLB': 'ANLB', 'AnimeLab': 'ANLB', 'ANPL': 'ANPL', 'Animal Planet': 'ANPL', - 'AOL': 'AOL', 'ARD': 'ARD', 'AS': 'AS', 'Adult Swim': 'AS', 'ATK': 'ATK', "America's Test Kitchen": 'ATK', - 'ATVP': 'ATVP', 'AppleTV': 'ATVP', 'AUBC': 'AUBC', 'ABC Australia': 'AUBC', 'BCORE': 'BCORE', 'BKPL': 'BKPL', - 'Blackpills': 'BKPL', 'BluTV': 'BLU', 'Binge': 'BNGE', 'BOOM': 'BOOM', 'Boomerang': 'BOOM', 'BRAV': 'BRAV', - 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', - 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', - 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', - 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', 'Crunchy Roll': 'CR', 'Crave': 'CRAV', - 'CRIT': 'CRIT', 'Criterion' : 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', - 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', - 'DC Universe': 'DCU', 'DDY': 'DDY', 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', - 'Deadhouse 
Films': 'DHF', 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', - 'Doc Club': 'DOCC', 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', - 'Daisuki': 'DSKI', 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', - 'EPIX': 'EPIX', 'ePix': 'EPIX', 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', - 'ETV': 'ETV', 'E!': 'ETV', 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', - 'Family Jr': 'FJR', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', - 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi' : 'FUNI', - 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', - 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', - 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', - 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR' : 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', 'hoichoi': 'HoiChoi', 'ID': 'ID', - 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', - 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', 'KNPY': 'KNPY', 'Kanopy' : 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', - 'MA' : 'MA', 'Movies Anywhere' : 'MA', 'MAX' : 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', 'MUBI': 'MUBI', - 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', 'Netflix': 'NF', - 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NRK': 'NRK', - 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', - 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', - 'PMNT': 'PMNT', 'PMTP' : 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', - 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RSTR': 'RSTR', 'RTE': 'RTE', - 'RTE One': 'RTE', 'RUUTU': 'RUUTU', 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST' : 'SKST', 'SkyShowtime': 'SKST', - 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', 'Spike': 'SPIK', 'Spike TV': 'SPKE', - 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', 'STRP': 'STRP', 'Star+' : 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', - 'Sveriges Television': 'SVT', 'SWER': 'SWER', 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', - 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', - 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', - 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', - 'VH1': 'VH1', 'VIAP': 
'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'VLCT': 'VLCT', - 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', - 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', + '9NOW': '9NOW', '9Now': '9NOW', 'AE': 'AE', 'A&E': 'AE', 'AJAZ': 'AJAZ', 'Al Jazeera English': 'AJAZ', + 'ALL4': 'ALL4', 'Channel 4': 'ALL4', 'AMBC': 'AMBC', 'ABC': 'AMBC', 'AMC': 'AMC', 'AMZN': 'AMZN', + 'Amazon Prime': 'AMZN', 'ANLB': 'ANLB', 'AnimeLab': 'ANLB', 'ANPL': 'ANPL', 'Animal Planet': 'ANPL', + 'AOL': 'AOL', 'ARD': 'ARD', 'AS': 'AS', 'Adult Swim': 'AS', 'ATK': 'ATK', "America's Test Kitchen": 'ATK', + 'ATVP': 'ATVP', 'AppleTV': 'ATVP', 'AUBC': 'AUBC', 'ABC Australia': 'AUBC', 'BCORE': 'BCORE', 'BKPL': 'BKPL', + 'Blackpills': 'BKPL', 'BluTV': 'BLU', 'Binge': 'BNGE', 'BOOM': 'BOOM', 'Boomerang': 'BOOM', 'BRAV': 'BRAV', + 'BravoTV': 'BRAV', 'CBC': 'CBC', 'CBS': 'CBS', 'CC': 'CC', 'Comedy Central': 'CC', 'CCGC': 'CCGC', + 'Comedians in Cars Getting Coffee': 'CCGC', 'CHGD': 'CHGD', 'CHRGD': 'CHGD', 'CMAX': 'CMAX', 'Cinemax': 'CMAX', + 'CMOR': 'CMOR', 'CMT': 'CMT', 'Country Music Television': 'CMT', 'CN': 'CN', 'Cartoon Network': 'CN', 'CNBC': 'CNBC', + 'CNLP': 'CNLP', 'Canal+': 'CNLP', 'COOK': 'COOK', 'CORE': 'CORE', 'CR': 'CR', 'Crunchy Roll': 'CR', 'Crave': 'CRAV', + 'CRIT': 'CRIT', 'Criterion': 'CRIT', 'CRKL': 'CRKL', 'Crackle': 'CRKL', 'CSPN': 'CSPN', 'CSpan': 'CSPN', 'CTV': 'CTV', 'CUR': 'CUR', + 'CuriosityStream': 'CUR', 'CW': 'CW', 'The CW': 'CW', 'CWS': 'CWS', 'CWSeed': 'CWS', 'DAZN': 'DAZN', 'DCU': 'DCU', + 'DC Universe': 'DCU', 'DDY': 'DDY', 'Digiturk Diledigin Yerde': 'DDY', 'DEST': 'DEST', 'DramaFever': 'DF', 'DHF': 'DHF', + 'Deadhouse Films': 'DHF', 'DISC': 'DISC', 'Discovery': 'DISC', 'DIY': 'DIY', 'DIY Network': 'DIY', 'DOCC': 'DOCC', + 'Doc Club': 'DOCC', 'DPLY': 'DPLY', 'DPlay': 'DPLY', 'DRPO': 'DRPO', 'Discovery Plus': 'DSCP', 'DSKI': 'DSKI', + 'Daisuki': 'DSKI', 'DSNP': 'DSNP', 'Disney+': 'DSNP', 'DSNY': 'DSNY', 'Disney': 'DSNY', 'DTV': 'DTV', + 'EPIX': 'EPIX', 'ePix': 'EPIX', 'ESPN': 'ESPN', 'ESQ': 'ESQ', 'Esquire': 'ESQ', 'ETTV': 'ETTV', 'El Trece': 'ETTV', + 'ETV': 'ETV', 'E!': 'ETV', 'FAM': 'FAM', 'Fandor': 'FANDOR', 'Facebook Watch': 'FBWatch', 'FJR': 'FJR', + 'Family Jr': 'FJR', 'FOOD': 'FOOD', 'Food Network': 'FOOD', 'FOX': 'FOX', 'Fox': 'FOX', 'Fox Premium': 'FOXP', + 'UFC Fight Pass': 'FP', 'FPT': 'FPT', 'FREE': 'FREE', 'Freeform': 'FREE', 'FTV': 'FTV', 'FUNI': 'FUNI', 'FUNi': 'FUNI', + 'Foxtel': 'FXTL', 'FYI': 'FYI', 'FYI Network': 'FYI', 'GC': 'GC', 'NHL GameCenter': 'GC', 'GLBL': 'GLBL', + 'Global': 'GLBL', 'GLOB': 'GLOB', 'GloboSat Play': 'GLOB', 'GO90': 'GO90', 'GagaOOLala': 'Gaga', 'HBO': 'HBO', + 'HBO Go': 'HBO', 'HGTV': 'HGTV', 'HIDI': 'HIDI', 'HIST': 'HIST', 'History': 'HIST', 'HLMK': 'HLMK', 'Hallmark': 'HLMK', + 'HMAX': 'HMAX', 'HBO Max': 'HMAX', 'HS': 'HTSR', 'HTSR': 'HTSR', 'HSTR': 'Hotstar', 'HULU': 'HULU', 'Hulu': 'HULU', 'hoichoi': 'HoiChoi', 'ID': 'ID', + 'Investigation Discovery': 'ID', 'IFC': 'IFC', 'iflix': 'IFX', 'National Audiovisual Institute': 'INA', 'ITV': 'ITV', + 'KAYO': 'KAYO', 'KNOW': 'KNOW', 'Knowledge Network': 'KNOW', 'KNPY': 'KNPY', 'Kanopy': 'KNPY', 'LIFE': 'LIFE', 'Lifetime': 'LIFE', 'LN': 'LN', + 'MA': 'MA', 'Movies Anywhere': 'MA', 'MAX': 'MAX', 'MBC': 'MBC', 'MNBC': 'MNBC', 'MSNBC': 'MNBC', 'MTOD': 'MTOD', 'Motor Trend OnDemand': 'MTOD', 'MTV': 'MTV', 'MUBI': 
'MUBI', + 'NATG': 'NATG', 'National Geographic': 'NATG', 'NBA': 'NBA', 'NBA TV': 'NBA', 'NBC': 'NBC', 'NF': 'NF', 'Netflix': 'NF', + 'National Film Board': 'NFB', 'NFL': 'NFL', 'NFLN': 'NFLN', 'NFL Now': 'NFLN', 'NICK': 'NICK', 'Nickelodeon': 'NICK', 'NRK': 'NRK', + 'Norsk Rikskringkasting': 'NRK', 'OnDemandKorea': 'ODK', 'Opto': 'OPTO', 'Oprah Winfrey Network': 'OWN', 'PA': 'PA', 'PBS': 'PBS', + 'PBSK': 'PBSK', 'PBS Kids': 'PBSK', 'PCOK': 'PCOK', 'Peacock': 'PCOK', 'PLAY': 'PLAY', 'PLUZ': 'PLUZ', 'Pluzz': 'PLUZ', 'PMNP': 'PMNP', + 'PMNT': 'PMNT', 'PMTP': 'PMTP', 'POGO': 'POGO', 'PokerGO': 'POGO', 'PSN': 'PSN', 'Playstation Network': 'PSN', 'PUHU': 'PUHU', 'QIBI': 'QIBI', + 'RED': 'RED', 'YouTube Red': 'RED', 'RKTN': 'RKTN', 'Rakuten TV': 'RKTN', 'The Roku Channel': 'ROKU', 'RSTR': 'RSTR', 'RTE': 'RTE', + 'RTE One': 'RTE', 'RUUTU': 'RUUTU', 'SBS': 'SBS', 'Science Channel': 'SCI', 'SESO': 'SESO', 'SeeSo': 'SESO', 'SHMI': 'SHMI', 'Shomi': 'SHMI', 'SKST': 'SKST', 'SkyShowtime': 'SKST', + 'SHO': 'SHO', 'Showtime': 'SHO', 'SNET': 'SNET', 'Sportsnet': 'SNET', 'Sony': 'SONY', 'SPIK': 'SPIK', 'Spike': 'SPIK', 'Spike TV': 'SPKE', + 'SPRT': 'SPRT', 'Sprout': 'SPRT', 'STAN': 'STAN', 'Stan': 'STAN', 'STARZ': 'STARZ', 'STRP': 'STRP', 'Star+': 'STRP', 'STZ': 'STZ', 'Starz': 'STZ', 'SVT': 'SVT', + 'Sveriges Television': 'SVT', 'SWER': 'SWER', 'SwearNet': 'SWER', 'SYFY': 'SYFY', 'Syfy': 'SYFY', 'TBS': 'TBS', 'TEN': 'TEN', + 'TFOU': 'TFOU', 'TFou': 'TFOU', 'TIMV': 'TIMV', 'TLC': 'TLC', 'TOU': 'TOU', 'TRVL': 'TRVL', 'TUBI': 'TUBI', 'TubiTV': 'TUBI', + 'TV3': 'TV3', 'TV3 Ireland': 'TV3', 'TV4': 'TV4', 'TV4 Sweeden': 'TV4', 'TVING': 'TVING', 'TVL': 'TVL', 'TV Land': 'TVL', + 'TVNZ': 'TVNZ', 'UFC': 'UFC', 'UKTV': 'UKTV', 'UNIV': 'UNIV', 'Univision': 'UNIV', 'USAN': 'USAN', 'USA Network': 'USAN', + 'VH1': 'VH1', 'VIAP': 'VIAP', 'VICE': 'VICE', 'Viceland': 'VICE', 'Viki': 'VIKI', 'VIMEO': 'VIMEO', 'VLCT': 'VLCT', + 'Velocity': 'VLCT', 'VMEO': 'VMEO', 'Vimeo': 'VMEO', 'VRV': 'VRV', 'VUDU': 'VUDU', 'WME': 'WME', 'WatchMe': 'WME', 'WNET': 'WNET', + 'W Network': 'WNET', 'WWEN': 'WWEN', 'WWE Network': 'WWEN', 'XBOX': 'XBOX', 'Xbox Video': 'XBOX', 'YHOO': 'YHOO', 'Yahoo': 'YHOO', 'YT': 'YT', 'ZDF': 'ZDF', 'iP': 'iP', 'BBC iPlayer': 'iP', 'iQIYI': 'iQIYI', 'iT': 'iT', 'iTunes': 'iT' - } - - + } + video_name = re.sub(r"[.()]", " ", video.replace(tag, '').replace(guess_title, '')) if "DTS-HD MA" in audio: video_name = video_name.replace("DTS-HD.MA.", "").replace("DTS-HD MA ", "") for key, value in services.items(): - if (' ' + key + ' ') in video_name and key not in guessit(video, {"excludes" : ["country", "language"]}).get('title', ''): + if (' ' + key + ' ') in video_name and key not in guessit(video, {"excludes": ["country", "language"]}).get('title', ''): service = value elif key == service: service = value @@ -2831,10 +2941,8 @@ def get_service(self, video, tag, audio, guess_title): service_longname = "Amazon" return service, service_longname - - def stream_optimized(self, stream_opt): - if stream_opt == True: + if stream_opt is True: stream = 1 else: stream = 0 @@ -2845,22 +2953,22 @@ def is_anon(self, anon_in): if anon.lower() == "true": console.print("[bold red]Global ANON has been removed in favor of per-tracker settings. 
Please update your config accordingly.") time.sleep(10) - if anon_in == True: + if anon_in is True: anon_out = 1 else: anon_out = 0 return anon_out async def upload_image(self, session, url, data, headers, files): - if headers == None and files == None: + if headers is None and files is None: async with session.post(url=url, data=data) as resp: response = await resp.json() return response - elif headers == None and files != None: + elif headers is None and files is not None: async with session.post(url=url, data=data, files=files) as resp: response = await resp.json() return response - elif headers != None and files == None: + elif headers is not None and files is None: async with session.post(url=url, data=data, headers=headers) as resp: response = await resp.json() return response @@ -2868,46 +2976,50 @@ async def upload_image(self, session, url, data, headers, files): async with session.post(url=url, data=data, headers=headers, files=files) as resp: response = await resp.json() return response - - + def clean_filename(self, name): - invalid = '<>:"/\|?*' + invalid = '<>:"/\\|?*' for char in invalid: name = name.replace(char, '-') return name - async def gen_desc(self, meta): + desclink = meta.get('desclink', None) descfile = meta.get('descfile', None) - ptp_desc = blu_desc = "" + ptp_desc = "" desc_source = [] + imagelist = [] with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'w', newline="", encoding='utf8') as description: description.seek(0) if (desclink, descfile, meta['desc']) == (None, None, None): - if meta.get('ptp_manual') != None: + if meta.get('ptp_manual') is not None: desc_source.append('PTP') - if meta.get('blu_manual') != None: + if meta.get('blu_manual') is not None: desc_source.append('BLU') if len(desc_source) != 1: desc_source = None else: desc_source = desc_source[0] - if meta.get('ptp', None) != None and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: + if meta.get('ptp', None) is not None and str(self.config['TRACKERS'].get('PTP', {}).get('useAPI')).lower() == "true" and desc_source in ['PTP', None]: + if meta.get('skip_gen_desc', False): + console.print("[cyan]Something went wrong with PTP description.") + return meta ptp = PTP(config=self.config) - ptp_desc = await ptp.get_ptp_description(meta['ptp'], meta['is_disc']) + ptp_desc, imagelist = await ptp.get_ptp_description(meta['ptp'], meta['is_disc']) if ptp_desc.replace('\r\n', '').replace('\n', '').strip() != "": description.write(ptp_desc) description.write("\n") meta['description'] = 'PTP' + meta['imagelist'] = imagelist # Save the imagelist to meta if needed if ptp_desc == "" and meta.get('blu_desc', '').rstrip() not in [None, ''] and desc_source in ['BLU', None]: if meta.get('blu_desc', '').strip().replace('\r\n', '').replace('\n', '') != '': description.write(meta['blu_desc']) meta['description'] = 'BLU' - if meta.get('desc_template', None) != None: + if meta.get('desc_template', None) is not None: from jinja2 import Template with open(f"{meta['base_dir']}/data/templates/{meta['desc_template']}.txt", 'r') as f: desc_templater = Template(f.read()) @@ -2915,15 +3027,18 @@ async def gen_desc(self, meta): if template_desc.strip() != "": description.write(template_desc) description.write("\n") + console.print(f"[INFO] Description from template '{meta['desc_template']}' used:\n{template_desc}") - if meta['nfo'] != False: + if meta['nfo'] is not False: description.write("[code]") nfo = glob.glob("*.nfo")[0] description.write(open(nfo, 
'r', encoding="utf-8").read()) description.write("[/code]") description.write("\n") meta['description'] = "CUSTOM" - if desclink != None: + console.print(f"[INFO] Description from NFO file '{nfo}' used:\n{nfo_content}") # noqa: F405 + + if desclink is not None: parsed = urllib.parse.urlparse(desclink.replace('/raw/', '/')) split = os.path.split(parsed.path) if split[0] != '/': @@ -2934,24 +3049,29 @@ async def gen_desc(self, meta): description.write(requests.get(raw).text) description.write("\n") meta['description'] = "CUSTOM" - - if descfile != None: - if os.path.isfile(descfile) == True: + console.print(f"[INFO] Description from link '{desclink}' used:\n{desclink_content}") # noqa: F405 + + if descfile is not None: + if os.path.isfile(descfile): text = open(descfile, 'r').read() description.write(text) - meta['description'] = "CUSTOM" - if meta['desc'] != None: + meta['description'] = "CUSTOM" + console.print(f"[INFO] Description from file '{descfile}' used:\n{text}") + + if meta['desc'] is not None: description.write(meta['desc']) description.write("\n") meta['description'] = "CUSTOM" + console.print(f"[INFO] Custom description used:\n{meta['desc']}") + description.write("\n") return meta - + async def tag_override(self, meta): with open(f"{meta['base_dir']}/data/tags.json", 'r', encoding="utf-8") as f: tags = json.load(f) f.close() - + for tag in tags: value = tags.get(tag) if value.get('in_name', "") == tag and tag in meta['path']: @@ -2970,7 +3090,6 @@ async def tag_override(self, meta): else: meta[key] = value.get(key) return meta - async def package(self, meta): if meta['tag'] == "": @@ -2994,7 +3113,7 @@ async def package(self, meta): generic.write(f"TVDB: https://www.thetvdb.com/?id={meta['tvdb_id']}&tab=series\n") poster_img = f"{meta['base_dir']}/tmp/{meta['uuid']}/POSTER.png" if meta.get('poster', None) not in ['', None] and not os.path.exists(poster_img): - if meta.get('rehosted_poster', None) == None: + if meta.get('rehosted_poster', None) is None: r = requests.get(meta['poster'], stream=True) if r.status_code == 200: console.print("[bold yellow]Rehosting Poster") @@ -3005,23 +3124,23 @@ async def package(self, meta): poster = poster[0] generic.write(f"TMDB Poster: {poster.get('raw_url', poster.get('img_url'))}\n") meta['rehosted_poster'] = poster.get('raw_url', poster.get('img_url')) - with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as metafile: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as metafile: json.dump(meta, metafile, indent=4) metafile.close() else: console.print("[bold yellow]Poster could not be retrieved") - elif os.path.exists(poster_img) and meta.get('rehosted_poster') != None: + elif os.path.exists(poster_img) and meta.get('rehosted_poster') is not None: generic.write(f"TMDB Poster: {meta.get('rehosted_poster')}\n") if len(meta['image_list']) > 0: - generic.write(f"\nImage Webpage:\n") + generic.write("\nImage Webpage:\n") for each in meta['image_list']: generic.write(f"{each['web_url']}\n") - generic.write(f"\nThumbnail Image:\n") + generic.write("\nThumbnail Image:\n") for each in meta['image_list']: generic.write(f"{each['img_url']}\n") title = re.sub(r"[^0-9a-zA-Z\[\\]]+", "", meta['title']) archive = f"{meta['base_dir']}/tmp/{meta['uuid']}/{title}" - torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}","*.torrent") + torrent_files = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", "*.torrent") if isinstance(torrent_files, list) and len(torrent_files) > 1: for each in torrent_files: if not 
each.startswith(('BASE', '[RAND')): @@ -3034,12 +3153,12 @@ async def package(self, meta): # shutil.copy(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"), os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['name'].replace(' ', '.')}.torrent").replace(' ', '.')) filebrowser = self.config['TRACKERS'].get('MANUAL', {}).get('filebrowser', None) shutil.make_archive(archive, 'tar', f"{meta['base_dir']}/tmp/{meta['uuid']}") - if filebrowser != None: + if filebrowser is not None: url = '/'.join(s.strip('/') for s in (filebrowser, f"/tmp/{meta['uuid']}")) url = urllib.parse.quote(url, safe="https://") else: files = { - "files[]" : (f"{meta['title']}.tar", open(f"{archive}.tar", 'rb')) + "files[]": (f"{meta['title']}.tar", open(f"{archive}.tar", 'rb')) } response = requests.post("https://uguu.se/upload.php", files=files).json() if meta['debug']: @@ -3048,14 +3167,14 @@ async def package(self, meta): return url except Exception: return False - return + return async def get_imdb_aka(self, imdb_id): if imdb_id == "0": return "", None ia = Cinemagoer() result = ia.get_movie(imdb_id.replace('tt', '')) - + original_language = result.get('language codes') if isinstance(original_language, list): if len(original_language) > 1: @@ -3081,7 +3200,6 @@ async def get_dvd_size(self, discs): dvd_sizes.sort() compact = " ".join(dvd_sizes) return compact - def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imdbid): if not is_disc: @@ -3090,7 +3208,7 @@ def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imd for each in extra: if each.lower().startswith('tmdb'): parser = Args(config=self.config) - category, tmdbid = parser.parse_tmdb_id(id = extra[each], category=category) + category, tmdbid = parser.parse_tmdb_id(id=extra[each], category=category) if each.lower().startswith('imdb'): try: imdbid = str(int(extra[each].replace('tt', ''))).zfill(7) @@ -3098,7 +3216,6 @@ def get_tmdb_imdb_from_mediainfo(self, mediainfo, category, is_disc, tmdbid, imd pass return category, tmdbid, imdbid - def daily_to_tmdb_season_episode(self, tmdbid, date): show = tmdb.TV(tmdbid) seasons = show.info().get('seasons') @@ -3118,9 +3235,6 @@ def daily_to_tmdb_season_episode(self, tmdbid, date): console.print(f"[yellow]Unable to map the date ([bold yellow]{str(date)}[/bold yellow]) to a Season/Episode number") return season, episode - - - async def get_imdb_info(self, imdbID, meta): imdb_info = {} if int(str(imdbID).replace('tt', '')) != 0: @@ -3149,18 +3263,17 @@ async def get_imdb_info(self, imdbID, meta): imdb_info['directors'].append(f"nm{director.getID()}") else: imdb_info = { - 'title' : meta['title'], - 'year' : meta['year'], - 'aka' : '', - 'type' : None, - 'runtime' : meta.get('runtime', '60'), - 'cover' : meta.get('poster'), + 'title': meta['title'], + 'year': meta['year'], + 'aka': '', + 'type': None, + 'runtime': meta.get('runtime', '60'), + 'cover': meta.get('poster'), } if len(meta.get('tmdb_directors', [])) >= 1: imdb_info['directors'] = meta['tmdb_directors'] return imdb_info - async def search_imdb(self, filename, search_year): imdbID = '0' @@ -3172,7 +3285,6 @@ async def search_imdb(self, filename, search_year): imdbID = str(movie.movieID).replace('tt', '') return imdbID - async def imdb_other_meta(self, meta): imdb_info = meta['imdb_info'] = await self.get_imdb_info(meta['imdb_id'], meta) meta['title'] = imdb_info['title'] @@ -3194,49 +3306,49 @@ async def search_tvmaze(self, filename, year, imdbID, tvdbID): tvmazeID = 0 lookup = False show 
= None - if imdbID == None: + if imdbID is None: imdbID = '0' - if tvdbID == None: + if tvdbID is None: tvdbID = 0 if int(tvdbID) != 0: params = { - "thetvdb" : tvdbID + "thetvdb": tvdbID } url = "https://api.tvmaze.com/lookup/shows" lookup = True elif int(imdbID) != 0: params = { - "imdb" : f"tt{imdbID}" + "imdb": f"tt{imdbID}" } url = "https://api.tvmaze.com/lookup/shows" lookup = True else: params = { - "q" : filename + "q": filename } - url = f"https://api.tvmaze.com/search/shows" + url = "https://api.tvmaze.com/search/shows" resp = requests.get(url=url, params=params) if resp.ok: resp = resp.json() - if resp == None: + if resp is None: return tvmazeID, imdbID, tvdbID - if lookup == True: + if lookup is True: show = resp else: if year not in (None, ''): for each in resp: premier_date = each['show'].get('premiered', '') - if premier_date != None: + if premier_date is not None: if premier_date.startswith(str(year)): show = each['show'] elif len(resp) >= 1: show = resp[0]['show'] - if show != None: + if show is not None: tvmazeID = show.get('id') if int(imdbID) == 0: - if show.get('externals', {}).get('imdb', '0') != None: + if show.get('externals', {}).get('imdb', '0') is not None: imdbID = str(show.get('externals', {}).get('imdb', '0')).replace('tt', '') if int(tvdbID) == 0: - if show.get('externals', {}).get('tvdb', '0') != None: + if show.get('externals', {}).get('tvdb', '0') is not None: tvdbID = show.get('externals', {}).get('tvdb', '0') return tvmazeID, imdbID, tvdbID \ No newline at end of file diff --git a/src/search.py b/src/search.py index 7256ce9c..9b51c677 100644 --- a/src/search.py +++ b/src/search.py @@ -1,8 +1,8 @@ import platform -import asyncio import os from src.console import console + class Search(): """ Logic for searching @@ -11,16 +11,16 @@ def __init__(self, config): self.config = config pass - async def searchFile(self, filename): - os_info = platform.platform() + os_info = platform.platform() # noqa F841 filename = filename.lower() files_total = [] if filename == "": console.print("nothing entered") return - file_found = False + file_found = False # noqa F841 words = filename.split() + async def search_file(search_dir): files_total_search = [] console.print(f"Searching {search_dir}") @@ -30,11 +30,11 @@ async def search_file(search_dir): l_name = name.lower() os_info = platform.platform() if await self.file_search(l_name, words): - file_found = True - if('Windows' in os_info): - files_total_search.append(root+'\\'+name) + file_found = True # noqa F841 + if ('Windows' in os_info): + files_total_search.append(root + '\\' + name) else: - files_total_search.append(root+'/'+name) + files_total_search.append(root + '/' + name) return files_total_search config_dir = self.config['DISCORD']['search_dir'] if isinstance(config_dir, list): @@ -46,14 +46,15 @@ async def search_file(search_dir): return files_total async def searchFolder(self, foldername): - os_info = platform.platform() + os_info = platform.platform() # noqa F841 foldername = foldername.lower() folders_total = [] if foldername == "": console.print("nothing entered") return - folders_found = False + folders_found = False # noqa F841 words = foldername.split() + async def search_dir(search_dir): console.print(f"Searching {search_dir}") folders_total_search = [] @@ -65,11 +66,11 @@ async def search_dir(search_dir): os_info = platform.platform() if await self.file_search(l_name, words): - folder_found = True - if('Windows' in os_info): - folders_total_search.append(root+'\\'+name) + folder_found = True # noqa 
F841
+                        if ('Windows' in os_info):
+                            folders_total_search.append(root + '\\' + name)
                    else:
-                        folders_total_search.append(root+'/'+name)
+                        folders_total_search.append(root + '/' + name)
            return folders_total_search
        config_dir = self.config['DISCORD']['search_dir']
@@ -83,10 +84,11 @@ async def search_dir(search_dir):
                return folders_total
        return folders_total
+
    async def file_search(self, name, name_words):
        check = True
        for word in name_words:
            if word not in name:
                check = False
                break
-        return check
+        return check
\ No newline at end of file
diff --git a/src/trackers/ACM.py b/src/trackers/ACM.py
index 773867cb..a65dd41c 100644
--- a/src/trackers/ACM.py
+++ b/src/trackers/ACM.py
@@ -9,7 +9,6 @@
from src.console import console

-
class ACM():
    """
    Edit for Tracker:
@@ -19,12 +18,6 @@ class ACM():
        Upload
    """
-    ###############################################################
-    ########                    EDIT ME                    ########
-    ###############################################################
-
-    # ALSO EDIT CLASS NAME ABOVE
-
    def __init__(self, config):
        self.config = config
        self.tracker = 'ACM'
@@ -39,10 +32,10 @@ async def get_cat_id(self, category_name):
        category_id = {
            'MOVIE': '1',
            'TV': '2',
-            }.get(category_name, '0')
+        }.get(category_name, '0')
        return category_id

-    async def get_type (self, meta):
+    async def get_type(self, meta):
        if meta['is_disc'] == "BDMV":
            bdinfo = meta['bdinfo']
            bd_sizes = [25, 50, 66, 100]
@@ -91,73 +84,73 @@ async def get_type_id(self, type):
            'SDTV': '13',
            'DVD 9': '16',
            'HDTV': '17'
-            }.get(type, '0')
+        }.get(type, '0')
        return type_id

    async def get_res_id(self, resolution):
        resolution_id = {
            '2160p': '1',
            '1080p': '2',
-            '1080i':'2',
+            '1080i': '2',
            '720p': '3',
            '576p': '4',
            '576i': '4',
            '480p': '5',
            '480i': '5'
-            }.get(resolution, '10')
+        }.get(resolution, '10')
        return resolution_id

-    #ACM rejects uploads with more that 4 keywords
+    # ACM rejects uploads with more than 4 keywords
    async def get_keywords(self, keywords):
-        if keywords !='':
+        if keywords != '':
            keywords_list = keywords.split(',')
            keywords_list = [keyword for keyword in keywords_list if " " not in keyword][:4]
-            keywords = ', '.join( keywords_list)
+            keywords = ', '.join(keywords_list)
        return keywords
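One behavior worth knowing about the keyword filter above: it splits on bare commas and drops any keyword containing a space, so every multi-word keyword is discarded before the four-keyword cap is applied. A small sketch with a hypothetical keyword string:

    keywords = "action,sci fi,space opera,alien,cyberpunk,dystopia"
    kept = [k for k in keywords.split(',') if " " not in k][:4]
    ', '.join(kept)  # -> 'action, alien, cyberpunk, dystopia'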
"pt") : 'Por', - ("Romanian", "rum", "ro") : 'Rom', - ("Russian", "rus", "ru") : 'Rus', - ("Serbian", "srp", "sr", "scc") : 'Ser', - ("Slovak", "slo", "sk") : 'Slo', - ("Slovenian", "slv", "sl") : 'Slv', - ("Spanish", "spa", "es") : 'Spa', - ("Swedish", "swe", "sv") : 'Swe', - ("Thai", "tha", "th") : 'Tha', - ("Turkish", "tur", "tr") : 'Tur', - ("Ukrainian", "ukr", "uk") : 'Ukr', - ("Vietnamese", "vie", "vi") : 'Vie', + ("Arabic", "ara", "ar"): 'Ara', + ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br'): 'Por-BR', + ("Bulgarian", "bul", "bg"): 'Bul', + ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)"): 'Chi', + ("Croatian", "hrv", "hr", "scr"): 'Cro', + ("Czech", "cze", "cz", "cs"): 'Cze', + ("Danish", "dan", "da"): 'Dan', + ("Dutch", "dut", "nl"): 'Dut', + ("English", "eng", "en", "English (CC)", "English - SDH"): 'Eng', + ("English - Forced", "English (Forced)", "en (Forced)"): 'Eng', + ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)"): 'Eng', + ("Estonian", "est", "et"): 'Est', + ("Finnish", "fin", "fi"): 'Fin', + ("French", "fre", "fr"): 'Fre', + ("German", "ger", "de"): 'Ger', + ("Greek", "gre", "el"): 'Gre', + ("Hebrew", "heb", "he"): 'Heb', + ("Hindi" "hin", "hi"): 'Hin', + ("Hungarian", "hun", "hu"): 'Hun', + ("Icelandic", "ice", "is"): 'Ice', + ("Indonesian", "ind", "id"): 'Ind', + ("Italian", "ita", "it"): 'Ita', + ("Japanese", "jpn", "ja"): 'Jpn', + ("Korean", "kor", "ko"): 'Kor', + ("Latvian", "lav", "lv"): 'Lav', + ("Lithuanian", "lit", "lt"): 'Lit', + ("Norwegian", "nor", "no"): 'Nor', + ("Persian", "fa", "far"): 'Per', + ("Polish", "pol", "pl"): 'Pol', + ("Portuguese", "por", "pt"): 'Por', + ("Romanian", "rum", "ro"): 'Rom', + ("Russian", "rus", "ru"): 'Rus', + ("Serbian", "srp", "sr", "scc"): 'Ser', + ("Slovak", "slo", "sk"): 'Slo', + ("Slovenian", "slv", "sl"): 'Slv', + ("Spanish", "spa", "es"): 'Spa', + ("Swedish", "swe", "sv"): 'Swe', + ("Thai", "tha", "th"): 'Tha', + ("Turkish", "tur", "tr"): 'Tur', + ("Ukrainian", "ukr", "uk"): 'Ukr', + ("Vietnamese", "vie", "vi"): 'Vie', } sub_langs = [] @@ -193,10 +186,6 @@ def get_subs_tag(self, subs): return ' [No Eng subs]' return f" [{subs[0]} subs only]" - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -207,12 +196,12 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) acm_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: # bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() mi_dump = None bd_dump = "" @@ -225,30 +214,30 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : acm_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 
'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : await self.get_keywords(meta['keywords']), - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': acm_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': await self.get_keywords(meta['keywords']), + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 if region_id != 0: @@ -262,33 +251,29 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(await self.get_type(meta)), + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdb': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(await self.get_type(meta)), # A majority of the ACM library doesn't contain resolution information # 'resolutions[]' : await self.get_res_id(meta['resolution']), # 'name' : "" @@ -302,7 +287,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) @@ -319,7 +304,7 @@ async def edit_name(self, meta): name = meta.get('name') aka = meta.get('aka') original_title = meta.get('original_title') - year = str(meta.get('year')) + year = str(meta.get('year')) # noqa F841 audio = meta.get('audio') source = meta.get('source') is_disc = meta.get('is_disc') @@ -328,7 +313,7 @@ async def edit_name(self, meta): if aka != '': # ugly fix to remove the extra space in the title aka = aka + ' ' - name = name.replace (aka, f' / {original_title} {chr(int("202A", 16))}') + name = name.replace(aka, f' / {original_title} {chr(int("202A", 16))}') elif aka == '': if meta.get('title') != original_title: # name = f'{name[:name.find(year)]}/ {original_title} {chr(int("202A", 16))}{name[name.find(year):]}' @@ -336,20 +321,18 @@ async def edit_name(self, meta): if 'AAC' in audio: name = name.replace(audio.strip().replace(" ", " "), audio.replace("AAC ", "AAC")) name = name.replace("DD+ ", "DD+") - name = name.replace ("UHD BluRay REMUX", "Remux") - name = name.replace ("BluRay REMUX", "Remux") - name = name.replace ("H.265", "HEVC") + name = name.replace("UHD BluRay REMUX", "Remux") + name = name.replace("BluRay REMUX", "Remux") + name = name.replace("H.265", "HEVC") if is_disc == 'DVD': - name = name.replace (f'{source} DVD5', f'{resolution} DVD {source}') - name = name.replace (f'{source} DVD9', f'{resolution} DVD {source}') + name = name.replace(f'{source} DVD5', f'{resolution} DVD {source}') + name = name.replace(f'{source} DVD9', f'{resolution} DVD {source}') if audio == meta.get('channels'): - name = name.replace (f'{audio}', f'MPEG {audio}') + name = name.replace(f'{audio}', f'MPEG {audio}') name = name + self.get_subs_tag(subs) return name - - async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: @@ -387,7 +370,7 @@ async def edit_desc(self, meta): img_url = images[each]['img_url'] descfile.write(f"[url={web_url}][img=350]{img_url}[/img][/url]") descfile.write("[/center]") - if self.signature != None: + if self.signature is not None: descfile.write(self.signature) descfile.close() - return + return \ No newline at end of file diff --git a/src/trackers/AITHER.py b/src/trackers/AITHER.py index 2f0ec799..4d182bc7 100644 --- a/src/trackers/AITHER.py +++ b/src/trackers/AITHER.py @@ -2,15 +2,14 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher from str2bool import str2bool import json -import os import platform from src.trackers.COMMON import COMMON from src.console import console + class AITHER(): """ Edit for Tracker: @@ -25,8 +24,10 @@ def __init__(self, config): self.source_flag = 'Aither' self.search_url = 'https://aither.cc/api/torrents/filter' self.upload_url = 'https://aither.cc/api/torrents/upload' - self.signature = f"\n[center][url=https://aither.cc/]Powered by Only-Uploader[/url][/center]" - self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Hi10', 'HiQVE', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 'TAoE', 'TGx', 'TSP', 'TSPxL', 'Tigole', 'Weasley[HONE]', 'Will1869', 'YIFY', 'x0r'] + self.signature = f"\n[center][url=https://github.com/edge20200/Only-Uploader]Powered by 
Only-Uploader[/url][/center]" + self.banned_groups = ['4K4U', 'AROMA', 'd3g', 'edge2020', 'EMBER', 'EVO', 'FGT', 'FreetheFish', 'Hi10', 'HiQVE', 'ION10', 'iVy', 'Judas', 'LAMA', 'MeGusta', 'nikt0', 'OEPlus', 'OFT', 'OsC', 'PYC', + 'QxR', 'Ralphy', 'RARBG', 'RetroPeeps', 'SAMPA', 'Sicario', 'Silence', 'SkipTT', 'SPDVD', 'STUTTERSHIT', 'SWTYBLZ', 'TAoE', 'TGx', 'Tigole', 'TSP', 'TSPxL', 'VXT', 'Weasley[HONE]', + 'Will1869', 'x0r', 'YIFY'] pass async def upload(self, meta): @@ -37,11 +38,11 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -51,28 +52,28 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' @@ -82,27 +83,25 @@ async def upload(self, meta): } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): aither_name = meta['name'] has_eng_audio = False @@ -134,7 +133,7 @@ async def get_cat_id(self, 
category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type): @@ -145,39 +144,35 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', - '1440p' : '3', + '1440p': '3', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -192,8 +187,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/AL.py b/src/trackers/AL.py index 4aecdd66..42a2ba72 100644 --- a/src/trackers/AL.py +++ b/src/trackers/AL.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -28,22 +27,22 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] pass - + async def get_cat_id(self, category_name): category_id = { - 'MOVIE': '1', - 'TV': '2', - }.get(category_name, '1') + 'MOVIE': '1', + 'TV': '2', + }.get(category_name, '1') return category_id async def get_type_id(self, type): type_id = { 'BDMV': '1', - 'DISC': '1', + 'DISC': '1', 'REMUX': '2', 'ENCODE': '3', - 'WEBDL': '4', - 'WEBRIP': '5', + 'WEBDL': '4', + 'WEBRIP': '5', 'HDTV': '6', 'DVDISO': '7', 'DVDRIP': '8', @@ -51,23 +50,23 @@ async def get_type_id(self, type): 'BDRIP': '10', 'COLOR': '11', 'MONO': '12' - }.get(type, '1') + }.get(type, '1') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', - '4320p': '1', - '2160p': '2', - '1440p' : '3', + '8640p': '10', + '4320p': '1', + '2160p': '2', + '1440p': '3', '1080p': '3', - '1080i':'4', - '720p': '5', - '576p': '6', + '1080i': '4', + '720p': '5', + '576p': '6', '576i': '7', - '480p': '8', + '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def upload(self, meta): @@ -80,12 +79,12 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and 
bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -95,34 +94,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - + if region_id != 0: data['region_id'] = region_id if distributor_id != 0: @@ -134,18 +133,18 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - - if meta['debug'] == False: + + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") - return + return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() @@ -153,12 +152,12 @@ async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if 
meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -170,7 +169,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) @@ -178,5 +177,5 @@ async def search_existing(self, meta): # Got this from CBR and changed the encoding rename async def edit_name(self, meta): - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","x264").replace("H 265","x265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1").replace("AAC1 0","AAC1.0").replace("DD+5 1","DDP5.1").replace("DD+2 0","DDP2.0").replace("DD+1 0","DDP1.0") + name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "x264").replace("H 265", "x265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") return name \ No newline at end of file diff --git a/src/trackers/ANT.py b/src/trackers/ANT.py index cd1cd769..2efc1266 100644 --- a/src/trackers/ANT.py +++ b/src/trackers/ANT.py @@ -4,6 +4,7 @@ import asyncio import requests import platform +import cli_ui from str2bool import str2bool from pymediainfo import MediaInfo import math @@ -12,6 +13,7 @@ from src.trackers.COMMON import COMMON from src.console import console + class ANT(): """ Edit for Tracker: @@ -21,25 +23,21 @@ class ANT(): Upload """ - ############################################################### - # ####### EDIT ME ##### # - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'ANT' self.source_flag = 'ANT' self.search_url = 'https://anthelion.me/api.php' self.upload_url = 'https://anthelion.me/api.php' - self.banned_groups = ['3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', - 'CrEwSaDe', 'd3g', 'DDR', 'DNL', 'DeadFish', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', - 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', - 'LiGaS', 'LOAD', 'MeGusta', 'MkvCage', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'NOIVTC', 'nSD', 'Oj', 'Ozlem', - 'PiRaTeS', 'PRoDJi', 
'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', - 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'TBS', 'Telly', 'TM', 'UPiNSMOKE', 'URANiME', 'WAF', 'xRed', - 'XS', 'YIFY', 'YTS', 'Zeus', 'ZKBL', 'ZmN', 'ZMNT'] + self.banned_groups = [ + '3LTON', '4yEo', 'ADE', 'AFG', 'AniHLS', 'AnimeRG', 'AniURL', 'AROMA', 'aXXo', 'Brrip', 'CHD', 'CM8', + 'CrEwSaDe', 'd3g', 'DDR', 'DNL', 'DeadFish', 'ELiTE', 'eSc', 'FaNGDiNG0', 'FGT', 'Flights', 'FRDS', + 'FUM', 'HAiKU', 'HD2DVD', 'HDS', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JIVE', 'KiNGDOM', 'Leffe', + 'LiGaS', 'LOAD', 'MeGusta', 'MkvCage', 'mHD', 'mSD', 'NhaNc3', 'nHD', 'NOIVTC', 'nSD', 'Oj', 'Ozlem', + 'PiRaTeS', 'PRoDJi', 'RAPiDCOWS', 'RARBG', 'RetroPeeps', 'RDN', 'REsuRRecTioN', 'RMTeam', 'SANTi', + 'SicFoI', 'SPASM', 'SPDVD', 'STUTTERSHIT', 'TBS', 'Telly', 'TM', 'UPiNSMOKE', 'URANiME', 'WAF', 'xRed', + 'XS', 'YIFY', 'YTS', 'Zeus', 'ZKBL', 'ZmN', 'ZMNT' + ] self.signature = None pass @@ -65,34 +63,52 @@ async def get_flags(self, meta): flags.append('Remux') return flags - ############################################################### - # #### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ### # - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) torrent_filename = "BASE" torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") + + # Calculate the total size of all files in the torrent total_size = sum(file.size for file in torrent.files) + # Calculate the total bytes consumed by all the pathnames in the torrent + def calculate_pathname_bytes(files): + total_pathname_bytes = sum(len(str(file).encode('utf-8')) for file in files) + return total_pathname_bytes + + total_pathname_bytes = calculate_pathname_bytes(torrent.files) + # Calculate the number of pieces and the torrent file size based on the current piece size - def calculate_pieces_and_file_size(total_size, piece_size): + def calculate_pieces_and_file_size(total_size, pathname_bytes, piece_size): num_pieces = math.ceil(total_size / piece_size) - torrent_file_size = 20 + (num_pieces * 20) # Approximate size: 20 bytes header + 20 bytes per piece + # Approximate size: 20 bytes header + 20 bytes per piece + pathname bytes + torrent_file_size = 20 + (num_pieces * 20) + pathname_bytes return num_pieces, torrent_file_size # Check if the existing torrent fits within the constraints - num_pieces, torrent_file_size = calculate_pieces_and_file_size(total_size, torrent.piece_size) + num_pieces, torrent_file_size = calculate_pieces_and_file_size(total_size, total_pathname_bytes, torrent.piece_size) - # If the torrent doesn't meet the constraints, regenerate it + # Convert torrent file size to KiB for display + torrent_file_size_kib = torrent_file_size / 1024 + + # If the torrent doesn't meet the constraints, ask the user if they want to regenerate it if not (1000 <= num_pieces <= 2000) or torrent_file_size > 102400: - console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 100 KiB .torrent size limit needed for ANT.") - from src.prep import Prep - prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) - - # Call create_torrent with the default piece size calculation - prep.create_torrent(meta, Path(meta['path']), "ANT") - torrent_filename = "ANT" + console.print(f"[yellow]Existing .torrent is outside of ANT preferred constraints with {num_pieces} pieces and is approximately {torrent_file_size_kib:.2f} KiB.") + regenerate = 
cli_ui.ask_yes_no("Do you wish to regenerate the torrent?", default=True) + + if regenerate: + console.print("[yellow]Regenerating torrent to fit within 1000-2000 pieces and 100 KiB .torrent size limit needed for ANT.") + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + # Override the max piece size before regenerating the torrent + meta['max_piece_size'] = '64' # 64 MiB, the maximum piece size allowed + + # Call create_torrent with the adjusted piece size + prep.create_torrent(meta, Path(meta['path']), "ANT") + torrent_filename = "ANT" + else: + console.print("[green]Using the existing torrent despite not meeting the preferred constraints.") else: console.print("[green]Existing torrent meets the constraints.") @@ -136,7 +152,7 @@ def calculate_pieces_and_file_size(total_size, piece_size): headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } - + try: if not meta['debug']: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) diff --git a/src/trackers/BHD.py b/src/trackers/BHD.py index 850dd765..e79bfaf2 100644 --- a/src/trackers/BHD.py +++ b/src/trackers/BHD.py @@ -4,13 +4,13 @@ import requests from difflib import SequenceMatcher from str2bool import str2bool -import urllib import os import platform from src.trackers.COMMON import COMMON from src.console import console + class BHD(): """ Edit for Tracker: @@ -39,12 +39,12 @@ async def upload(self, meta): tags = await self.get_tags(meta) custom, edition = await self.get_edition(meta, tags) bhd_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') @@ -52,24 +52,24 @@ async def upload(self, meta): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() torrent_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" files = { - 'mediainfo' : mi_dump, - } + 'mediainfo': mi_dump, + } if os.path.exists(torrent_file): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files['file'] = open_torrent.read() open_torrent.close() data = { - 'name' : bhd_name, - 'category_id' : cat_id, - 'type' : type_id, + 'name': bhd_name, + 'category_id': cat_id, + 'type': type_id, 'source': source_id, - 'imdb_id' : meta['imdb_id'].replace('tt', ''), - 'tmdb_id' : meta['tmdb'], - 'description' : desc, - 'anon' : anon, - 'sd' : meta.get('sd', 0), - 'live' : draft + 'imdb_id': meta['imdb_id'].replace('tt', ''), + 'tmdb_id': meta['tmdb'], + 'description': desc, + 'anon': anon, + 'sd': meta.get('sd', 0), + 'live': draft # 'internal' : 0, # 'featured' : 0, # 'free' : 0, @@ -77,7 +77,7 @@ async def upload(self, meta): # 'sticky' : 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): 
data['internal'] = 1 @@ -87,7 +87,7 @@ async def upload(self, meta): data['special'] = 1 if meta.get('region', "") != "": data['region'] = meta['region'] - if custom == True: + if custom is True: data['custom_edition'] = edition elif edition != "": data['edition'] = edition @@ -98,7 +98,7 @@ async def upload(self, meta): } url = self.upload_url + self.config['TRACKERS'][self.tracker]['api_key'].strip() - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=url, files=files, data=data, headers=headers) try: response = response.json() @@ -112,37 +112,31 @@ async def upload(self, meta): elif response['satus_message'].startswith('Invalid name value'): console.print(f"[bold yellow]Submitted Name: {bhd_name}") console.print(response) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) - - - - - - async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '1') + }.get(category_name, '1') return category_id async def get_source(self, source): sources = { - "Blu-ray" : "Blu-ray", - "BluRay" : "Blu-ray", - "HDDVD" : "HD-DVD", - "HD DVD" : "HD-DVD", - "Web" : "WEB", - "HDTV" : "HDTV", - "UHDTV" : "HDTV", - "NTSC" : "DVD", "NTSC DVD" : "DVD", - "PAL" : "DVD", "PAL DVD": "DVD", + "Blu-ray": "Blu-ray", + "BluRay": "Blu-ray", + "HDDVD": "HD-DVD", + "HD DVD": "HD-DVD", + "Web": "WEB", + "HDTV": "HDTV", + "UHDTV": "HDTV", + "NTSC": "DVD", "NTSC DVD": "DVD", + "PAL": "DVD", "PAL DVD": "DVD", } source_id = sources.get(source) @@ -185,8 +179,6 @@ async def get_type(self, meta): type_id = "Other" return type_id - - async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: @@ -221,8 +213,6 @@ async def edit_desc(self, meta): desc.close() return - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") @@ -230,9 +220,9 @@ async def search_existing(self, meta): if category == 'MOVIE': category = "Movies" data = { - 'tmdb_id' : meta['tmdb'], - 'categories' : category, - 'types' : await self.get_type(meta), + 'tmdb_id': meta['tmdb'], + 'categories': category, + 'types': await self.get_type(meta), } # Search all releases if SD if meta['sd'] == 1: @@ -255,7 +245,7 @@ async def search_existing(self, meta): else: console.print(f"[yellow]{response.get('status_message')}") await asyncio.sleep(5) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Most likely the site is down.') await asyncio.sleep(5) @@ -263,7 +253,7 @@ async def search_existing(self, meta): async def get_live(self, meta): draft = self.config['TRACKERS'][self.tracker]['draft_default'].strip() - draft = bool(str2bool(str(draft))) #0 for send to draft, 1 for live + draft = bool(str2bool(str(draft))) # 0 for send to draft, 1 for live if draft: draft_int = 0 else: @@ -301,13 +291,13 @@ async def get_tags(self, meta): tags.append('EnglishDub') if "Open Matte" in meta.get('edition', ""): tags.append("OpenMatte") - if meta.get('scene', False) == True: + if meta.get('scene', False) is True: tags.append("Scene") - if meta.get('personalrelease', False) == True: + if meta.get('personalrelease', False) is True: tags.append('Personal') if "hybrid" in meta.get('edition', "").lower(): tags.append('Hybrid') - if meta.get('has_commentary', False) == True: + if meta.get('has_commentary', False) is True: tags.append('Commentary') if "DV" in meta.get('hdr', ''): tags.append('DV') @@ -331,4 +321,4 @@ async def edit_name(self, meta): # name = name.replace('H.264', 'x264') if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '' and meta['episode'].strip() != '': name = name.replace(meta['episode'], f"{meta['episode']} {meta['episode_title_storage']}", 1) - return name + return name \ No newline at end of file diff --git a/src/trackers/BHDTV.py b/src/trackers/BHDTV.py index ea6f911c..17004cfe 100644 --- a/src/trackers/BHDTV.py +++ b/src/trackers/BHDTV.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- # import discord -import asyncio -from torf import Torrent import requests from src.console import console from str2bool import str2bool @@ -12,8 +10,6 @@ from pymediainfo import MediaInfo -# from pprint import pprint - class BHDTV(): """ Edit for Tracker: @@ -27,10 +23,10 @@ def __init__(self, config): self.config = config self.tracker = 'BHDTV' self.source_flag = 'BIT-HDTV' - #search not implemented - #self.search_url = 'https://api.bit-hdtv.com/torrent/search/advanced' + # search not implemented + # self.search_url = 'https://api.bit-hdtv.com/torrent/search/advanced' self.upload_url = 'https://www.bit-hdtv.com/takeupload.php' - #self.forum_link = 'https://www.bit-hdtv.com/rules.php' + # self.forum_link = 'https://www.bit-hdtv.com/rules.php' self.banned_groups = [] pass @@ -48,18 +44,16 @@ async def upload(self, meta): # must be TV pack sub_cat_id = await self.get_type_tv_pack_id(meta['type']) - - resolution_id = await self.get_res_id(meta['resolution']) # region_id = await common.unit3d_region_ids(meta.get('region')) # distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) if meta['anon'] == 0 and bool( - str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) == False: + str2bool(self.config['TRACKERS'][self.tracker].get('anon', "False"))) is False: anon = 0 else: - anon = 1 + anon = 1 # noqa F841 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -80,32 +74,31 @@ async def upload(self, meta): data = { 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip(), 'name': meta['name'].replace(' ', '.').replace(':.', '.').replace(':', '.').replace('DD+', 'DDP'), - 'mediainfo': mi_dump if bd_dump == None else bd_dump, + 'mediainfo': mi_dump if bd_dump is None else bd_dump, 'cat': cat_id, 'subcat': sub_cat_id, 'resolution': resolution_id, - #'anon': anon, + #
'anon': anon, # admins asked to remove short description. 'sdescr': " ", - 'descr': media_info if bd_dump == None else "Disc so Check Mediainfo dump ", + 'descr': media_info if bd_dump is None else "Disc so Check Mediainfo dump ", 'screen': desc, 'url': f"https://www.tvmaze.com/shows/{meta['tvmaze_id']}" if meta['category'] == 'TV' else f"https://www.imdb.com/title/tt{meta['imdb_id']}", 'format': 'json' } - - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, data=data, files=files) try: # pprint(data) console.print(response.json()) - except: - console.print(f"[cyan]It may have uploaded, go check") + except Exception: + console.print("[cyan]It may have uploaded, go check") # cprint(f"Request Data:", 'cyan') pprint(data) console.print(traceback.print_exc()) else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") pprint(data) # # adding my announce url to torrent. if 'view' in response.json()['data']: @@ -116,7 +109,6 @@ async def upload(self, meta): "Torrent Did not upload") open_torrent.close() - async def get_cat_id(self, meta): category_id = '0' if meta['category'] == 'MOVIE': @@ -128,17 +120,16 @@ async def get_cat_id(self, meta): category_id = '10' return category_id - async def get_type_movie_id(self, meta): type_id = '0' - test = meta['type'] + test = meta['type'] # noqa F841 if meta['type'] == 'DISC': if meta['3D']: type_id = '46' else: type_id = '2' elif meta['type'] == 'REMUX': - if str(meta['name']).__contains__('265') : + if str(meta['name']).__contains__('265'): type_id = '48' elif meta['3D']: type_id = '45' @@ -147,53 +138,50 @@ async def get_type_movie_id(self, meta): elif meta['type'] == 'HDTV': type_id = '6' elif meta['type'] == 'ENCODE': - if str(meta['name']).__contains__('265') : + if str(meta['name']).__contains__('265'): type_id = '43' elif meta['3D']: type_id = '44' else: type_id = '1' elif meta['type'] == 'WEBDL' or meta['type'] == 'WEBRIP': - type_id = '5' + type_id = '5' return type_id - async def get_type_tv_id(self, type): type_id = { 'HDTV': '7', 'WEBDL': '8', 'WEBRIP': '8', - #'WEBRIP': '55', - #'SD': '59', + # 'WEBRIP': '55', + # 'SD': '59', 'ENCODE': '10', 'REMUX': '11', 'DISC': '12', }.get(type, '0') return type_id - async def get_type_tv_pack_id(self, type): type_id = { 'HDTV': '13', 'WEBDL': '14', 'WEBRIP': '8', - #'WEBRIP': '55', - #'SD': '59', + # 'WEBRIP': '55', + # 'SD': '59', 'ENCODE': '16', 'REMUX': '17', 'DISC': '18', }.get(type, '0') return type_id - async def get_res_id(self, resolution): resolution_id = { '2160p': '4', '1080p': '3', - '1080i':'2', + '1080i': '2', '720p': '1' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def edit_desc(self, meta): @@ -211,6 +199,6 @@ async def edit_desc(self, meta): return async def search_existing(self, meta): - console.print(f"[red]Dupes must be checked Manually") + console.print("[red]Dupes must be checked Manually") return ['Dupes must be checked Manually'] - ### hopefully someone else has the time to implement this. + # hopefully someone else has the time to implement this.
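The anonymity handling in BHDTV above follows the same pattern as every other tracker module touched in this patch: an upload is flagged anonymous unless both the run-level meta['anon'] value and the per-tracker 'anon' config entry resolve to false. A minimal sketch of that shared logic, assuming the meta dict and config['TRACKERS'][tracker] shapes used throughout this diff (resolve_anon is a hypothetical helper, not a function in this codebase):

from str2bool import str2bool

def resolve_anon(meta, tracker_cfg):
    # Named upload (0) only when the run did not request anonymity
    # (meta['anon'] == 0) and the tracker config's 'anon' entry parses
    # as false; any other combination means an anonymous upload (1).
    cfg_anon = bool(str2bool(str(tracker_cfg.get('anon', "False"))))
    return 0 if meta['anon'] == 0 and not cfg_anon else 1

For example, resolve_anon({'anon': 0}, {'anon': 'true'}) returns 1, matching the else branch in the upload methods. BHDTV itself only computes the flag and never sends it, since the 'anon' key is commented out of its payload, hence the noqa F841 (unused variable) marker above.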
\ No newline at end of file diff --git a/src/trackers/BLU.py b/src/trackers/BLU.py index b7360aed..33f5427e 100644 --- a/src/trackers/BLU.py +++ b/src/trackers/BLU.py @@ -2,13 +2,13 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console + class BLU(): """ Edit for Tracker: @@ -37,7 +37,6 @@ def __init__(self, config): async def upload(self, meta): common = COMMON(config=self.config) - blu_name = meta['name'] desc_header = "" if meta.get('webdv', False): @@ -49,12 +48,12 @@ async def upload(self, meta): resolution_id = await self.get_res_id(meta['resolution']) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -64,31 +63,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[BLU]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { - 'name' : blu_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': blu_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -106,29 +105,25 @@ async def upload(self, meta): 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def get_cat_id(self, category_name, edition): category_id = { 'MOVIE': 
'1', 'TV': '2', 'FANRES': '3' - }.get(category_name, '0') + }.get(category_name, '0') if category_name == 'MOVIE' and 'FANRES' in edition: category_id = '3' return category_id @@ -141,23 +136,23 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '12' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '11', '2160p': '1', - '1440p' : '2', + '1440p': '2', '1080p': '2', - '1080i':'3', + '1080i': '3', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def derived_dv_layer(self, meta): @@ -186,17 +181,16 @@ async def derived_dv_layer(self, meta): name = name.replace(meta['resolution'], f"Hybrid {meta['resolution']}") return name, desc_header - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', '')), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -210,7 +204,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) diff --git a/src/trackers/CBR.py b/src/trackers/CBR.py index 15e9eedb..a107158a 100644 --- a/src/trackers/CBR.py +++ b/src/trackers/CBR.py @@ -2,15 +2,13 @@ # import discord import asyncio import requests -import distutils.util -import os +from str2bool import str2bool import platform from src.trackers.COMMON import COMMON from src.console import console - class CBR(): """ Edit for Tracker: @@ -40,12 +38,12 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -55,31 +53,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[CBR]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -97,29 +95,25 @@ async def upload(self, meta): 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def get_cat_id(self, category_name, edition, meta): category_id = { 'MOVIE': '1', 'TV': '2', 'ANIMES': '4' - }.get(category_name, '0') - if meta['anime'] == True and category_id == '2': + 
}.get(category_name, '0') + if meta['anime'] is True and category_id == '2': category_id = '4' return category_id @@ -131,7 +125,7 @@ async def get_type_id(self, type): 'WEBDL': '4', 'WEBRIP': '5', 'HDTV': '6' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): @@ -139,29 +133,26 @@ async def get_res_id(self, resolution): '4320p': '1', '2160p': '2', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9', 'Other': '10', - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Buscando por duplicatas no tracker...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -175,7 +166,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]NΓ£o foi possivel buscar no tracker torrents duplicados. O tracker estΓ‘ offline ou sua api estΓ‘ incorreta') await asyncio.sleep(5) @@ -183,7 +174,6 @@ async def search_existing(self, meta): async def edit_name(self, meta): + name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DDP7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('DTS-HD MA 5 1', 'DTS-HD MA 5.1').replace("TrueHD 5 1", "TrueHD 5.1").replace("DTS-X 7 1", "DTS-X 7.1").replace("DTS-X 5 1", "DTS-X 5.1").replace("FLAC 2 0", "FLAC 2.0").replace("FLAC 5 1", "FLAC 5.1").replace("DD1 0", "DD1.0").replace("DTS ES 5 1", "DTS ES 5.1").replace("DTS5 1", "DTS 5.1").replace("AAC1 0", "AAC1.0").replace("DD+5 1", "DDP5.1").replace("DD+2 0", "DDP2.0").replace("DD+1 0", "DDP1.0") - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.265").replace("DD+7 1","DDP7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('DTS-HD MA 5 1','DTS-HD MA 5.1').replace("TrueHD 5 1","TrueHD 5.1").replace("DTS-X 7 1","DTS-X 7.1").replace("DTS-X 5 1","DTS-X 5.1").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 2 0","FLAC 2.0").replace("FLAC 5 1","FLAC 5.1").replace("DD1 0","DD1.0").replace("DTS ES 5 1","DTS ES 5.1").replace("DTS5 1","DTS 5.1").replace("AAC1 0","AAC1.0").replace("DD+5 1","DDP5.1").replace("DD+2 0","DDP2.0").replace("DD+1 0","DDP1.0") - - return name + return name \ No 
newline at end of file diff --git a/src/trackers/COMMON.py b/src/trackers/COMMON.py index bc53799f..688b295a 100644 --- a/src/trackers/COMMON.py +++ b/src/trackers/COMMON.py @@ -1,13 +1,14 @@ from torf import Torrent import os -import traceback import requests import re import json +import click from src.bbcode import BBCODE from src.console import console + class COMMON(): def __init__(self, config): self.config = config @@ -32,7 +33,6 @@ async def add_tracker_torrent(self, meta, tracker, source_flag, new_tracker, com new_torrent.metainfo['info']['source'] = source_flag Torrent.copy(new_torrent).write(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]{meta['clean_name']}.torrent", overwrite=True) - async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, desc_header=""): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r', encoding='utf8').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{tracker}]DESCRIPTION.txt", 'w', encoding='utf8') as descfile: @@ -61,7 +61,7 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des desc = base desc = bbcode.convert_pre_to_code(desc) desc = bbcode.convert_hide_to_spoiler(desc) - if comparison == False: + if comparison is False: desc = bbcode.convert_comparison_to_collapse(desc, 1000) desc = desc.replace('[img]', '[img=300]') @@ -75,14 +75,11 @@ async def unit3d_edit_desc(self, meta, tracker, signature, comparison=False, des descfile.write(f"[url={web_url}][img=350]{raw_url}[/img][/url]") descfile.write("[/center]") - if signature != None: + if signature is not None: descfile.write(signature) descfile.close() return - - - async def unit3d_region_ids(self, region): region_id = { 'AFG': 1, 'AIA': 2, 'ALA': 3, 'ALG': 4, 'AND': 5, 'ANG': 6, 'ARG': 7, 'ARM': 8, 'ARU': 9, @@ -115,7 +112,7 @@ async def unit3d_region_ids(self, region): 'TOG': 218, 'TRI': 219, 'TUN': 220, 'TUR': 221, 'TUV': 222, 'TWN': 223, 'UAE': 224, 'UGA': 225, 'UKR': 226, 'UMI': 227, 'URU': 228, 'USA': 229, 'UZB': 230, 'VAN': 231, 'VAT': 232, 'VEN': 233, 'VGB': 234, 'VIE': 235, 'VIN': 236, 'VIR': 237, 'WAL': 238, 'WLF': 239, 'YEM': 240, 'ZAM': 241, - 'ZIM': 242, 'EUR' : 243 + 'ZIM': 242, 'EUR': 243 }.get(region, 0) return region_id @@ -145,39 +142,126 @@ async def unit3d_distributor_ids(self, distributor): }.get(distributor, 0) return distributor_id - async def unit3d_torrent_info(self, tracker, torrent_url, id): - tmdb = imdb = tvdb = description = category = infohash = mal = None + async def unit3d_torrent_info(self, tracker, torrent_url, search_url, id=None, file_name=None): + tmdb = imdb = tvdb = description = category = infohash = mal = files = None # noqa F841 imagelist = [] - params = {'api_token' : self.config['TRACKERS'][tracker].get('api_key', '')} - url = f"{torrent_url}{id}" + + # Build the params for the API request + params = {'api_token': self.config['TRACKERS'][tracker].get('api_key', '')} + + # Determine the URL based on whether we're searching by file name or ID + if file_name: + url = f"{search_url}?file_name={file_name}" + console.print(f"[green]Searching {tracker} by file name: [bold yellow]{file_name}[/bold yellow]") + elif id: + url = f"{torrent_url}{id}?" 
+ console.print(f"[green]Searching {tracker} by ID: [bold yellow]{id}[/bold yellow] via {url}") + else: + console.print("[red]No ID or file name provided for search.[/red]") + return None, None, None, None, None, None, None, None, None + response = requests.get(url=url, params=params) + # console.print(f"Requested URL: {response.url}") + # console.print(f"Status Code: {response.status_code}") + try: - response = response.json() - attributes = response['attributes'] - category = attributes.get('category') - description = attributes.get('description') - tmdb = attributes.get('tmdb_id') - tvdb = attributes.get('tvdb_id') - mal = attributes.get('mal_id') - imdb = attributes.get('imdb_id') - infohash = attributes.get('info_hash') + json_response = response.json() + # console.print(json_response) + except ValueError: + # console.print(f"Response Text: {response.text}") + return None, None, None, None, None, None, None, None, None - bbcode = BBCODE() - description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) - console.print(f"[green]Successfully grabbed description from {tracker}") - except Exception: - console.print(traceback.print_exc()) - console.print(f"[yellow]Invalid Response from {tracker} API.") + try: + # Handle response when searching by file name (which might return a 'data' array) + data = json_response.get('data', []) + if data: + attributes = data[0].get('attributes', {}) + + # Extract data from the attributes + category = attributes.get('category') + description = attributes.get('description') + tmdb = attributes.get('tmdb_id') + tvdb = attributes.get('tvdb_id') + mal = attributes.get('mal_id') + imdb = attributes.get('imdb_id') + infohash = attributes.get('info_hash') + + if description: + bbcode = BBCODE() + description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) + console.print(f"[green]Successfully grabbed description from {tracker}") + console.print(f"[blue]Extracted description: [yellow]{description}") + + # Allow user to edit or discard the description + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + + if edit_choice.lower() == 'e': + edited_description = click.edit(description) + if edited_description: + description = edited_description.strip() + console.print(f"[green]Final description after editing:[/green] {description}") + elif edit_choice.lower() == 'd': + description = None + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print("[green]Keeping the original description.[/green]") + else: + console.print(f"[yellow]No description found for {tracker}.[/yellow]") + else: + console.print(f"[yellow]No data found in the response for {tracker} when searching by file name.[/yellow]") + + # Handle response when searching by ID + if id and not data: + attributes = json_response.get('attributes', {}) + + # Extract data from the attributes + category = attributes.get('category') + description = attributes.get('description') + tmdb = attributes.get('tmdb_id') + tvdb = attributes.get('tvdb_id') + mal = attributes.get('mal_id') + imdb = attributes.get('imdb_id') + infohash = attributes.get('info_hash') + + if description: + bbcode = BBCODE() + description, imagelist = bbcode.clean_unit3d_description(description, torrent_url) + console.print(f"[green]Successfully grabbed description from {tracker}") + console.print(f"[blue]Extracted description: 
[yellow]{description}") + # Allow user to edit or discard the description + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + + if edit_choice.lower() == 'e': + edited_description = click.edit(description) + if edited_description: + description = edited_description.strip() + console.print(f"[green]Final description after editing:[/green] {description}") + elif edit_choice.lower() == 'd': + description = None + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print("[green]Keeping the original description.[/green]") + else: + console.print(f"[yellow]No description found for {tracker}.[/yellow]") + + except Exception as e: + console.print_exception() + console.print(f"[yellow]Invalid Response from {tracker} API. Error: {str(e)}[/yellow]") - return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist + if description: # Ensure description is only printed if it's not None + console.print(f"[green]Final description to be returned:[/green] {description}") + + return tmdb, imdb, tvdb, mal, description, category, infohash, imagelist, file_name async def parseCookieFile(self, cookiefile): """Parse a cookies.txt file and return a dictionary of key value pairs compatible with requests.""" cookies = {} - with open (cookiefile, 'r') as fp: + with open(cookiefile, 'r') as fp: for line in fp: if not line.startswith(("# ", "\n", "#\n")): lineFields = re.split(' |\t', line.strip()) @@ -185,24 +269,22 @@ async def parseCookieFile(self, cookiefile): cookies[lineFields[5]] = lineFields[6] return cookies - - async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): ptgen = "" url = 'https://ptgen.zhenzhen.workers.dev' if ptgen_site != '': url = ptgen_site params = {} - data={} - #get douban url + data = {} + # get douban url if int(meta.get('imdb_id', '0')) != 0: data['search'] = f"tt{meta['imdb_id']}" ptgen = requests.get(url, params=data) - if ptgen.json()["error"] != None: + if ptgen.json()["error"] is not None: for retry in range(ptgen_retry): try: ptgen = requests.get(url, params=params) - if ptgen.json()["error"] == None: + if ptgen.json()["error"] is None: break except requests.exceptions.JSONDecodeError: continue @@ -210,20 +292,20 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): params['url'] = ptgen.json()['data'][0]['link'] except Exception: console.print("[red]Unable to get data from ptgen using IMDb") - params['url'] = console.input(f"[red]Please enter [yellow]Douban[/yellow] link: ") + params['url'] = console.input("[red]Please enter [yellow]Douban[/yellow] link: ") else: console.print("[red]No IMDb id was found.") - params['url'] = console.input(f"[red]Please enter [yellow]Douban[/yellow] link: ") + params['url'] = console.input("[red]Please enter [yellow]Douban[/yellow] link: ") try: ptgen = requests.get(url, params=params) - if ptgen.json()["error"] != None: + if ptgen.json()["error"] is not None: for retry in range(ptgen_retry): ptgen = requests.get(url, params=params) - if ptgen.json()["error"] == None: + if ptgen.json()["error"] is None: break ptgen = ptgen.json() meta['ptgen'] = ptgen - with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: + with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f: json.dump(meta, f, indent=4) f.close() ptgen = ptgen['format'] @@ -237,32 +319,6 @@ async def ptgen(self, meta, ptgen_site="", ptgen_retry=3): return "" return ptgen - 
- - # async def ptgen(self, meta): - # ptgen = "" - # url = "https://api.iyuu.cn/App.Movie.Ptgen" - # params = {} - # if int(meta.get('imdb_id', '0')) != 0: - # params['url'] = f"tt{meta['imdb_id']}" - # else: - # console.print("[red]No IMDb id was found.") - # params['url'] = console.input(f"[red]Please enter [yellow]Douban[/yellow] link: ") - # try: - # ptgen = requests.get(url, params=params) - # ptgen = ptgen.json() - # ptgen = ptgen['data']['format'] - # if "[/img]" in ptgen: - # ptgen = ptgen.split("[/img]")[1] - # ptgen = f"[img]{meta.get('imdb_info', {}).get('cover', meta.get('cover', ''))}[/img]{ptgen}" - # except: - # console.print_exception() - # console.print("[bold red]There was an error getting the ptgen") - # console.print(ptgen) - # return ptgen - - - async def filter_dupes(self, dupes, meta): if meta['debug']: console.log("[cyan]Pre-filtered dupes") @@ -275,35 +331,35 @@ async def filter_dupes(self, dupes, meta): remove_set = set({meta['resolution']}) search_combos = [ { - 'search' : meta['hdr'], - 'search_for' : {'HDR', 'PQ10'}, - 'update' : {'HDR|PQ10'} + 'search': meta['hdr'], + 'search_for': {'HDR', 'PQ10'}, + 'update': {'HDR|PQ10'} }, { - 'search' : meta['hdr'], - 'search_for' : {'DV'}, - 'update' : {'DV|DoVi'} + 'search': meta['hdr'], + 'search_for': {'DV'}, + 'update': {'DV|DoVi'} }, { - 'search' : meta['hdr'], - 'search_not' : {'DV', 'DoVi', 'HDR', 'PQ10'}, - 'update' : {'!(DV)|(DoVi)|(HDR)|(PQ10)'} + 'search': meta['hdr'], + 'search_not': {'DV', 'DoVi', 'HDR', 'PQ10'}, + 'update': {'!(DV)|(DoVi)|(HDR)|(PQ10)'} }, { - 'search' : str(meta.get('tv_pack', 0)), - 'search_for' : '1', - 'update' : {f"{meta['season']}(?!E\d+)"} + 'search': str(meta.get('tv_pack', 0)), + 'search_for': '1', + 'update': {rf"{meta['season']}(?!E\d+)"} }, { - 'search' : meta['episode'], - 'search_for' : meta['episode'], - 'update' : {meta['season'], meta['episode']} + 'search': meta['episode'], + 'search_for': meta['episode'], + 'update': {meta['season'], meta['episode']} } ] search_matches = [ { - 'if' : {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'}, - 'in' : meta['type'] + 'if': {'REMUX', 'WEBDL', 'WEBRip', 'HDTV'}, + 'in': meta['type'] } ] for s in search_combos: @@ -338,4 +394,4 @@ async def filter_dupes(self, dupes, meta): allow = False if allow and each not in new_dupes: new_dupes.append(each) - return new_dupes + return new_dupes \ No newline at end of file diff --git a/src/trackers/FL.py b/src/trackers/FL.py index b8ba056b..ea76e47d 100644 --- a/src/trackers/FL.py +++ b/src/trackers/FL.py @@ -2,20 +2,19 @@ import asyncio import re import os -from pathlib import Path from str2bool import str2bool -import json import glob import pickle from unidecode import unidecode -from urllib.parse import urlparse, quote +from urllib.parse import urlparse import cli_ui from bs4 import BeautifulSoup from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console + class FL(): def __init__(self, config): @@ -29,7 +28,6 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] - async def get_category_id(self, meta): has_ro_audio, has_ro_sub = await self.get_ro_tracks(meta) # 25 = 3D Movie @@ -69,7 +67,7 @@ async def get_category_id(self, meta): # 3 = DVD + RO cat_id = 3 - if meta.get('anime', False) == True: + if meta.get('anime', False) is True: # 24 = Anime cat_id = 24 return cat_id @@ -102,9 +100,8 @@ async def edit_name(self, meta): fl_name = fl_name.replace(' ', '.').replace('..', '.') return fl_name - 
############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### # noqa E266 ############################################################### async def upload(self, meta): @@ -117,9 +114,9 @@ async def upload(self, meta): # Confirm the correct naming order for FL cli_ui.info(f"Filelist name: {fl_name}") - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: fl_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if fl_confirm != True: + if fl_confirm is not True: fl_name_manually = cli_ui.ask_string("Please enter a proper name", default="") if fl_name_manually == "": console.print('No proper name given') @@ -130,10 +127,10 @@ async def upload(self, meta): # Torrent File Naming # Note: Don't Edit .torrent filename after creation, SubsPlease anime releases (because of their weird naming) are an exception - if meta.get('anime', True) == True and meta.get('tag', '') == '-SubsPlease': + if meta.get('anime', True) is True and meta.get('tag', '') == '-SubsPlease': torrentFileName = fl_name else: - if meta.get('isdir', False) == False: + if meta.get('isdir', False) is False: torrentFileName = meta.get('uuid') torrentFileName = os.path.splitext(torrentFileName)[0] else: @@ -142,26 +139,26 @@ async def upload(self, meta): # Download new .torrent from site fl_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r', newline='').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r', encoding='utf-8').read() with open(torrent_path, 'rb') as torrentFile: torrentFileName = unidecode(torrentFileName) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") } data = { - 'name' : fl_name, - 'type' : cat_id, - 'descr' : fl_desc.strip(), - 'nfo' : mi_dump + 'name': fl_name, + 'type': cat_id, + 'descr': fl_desc.strip(), + 'nfo': mi_dump } if int(meta.get('imdb_id', '').replace('tt', '')) != 0: data['imdbid'] = meta.get('imdb_id', '').replace('tt', '') data['description'] = meta['imdb_info'].get('genres', '') - if self.uploader_name not in ("", None) and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if self.uploader_name not in ("", None) and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: data['epenis'] = self.uploader_name if has_ro_audio: data['materialro'] = 'on' @@ -194,10 +191,9 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to FL Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - async def search_existing(self, meta): dupes = [] with requests.Session() as session: @@ -205,20 +201,20 @@ async def search_existing(self, meta): with open(cookiefile, 'rb') as cf: session.cookies.update(pickle.load(cf)) - search_url = f"https://filelist.io/browse.php" + search_url = 
"https://filelist.io/browse.php" if int(meta['imdb_id'].replace('tt', '')) != 0: params = { - 'search' : meta['imdb_id'], - 'cat' : await self.get_category_id(meta), - 'searchin' : '3' + 'search': meta['imdb_id'], + 'cat': await self.get_category_id(meta), + 'searchin': '3' } else: params = { - 'search' : meta['title'], - 'cat' : await self.get_category_id(meta), - 'searchin' : '0' + 'search': meta['title'], + 'cat': await self.get_category_id(meta), + 'searchin': '0' } - + r = session.get(search_url, params=params) await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') @@ -229,18 +225,15 @@ async def search_existing(self, meta): return dupes - - - async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/FL.pkl") if not os.path.exists(cookiefile): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') recreate = cli_ui.ask_yes_no("Log in again and create new session?") - if recreate == True: + if recreate is True: if os.path.exists(cookiefile): os.remove(cookiefile) await self.login(cookiefile) @@ -250,7 +243,6 @@ async def validate_credentials(self, meta): return False return True - async def validate_cookies(self, meta, cookiefile): url = "https://filelist.io/index.php" if os.path.exists(cookiefile): @@ -274,12 +266,12 @@ async def login(self, cookiefile): r = session.get("https://filelist.io/login.php") await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') - validator = soup.find('input', {'name' : 'validator'}).get('value') + validator = soup.find('input', {'name': 'validator'}).get('value') data = { - 'validator' : validator, - 'username' : self.username, - 'password' : self.password, - 'unlock' : '1', + 'validator': validator, + 'username': self.username, + 'password': self.password, + 'unlock': '1', } response = session.post('https://filelist.io/takelogin.php', data=data) await asyncio.sleep(0.5) @@ -306,8 +298,6 @@ async def download_new_torrent(self, session, id, torrent_path): console.print(r.text) return - - async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w', newline='') as descfile: @@ -323,7 +313,7 @@ async def edit_desc(self, meta): if meta['is_disc'] != 'BDMV': url = "https://up.img4k.net/api/description" data = { - 'mediainfo' : open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r').read(), + 'mediainfo': open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO_CLEANPATH.txt", 'r').read(), } if int(meta['imdb_id'].replace('tt', '')) != 0: data['imdbURL'] = f"tt{meta['imdb_id']}" @@ -336,10 +326,10 @@ async def edit_desc(self, meta): else: # BD Description Generator final_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_EXT.txt", 'r', encoding='utf-8').read() - if final_desc.strip() != "": # Use BD_SUMMARY_EXT and bbcode format it + if final_desc.strip() != "": # Use BD_SUMMARY_EXT and bbcode format it final_desc = final_desc.replace('[/pre][/quote]', f'[/pre][/quote]\n\n{desc}\n', 1) final_desc = final_desc.replace('DISC INFO:', '[pre][quote=BD_Info][b][color=#FF0000]DISC INFO:[/color][/b]').replace('PLAYLIST REPORT:', '[b][color=#FF0000]PLAYLIST REPORT:[/color][/b]').replace('VIDEO:', 
'[b][color=#FF0000]VIDEO:[/color][/b]').replace('AUDIO:', '[b][color=#FF0000]AUDIO:[/color][/b]').replace('SUBTITLES:', '[b][color=#FF0000]SUBTITLES:[/color][/b]') - final_desc += "[/pre][/quote]\n" # Closed bbcode tags + final_desc += "[/pre][/quote]\n" # Closed bbcode tags # Upload screens and append to the end of the description url = "https://up.img4k.net/api/description" screen_glob = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['filename']}-*.png") @@ -350,11 +340,10 @@ async def edit_desc(self, meta): final_desc += response.text.replace('\r\n', '\n') descfile.write(final_desc) - if self.signature != None: + if self.signature is not None: descfile.write(self.signature) descfile.close() - async def get_ro_tracks(self, meta): has_ro_audio = has_ro_sub = False if meta.get('is_disc', '') != 'BDMV': @@ -373,4 +362,4 @@ async def get_ro_tracks(self, meta): if audio_track['language'] == 'Romanian': has_ro_audio = True break - return has_ro_audio, has_ro_sub + return has_ro_audio, has_ro_sub \ No newline at end of file diff --git a/src/trackers/FNP.py b/src/trackers/FNP.py index 2cdf8ede..561ab8f9 100644 --- a/src/trackers/FNP.py +++ b/src/trackers/FNP.py @@ -3,7 +3,6 @@ import asyncio import requests import distutils.util -import os import platform from src.trackers.COMMON import COMMON @@ -19,12 +18,6 @@ class FNP(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'FNP' @@ -39,7 +32,7 @@ async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type): @@ -50,17 +43,17 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', '1440p' : '3', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', @@ -69,10 +62,6 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -82,12 +71,12 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -97,31 +86,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = 
{'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -136,35 +125,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -176,8 +161,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/HDB.py b/src/trackers/HDB.py index 8dbe7504..08e7ce2e 100644 --- a/src/trackers/HDB.py +++ b/src/trackers/HDB.py @@ -3,15 +3,16 @@ import re import os from pathlib import Path -import traceback import json import glob from unidecode import unidecode from urllib.parse import urlparse, quote from src.trackers.COMMON import COMMON -from src.bbcode import BBCODE -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console +from datetime import datetime +from torf import Torrent + class HDB(): @@ -25,7 +26,6 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] - async def get_type_category_id(self, meta): cat_id = "EXIT" # 6 = Audio Track @@ -46,12 +46,12 @@ async def get_type_category_id(self, meta): async def get_type_codec_id(self, meta): codecmap = { - "AVC" : 1, "H.264" : 1, - "HEVC" : 5, "H.265" : 5, - "MPEG-2" : 2, - "VC-1" : 3, - "XviD" : 4, - "VP9" : 6 + "AVC": 1, "H.264": 1, + "HEVC": 5, "H.265": 5, + "MPEG-2": 2, + "VC-1": 3, + "XviD": 4, + "VP9": 6 } searchcodec = meta.get('video_codec', meta.get('video_encode')) codec_id = codecmap.get(searchcodec, "EXIT") @@ -65,7 +65,7 @@ async def get_type_medium_id(self, meta): # 4 = Capture if meta.get('type', '') == "HDTV": medium_id = 4 - if meta.get('has_encode_settings', False) == True: + if meta.get('has_encode_settings', False) is True: medium_id = 3 # 3 = Encode if meta.get('type', '') in ("ENCODE", "WEBRIP"): @@ -80,18 +80,18 @@ async def get_type_medium_id(self, meta): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', - '1440p' : '3', + '1440p': '3', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def get_tags(self, meta): @@ -99,27 +99,27 @@ async def get_tags(self, meta): # Web Services: service_dict = { - "AMZN" : 28, - "NF" : 29, - "HULU" : 34, - "DSNP" : 33, - "HMAX" : 30, - "ATVP" : 27, - "iT" : 38, - "iP" : 56, - "STAN" : 32, - "PCOK" : 31, - "CR" : 72, - "PMTP" : 69, - "MA" : 77, - "SHO" : 76, - "BCORE" : 66, "CORE" : 66, - "CRKL" : 73, - "FUNI" : 74, - "HLMK" : 71, - "HTSR" : 79, - "CRAV" : 80, - 'MAX' : 88 + "AMZN": 28, + "NF": 29, + "HULU": 34, + "DSNP": 33, + "HMAX": 30, + "ATVP": 27, + "iT": 38, + "iP": 56, + "STAN": 32, + "PCOK": 31, + "CR": 72, + "PMTP": 69, + "MA": 77, + "SHO": 76, + "BCORE": 66, "CORE": 66, + "CRKL": 73, + "FUNI": 74, + "HLMK": 71, + "HTSR": 79, + "CRAV": 80, + 'MAX': 88 } if meta.get('service') in service_dict.keys(): tags.append(service_dict.get(meta['service'])) @@ -127,18 +127,17 @@ async def get_tags(self, meta): # Collections # Masters of Cinema, The Criterion Collection, Warner Archive Collection distributor_dict = { - "WARNER ARCHIVE" : 68, "WARNER ARCHIVE COLLECTION" : 68, "WAC" : 68, - "CRITERION" : 18, "CRITERION COLLECTION" : 18, "CC" : 18, - "MASTERS OF CINEMA" : 19, "MOC" : 19, - "KINO LORBER" : 55, "KINO" : 55, - "BFI VIDEO" : 63, "BFI" : 63, "BRITISH FILM INSTITUTE" : 63, - "STUDIO CANAL" : 65, - "ARROW" : 64 + "WARNER ARCHIVE": 68, "WARNER ARCHIVE COLLECTION": 68, "WAC": 68, + "CRITERION": 18, "CRITERION COLLECTION": 18, "CC": 18, + "MASTERS OF CINEMA": 19, "MOC": 19, + "KINO LORBER": 55, "KINO": 55, + "BFI VIDEO": 63, "BFI": 63, "BRITISH FILM 
INSTITUTE": 63, + "STUDIO CANAL": 65, + "ARROW": 64 } if meta.get('distributor') in distributor_dict.keys(): tags.append(distributor_dict.get(meta['distributor'])) - # 4K Remaster, if "IMAX" in meta.get('edition', ''): tags.append(14) @@ -151,20 +150,20 @@ async def get_tags(self, meta): tags.append(7) if "Atmos" in meta['audio']: tags.append(5) - if meta.get('silent', False) == True: - console.print('[yellow]zxx audio track found, suggesting you tag as silent') #57 + if meta.get('silent', False) is True: + console.print('[yellow]zxx audio track found, suggesting you tag as silent') # 57 # Video Metadata # HDR10, HDR10+, Dolby Vision, 10-bit, if "HDR" in meta.get('hdr', ''): if "HDR10+" in meta['hdr']: - tags.append(25) #HDR10+ + tags.append(25) # HDR10+ else: - tags.append(9) #HDR10 + tags.append(9) # HDR10 if "DV" in meta.get('hdr', ''): - tags.append(6) #DV + tags.append(6) # DV if "HLG" in meta.get('hdr', ''): - tags.append(10) #HLG + tags.append(10) # HLG return tags @@ -197,11 +196,6 @@ async def edit_name(self, meta): return hdb_name - - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -219,47 +213,71 @@ async def upload(self, meta): if "Dual-Audio" in meta['audio'] and meta['is_disc'] not in ("BDMV", "HDDVD", "DVD"): console.print("[bold red]Dual-Audio Encodes are not allowed") return - # FORM - # file : .torent file (needs renaming) - # name : name - # type_category : get_type_category_id - # type_codec : get_type_codec_id - # type_medium : get_type_medium_id - # type_origin : 0 unless internal (1) - # descr : description - # techinfo : mediainfo only, no bdinfo - # tags[] : get_tags - # imdb : imdb link - # tvdb_id : tvdb id - # season : season number - # episode : episode number - # anidb_id - # POST > upload/upload # Download new .torrent from site hdb_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() torrent_path = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent" + torrent = Torrent.read(torrent_path) + + # Check if the piece size exceeds 16 MiB and regenerate the torrent if needed + if torrent.piece_size > 16777216: # 16 MiB in bytes + console.print("[red]Piece size is OVER 16M and does not work on HDB. 
Generating a new .torrent") + + # Import Prep and regenerate the torrent with 16 MiB piece size limit + from src.prep import Prep + prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) + + if meta['is_disc']: + include = [] + exclude = [] + else: + include = ["*.mkv", "*.mp4", "*.ts"] + exclude = ["*.*", "*sample.mkv", "!sample*.*"] + + # Create a new torrent with piece size explicitly set to 16 MiB + new_torrent = prep.CustomTorrent( + path=Path(meta['path']), + trackers=["https://fake.tracker"], + source="L4G", + private=True, + exclude_globs=exclude, # Ensure this is always a list + include_globs=include, # Ensure this is always a list + creation_date=datetime.now(), + comment="Created by L4G's Upload Assistant", + created_by="L4G's Upload Assistant" + ) + + # Explicitly set the piece size and update metainfo + new_torrent.piece_size = 16777216 # 16 MiB in bytes + new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set + + # Validate and write the new torrent + new_torrent.validate_piece_size() + new_torrent.generate(callback=prep.torf_cb, interval=5) + new_torrent.write(torrent_path, overwrite=True) + + # Proceed with the upload process with open(torrent_path, 'rb') as torrentFile: if len(meta['filelist']) == 1: torrentFileName = unidecode(os.path.basename(meta['video']).replace(' ', '.')) else: torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorrent") } data = { - 'name' : hdb_name, - 'category' : cat_id, - 'codec' : codec_id, - 'medium' : medium_id, - 'origin' : 0, - 'descr' : hdb_desc.rstrip(), - 'techinfo' : '', - 'tags[]' : hdb_tags, + 'name': hdb_name, + 'category': cat_id, + 'codec': codec_id, + 'medium': medium_id, + 'origin': 0, + 'descr': hdb_desc.rstrip(), + 'techinfo': '', + 'tags[]': hdb_tags, } # If internal, set 1 - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 # If not BDMV fill mediainfo @@ -275,7 +293,6 @@ async def upload(self, meta): data['tvdb_episode'] = int(meta.get('episode_int', 1)) # aniDB - url = "https://hdbits.org/upload/upload" # Submit if meta['debug']: @@ -297,48 +314,44 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to HDB Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") url = "https://hdbits.org/api/torrents" data = { - 'username' : self.username, - 'passkey' : self.passkey, - 'category' : await self.get_type_category_id(meta), - 'codec' : await self.get_type_codec_id(meta), - 'medium' : await self.get_type_medium_id(meta), - 'search' : meta['resolution'] + 'username': self.username, + 'passkey': self.passkey, + 'category': await self.get_type_category_id(meta), + 'codec': await self.get_type_codec_id(meta), + 'medium': await self.get_type_medium_id(meta), + 'search': meta['resolution'] } if
int(meta.get('imdb_id', '0').replace('tt', '0')) != 0: - data['imdb'] = {'id' : meta['imdb_id']} + data['imdb'] = {'id': meta['imdb_id']} if int(meta.get('tvdb_id', '0')) != 0: - data['tvdb'] = {'id' : meta['tvdb_id']} + data['tvdb'] = {'id': meta['tvdb_id']} try: response = requests.get(url=url, data=json.dumps(data)) response = response.json() for each in response['data']: result = each['name'] dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your passkey is incorrect') await asyncio.sleep(5) return dupes - - - async def validate_credentials(self, meta): - vapi = await self.validate_api() + vapi = await self.validate_api() vcookie = await self.validate_cookies(meta) - if vapi != True: + if vapi is not True: console.print('[red]Failed to validate API. Please confirm that the site is up and your passkey is valid.') return False - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.') return False return True @@ -346,15 +359,15 @@ async def validate_credentials(self, meta): async def validate_api(self): url = "https://hdbits.org/api/test" data = { - 'username' : self.username, - 'passkey' : self.passkey + 'username': self.username, + 'passkey': self.passkey } try: r = requests.post(url, data=json.dumps(data)).json() if r.get('status', 5) == 0: return True return False - except: + except Exception: return False async def validate_cookies(self, meta): @@ -382,9 +395,9 @@ async def download_new_torrent(self, id, torrent_path): # Get HDB .torrent filename api_url = "https://hdbits.org/api/torrents" data = { - 'username' : self.username, - 'passkey' : self.passkey, - 'id' : id + 'username': self.username, + 'passkey': self.passkey, + 'id': id } r = requests.get(url=api_url, data=json.dumps(data)) filename = r.json()['data'][0]['filename'] @@ -392,8 +405,8 @@ async def download_new_torrent(self, id, torrent_path): # Download new .torrent download_url = f"https://hdbits.org/download.php/{quote(filename)}" params = { - 'passkey' : self.passkey, - 'id' : id + 'passkey': self.passkey, + 'id': id } r = requests.get(url=download_url, params=params) @@ -406,7 +419,7 @@ async def edit_desc(self, meta): with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile: from src.bbcode import BBCODE # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) == None: + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None: descfile.write(f"[center][quote]This release is sourced from {meta['service_longname']}[/quote][/center]") bbcode = BBCODE() if meta.get('discs', []) != []: @@ -433,7 +446,7 @@ async def edit_desc(self, meta): desc = bbcode.convert_comparison_to_centered(desc, 1000) desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) - if self.rehost_images == True: + if self.rehost_images is True: console.print("[green]Rehosting Images...") hdbimg_bbcode = await self.hdbimg_upload(meta) descfile.write(f"{hdbimg_bbcode}") @@ -446,20 +459,19 @@ async def edit_desc(self, meta): web_url = images[each]['web_url'] descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") - if self.signature != None: + if self.signature is not None: descfile.write(self.signature) 
descfile.close() - async def hdbimg_upload(self, meta): images = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png") url = "https://img.hdbits.org/upload_api.php" data = { - 'username' : self.username, - 'passkey' : self.passkey, - 'galleryoption' : 1, - 'galleryname' : meta['name'], - 'thumbsize' : 'w300' + 'username': self.username, + 'passkey': self.passkey, + 'galleryoption': 1, + 'galleryname': meta['name'], + 'thumbsize': 'w300' } files = {} @@ -473,59 +485,78 @@ async def hdbimg_upload(self, meta): image_bbcode = r.text return image_bbcode - - async def get_info_from_torrent_id(self, hdb_id): hdb_imdb = hdb_name = hdb_torrenthash = None url = "https://hdbits.org/api/torrents" data = { - "username" : self.username, - "passkey" : self.passkey, - "id" : hdb_id + "username": self.username, + "passkey": self.passkey, + "id": hdb_id } response = requests.get(url, json=data) if response.ok: try: response = response.json() if response['data'] != []: - hdb_imdb = response['data'][0].get('imdb', {'id' : None}).get('id') - hdb_tvdb = response['data'][0].get('tvdb', {'id' : None}).get('id') + hdb_imdb = response['data'][0].get('imdb', {'id': None}).get('id') + hdb_tvdb = response['data'][0].get('tvdb', {'id': None}).get('id') hdb_name = response['data'][0]['name'] hdb_torrenthash = response['data'][0]['hash'] - except: + except Exception: console.print_exception() else: console.print("Failed to get info from HDB ID. Either the site is down or your credentials are invalid") return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash - async def search_filename(self, filelist): + async def search_filename(self, search_term, search_file_folder): hdb_imdb = hdb_tvdb = hdb_name = hdb_torrenthash = hdb_id = None url = "https://hdbits.org/api/torrents" - data = { - "username" : self.username, - "passkey" : self.passkey, - "limit" : 100, - "file_in_torrent" : os.path.basename(filelist[0]) - } + + if search_file_folder == 'folder': # Handling disc case + data = { + "username": self.username, + "passkey": self.passkey, + "limit": 100, + "folder_in_torrent": os.path.basename(search_term) # Using folder name for search + } + console.print(f"[green]Searching HDB for folder: [bold yellow]{os.path.basename(search_term)}[/bold yellow]") + else: # Handling non-disc case + data = { + "username": self.username, + "passkey": self.passkey, + "limit": 100, + "file_in_torrent": os.path.basename(search_term) # Using filename for search + } + console.print(f"[green]Searching HDB for file: [bold yellow]{os.path.basename(search_term)}[/bold yellow]") + response = requests.get(url, json=data) - console.print(f"[green]Searching HDB for: [bold yellow]{os.path.basename(filelist[0])}[/bold yellow]") + if response.ok: try: - response = response.json() - if response['data'] != []: - for each in response['data']: - if each['numfiles'] == len(filelist): - hdb_imdb = each.get('imdb', {'id' : None}).get('id') - hdb_tvdb = each.get('tvdb', {'id' : None}).get('id') + response_json = response.json() + # console.print(f"[green]HDB API response: {response_json}[/green]") # Log the entire response for debugging + + # Check if 'data' key is present + if 'data' not in response_json: + console.print(f"[red]Error: 'data' key not found in HDB API response. 
Full response: {response_json}[/red]") + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id + + if response_json['data'] != []: + for each in response_json['data']: + if search_file_folder == 'folder' or each['numfiles'] == len(search_term): # Handle folder or filelist match + hdb_imdb = each.get('imdb', {'id': None}).get('id') + hdb_tvdb = each.get('tvdb', {'id': None}).get('id') hdb_name = each['name'] hdb_torrenthash = each['hash'] hdb_id = each['id'] - console.print(f'[bold green]Matched release with HDB ID: [yellow]{hdb_id}[/yellow][/bold green]') + console.print(f'[bold green]Matched release with HDB ID: [yellow]https://hdbits.org/details.php?id={hdb_id}[/yellow][/bold green]') return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id - except: + except Exception as e: console.print_exception() + console.print(f"[red]Failed to parse HDB API response. Error: {str(e)}[/red]") else: - console.print("Failed to get info from HDB ID. Either the site is down or your credentials are invalid") - console.print(f'[yellow]Could not find a matching release on HDB') - return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id + console.print(f"[red]Failed to get info from HDB. Status code: {response.status_code}, Reason: {response.reason}[/red]") + + console.print('[yellow]Could not find a matching release on HDB[/yellow]') + return hdb_imdb, hdb_tvdb, hdb_name, hdb_torrenthash, hdb_id \ No newline at end of file diff --git a/src/trackers/HDT.py b/src/trackers/HDT.py index 982e5b03..ec63dc58 100644 --- a/src/trackers/HDT.py +++ b/src/trackers/HDT.py @@ -2,20 +2,16 @@ import asyncio import re import os -import json -import glob import cli_ui -import pickle -from pathlib import Path from str2bool import str2bool from bs4 import BeautifulSoup from unidecode import unidecode from pymediainfo import MediaInfo - from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console + class HDT(): def __init__(self, config): @@ -92,9 +88,6 @@ async def get_category_id(self, meta): return cat_id - - - async def edit_name(self, meta): hdt_name = meta['name'] if meta['category'] == "TV" and meta.get('tv_pack', 0) == 0 and meta.get('episode_title_storage', '').strip() != '': @@ -110,7 +103,7 @@ async def edit_name(self, meta): return hdt_name ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### + ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### # noqa E266 ############################################################### async def upload(self, meta): @@ -122,9 +115,9 @@ async def upload(self, meta): # Confirm the correct naming order for HDT cli_ui.info(f"HDT name: {hdt_name}") - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: hdt_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if hdt_confirm != True: + if hdt_confirm is not True: hdt_name_manually = cli_ui.ask_string("Please enter a proper name", default="") if hdt_name_manually == "": console.print('No proper name given') @@ -140,12 +133,12 @@ async def upload(self, meta): with open(torrent_path, 'rb') as torrentFile: torrentFileName = unidecode(hdt_name) files = { - 'torrent' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent") + 'torrent': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorrent") } data = { - 'filename' : hdt_name, - 'category' : cat_id, - 'info' : hdt_desc.strip() + 'filename': hdt_name, +
'category': cat_id, + 'info': hdt_desc.strip() } # 3D @@ -173,7 +166,7 @@ async def upload(self, meta): data['season'] = 'false' # Anonymous check - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: data['anonymous'] = 'false' else: data['anonymous'] = 'true' @@ -200,10 +193,9 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to HDT Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa F405 return - async def search_existing(self, meta): dupes = [] with requests.Session() as session: @@ -211,22 +203,22 @@ async def search_existing(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") session.cookies.update(await common.parseCookieFile(cookiefile)) - search_url = f"https://hd-torrents.org/torrents.php" + search_url = "https://hd-torrents.org/torrents.php" csrfToken = await self.get_csrfToken(session, search_url) if int(meta['imdb_id'].replace('tt', '')) != 0: params = { - 'csrfToken' : csrfToken, - 'search' : meta['imdb_id'], - 'active' : '0', - 'options' : '2', - 'category[]' : await self.get_category_id(meta) + 'csrfToken': csrfToken, + 'search': meta['imdb_id'], + 'active': '0', + 'options': '2', + 'category[]': await self.get_category_id(meta) } else: params = { - 'csrfToken' : csrfToken, - 'search' : meta['title'], - 'category[]' : await self.get_category_id(meta), - 'options' : '3' + 'csrfToken': csrfToken, + 'search': meta['title'], + 'category[]': await self.get_category_id(meta), + 'options': '3' } r = session.get(search_url, params=params) @@ -239,16 +231,14 @@ async def search_existing(self, meta): return dupes - async def validate_credentials(self, meta): cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/HDT.txt") vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up or export a fresh cookie file from the site') return False return True - async def validate_cookies(self, meta, cookiefile): common = COMMON(config=self.config) url = "https://hd-torrents.org/index.php" @@ -268,8 +258,6 @@ async def validate_cookies(self, meta, cookiefile): else: return False - - """ Old login method, disabled because of site's DDOS protection. Better to use exported cookies. 
@@ -299,12 +287,11 @@ async def login(self, cookiefile): return """ - async def get_csrfToken(self, session, url): r = session.get(url) await asyncio.sleep(0.5) soup = BeautifulSoup(r.text, 'html.parser') - csrfToken = soup.find('input', {'name' : 'csrfToken'}).get('value') + csrfToken = soup.find('input', {'name': 'csrfToken'}).get('value') return csrfToken async def edit_desc(self, meta): @@ -315,7 +302,7 @@ async def edit_desc(self, meta): video = meta['filelist'][0] mi_template = os.path.abspath(f"{meta['base_dir']}/data/templates/MEDIAINFO.txt") if os.path.exists(mi_template): - media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={"inform" : f"file://{mi_template}"}) + media_info = MediaInfo.parse(video, output="STRING", full=False, mediainfo_options={"inform": f"file://{mi_template}"}) descfile.write(f"""[left][font=consolas]\n{media_info}\n[/font][/left]\n""") else: console.print("[bold red]Couldn't find the MediaInfo template") @@ -335,5 +322,4 @@ async def edit_desc(self, meta): raw_url = images[each]['raw_url'] descfile.write(f' ') - descfile.close() - + descfile.close() \ No newline at end of file diff --git a/src/trackers/HP.py b/src/trackers/HP.py index 9d706ca8..a03e5d1f 100644 --- a/src/trackers/HP.py +++ b/src/trackers/HP.py @@ -2,13 +2,13 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console + class HP(): """ Edit for Tracker: @@ -18,9 +18,6 @@ class HP(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### def __init__(self, config): self.config = config self.tracker = 'HP' @@ -35,7 +32,7 @@ async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type): @@ -46,7 +43,7 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): @@ -54,7 +51,7 @@ async def get_res_id(self, resolution): '8640p':'10', '4320p': '1', '2160p': '2', - '1440p' : '3', + '1440p': '3', '1080p': '3', '1080i':'4', '720p': '5', @@ -62,13 +59,9 @@ async def get_res_id(self, resolution): '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -78,12 +71,12 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', 
encoding='utf-8').read() else: @@ -93,31 +86,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -132,35 +125,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -175,8 +164,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/HUNO.py b/src/trackers/HUNO.py index a1f4b034..d34182c8 100644 --- a/src/trackers/HUNO.py +++ b/src/trackers/HUNO.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher from str2bool import str2bool import os import re @@ -11,6 +10,7 @@ from src.trackers.COMMON import COMMON from src.console import console + class HUNO(): """ Edit for Tracker: @@ -29,7 +29,6 @@ def __init__(self, config): self.banned_groups = ["4K4U, Bearfish, BiTOR, BONE, D3FiL3R, d3g, DTR, ELiTE, EVO, eztv, EzzRips, FGT, HashMiner, HETeam, HEVCBay, HiQVE, HR-DR, iFT, ION265, iVy, JATT, Joy, LAMA, m3th, MeGusta, MRN, Musafirboy, OEPlus, Pahe.in, PHOCiS, PSA, RARBG, RMTeam, ShieldBearer, SiQ, TBD, Telly, TSP, VXT, WKS, YAWNiX, YIFY, YTS"] pass - async def upload(self, meta): common = COMMON(config=self.config) await common.unit3d_edit_desc(meta, self.tracker, self.signature) @@ -37,17 +36,17 @@ async def upload(self, meta): cat_id = await self.get_cat_id(meta['category']) type_id = await self.get_type_id(meta) resolution_id = await self.get_res_id(meta['resolution']) - if meta['anon'] == 0 and bool(str2bool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) == False: + if meta['anon'] == 0 and bool(str2bool(self.config['TRACKERS']['HUNO'].get('anon', "False"))) is False: anon = 0 else: anon = 1 # adding logic to check if its an encode or webrip and not HEVC as only HEVC encodes and webrips are allowed if meta['video_codec'] != "HEVC" and (meta['type'] == "ENCODE" or meta['type'] == "WEBRIP"): - console.print(f'[bold red]Only x265/HEVC encodes are allowed') + console.print('[bold red]Only x265/HEVC encodes are allowed') return - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -57,22 +56,22 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[HUNO]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : await self.get_name(meta), - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : await self.is_plex_friendly(meta), - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], + 'name': await self.get_name(meta), + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': await self.is_plex_friendly(meta), + 'sd': meta['sd'], + 'keywords': meta['keywords'], 'season_pack': meta.get('tv_pack', 0), # 'featured' : 0, # 'free' : 0, @@ -95,18 +94,18 @@ async def upload(self, meta): 'api_token': tracker_config['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) # adding torrent link to comment of torrent file t_id = 
response.json()['data'].split(".")[1].split("/")[3] await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://hawke.uno/torrents/" + t_id) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() @@ -136,8 +135,8 @@ async def get_name(self, meta): basename = self.get_basename(meta) hc = meta.get('hardcoded-subs') type = meta.get('type', "") - title = meta.get('title',"") - alt_title = meta.get('aka', "") + title = meta.get('title', "") + alt_title = meta.get('aka', "") # noqa F841 year = meta.get('year', "") resolution = meta.get('resolution', "") audio = self.get_audio(meta) @@ -156,7 +155,7 @@ async def get_name(self, meta): hdr = meta.get('hdr', "") if not hdr.strip(): hdr = "SDR" - distributor = meta.get('distributor', "") + distributor = meta.get('distributor', "") # noqa F841 video_codec = meta.get('video_codec', "") video_encode = meta.get('video_encode', "").replace(".", "") if 'x265' in basename: @@ -170,42 +169,42 @@ async def get_name(self, meta): search_year = year scale = "DS4K" if "DS4K" in basename.upper() else "RM4K" if "RM4K" in basename.upper() else "" - #YAY NAMING FUN - if meta['category'] == "MOVIE": #MOVIE SPECIFIC - if type == "DISC": #Disk + # YAY NAMING FUN + if meta['category'] == "MOVIE": # MOVIE SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} ({year}) {three_d} {edition} ({resolution} {region} {uhd} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'DVD': name = f"{title} ({year}) {edition} ({resolution} {dvd_size} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" elif meta['is_disc'] == 'HDDVD': name = f"{title} ({year}) {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source == "BluRay": #BluRay Remux + elif type == "REMUX" and source == "BluRay": # BluRay Remux name = f"{title} ({year}) {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): #DVD Remux + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux name = f"{title} ({year}) {edition} (DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "ENCODE": #Encode + elif type == "ENCODE": # Encode name = f"{title} ({year}) {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" - elif type in ("WEBDL", "WEBRIP"): #WEB + elif type in ("WEBDL", "WEBRIP"): # WEB name = f"{title} ({year}) {edition} ({resolution} {scale} {uhd} {service} WEB-DL {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} ({year}) {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" - elif meta['category'] == "TV": #TV SPECIFIC - if type == "DISC": #Disk + elif meta['category'] == "TV": # TV SPECIFIC + if type == "DISC": # Disk if meta['is_disc'] == 'BDMV': name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {region} {uhd} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" if meta['is_disc'] == 'DVD': name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {dvd_size} {hybrid} {video_codec} {hdr} {audio} 
{tag}) {repack}" elif meta['is_disc'] == 'HDDVD': name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {source} {hybrid} {video_codec} {hdr} {audio} {tag}) {repack}" - elif type == "REMUX" and source == "BluRay": #BluRay Remux - name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" #SOURCE - elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): #DVD Remux - name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" #SOURCE - elif type == "ENCODE": #Encode - name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" #SOURCE - elif type in ("WEBDL", "WEBRIP"): #WEB + elif type == "REMUX" and source == "BluRay": # BluRay Remux + name = f"{title} ({search_year}) {season}{episode} {three_d} {edition} ({resolution} {uhd} {source} {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE + elif type == "REMUX" and source in ("PAL DVD", "NTSC DVD"): # DVD Remux + name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} DVD {hybrid} REMUX {video_codec} {hdr} {audio} {tag}) {repack}" # SOURCE + elif type == "ENCODE": # Encode + name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {source} {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" # SOURCE + elif type in ("WEBDL", "WEBRIP"): # WEB name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} {scale} {uhd} {service} WEB-DL {hybrid} {video_encode} {hdr} {audio} {tag}) {repack}" - elif type == "HDTV": #HDTV + elif type == "HDTV": # HDTV name = f"{title} ({search_year}) {season}{episode} {edition} ({resolution} HDTV {hybrid} {video_encode} {audio} {tag}) {repack}" if hc: name = re.sub(r'((\([0-9]{4}\)))', r'\1 Ensubbed', name) @@ -216,10 +215,9 @@ async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id - async def get_type_id(self, meta): basename = self.get_basename(meta) type = meta['type'] @@ -235,14 +233,13 @@ async def get_type_id(self, meta): else: return '0' - async def get_res_id(self, resolution): resolution_id = { - 'Other':'10', + 'Other': '10', '4320p': '1', '2160p': '2', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', @@ -251,27 +248,25 @@ async def get_res_id(self, resolution): }.get(resolution, '10') return resolution_id - async def is_plex_friendly(self, meta): lossy_audio_codecs = ["AAC", "DD", "DD+", "OPUS"] - if any(l in meta["audio"] for l in lossy_audio_codecs): + if any(l in meta["audio"] for l in lossy_audio_codecs): # noqa E741 return 1 return 0 - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS']['HUNO']['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS']['HUNO']['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 
'name': "" } if meta['category'] == 'TV': params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -285,8 +280,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/JPTV.py b/src/trackers/JPTV.py index 23ceb9fd..b4f7cebd 100644 --- a/src/trackers/JPTV.py +++ b/src/trackers/JPTV.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -19,12 +18,6 @@ class JPTV(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'JPTV' @@ -55,33 +48,25 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') - # DVDISO 17 - # DVDRIP 1 - # TS (Raw) 14 - # Re-encode 15 + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', - '1440p' : '3', + '1440p': '3', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -92,12 +77,12 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) jptv_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = "" for each in meta['discs']: mi_dump = mi_dump + each['summary'].strip() + "\n\n" @@ -108,31 +93,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : jptv_name, - 'description' : desc, - 'mediainfo' : mi_dump, + 'name': jptv_name, + 'description': desc, + 'mediainfo': mi_dump, # 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': 
meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -147,35 +132,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdb' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdb': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -190,21 +171,20 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) return dupes - async def edit_name(self, meta): name = meta.get('name') aka = meta.get('aka') original_title = meta.get('original_title') - year = str(meta.get('year')) + year = str(meta.get('year')) # noqa F841 audio = meta.get('audio') - source = meta.get('source') - is_disc = meta.get('is_disc') + source = meta.get('source') # noqa F841 + is_disc = meta.get('is_disc') # noqa F841 if aka != '': # ugly fix to remove the extra space in the title aka = aka + ' ' @@ -217,4 +197,4 @@ async def edit_name(self, meta): name = name.replace(audio.strip().replace(" ", " "), audio.replace(" ", "")) name = name.replace("DD+ ", "DD+") - return name + return name \ No newline at end of file diff --git a/src/trackers/LCD.py b/src/trackers/LCD.py index da64f5eb..936571cb 100644 --- a/src/trackers/LCD.py +++ b/src/trackers/LCD.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -10,7 +9,6 @@ from src.console import console - class LCD(): """ Edit for Tracker: @@ -40,12 +38,12 @@ async def upload(self, meta): region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -55,31 +53,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[LCD]{meta['clean_name']}.torrent", 'rb') files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")} data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -97,29 +95,25 @@ async def upload(self, meta): 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } 
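All of the UNIT3D-based trackers touched in this diff (FNP, HP, JPTV, LCD, LST) build the same request: a files dict holding the renamed .torrent, a data dict of release metadata, an api_token query parameter, and a POST to the tracker's upload endpoint. A minimal sketch of that shared shape, assuming a UNIT3D-style upload URL and a hypothetical unit3d_upload helper (neither is defined in this diff):

    import platform
    import requests

    def unit3d_upload(upload_url, api_key, torrent_path, data):
        # data carries the fields shown in the hunks above: name, description,
        # mediainfo/bdinfo, category_id/type_id/resolution_id, tmdb/imdb/tvdb/mal,
        # anonymous, stream, sd, keywords, personal_release, internal, etc.
        headers = {'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'}
        params = {'api_token': api_key.strip()}
        with open(torrent_path, 'rb') as torrent:
            files = {'torrent': torrent}
            response = requests.post(url=upload_url, files=files, data=data, headers=headers, params=params)
        return response.json()

Factoring the call out this way would shrink each tracker class down to its id maps and naming rules, which is all that actually differs between these files.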
- if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def get_cat_id(self, category_name, edition, meta): category_id = { 'MOVIE': '1', 'TV': '2', 'ANIMES': '6' - }.get(category_name, '0') - if meta['anime'] == True and category_id == '2': + }.get(category_name, '0') + if meta['anime'] is True and category_id == '2': category_id = '6' return category_id @@ -131,39 +125,36 @@ async def get_type_id(self, type): 'WEBDL': '4', 'WEBRIP': '5', 'HDTV': '6' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { -# '8640p':'10', + # '8640p':'10', '4320p': '1', '2160p': '2', -# '1440p' : '2', + # '1440p' : '2', '1080p': '3', - '1080i':'34', + '1080i': '34', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9', 'Other': '10', - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Buscando por duplicatas no tracker...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', ''), meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -177,7 +168,7 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Não foi possível buscar no tracker torrents duplicados. 
O tracker está offline ou sua api está incorreta') await asyncio.sleep(5) @@ -185,7 +176,6 @@ async def search_existing(self, meta): async def edit_name(self, meta): + name = meta['uuid'].replace('.mkv', '').replace('.mp4', '').replace(".", " ").replace("DDP2 0", "DDP2.0").replace("DDP5 1", "DDP5.1").replace("H 264", "H.264").replace("H 265", "H.265").replace("DD+7 1", "DD+7.1").replace("AAC2 0", "AAC2.0").replace('DD5 1', 'DD5.1').replace('DD2 0', 'DD2.0').replace('TrueHD 7 1', 'TrueHD 7.1').replace('DTS-HD MA 7 1', 'DTS-HD MA 7.1').replace('-C A A', '-C.A.A') - name = meta['uuid'].replace('.mkv','').replace('.mp4','').replace(".", " ").replace("DDP2 0","DDP2.0").replace("DDP5 1","DDP5.1").replace("H 264","H.264").replace("H 265","H.264").replace("DD+7 1","DD+7.1").replace("AAC2 0","AAC2.0").replace('DD5 1','DD5.1').replace('DD2 0','DD2.0').replace('TrueHD 7 1','TrueHD 7.1').replace('DTS-HD MA 7 1','DTS-HD MA 7.1').replace('-C A A','-C.A.A') - - return name + return name \ No newline at end of file diff --git a/src/trackers/LST.py b/src/trackers/LST.py index 1977f4bc..2b702829 100644 --- a/src/trackers/LST.py +++ b/src/trackers/LST.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -19,12 +18,6 @@ class LST(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'LST' @@ -42,7 +35,7 @@ async def get_cat_id(self, category_name, keywords, service): 'MOVIE': '1', 'TV': '2', 'Anime': '6', - }.get(category_name, '0') + }.get(category_name, '0') if category_name == 'TV' and 'anime' in keywords: category_id = '6' elif category_name == 'TV' and 'hentai' in service: @@ -57,29 +50,25 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', - '1440p' : '3', + '1440p': '3', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) cat_id = await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')) type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -103,38 +92,37 @@ async def upload(self, meta): desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'r').read() if 
meta.get('service') == "hentai": - desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + f"\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + f"\nhttps://myanimelist.net/anime/" + str(meta['mal']) + "[/center]" + desc + desc = "[center]" + "[img]" + str(meta['poster']) + "[/img][/center]" + "\n[center]" + "https://www.themoviedb.org/tv/" + str(meta['tmdb']) + "\nhttps://myanimelist.net/anime/" + str(meta['mal']) + "[/center]" + desc open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } - # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -149,35 +137,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta.get('keywords', ''), meta.get('service', '')), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 
'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -191,8 +175,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/LT.py b/src/trackers/LT.py index 60e9b2c5..2f0a58e3 100644 --- a/src/trackers/LT.py +++ b/src/trackers/LT.py @@ -2,7 +2,6 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool @@ -19,12 +18,6 @@ class LT(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'LT' @@ -41,17 +34,17 @@ async def get_cat_id(self, category_name, meta): 'TV': '2', 'ANIME': '5', 'TELENOVELAS': '8', - 'Doramas & Turcas': '20', - }.get(category_name, '0') - #if is anime - if meta['anime'] == True and category_id == '2': + 'Doramas & Turcas': '20', + }.get(category_name, '0') + # if is anime + if meta['anime'] is True and category_id == '2': category_id = '5' - #elif is telenovela + # elif is telenovela elif category_id == '2' and ("telenovela" in meta['keywords'] or "telenovela" in meta['overview']): category_id = '8' - #if is TURCAS o Doramas - #elif meta["original_language"] in ['ja', 'ko', 'tr'] and category_id == '2' and 'Drama' in meta['genres'] : - #category_id = '20' + # if is TURCAS o Doramas + # elif meta["original_language"] in ['ja', 'ko', 'tr'] and category_id == '2' and 'Drama' in meta['genres'] : + # category_id = '20' return category_id async def get_type_id(self, type): @@ -62,29 +55,29 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', - '1440p' : '3', + '1440p': '3', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def edit_name(self, meta): lt_name = meta['name'].replace('Dubbed', '').replace('Dual-Audio', '').replace(' ', ' ').strip() # Check if audio Spanish exists, if not append [SUBS] at the end - if meta['type'] != 'DISC': #DISC don't have mediainfo + if meta['type'] != 'DISC': # DISC don't have mediainfo audio_language_list = meta['mediainfo']['media']['track'][0].get('Audio_Language_List', '') if 'Spanish' not in audio_language_list and '[SUBS]' not in lt_name: if not meta['tag']: @@ -93,11 +86,6 @@ async def edit_name(self, meta): lt_name = lt_name.replace(meta['tag'], f" [SUBS]{meta['tag']}") return lt_name - - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -105,15 +93,15 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) 
resolution_id = await self.get_res_id(meta['resolution']) await common.unit3d_edit_desc(meta, self.tracker, self.signature) - #region_id = await common.unit3d_region_ids(meta.get('region')) + # region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id = await common.unit3d_distributor_ids(meta.get('distributor')) lt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -123,37 +111,34 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : lt_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': lt_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 - - if region_id != 0: - data['region_id'] = region_id if distributor_id != 0: data['distributor_id'] = distributor_id if meta.get('category') == "TV": @@ -163,35 +148,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await 
self.get_cat_id(meta['category'], meta), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category'], meta), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}" @@ -205,8 +186,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/MTV.py b/src/trackers/MTV.py index 56c071f1..afca463d 100644 --- a/src/trackers/MTV.py +++ b/src/trackers/MTV.py @@ -8,11 +8,12 @@ import cli_ui import pickle import re import traceback from pathlib import Path from str2bool import str2bool from src.trackers.COMMON import COMMON -from datetime import datetime, date +from datetime import datetime + class MTV(): """ @@ -43,7 +43,7 @@ async def upload(self, meta): # Initiate the upload with retry logic await self.upload_with_retry(meta, cookiefile, common) - + async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): approved_image_hosts = ['ptpimg', 'imgbox'] @@ -80,6 +80,9 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): if torrent.piece_size > 8388608: # 8 MiB in bytes console.print("[red]Piece size is OVER 8M and does not work on MTV.
Generating a new .torrent") + # Override the max_piece_size to 8 MiB + meta['max_piece_size'] = '8' # 8 MiB, to ensure the new torrent adheres to this limit + # Determine include and exclude patterns based on whether it's a disc or not if meta['is_disc']: include = [] # Adjust as needed for disc-specific inclusions, make sure it's a list @@ -92,6 +95,7 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): from src.prep import Prep prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) new_torrent = prep.CustomTorrent( + meta=meta, path=Path(meta['path']), trackers=["https://fake.tracker"], source="L4G", @@ -102,16 +106,12 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): comment="Created by L4G's Upload Assistant", created_by="L4G's Upload Assistant" ) - - # Explicitly set the piece size and update metainfo - new_torrent.piece_size = 8388608 # 8 MiB in bytes - new_torrent.metainfo['info']['piece length'] = 8388608 # Ensure 'piece length' is set - + # Validate and write the new torrent new_torrent.validate_piece_size() new_torrent.generate(callback=prep.torf_cb, interval=5) new_torrent.write(f"{meta['base_dir']}/tmp/{meta['uuid']}/MTV.torrent", overwrite=True) - + torrent_filename = "MTV" await common.edit_torrent(meta, self.tracker, self.source_flag, torrent_filename=torrent_filename) @@ -170,17 +170,17 @@ async def upload_with_retry(self, meta, cookiefile, common, img_host_index=1): console.print(response.url) else: if "authkey.php" in response.url: - console.print(f"[red]No DL link in response, It may have uploaded, check manually.") + console.print("[red]No DL link in response, It may have uploaded, check manually.") else: - console.print(f"[red]Upload Failed. It doesn't look like you are logged in.") - except: - console.print(f"[red]It may have uploaded, check manually.") + console.print("[red]Upload Failed. 
It doesn't look like you are logged in.") + except Exception: + console.print("[red]It may have uploaded, check manually.") print(traceback.print_exc()) else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) return - + async def handle_image_upload(self, meta, img_host_index=1, approved_image_hosts=None): if approved_image_hosts is None: approved_image_hosts = ['ptpimg', 'imgbox'] @@ -229,7 +229,7 @@ async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: # adding bd_dump to description if it exits and adding empty string to mediainfo - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -241,19 +241,19 @@ async def edit_desc(self, meta): desc.write("[mediainfo]" + mi_dump + "[/mediainfo]\n\n") images = meta['image_list'] if len(images) > 0: - desc.write(f"[spoiler=Screenshots]") + desc.write("[spoiler=Screenshots]") for each in range(len(images)): raw_url = images[each]['raw_url'] img_url = images[each]['img_url'] desc.write(f"[url={raw_url}][img=250]{img_url}[/img][/url]") - desc.write(f"[/spoiler]") + desc.write("[/spoiler]") desc.write(f"\n\n{base}") desc.close() return async def edit_group_desc(self, meta): description = "" - if meta['imdb_id'] not in ("0", "", None): + if meta['imdb_id'] not in ("0", "", None): description += f"https://www.imdb.com/title/tt{meta['imdb_id']}" if meta['tmdb'] != 0: description += f"\nhttps://www.themoviedb.org/{str(meta['category'].lower())}/{str(meta['tmdb'])}" @@ -289,21 +289,21 @@ async def edit_name(self, meta): mtv_name = re.sub(r"[^0-9a-zA-ZΓ€-ΓΏ. 
&+'\-\[\]]+", "", mtv_name) mtv_name = mtv_name.replace(' ', '.').replace('..', '.') return mtv_name - + async def get_res_id(self, resolution): resolution_id = { - '8640p':'0', + '8640p': '0', '4320p': '4000', '2160p': '2160', - '1440p' : '1440', + '1440p': '1440', '1080p': '1080', - '1080i':'1080', + '1080i': '1080', '720p': '720', '576p': '0', '576i': '0', '480p': '480', '480i': '480' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id async def get_cat_id(self, meta): @@ -344,7 +344,7 @@ async def get_source_id(self, meta): 'MIXED': '11', 'Unknown': '12', 'ENCODE': '7' - }.get(meta['type'], '0') + }.get(meta['type'], '0') return type_id async def get_origin_id(self, meta): @@ -355,6 +355,7 @@ async def get_origin_id(self, meta): # returning P2P else: return '3' + async def get_tags(self, meta): tags = [] # Genres @@ -369,7 +370,7 @@ async def get_tags(self, meta): tags.append('hd') # Streaming Service if str(meta['service_longname']) != "": - tags.append(f"{meta['service_longname'].lower().replace(' ', '.')}.source") + tags.append(f"{meta['service_longname'].lower().replace(' ', '.')}.source") # Release Type/Source for each in ['remux', 'WEB.DL', 'WEBRip', 'HDTV', 'BluRay', 'DVD', 'HDDVD']: if (each.lower().replace('.', '') in meta['type'].lower()) or (each.lower().replace('-', '') in meta['source']): @@ -388,14 +389,14 @@ async def get_tags(self, meta): tags.append('sd.season') else: tags.append('hd.season') - + # movie tags if meta['category'] == 'MOVIE': if meta['sd'] == 1: tags.append('sd.movie') else: tags.append('hd.movie') - + # Audio tags audio_tag = "" for each in ['dd', 'ddp', 'aac', 'truehd', 'mp3', 'mp2', 'dts', 'dts.hd', 'dts.x']: @@ -436,10 +437,10 @@ async def validate_credentials(self, meta): if not os.path.exists(cookiefile): await self.login(cookiefile) vcookie = await self.validate_cookies(meta, cookiefile) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. Please confirm that the site is up and your username and password is valid.') recreate = cli_ui.ask_yes_no("Log in again and create new session?") - if recreate == True: + if recreate is True: if os.path.exists(cookiefile): os.remove(cookiefile) await self.login(cookiefile) @@ -448,14 +449,14 @@ async def validate_credentials(self, meta): else: return False vapi = await self.validate_api() - if vapi != True: + if vapi is not True: console.print('[red]Failed to validate API. 
Please confirm that the site is up and your API key is valid.') return True async def validate_api(self): url = self.search_url params = { - 'apikey' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } try: r = requests.get(url, params=params) @@ -464,7 +465,7 @@ async def validate_api(self): console.print("[red]Invalid API Key") return False return True - except: + except Exception: return False async def validate_cookies(self, meta, cookiefile): @@ -499,12 +500,12 @@ async def login(self, cookiefile): with requests.Session() as session: url = 'https://www.morethantv.me/login' payload = { - 'username' : self.config['TRACKERS'][self.tracker].get('username'), - 'password' : self.config['TRACKERS'][self.tracker].get('password'), - 'keeploggedin' : 1, - 'cinfo' : '1920|1080|24|0', - 'submit' : 'login', - 'iplocked' : 1, + 'username': self.config['TRACKERS'][self.tracker].get('username'), + 'password': self.config['TRACKERS'][self.tracker].get('password'), + 'keeploggedin': 1, + 'cinfo': '1920|1080|24|0', + 'submit': 'login', + 'iplocked': 1, # 'ssl' : 'yes' } res = session.get(url="https://www.morethantv.me/login") @@ -521,11 +522,11 @@ async def login(self, cookiefile): mfa_code = pyotp.parse_uri(otp_uri).now() else: mfa_code = console.input('[yellow]MTV 2FA Code: ') - + two_factor_payload = { - 'token' : resp.text.rsplit('name="token" value="', 1)[1][:48], - 'code' : mfa_code, - 'submit' : 'login' + 'token': resp.text.rsplit('name="token" value="', 1)[1][:48], + 'code': mfa_code, + 'submit': 'login' } resp = session.post(url="https://www.morethantv.me/twofactor/login", data=two_factor_payload) # checking if logged in @@ -543,9 +544,9 @@ async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 't' : 'search', - 'apikey' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'q' : "" + 't': 'search', + 'apikey': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'q': "" } if meta['imdb_id'] not in ("0", "", None): params['imdbid'] = "tt" + meta['imdb_id'] @@ -569,9 +570,9 @@ async def search_existing(self, meta): console.print(f"[yellow]{rr.get('status_message')}") await asyncio.sleep(5) else: - console.print(f"[red]Site Seems to be down or not responding to API") - except: - console.print(f"[red]Unable to search for existing torrents on site. Most likely the site is down.") + console.print("[red]Site Seems to be down or not responding to API") + except Exception: + console.print("[red]Unable to search for existing torrents on site. 
Most likely the site is down.") dupes.append("FAILED SEARCH") print(traceback.print_exc()) await asyncio.sleep(5) diff --git a/src/trackers/NBL.py b/src/trackers/NBL.py index 90c8ca0e..ee610aa7 100644 --- a/src/trackers/NBL.py +++ b/src/trackers/NBL.py @@ -2,9 +2,7 @@ # import discord import asyncio import requests -import os from guessit import guessit -from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console @@ -18,13 +16,6 @@ class NBL(): Set type/category IDs Upload """ - - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'NBL' @@ -38,9 +29,8 @@ def __init__(self, config): 'PlaySD', 'playXD', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'Raze', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'SpaceFish', 'SPASM', 'SSA', 'Telly', 'Tenrai-Sensei', 'TM', 'Trix', 'URANiME', 'VipapkStudios', 'ViSiON', 'Wardevil', 'xRed', 'XS', 'YakuboEncodes', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] - - pass + pass async def get_cat_id(self, meta): if meta.get('tv_pack', 0) == 1: @@ -49,9 +39,6 @@ async def get_cat_id(self, meta): cat_id = 1 return cat_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### async def edit_desc(self, meta): # Leave this in so manual works return @@ -63,21 +50,21 @@ async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read()[:-65].strip() open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'file_input': open_torrent} data = { - 'api_key' : self.api_key, - 'tvmazeid' : int(meta.get('tvmaze_id', 0)), - 'mediainfo' : mi_dump, - 'category' : await self.get_cat_id(meta), - 'ignoredupes' : 'on' + 'api_key': self.api_key, + 'tvmazeid': int(meta.get('tvmaze_id', 0)), + 'mediainfo': mi_dump, + 'category': await self.get_cat_id(meta), + 'ignoredupes': 'on' } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data) try: if response.ok: @@ -86,33 +73,29 @@ async def upload(self, meta): else: console.print(response) console.print(response.text) - except: + except Exception: console.print_exception() console.print("[bold yellow]It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") if int(meta.get('tvmaze_id', 0)) != 0: - search_term = {'tvmaze' : int(meta['tvmaze_id'])} + search_term = {'tvmaze': int(meta['tvmaze_id'])} elif int(meta.get('imdb_id', '0').replace('tt', '')) == 0: - search_term = {'imdb' : meta.get('imdb_id', '0').replace('tt', '')} + search_term = {'imdb': meta.get('imdb_id', '0').replace('tt', '')} else: - search_term = {'series' : meta['title']} + search_term = {'series': 
meta['title']} json = { - 'jsonrpc' : '2.0', - 'id' : 1, - 'method' : 'getTorrents', - 'params' : [ + 'jsonrpc': '2.0', + 'id': 1, + 'method': 'getTorrents', + 'params': [ self.api_key, search_term ] } @@ -143,4 +126,4 @@ async def search_existing(self, meta): except Exception: console.print_exception() - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/OE.py b/src/trackers/OE.py index ff3557b3..3c86198f 100644 --- a/src/trackers/OE.py +++ b/src/trackers/OE.py @@ -2,15 +2,13 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher from str2bool import str2bool -import json -import os import platform from src.trackers.COMMON import COMMON from src.console import console + class OE(): """ Edit for Tracker: @@ -26,7 +24,7 @@ def __init__(self, config): self.search_url = 'https://onlyencodes.cc/api/torrents/filter' self.upload_url = 'https://onlyencodes.cc/api/torrents/upload' self.signature = f"\n[center][url=https://onlyencodes.cc/wikis/17]Powered by Only-Uploader[/url][/center]" - self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AOC', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GalaxyRG265', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'HiQVE', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TGx', 'TM', 'topaz', 'TSP', 'TSPxL', 'UnKn0wn','URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] + self.banned_groups = ['0neshot', '3LT0N', '4K4U', '4yEo', '$andra', '[Oj]', 'AFG', 'AkihitoSubs', 'AniHLS', 'Anime Time', 'AnimeRG', 'AniURL', 'AR', 'AROMA', 'ASW', 'aXXo', 'BakedFish', 'BiTOR', 'BHDStudio', 'BRrip', 'bonkai', 'Cleo', 'CM8', 'C4K', 'CrEwSaDe', 'core', 'd3g', 'DDR', 'DeadFish', 'DeeJayAhmed', 'DNL', 'ELiTE', 'EMBER', 'eSc', 'EVO', 'EZTV', 'FaNGDiNG0', 'FGT', 'fenix', 'FUM', 'FRDS', 'FROZEN', 'GalaxyTV', 'GalaxyRG', 'GERMini', 'Grym', 'GrymLegacy', 'HAiKU', 'HD2DVD', 'HDTime', 'Hi10', 'ION10', 'iPlanet', 'JacobSwaggedUp', 'JIVE', 'Judas', 'KiNGDOM', 'LAMA', 'Leffe', 'LiGaS', 'LOAD', 'LycanHD', 'MeGusta', 'MezRips', 'mHD', 'Mr.Deadpool', 'mSD', 'NemDiggers', 'neoHEVC', 'NeXus', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'NOIVTC', 'pahe.in', 'PlaySD', 'playXD', 'PRODJi', 'ProRes', 'project-gxs', 'PSA', 'QaS', 'Ranger', 'RAPiDCOWS', 'RARBG', 'Raze', 'RCDiVX', 'RDN', 'Reaktor', 'REsuRRecTioN', 'RMTeam', 'ROBOTS', 'rubix', 'SANTi', 'SHUTTERSHIT', 'SpaceFish', 'SPASM', 'SSA', 'TBS', 'Telly', 'Tenrai-Sensei', 'TERMiNAL', 'TM', 'topaz', 'TSP', 'TSPxL', 'Trix', 'URANiME', 'UTR', 'VipapkSudios', 'ViSION', 'WAF', 'Wardevil', 'x0r', 'xRed', 'XS', 'YakuboEncodes', 'YIFY', 'YTS', 'YuiSubs', 'ZKBL', 'ZmN', 'ZMNT'] pass async
def upload(self, meta): @@ -37,11 +35,11 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('video_codec'), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) oe_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -51,31 +49,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : oe_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': oe_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -89,22 +87,20 @@ async def upload(self, meta): 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") open_torrent.close() return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): oe_name = meta.get('name') return oe_name @@ -113,7 +109,7 @@ async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type, tv_pack, video_codec, category): @@ -121,8 +117,8 @@ async def get_type_id(self, type, tv_pack, video_codec, category): 'DISC': '19', 'REMUX': '20', 'WEBDL': '21', - }.get(type, '0') - if type == "WEBRIP": + }.get(type, '0') + if type == "WEBRIP": if video_codec == "HEVC": # x265 Encode type_id = '10' @@ -146,34 +142,30 @@ async def get_type_id(self, type, 
tv_pack, video_codec, category): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', - '1440p' : '3', + '1440p': '3', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -185,8 +177,8 @@ async def search_existing(self, meta): for each in response['data']: result = [each][0]['attributes']['name'] dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/OTW.py b/src/trackers/OTW.py index 78d7eb18..15301774 100644 --- a/src/trackers/OTW.py +++ b/src/trackers/OTW.py @@ -2,8 +2,7 @@ # import discord import asyncio import requests -import distutils.util -import os +from str2bool import str2bool import platform from src.trackers.COMMON import COMMON @@ -19,12 +18,6 @@ class OTW(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### - - # ALSO EDIT CLASS NAME ABOVE - def __init__(self, config): self.config = config self.tracker = 'OTW' @@ -39,7 +32,7 @@ async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type): @@ -50,29 +43,25 @@ async def get_type_id(self, type): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', - '1440p' : '3', + '1440p': '3', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -82,12 +71,12 @@ async def upload(self, meta): await common.unit3d_edit_desc(meta, self.tracker, self.signature) region_id = await common.unit3d_region_ids(meta.get('region')) distributor_id 
= await common.unit3d_distributor_ids(meta.get('distributor')) - if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -97,31 +86,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : meta['name'], - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': meta['name'], + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -136,35 +125,31 @@ async def upload(self, meta): 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 
'name': "" } if meta.get('edition', "") != "": params['name'] = params['name'] + f" {meta['edition']}" @@ -176,8 +161,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/PTER.py b/src/trackers/PTER.py index fbf582a2..fd5d2819 100644 --- a/src/trackers/PTER.py +++ b/src/trackers/PTER.py @@ -1,19 +1,16 @@ from bs4 import BeautifulSoup import requests -import asyncio import re import os from pathlib import Path -import traceback import json import glob from str2bool import str2bool -import cli_ui import pickle from unidecode import unidecode -from urllib.parse import urlparse, quote +from urllib.parse import urlparse from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa E403 from src.console import console @@ -23,19 +20,19 @@ def __init__(self, config): self.config = config self.tracker = 'PTER' self.source_flag = 'PTER' - self.passkey = str(config['TRACKERS']['PTER'].get('passkey', '')).strip() + self.passkey = str(config['TRACKERS']['PTER'].get('passkey', '')).strip() self.username = config['TRACKERS']['PTER'].get('username', '').strip() self.password = config['TRACKERS']['PTER'].get('password', '').strip() self.rehost_images = config['TRACKERS']['PTER'].get('img_rehost', False) self.ptgen_api = config['TRACKERS']['PTER'].get('ptgen_api').strip() - self.ptgen_retry=3 + self.ptgen_retry = 3 self.signature = None self.banned_groups = [""] async def validate_credentials(self, meta): vcookie = await self.validate_cookies(meta) - if vcookie != True: + if vcookie is not True: console.print('[red]Failed to validate cookies. 
Please confirm that the site is up and your passkey is valid.') return False return True @@ -79,9 +76,9 @@ async def search_existing(self, meta): soup = BeautifulSoup(r.text, 'lxml') rows = soup.select('table.torrents > tr:has(table.torrentname)') for row in rows: - text=row.select_one('a[href^="details.php?id="]') - if text != None: - release=text.attrs['title'] + text = row.select_one('a[href^="details.php?id="]') + if text is not None: + release = text.attrs['title'] if release: dupes.append(release) else: @@ -108,8 +105,8 @@ async def get_type_category_id(self, meta): async def get_area_id(self, meta): - area_id=8 - area_map = { #To do + area_id = 8 + area_map = { # To do "中国倧陆": 1, "中国香港": 2, "中国台湾": 3, "ηΎŽε›½": 4, "ζ—₯本": 6, "ιŸ©ε›½": 5, "印度": 7, "法国": 4, "ζ„ε€§εˆ©": 4, "εΎ·ε›½": 4, "θ₯Ώη­η‰™": 4, "葑萄牙": 4, "θ‹±ε›½": 4, "阿根廷": 8, "澳倧利亚": 4, "ζ―”εˆ©ζ—Ά": 4, @@ -121,19 +118,17 @@ async def get_area_id(self, meta): return area_map[area] return area_id - - async def get_type_medium_id(self, meta): medium_id = "EXIT" # 1 = UHD Discs if meta.get('is_disc', '') in ("BDMV", "HD DVD"): - if meta['resolution']=='2160p': + if meta['resolution'] == '2160p': medium_id = 1 else: - medium_id = 2 #BD Discs + medium_id = 2 # BD Discs if meta.get('is_disc', '') == "DVD": - medium_id = 7 + medium_id = 7 # 4 = HDTV if meta.get('type', '') == "HDTV": @@ -165,7 +160,6 @@ async def edit_desc(self, meta): if ptgen.strip() != '': descfile.write(ptgen) - bbcode = BBCODE() if meta.get('discs', []) != []: discs = meta['discs'] @@ -190,7 +184,7 @@ async def edit_desc(self, meta): desc = re.sub(r"(\[img=\d+)]", "[img]", desc, flags=re.IGNORECASE) descfile.write(desc) - if self.rehost_images == True: + if self.rehost_images is True: console.print("[green]Rehosting Images...") images = await self.pterimg_upload(meta) if len(images) > 0: @@ -210,12 +204,12 @@ async def edit_desc(self, meta): descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]") descfile.write("[/center]") - if self.signature != None: + if self.signature is not None: descfile.write("\n\n") descfile.write(self.signature) descfile.close() - async def get_auth_token(self,meta): + async def get_auth_token(self, meta): if not os.path.exists(f"{meta['base_dir']}/data/cookies"): Path(f"{meta['base_dir']}/data/cookies").mkdir(parents=True, exist_ok=True) cookiefile = f"{meta['base_dir']}/data/cookies/Pterimg.pickle" @@ -228,7 +222,7 @@ async def get_auth_token(self,meta): loggedIn = await self.validate_login(r) else: console.print("[yellow]Pterimg Cookies not found. Creating new session.") - if loggedIn == True: + if loggedIn is True: auth_token = re.search(r'auth_token.*?\"(\w+)\"', r.text).groups()[0] else: data = { @@ -238,9 +232,9 @@ async def get_auth_token(self,meta): } r = session.get("https://s3.pterclub.com") data['auth_token'] = re.search(r'auth_token.*?\"(\w+)\"', r.text).groups()[0] - loginresponse = session.post(url='https://s3.pterclub.com/login',data=data) + loginresponse = session.post(url='https://s3.pterclub.com/login', data=data) if not loginresponse.ok: - raise LoginException("Failed to login to Pterimg. ") + raise LoginException("Failed to login to Pterimg. ") # noqa: F405 auth_token = re.search(r'auth_token = *?\"(\w+)\"', loginresponse.text).groups()[0] with open(cookiefile, 'wb') as cf: pickle.dump(session.cookies, cf) @@ -256,14 +250,14 @@ async def validate_login(self, response): async def pterimg_upload(self, meta): images = glob.glob(f"{meta['base_dir']}/tmp/{meta['uuid']}/{meta['filename']}-*.png") - url='https://s3.pterclub.com' - image_list=[] + url = 'https://s3.pterclub.com' + image_list = [] data = { 'type': 'file', 'action': 'upload', 'nsfw': 0, 'auth_token': await self.get_auth_token(meta) - } + } cookiefile = f"{meta['base_dir']}/data/cookies/Pterimg.pickle" with requests.Session() as session: if os.path.exists(cookiefile): @@ -278,9 +272,9 @@ async def pterimg_upload(self, meta): except json.decoder.JSONDecodeError: res = {} if not req.ok: - if res['error']['message'] in ('ι‡ε€δΈŠδΌ ','Duplicated upload'): + if res['error']['message'] in ('ι‡ε€δΈŠδΌ ', 'Duplicated upload'): continue - raise(f'HTTP {req.status_code}, reason: {res["error"]["message"]}') + raise Exception(f'HTTP {req.status_code}, reason: {res["error"]["message"]}') image_dict = {} image_dict['web_url'] = res['image']['url'] image_dict['img_url'] = res['image']['url'] @@ -288,7 +282,7 @@ async def pterimg_upload(self, meta): return image_list async def get_anon(self, anon): - if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 'no' else: anon = 'yes' @@ -304,7 +298,7 @@ async def edit_name(self, meta): pter_name = pter_name.replace(meta["aka"], '') pter_name = pter_name.replace('PQ10', 'HDR') - if meta['type'] == 'WEBDL' and meta.get('has_encode_settings', False) == True: + if meta['type'] == 'WEBDL' and meta.get('has_encode_settings', False) is True: pter_name = pter_name.replace('H.264', 'x264') return pter_name @@ -328,13 +322,13 @@ async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - desc_file=f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" + desc_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt" if not os.path.exists(desc_file): await self.edit_desc(meta) pter_name = await self.edit_name(meta) - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') @@ -348,19 +342,19 @@ async def upload(self, meta): else: torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorrent"), } - #use chinese small_descr + # use chinese small_descr if meta['ptgen']["trans_title"] != ['']: - small_descr='' + small_descr = '' for title_ in meta['ptgen']["trans_title"]: - small_descr+=f'{title_} / ' - small_descr+="| 类别:"+meta['ptgen']["genre"][0] - small_descr=small_descr.replace('/ |','|') + small_descr += f'{title_} / ' + small_descr += "| 类别:" + meta['ptgen']["genre"][0] + small_descr = small_descr.replace('/ |', '|') else: - small_descr=meta['title'] - data= { + small_descr = meta['title'] + data = { "name": pter_name, "small_descr": small_descr, "descr": pter_desc, @@ -371,7 +365,7 @@ async def
upload(self, meta): "zhongzi": await self.is_zhongzi(meta) } - if meta.get('personalrelease', False) == True: + if meta.get('personalrelease', False) is True: data["pr"] = "yes" url = "https://pterclub.com/takeupload.php" @@ -390,13 +384,13 @@ async def upload(self, meta): mi_dump.close() if up.url.startswith("https://pterclub.com/details.php?id="): - console.print(f"[green]Uploaded to: [yellow]{up.url.replace('&uploaded=1','')}[/yellow][/green]") + console.print(f"[green]Uploaded to: [yellow]{up.url.replace('&uploaded=1', '')}[/yellow][/green]") id = re.search(r"(id=)(\d+)", urlparse(up.url).query).group(2) await self.download_new_torrent(id, torrent_path) else: console.print(data) console.print("\n\n") - raise UploadException(f"Upload to Pter Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to Pter Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa #F405 return async def download_new_torrent(self, id, torrent_path): @@ -407,7 +401,4 @@ async def download_new_torrent(self, id, torrent_path): tor.write(r.content) else: console.print("[red]There was an issue downloading the new .torrent from pter") - console.print(r.text) - - - + console.print(r.text) \ No newline at end of file diff --git a/src/trackers/PTP.py b/src/trackers/PTP.py index 7c8ce9f7..1de02054 100644 --- a/src/trackers/PTP.py +++ b/src/trackers/PTP.py @@ -5,20 +5,20 @@ import os from pathlib import Path from str2bool import str2bool -import time -import traceback import json import glob import multiprocessing import platform import pickle +import click from pymediainfo import MediaInfo from src.trackers.COMMON import COMMON from src.bbcode import BBCODE -from src.exceptions import * +from src.exceptions import * # noqa F403 from src.console import console from torf import Torrent -import datetime +from datetime import datetime + class PTP(): @@ -28,69 +28,69 @@ def __init__(self, config): self.source_flag = 'PTP' self.api_user = config['TRACKERS']['PTP'].get('ApiUser', '').strip() self.api_key = config['TRACKERS']['PTP'].get('ApiKey', '').strip() - self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() - self.username = config['TRACKERS']['PTP'].get('username', '').strip() + self.announce_url = config['TRACKERS']['PTP'].get('announce_url', '').strip() + self.username = config['TRACKERS']['PTP'].get('username', '').strip() self.password = config['TRACKERS']['PTP'].get('password', '').strip() self.web_source = str2bool(str(config['TRACKERS']['PTP'].get('add_web_source_to_desc', True))) self.user_agent = f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' - self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', + self.banned_groups = ['aXXo', 'BMDru', 'BRrip', 'CM8', 'CrEwSaDe', 'CTFOH', 'd3g', 'DNL', 'FaNGDiNG0', 'HD2DVD', 'HDTime', 'ION10', 'iPlanet', 'KiNGDOM', 'mHD', 'mSD', 'nHD', 'nikt0', 'nSD', 'NhaNc3', 'OFT', 'PRODJi', 'SANTi', 'SPiRiT', 'STUTTERSHIT', 'ViSION', 'VXT', 'WAF', 'x0r', 'YIFY',] - + self.sub_lang_map = { - ("Arabic", "ara", "ar") : 22, - ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br') : 49, - ("Bulgarian", "bul", "bg") : 29, - ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)") : 14, - ("Croatian", "hrv", "hr", "scr") : 23, - ("Czech", "cze", "cz", "cs") : 30, - ("Danish", "dan", "da") : 10, - ("Dutch", "dut", "nl") : 9, - ("English", "eng", "en", "English (CC)", 
"English - SDH") : 3, - ("English - Forced", "English (Forced)", "en (Forced)") : 50, - ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)") : 51, - ("Estonian", "est", "et") : 38, - ("Finnish", "fin", "fi") : 15, - ("French", "fre", "fr") : 5, - ("German", "ger", "de") : 6, - ("Greek", "gre", "el") : 26, - ("Hebrew", "heb", "he") : 40, - ("Hindi" "hin", "hi") : 41, - ("Hungarian", "hun", "hu") : 24, - ("Icelandic", "ice", "is") : 28, - ("Indonesian", "ind", "id") : 47, - ("Italian", "ita", "it") : 16, - ("Japanese", "jpn", "ja") : 8, - ("Korean", "kor", "ko") : 19, - ("Latvian", "lav", "lv") : 37, - ("Lithuanian", "lit", "lt") : 39, - ("Norwegian", "nor", "no") : 12, - ("Persian", "fa", "far") : 52, - ("Polish", "pol", "pl") : 17, - ("Portuguese", "por", "pt") : 21, - ("Romanian", "rum", "ro") : 13, - ("Russian", "rus", "ru") : 7, - ("Serbian", "srp", "sr", "scc") : 31, - ("Slovak", "slo", "sk") : 42, - ("Slovenian", "slv", "sl") : 43, - ("Spanish", "spa", "es") : 4, - ("Swedish", "swe", "sv") : 11, - ("Thai", "tha", "th") : 20, - ("Turkish", "tur", "tr") : 18, - ("Ukrainian", "ukr", "uk") : 34, - ("Vietnamese", "vie", "vi") : 25, + ("Arabic", "ara", "ar"): 22, + ("Brazilian Portuguese", "Brazilian", "Portuguese-BR", 'pt-br'): 49, + ("Bulgarian", "bul", "bg"): 29, + ("Chinese", "chi", "zh", "Chinese (Simplified)", "Chinese (Traditional)"): 14, + ("Croatian", "hrv", "hr", "scr"): 23, + ("Czech", "cze", "cz", "cs"): 30, + ("Danish", "dan", "da"): 10, + ("Dutch", "dut", "nl"): 9, + ("English", "eng", "en", "English (CC)", "English - SDH"): 3, + ("English - Forced", "English (Forced)", "en (Forced)"): 50, + ("English Intertitles", "English (Intertitles)", "English - Intertitles", "en (Intertitles)"): 51, + ("Estonian", "est", "et"): 38, + ("Finnish", "fin", "fi"): 15, + ("French", "fre", "fr"): 5, + ("German", "ger", "de"): 6, + ("Greek", "gre", "el"): 26, + ("Hebrew", "heb", "he"): 40, + ("Hindi" "hin", "hi"): 41, + ("Hungarian", "hun", "hu"): 24, + ("Icelandic", "ice", "is"): 28, + ("Indonesian", "ind", "id"): 47, + ("Italian", "ita", "it"): 16, + ("Japanese", "jpn", "ja"): 8, + ("Korean", "kor", "ko"): 19, + ("Latvian", "lav", "lv"): 37, + ("Lithuanian", "lit", "lt"): 39, + ("Norwegian", "nor", "no"): 12, + ("Persian", "fa", "far"): 52, + ("Polish", "pol", "pl"): 17, + ("Portuguese", "por", "pt"): 21, + ("Romanian", "rum", "ro"): 13, + ("Russian", "rus", "ru"): 7, + ("Serbian", "srp", "sr", "scc"): 31, + ("Slovak", "slo", "sk"): 42, + ("Slovenian", "slv", "sl"): 43, + ("Spanish", "spa", "es"): 4, + ("Swedish", "swe", "sv"): 11, + ("Thai", "tha", "th"): 20, + ("Turkish", "tur", "tr"): 18, + ("Ukrainian", "ukr", "uk"): 34, + ("Vietnamese", "vie", "vi"): 25, } async def get_ptp_id_imdb(self, search_term, search_file_folder): imdb_id = ptp_torrent_id = None filename = str(os.path.basename(search_term)) params = { - 'filelist' : filename + 'filelist': filename } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url, params=params, headers=headers) @@ -133,15 +133,15 @@ async def get_ptp_id_imdb(self, search_term, search_file_folder): pass console.print(f'[yellow]Could not find any release matching [bold yellow]{filename}[/bold yellow] on PTP') return None, None, None - + async def get_imdb_from_torrent_id(self, ptp_torrent_id): params = { - 
'torrentid' : ptp_torrent_id + 'torrentid': ptp_torrent_id } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url, params=params, headers=headers) @@ -150,57 +150,80 @@ async def get_imdb_from_torrent_id(self, ptp_torrent_id): if response.status_code == 200: response = response.json() imdb_id = response['ImdbId'] + ptp_infohash = None for torrent in response['Torrents']: if torrent.get('Id', 0) == str(ptp_torrent_id): ptp_infohash = torrent.get('InfoHash', None) - return imdb_id, ptp_infohash + return imdb_id, ptp_infohash, None elif int(response.status_code) in [400, 401, 403]: console.print(response.text) - return None, None + return None, None, None elif int(response.status_code) == 503: console.print("[bold yellow]PTP Unavailable (503)") - return None, None + return None, None, None else: - return None, None + return None, None, None except Exception: - return None, None - + return None, None, None + async def get_ptp_description(self, ptp_torrent_id, is_disc): params = { - 'id' : ptp_torrent_id, - 'action' : 'get_description' + 'id': ptp_torrent_id, + 'action': 'get_description' } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' + console.print(f"[yellow]Requesting description from {url} with ID {ptp_torrent_id}") response = requests.get(url, params=params, headers=headers) await asyncio.sleep(1) + ptp_desc = response.text + # console.print(f"[yellow]Raw description received:\n{ptp_desc[:3800]}...") # Show first 500 characters for brevity + bbcode = BBCODE() - desc = bbcode.clean_ptp_description(ptp_desc, is_disc) - console.print(f"[bold green]Successfully grabbed description from PTP") - return desc - + desc, imagelist = bbcode.clean_ptp_description(ptp_desc, is_disc) + + console.print("[bold green]Successfully grabbed description from PTP") + console.print(f"[cyan]Description after cleaning:[yellow]\n{desc[:1000]}...") # Show first 1000 characters for brevity + + # Allow user to edit or discard the description + console.print("[cyan]Do you want to edit, discard or keep the description?[/cyan]") + edit_choice = input("[cyan]Enter 'e' to edit, 'd' to discard, or press Enter to keep it as is: [/cyan]") + + if edit_choice.lower() == 'e': + edited_description = click.edit(desc) + if edited_description: + desc = edited_description.strip() + console.print(f"[green]Final description after editing:[/green] {desc}") + elif edit_choice.lower() == 'd': + desc = None + console.print("[yellow]Description discarded.[/yellow]") + else: + console.print("[green]Keeping the original description.[/green]") + + return desc, imagelist + async def get_group_by_imdb(self, imdb): params = { - 'imdb' : imdb, + 'imdb': imdb, } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url=url, headers=headers, params=params) await asyncio.sleep(1) try: response = response.json() - if response.get("Page") == "Browse": # No Releases on Site with ID + if response.get("Page") == "Browse": # No Releases on Site with ID return 
None - elif response.get('Page') == "Details": # Group Found + elif response.get('Page') == "Details": # Group Found groupID = response.get('GroupId') console.print(f"[green]Matched IMDb: [yellow]tt{imdb}[/yellow] to Group ID: [yellow]{groupID}[/yellow][/green]") console.print(f"[green]Title: [yellow]{response.get('Name')}[/yellow] ([yellow]{response.get('Year')}[/yellow])") @@ -212,14 +235,14 @@ async def get_group_by_imdb(self, imdb): async def get_torrent_info(self, imdb, meta): params = { - 'imdb' : imdb, - 'action' : 'torrent_info', - 'fast' : 1 + 'imdb': imdb, + 'action': 'torrent_info', + 'fast': 1 } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = "https://passthepopcorn.me/ajax.php" response = requests.get(url=url, params=params, headers=headers) @@ -240,9 +263,9 @@ async def get_torrent_info(self, imdb, meta): async def get_torrent_info_tmdb(self, meta): tinfo = { - "title" : meta.get("title", ""), - "year" : meta.get("year", ""), - "album_desc" : meta.get("overview", ""), + "title": meta.get("title", ""), + "year": meta.get("year", ""), + "album_desc": meta.get("overview", ""), } tags = await self.get_tags([meta.get("genres", ""), meta.get("keywords", "")]) tinfo['tags'] = ", ".join(tags) @@ -266,21 +289,20 @@ async def get_tags(self, check_against): async def search_existing(self, groupID, meta): # Map resolutions to SD / HD / UHD quality = None - if meta.get('sd', 0) == 1: # 1 is SD + if meta.get('sd', 0) == 1: # 1 is SD quality = "Standard Definition" elif meta['resolution'] in ["1440p", "1080p", "1080i", "720p"]: quality = "High Definition" elif meta['resolution'] in ["2160p", "4320p", "8640p"]: quality = "Ultra High Definition" - params = { - 'id' : groupID, + 'id': groupID, } headers = { - 'ApiUser' : self.api_user, - 'ApiKey' : self.api_key, - 'User-Agent' : self.user_agent + 'ApiUser': self.api_user, + 'ApiKey': self.api_key, + 'User-Agent': self.user_agent } url = 'https://passthepopcorn.me/torrents.php' response = requests.get(url=url, headers=headers, params=params) @@ -291,7 +313,7 @@ async def search_existing(self, groupID, meta): torrents = response.get('Torrents', []) if len(torrents) != 0: for torrent in torrents: - if torrent.get('Quality') == quality and quality != None: + if torrent.get('Quality') == quality and quality is not None: existing.append(f"[{torrent.get('Resolution')}] {torrent.get('ReleaseName', 'RELEASE NAME NOT FOUND')}") except Exception: console.print("[red]An error has occured trying to find existing releases") @@ -299,11 +321,11 @@ async def search_existing(self, groupID, meta): async def ptpimg_url_rehost(self, image_url): payload = { - 'format' : 'json', - 'api_key' : self.config["DEFAULT"]["ptpimg_api"], - 'link-upload' : image_url + 'format': 'json', + 'api_key': self.config["DEFAULT"]["ptpimg_api"], + 'link-upload': image_url } - headers = { 'referer': 'https://ptpimg.me/index.php'} + headers = {'referer': 'https://ptpimg.me/index.php'} url = "https://ptpimg.me/upload.php" response = requests.post(url, headers=headers, data=payload) @@ -312,7 +334,7 @@ async def ptpimg_url_rehost(self, image_url): ptpimg_code = response[0]['code'] ptpimg_ext = response[0]['ext'] img_url = f"https://ptpimg.me/{ptpimg_code}.{ptpimg_ext}" - except: + except Exception: console.print("[red]PTPIMG image rehost failed") img_url = image_url # img_url = ptpimg_upload(image_url, ptpimg_api) @@ -351,7 +373,7 @@ def 
get_type(self, imdb_info, meta): ptpType = "Stand-up Comedy" elif "concert" in keywords: ptpType = "Concert" - if ptpType == None: + if ptpType is None: if meta.get('mode', 'discord') == 'cli': ptpTypeList = ["Feature Film", "Short Film", "Miniseries", "Stand-up Comedy", "Concert", "Movie Collection"] ptpType = cli_ui.ask_choice("Select the proper type", choices=ptpTypeList) @@ -372,14 +394,14 @@ def get_codec(self, meta): codec = "DVD9" else: codecmap = { - "AVC" : "H.264", - "H.264" : "H.264", - "HEVC" : "H.265", - "H.265" : "H.265", + "AVC": "H.264", + "H.264": "H.264", + "HEVC": "H.265", + "H.265": "H.265", } searchcodec = meta.get('video_codec', meta.get('video_encode')) codec = codecmap.get(searchcodec, searchcodec) - if meta.get('has_encode_settings') == True: + if meta.get('has_encode_settings') is True: codec = codec.replace("H.", "x") return codec @@ -403,23 +425,23 @@ def get_container(self, meta): else: ext = os.path.splitext(meta['filelist'][0])[1] containermap = { - '.mkv' : "MKV", - '.mp4' : 'MP4' + '.mkv': "MKV", + '.mp4': 'MP4' } container = containermap.get(ext, 'Other') return container def get_source(self, source): sources = { - "Blu-ray" : "Blu-ray", - "BluRay" : "Blu-ray", - "HD DVD" : "HD-DVD", - "HDDVD" : "HD-DVD", - "Web" : "WEB", - "HDTV" : "HDTV", - 'UHDTV' : 'HDTV', - "NTSC" : "DVD", - "PAL" : "DVD" + "Blu-ray": "Blu-ray", + "BluRay": "Blu-ray", + "HD DVD": "HD-DVD", + "HDDVD": "HD-DVD", + "Web": "WEB", + "HDTV": "HDTV", + 'UHDTV': 'HDTV', + "NTSC": "DVD", + "PAL": "DVD" } source_id = sources.get(source, "OtherR") return source_id @@ -438,7 +460,8 @@ def get_subtitles(self, meta): if language == "en": if track.get('Forced', "") == "Yes": language = "en (Forced)" - if "intertitles" in track.get('Title', "").lower(): + title = track.get('Title', "") + if isinstance(title, str) and "intertitles" in title.lower(): language = "en (Intertitles)" for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: @@ -448,29 +471,29 @@ def get_subtitles(self, meta): for lang, subID in sub_lang_map.items(): if language in lang and subID not in sub_langs: sub_langs.append(subID) - + if sub_langs == []: - sub_langs = [44] # No Subtitle + sub_langs = [44] # No Subtitle return sub_langs def get_trumpable(self, sub_langs): trumpable_values = { - "English Hardcoded Subs (Full)" : 4, - "English Hardcoded Subs (Forced)" : 50, - "No English Subs" : 14, - "English Softsubs Exist (Mislabeled)" : None, - "Hardcoded Subs (Non-English)" : "OTHER" + "English Hardcoded Subs (Full)": 4, + "English Hardcoded Subs (Forced)": 50, + "No English Subs": 14, + "English Softsubs Exist (Mislabeled)": None, + "Hardcoded Subs (Non-English)": "OTHER" } opts = cli_ui.select_choices("English subtitles not found. 
Please select any/all applicable options:", choices=list(trumpable_values.keys())) trumpable = [] if opts: for t, v in trumpable_values.items(): if t in ''.join(opts): - if v == None: + if v is None: break - elif v != 50: # Hardcoded, Forced + elif v != 50 and v != "OTHER": # Hardcoded, Forced trumpable.append(v) - elif v == "OTHER": #Hardcoded, Non-English + elif v == "OTHER": # Hardcoded, Non-English trumpable.append(14) hc_sub_langs = cli_ui.ask_string("Enter language code for HC Subtitle languages") for lang, subID in self.sub_lang_map.items(): @@ -479,7 +502,7 @@ def get_trumpable(self, sub_langs): else: sub_langs.append(v) trumpable.append(4) - + sub_langs = list(set(sub_langs)) trumpable = list(set(trumpable)) if trumpable == []: @@ -496,7 +519,7 @@ def get_remaster_title(self, meta): remaster_title.append('The Criterion Collection') elif meta.get('distributor') in ('MASTERS OF CINEMA', 'MOC'): remaster_title.append('Masters of Cinema') - + # Editions # Director's Cut, Extended Edition, Rifftrax, Theatrical Cut, Uncut, Unrated if "director's cut" in meta.get('edition', '').lower(): @@ -517,7 +540,7 @@ def get_remaster_title(self, meta): # Features # 2-Disc Set, 2in1, 2D/3D Edition, 3D Anaglyph, 3D Full SBS, 3D Half OU, 3D Half SBS, - # 4K Restoration, 4K Remaster, + # 4K Restoration, 4K Remaster, # Extras, Remux, if meta.get('type') == "REMUX": remaster_title.append("Remux") @@ -531,10 +554,10 @@ def get_remaster_title(self, meta): remaster_title.append('Dual Audio') if "Dubbed" in meta['audio']: remaster_title.append('English Dub') - if meta.get('has_commentary', False) == True: + if meta.get('has_commentary', False) is True: remaster_title.append('With Commentary') - # HDR10, HDR10+, Dolby Vision, 10-bit, + # HDR10, HDR10+, Dolby Vision, 10-bit, # if "Hi10P" in meta.get('video_encode', ''): # remaster_title.append('10-bit') if meta.get('hdr', '').strip() == '' and meta.get('bit_depth') == '10': @@ -584,16 +607,16 @@ async def edit_desc(self, meta): mi_dump = each['summary'] else: mi_dump = each['summary'] - if meta.get('vapoursynth', False) == True: + if meta.get('vapoursynth', False) is True: use_vs = True else: use_vs = False ds = multiprocessing.Process(target=prep.disc_screenshots, args=(f"FILE_{i}", each['bdinfo'], meta['uuid'], meta['base_dir'], use_vs, [], meta.get('ffdebug', False), 2)) ds.start() - while ds.is_alive() == True: + while ds.is_alive() is True: await asyncio.sleep(1) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}",f"FILE_{i}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") + images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) if each['type'] == "DVD": desc.write(f"[b][size=3]{each['name']}:[/size][/b]\n") @@ -608,12 +631,12 @@ async def edit_desc(self, meta): else: ds = multiprocessing.Process(target=prep.dvd_screenshots, args=(meta, i, 2)) ds.start() - while ds.is_alive() == True: + while ds.is_alive() is True: await asyncio.sleep(1) new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"{meta['discs'][i]['name']}-*.png") - images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) - - if len(images) > 0: + images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) + + if len(images) > 0: for each in range(len(images[:int(meta['screens'])])): raw_url = images[each]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") @@ -624,12 +647,12 @@ async def edit_desc(self, meta): file =
meta['filelist'][i] if i == 0: # Add This line for all web-dls - if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) == None and self.web_source == True: + if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None and self.web_source is True: desc.write(f"[quote][align=center]This release is sourced from {meta['service_longname']}[/align][/quote]") mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8').read() else: # Export Mediainfo - mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version' : '1'}) + mi_dump = MediaInfo.parse(file, output="STRING", full=False, mediainfo_options={'inform_version': '1'}) # mi_dump = mi_dump.replace(file, os.path.basename(file)) with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/TEMP_PTP_MEDIAINFO.txt", "w", newline="", encoding="utf-8") as f: f.write(mi_dump) @@ -637,9 +660,9 @@ async def edit_desc(self, meta): # Generate and upload screens for other files s = multiprocessing.Process(target=prep.screenshots, args=(file, f"FILE_{i}", meta['uuid'], meta['base_dir'], meta, 2)) s.start() - while s.is_alive() == True: + while s.is_alive() is True: await asyncio.sleep(3) - new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}",f"FILE_{i}-*.png") + new_screens = glob.glob1(f"{meta['base_dir']}/tmp/{meta['uuid']}", f"FILE_{i}-*.png") images, dummy = prep.upload_screens(meta, 2, 1, 0, 2, new_screens, {}) desc.write(f"[mediainfo]{mi_dump}[/mediainfo]\n") @@ -647,8 +670,8 @@ async def edit_desc(self, meta): base2ptp = self.convert_bbcode(base) if base2ptp.strip() != "": desc.write(base2ptp) - desc.write("\n\n") - if len(images) > 0: + desc.write("\n\n") + if len(images) > 0: for each in range(len(images[:int(meta['screens'])])): raw_url = images[each]['raw_url'] desc.write(f"[img]{raw_url}[/img]\n") @@ -667,17 +690,17 @@ async def get_AntiCsrfToken(self, meta): loggedIn = await self.validate_login(uploadresponse) else: console.print("[yellow]PTP Cookies not found. Creating new session.") - if loggedIn == True: + if loggedIn is True: AntiCsrfToken = re.search(r'data-AntiCsrfToken="(.*)"', uploadresponse.text).group(1) else: - passKey = re.match(r"https?://please\.passthepopcorn\.me:?\d*/(.+)/announce",self.announce_url).group(1) + passKey = re.match(r"https?://please\.passthepopcorn\.me:?\d*/(.+)/announce", self.announce_url).group(1) data = { "username": self.username, "password": self.password, "passkey": passKey, "keeplogged": "1", } - headers = {"User-Agent" : self.user_agent} + headers = {"User-Agent": self.user_agent} loginresponse = session.post("https://passthepopcorn.me/ajax.php?action=login", data=data, headers=headers) await asyncio.sleep(2) try: @@ -690,14 +713,14 @@ async def get_AntiCsrfToken(self, meta): resp = loginresponse.json() try: if resp["Result"] != "Ok": - raise LoginException("Failed to login to PTP. Probably due to the bad user name, password, announce url, or 2FA code.") + raise LoginException("Failed to log in to PTP. Probably due to a bad username, password, announce URL, or 2FA code.") # noqa F405 AntiCsrfToken = resp["AntiCsrfToken"] with open(cookiefile, 'wb') as cf: pickle.dump(session.cookies, cf) except Exception: - raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}") + raise LoginException(f"Got exception while loading JSON login response from PTP.
Response: {loginresponse.text}") # noqa F405 except Exception: - raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}") + raise LoginException(f"Got exception while loading JSON login response from PTP. Response: {loginresponse.text}") # noqa F405 return AntiCsrfToken async def validate_login(self, response): @@ -705,7 +728,7 @@ async def validate_login(self, response): if response.text.find("""""") != -1: console.print("Looks like you are not logged in to PTP. Probably due to the bad user name, password, or expired session.") elif "Your popcorn quota has been reached, come back later!" in response.text: - raise LoginException("Your PTP request/popcorn quota has been reached, try again later") + raise LoginException("Your PTP request/popcorn quota has been reached, try again later") # noqa F405 else: loggedIn = True return loggedIn @@ -723,26 +746,26 @@ async def fill_upload_form(self, groupID, meta): data = { "submit": "true", "remaster_year": "", - "remaster_title": self.get_remaster_title(meta), #Eg.: Hardcoded English + "remaster_title": self.get_remaster_title(meta), # Eg.: Hardcoded English "type": self.get_type(meta['imdb_info'], meta), - "codec": "Other", # Sending the codec as custom. + "codec": "Other", # Sending the codec as custom. "other_codec": self.get_codec(meta), "container": "Other", "other_container": self.get_container(meta), "resolution": resolution, - "source": "Other", # Sending the source as custom. + "source": "Other", # Sending the source as custom. "other_source": self.get_source(meta['source']), "release_desc": desc, "nfo_text": "", - "subtitles[]" : ptp_subtitles, - "trumpable[]" : ptp_trumpable, - "AntiCsrfToken" : await self.get_AntiCsrfToken(meta) - } + "subtitles[]": ptp_subtitles, + "trumpable[]": ptp_trumpable, + "AntiCsrfToken": await self.get_AntiCsrfToken(meta) + } if data["remaster_year"] != "" or data["remaster_title"] != "": data["remaster"] = "on" if resolution == "Other": data["other_resolution"] = other_resolution - if meta.get('personalrelease', False) == True: + if meta.get('personalrelease', False) is True: data["internalrip"] = "on" # IF SPECIAL (idk how to check for this automatically) # data["special"] = "on" @@ -751,18 +774,18 @@ async def fill_upload_form(self, groupID, meta): else: data["imdb"] = meta["imdb_id"] - if groupID == None: # If need to make new group + if groupID is None: # If need to make new group url = "https://passthepopcorn.me/upload.php" if data["imdb"] == "0": tinfo = await self.get_torrent_info_tmdb(meta) else: tinfo = await self.get_torrent_info(meta.get("imdb_id", "0"), meta) cover = meta["imdb_info"].get("cover") - if cover == None: + if cover is None: cover = meta.get('poster') - if cover != None and "ptpimg" not in cover: + if cover is not None and "ptpimg" not in cover: cover = await self.ptpimg_url_rehost(cover) - while cover == None: + while cover is None: cover = cli_ui.ask_string("No Poster was found. 
Please input a link to a poster: \n", default="") if "ptpimg" not in str(cover) and str(cover).endswith(('.jpg', '.png')): cover = await self.ptpimg_url_rehost(cover) @@ -777,15 +800,15 @@ async def fill_upload_form(self, groupID, meta): if new_data['year'] in ['', '0', 0, None] and meta.get('manual_year') not in [0, '', None]: new_data['year'] = meta['manual_year'] while new_data["tags"] == "": - if meta.get('mode', 'discord') == 'cli': + if meta.get('mode', 'discord') == 'cli': console.print('[yellow]Unable to match any tags') console.print("Valid tags can be found on the PTP upload form") new_data["tags"] = console.input("Please enter at least one tag. Comma seperated (action, animation, short):") data.update(new_data) - if meta["imdb_info"].get("directors", None) != None: + if meta["imdb_info"].get("directors", None) is not None: data["artist[]"] = tuple(meta['imdb_info'].get('directors')) data["importance[]"] = "1" - else: # Upload on existing group + else: # Upload on existing group url = f"https://passthepopcorn.me/upload.php?groupid={groupID}" data["groupid"] = groupID @@ -816,7 +839,7 @@ async def upload(self, meta, url, data): prep = Prep(screens=meta['screens'], img_host=meta['imghost'], config=self.config) new_torrent = prep.CustomTorrent( path=Path(meta['path']), - trackers=["https://fake.tracker"], + trackers=[self.announce_url], source="L4G", private=True, exclude_globs=exclude, # Ensure this is always a list @@ -825,7 +848,7 @@ async def upload(self, meta, url, data): comment="Created by L4G's Upload Assistant", created_by="L4G's Upload Assistant" ) - + # Explicitly set the piece size and update metainfo new_torrent.piece_size = 16777216 # 16 MiB in bytes new_torrent.metainfo['info']['piece length'] = 16777216 # Ensure 'piece length' is set @@ -864,11 +887,11 @@ async def upload(self, meta, url, data): if match is not None: errorMessage = match.group(1) - raise UploadException(f"Upload to PTP failed: {errorMessage} ({response.status_code}). (We are still on the upload page.)") + raise UploadException(f"Upload to PTP failed: {errorMessage} ({response.status_code}). 
(We are still on the upload page.)") # noqa F405 # URL format in case of successful upload: https://passthepopcorn.me/torrents.php?id=9329&torrentid=91868 match = re.match(r".*?passthepopcorn\.me/torrents\.php\?id=(\d+)&torrentid=(\d+)", response.url) if match is None: console.print(url) console.print(data) - raise UploadException(f"Upload to PTP failed: result URL {response.url} ({response.status_code}) is not the expected one.") \ No newline at end of file + raise UploadException(f"Upload to PTP failed: result URL {response.url} ({response.status_code}) is not the expected one.") # noqa F405 \ No newline at end of file diff --git a/src/trackers/R4E.py b/src/trackers/R4E.py index 3349ab56..b939b9fa 100644 --- a/src/trackers/R4E.py +++ b/src/trackers/R4E.py @@ -2,16 +2,14 @@ # import discord import asyncio import requests -from difflib import SequenceMatcher from str2bool import str2bool -import json import tmdbsimple as tmdb -import os import platform from src.trackers.COMMON import COMMON from src.console import console + class R4E(): """ Edit for Tracker: @@ -36,11 +34,11 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['resolution']) await common.unit3d_edit_desc(meta, self.tracker, self.signature) name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS']['R4E'].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -50,21 +48,21 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[R4E]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], + 'name': name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], # 'personal_release' : int(meta.get('personalrelease', False)), NOT IMPLEMENTED on R4E # 'internal' : 0, # 'featured' : 0, @@ -79,21 +77,19 @@ async def upload(self, meta): if meta.get('category') == "TV": data['season_number'] = meta.get('season_int', '0') data['episode_number'] = meta.get('episode_int', '0') - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=url, files=files, data=data, headers=headers) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): name = meta['name'] return name @@ -103,34 +99,34 @@ async def get_cat_id(self, category_name, tmdb_id): movie = tmdb.Movies(tmdb_id) movie_info = movie.info() is_docu = 
self.is_docu(movie_info['genres']) - category_id = '70' # Motorsports Movie + category_id = '70' # Motorsports Movie if is_docu: - category_id = '66' # Documentary + category_id = '66' # Documentary elif category_name == 'TV': tv = tmdb.TV(tmdb_id) tv_info = tv.info() is_docu = self.is_docu(tv_info['genres']) - category_id = '79' # TV Series + category_id = '79' # TV Series if is_docu: - category_id = '2' # TV Documentary + category_id = '2' # TV Documentary else: category_id = '24' return category_id async def get_type_id(self, type): type_id = { - '8640p':'2160p', + '8640p': '2160p', '4320p': '2160p', '2160p': '2160p', - '1440p' : '1080p', + '1440p': '1080p', '1080p': '1080p', - '1080i':'1080i', + '1080i': '1080i', '720p': '720p', '576p': 'SD', '576i': 'SD', '480p': 'SD', '480i': 'SD' - }.get(type, '10') + }.get(type, '10') return type_id async def is_docu(self, genres): @@ -145,11 +141,11 @@ async def search_existing(self, meta): console.print("[yellow]Searching for existing torrents on site...") url = "https://racing4everyone.eu/api/torrents/filter" params = { - 'api_token' : self.config['TRACKERS']['R4E']['api_key'].strip(), - 'tmdb' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'name' : "" + 'api_token': self.config['TRACKERS']['R4E']['api_key'].strip(), + 'tmdb': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'name': "" } if meta['category'] == 'TV': params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -163,8 +159,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/RF.py b/src/trackers/RF.py index dd2fc4cd..c7dfda14 100644 --- a/src/trackers/RF.py +++ b/src/trackers/RF.py @@ -2,13 +2,13 @@ # import discord import asyncio import requests -import os import platform from str2bool import str2bool from src.trackers.COMMON import COMMON from src.console import console + class RF(): """ Edit for Tracker: @@ -18,9 +18,6 @@ class RF(): Upload """ - ############################################################### - ######## EDIT ME ######## - ############################################################### def __init__(self, config): self.config = config self.tracker = 'RF' @@ -41,11 +38,11 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type']) resolution_id = await self.get_res_id(meta['resolution']) stt_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -55,31 +52,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : stt_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': stt_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -95,20 +92,18 @@ async def upload(self, meta): } if meta.get('category') == "TV": console.print('[bold red]This site only ALLOWS Movies.') - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) 
open_torrent.close() - - async def edit_name(self, meta): stt_name = meta['name'] return stt_name @@ -116,7 +111,7 @@ async def edit_name(self, meta): async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type): @@ -125,10 +120,10 @@ async def get_type_id(self, type): 'REMUX': '40', 'WEBDL': '42', 'WEBRIP': '45', - #'FANRES': '6', + # 'FANRES': '6', 'ENCODE': '41', 'HDTV': '35', - }.get(type, '0') + }.get(type, '0') return type_id async def get_res_id(self, resolution): @@ -139,25 +134,24 @@ async def get_res_id(self, resolution): # '1440p' : '3', '1080p': '3', '1080i': '4', - '720p': '5', - '576p': '6', - '576i': '7', - '480p': '8', - '480i': '9' - }.get(resolution, '10') + '720p': '5', + '576p': '6', + '576i': '7', + '480p': '8', + '480i': '9' + }.get(resolution, '10') return resolution_id - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type']), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type']), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if meta['category'] == 'TV': console.print('[bold red]Unable to search site for TV as this site only ALLOWS Movies') @@ -172,8 +166,8 @@ async def search_existing(self, meta): # difference = SequenceMatcher(None, meta['clean_name'], result).ratio() # if difference >= 0.05: dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. 
Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/RTF.py b/src/trackers/RTF.py index 28ce5592..61d598e8 100644 --- a/src/trackers/RTF.py +++ b/src/trackers/RTF.py @@ -5,11 +5,11 @@ import base64 import re import datetime -import json from src.trackers.COMMON import COMMON from src.console import console + class RTF(): """ Edit for Tracker: @@ -18,10 +18,6 @@ class RTF(): Set type/category IDs Upload """ - - ############################################################### - ######## EDIT ME ######## - ############################################################### def __init__(self, config): self.config = config self.tracker = 'RTF' @@ -36,7 +32,7 @@ async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -45,21 +41,21 @@ async def upload(self, meta): screenshots = [] for image in meta['image_list']: - if image['raw_url'] != None: + if image['raw_url'] is not None: screenshots.append(image['raw_url']) json_data = { - 'name' : meta['name'], + 'name': meta['name'], # description does not work for some reason # 'description' : meta['overview'] + "\n\n" + desc + "\n\n" + "Uploaded by L4G Upload Assistant", 'description': "this is a description", # editing mediainfo so that instead of 1 080p its 1,080p as site mediainfo parser wont work other wise. - 'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump == None else f"{bd_dump}", + 'mediaInfo': re.sub(r"(\d+)\s+(\d+)", r"\1,\2", mi_dump) if bd_dump is None else f"{bd_dump}", "nfo": "", "url": "https://www.imdb.com/title/" + (meta['imdb_id'] if str(meta['imdb_id']).startswith("tt") else "tt" + meta['imdb_id']) + "/", # auto pulled from IMDB "descr": "This is short description", - "poster": meta["poster"] if meta["poster"] != None else "", + "poster": meta["poster"] if meta["poster"] is not None else "", "type": "401" if meta['category'] == 'MOVIE'else "402", "screenshots": screenshots, 'isAnonymous': self.config['TRACKERS'][self.tracker]["anon"], @@ -77,13 +73,11 @@ async def upload(self, meta): 'Authorization': self.config['TRACKERS'][self.tracker]['api_key'].strip(), } - if datetime.date.today().year - meta['year'] <= 9: - console.print(f"[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") + console.print("[red]ERROR: Not uploading!\nMust be older than 10 Years as per rules") return - - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, json=json_data, headers=headers) try: console.print(response.json()) @@ -91,14 +85,13 @@ async def upload(self, meta): t_id = response.json()['torrent']['id'] await common.add_tracker_torrent(meta, self.tracker, self.source_flag, self.config['TRACKERS'][self.tracker].get('announce_url'), "https://retroflix.club/browse/t/" + str(t_id)) - except: + except Exception: console.print("It may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(json_data) - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") @@ -108,7 +101,7 @@ async def search_existing(self, 
meta): } params = { - 'includingDead' : '1' + 'includingDead': '1' } if meta['imdb_id'] != "0": @@ -122,7 +115,7 @@ async def search_existing(self, meta): for each in response: result = [each][0]['name'] dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) @@ -157,8 +150,8 @@ async def generate_new_api(self, meta): if response.status_code == 201: console.print('[bold green]Using New API key generated for this upload') - console.print(f'[bold green]Please update your L4G config with the below RTF API Key for future uploads') + console.print('[bold green]Please update your L4G config with the RTF API key below for future uploads') console.print(f'[bold yellow]{response.json()["token"]}') self.config['TRACKERS'][self.tracker]['api_key'] = response.json()["token"] else: - console.print(f'[bold red]Error getting new API key got error code {response.status_code}, Please check username and password in config') + console.print(f'[bold red]Error getting new API key: got error code {response.status_code}. Please check the username and password in your config') \ No newline at end of file diff --git a/src/trackers/SN.py b/src/trackers/SN.py index 54f13d64..5d1c5388 100644 --- a/src/trackers/SN.py +++ b/src/trackers/SN.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- import requests import asyncio -import traceback from src.trackers.COMMON import COMMON from src.console import console @@ -15,7 +14,6 @@ class SN(): Set type/category IDs Upload """ - def __init__(self, config): self.config = config self.tracker = 'SN' @@ -31,7 +29,7 @@ async def get_type_id(self, type): 'BluRay': '3', 'Web': '1', # boxset is 4 - #'NA': '4', + # 'NA': '4', 'DVD': '2' }.get(type, '0') return type_id @@ -39,11 +37,11 @@ async def get_type_id(self, type): async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) - #await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) + # await common.unit3d_edit_desc(meta, self.tracker, self.forum_link) await self.edit_desc(meta) cat_id = "" sub_cat_id = "" - #cat_id = await self.get_cat_id(meta) + # cat_id = await self.get_cat_id(meta) if meta['category'] == 'MOVIE': cat_id = 1 # sub cat is source so using source to get @@ -56,8 +54,7 @@ async def upload(self, meta): sub_cat_id = 5 # todo need to do a check for docs and add as subcat - - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -90,7 +87,7 @@ async def upload(self, meta): } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.request("POST", url=self.upload_url, data=data, files=files) try: @@ -99,16 +96,15 @@ async def upload(self, meta): else: console.print("[red]Did not upload successfully") console.print(response.json()) - except: + except Exception: console.print("[red]Error!
It may have uploaded, go check") console.print(data) console.print_exception() return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) - async def edit_desc(self, meta): base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read() with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as desc: @@ -125,13 +121,12 @@ async def edit_desc(self, meta): desc.close() return - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_key' : self.config['TRACKERS'][self.tracker]['api_key'].strip() + 'api_key': self.config['TRACKERS'][self.tracker]['api_key'].strip() } # using title if IMDB id does not exist to search @@ -141,7 +136,7 @@ async def search_existing(self, meta): else: params['filter'] = meta['title'] else: - #using IMDB_id to search if it exists. + # using IMDB_id to search if it exists. if meta['category'] == 'TV': params['media_ref'] = f"tt{meta['imdb_id']}" params['filter'] = f"{meta.get('season', '')}{meta.get('episode', '')}" + " " + meta['resolution'] @@ -155,8 +150,8 @@ async def search_existing(self, meta): for i in response['data']: result = i['name'] dupes.append(result) - except: + except Exception: console.print('[red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/STC.py b/src/trackers/STC.py index a2ee100e..6bcecf2c 100644 --- a/src/trackers/STC.py +++ b/src/trackers/STC.py @@ -1,15 +1,13 @@ # -*- coding: utf-8 -*- import asyncio import requests -from difflib import SequenceMatcher from str2bool import str2bool -import json -import os import platform from src.trackers.COMMON import COMMON from src.console import console + class STC(): """ Edit for Tracker: @@ -24,7 +22,7 @@ def __init__(self, config): self.source_flag = 'STC' self.upload_url = 'https://skipthecommericals.xyz/api/torrents/upload' self.search_url = 'https://skipthecommericals.xyz/api/torrents/filter' - self.signature = '\n[center][url=https://skipthecommericals.xyz/]Powered by Only-Uploader[/url][/center]' + self.signature = '\n[center][url=https://skipthecommericals.xyz/pages/1]Please Seed[/url][/center]' self.banned_groups = [""] pass @@ -36,11 +34,11 @@ async def upload(self, meta): type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")) resolution_id = await self.get_res_id(meta['resolution']) stc_name = await self.edit_name(meta) - if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 0 else: anon = 1 - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = None bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read() else: @@ -50,31 +48,31 @@ async def upload(self, meta): open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb') files = {'torrent': open_torrent} data = { - 'name' : stc_name, - 'description' : desc, - 'mediainfo' : mi_dump, - 'bdinfo' : bd_dump, - 'category_id' : cat_id, - 'type_id' : type_id, - 'resolution_id' : resolution_id, - 'tmdb' : meta['tmdb'], - 'imdb' : 
meta['imdb_id'].replace('tt', ''), - 'tvdb' : meta['tvdb_id'], - 'mal' : meta['mal_id'], - 'igdb' : 0, - 'anonymous' : anon, - 'stream' : meta['stream'], - 'sd' : meta['sd'], - 'keywords' : meta['keywords'], - 'personal_release' : int(meta.get('personalrelease', False)), - 'internal' : 0, - 'featured' : 0, - 'free' : 0, - 'doubleup' : 0, - 'sticky' : 0, + 'name': stc_name, + 'description': desc, + 'mediainfo': mi_dump, + 'bdinfo': bd_dump, + 'category_id': cat_id, + 'type_id': type_id, + 'resolution_id': resolution_id, + 'tmdb': meta['tmdb'], + 'imdb': meta['imdb_id'].replace('tt', ''), + 'tvdb': meta['tvdb_id'], + 'mal': meta['mal_id'], + 'igdb': 0, + 'anonymous': anon, + 'stream': meta['stream'], + 'sd': meta['sd'], + 'keywords': meta['keywords'], + 'personal_release': int(meta.get('personalrelease', False)), + 'internal': 0, + 'featured': 0, + 'free': 0, + 'doubleup': 0, + 'sticky': 0, } # Internal - if self.config['TRACKERS'][self.tracker].get('internal', False) == True: + if self.config['TRACKERS'][self.tracker].get('internal', False) is True: if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])): data['internal'] = 1 @@ -88,22 +86,20 @@ async def upload(self, meta): 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip() } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params) try: console.print(response.json()) - except: + except Exception: console.print("It may have uploaded, go check") open_torrent.close() return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() - - async def edit_name(self, meta): stc_name = meta.get('name') return stc_name @@ -112,7 +108,7 @@ async def get_cat_id(self, category_name): category_id = { 'MOVIE': '1', 'TV': '2', - }.get(category_name, '0') + }.get(category_name, '0') return category_id async def get_type_id(self, type, tv_pack, sd, category): @@ -123,7 +119,7 @@ async def get_type_id(self, type, tv_pack, sd, category): 'WEBRIP': '5', 'HDTV': '6', 'ENCODE': '3' - }.get(type, '0') + }.get(type, '0') if tv_pack == 1: if sd == 1: # Season SD @@ -146,37 +142,30 @@ async def get_type_id(self, type, tv_pack, sd, category): async def get_res_id(self, resolution): resolution_id = { - '8640p':'10', + '8640p': '10', '4320p': '1', '2160p': '2', - '1440p' : '3', + '1440p': '3', '1080p': '3', - '1080i':'4', + '1080i': '4', '720p': '5', '576p': '6', '576i': '7', '480p': '8', '480i': '9' - }.get(resolution, '10') + }.get(resolution, '10') return resolution_id - - - - - - - async def search_existing(self, meta): dupes = [] console.print("[yellow]Searching for existing torrents on site...") params = { - 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(), - 'tmdbId' : meta['tmdb'], - 'categories[]' : await self.get_cat_id(meta['category']), - 'types[]' : await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), - 'resolutions[]' : await self.get_res_id(meta['resolution']), - 'name' : "" + 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(), + 'tmdbId': meta['tmdb'], + 'categories[]': await self.get_cat_id(meta['category']), + 'types[]': await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', "")), + 'resolutions[]': await self.get_res_id(meta['resolution']), + 'name': "" } if 
meta['category'] == 'TV': params['name'] = f"{meta.get('season', '')}{meta.get('episode', '')}" @@ -188,8 +177,8 @@ async def search_existing(self, meta): for each in response['data']: result = [each][0]['attributes']['name'] dupes.append(result) - except: + except Exception: console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect') await asyncio.sleep(5) - return dupes + return dupes \ No newline at end of file diff --git a/src/trackers/THR.py b/src/trackers/THR.py index 548ce873..0db69a07 100644 --- a/src/trackers/THR.py +++ b/src/trackers/THR.py @@ -4,9 +4,7 @@ import requests import json import glob -from difflib import SequenceMatcher import cli_ui -import base64 import os import re import platform @@ -34,14 +32,14 @@ async def upload(self, session, meta): await self.edit_torrent(meta) cat_id = await self.get_cat_id(meta) subs = self.get_subtitles(meta) - pronfo = await self.edit_desc(meta) + pronfo = await self.edit_desc(meta) # noqa #F841 thr_name = unidecode(meta['name'].replace('DD+', 'DDP')) # Confirm the correct naming order for FL cli_ui.info(f"THR name: {thr_name}") - if meta.get('unattended', False) == False: + if meta.get('unattended', False) is False: thr_confirm = cli_ui.ask_yes_no("Correct?", default=False) - if thr_confirm != True: + if thr_confirm is not True: thr_name_manually = cli_ui.ask_string("Please enter a proper name", default="") if thr_name_manually == "": console.print('No proper name given') @@ -51,7 +49,6 @@ async def upload(self, session, meta): thr_name = thr_name_manually torrent_name = re.sub(r"[^0-9a-zA-Z. '\-\[\]]+", " ", thr_name) - if meta.get('is_disc', '') == 'BDMV': mi_file = None # bd_file = f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8' @@ -71,33 +68,32 @@ async def upload(self, session, meta): tfile = f.read() f.close() - #Upload Form + # Upload Form url = 'https://www.torrenthr.org/takeupload.php' files = { - 'tfile' : (f'{torrent_name}.torrent', tfile) + 'tfile': (f'{torrent_name}.torrent', tfile) } payload = { - 'name' : thr_name, - 'descr' : desc, - 'type' : cat_id, - 'url' : f"https://www.imdb.com/title/tt{meta.get('imdb_id').replace('tt', '')}/", - 'tube' : meta.get('youtube', '') + 'name': thr_name, + 'descr': desc, + 'type': cat_id, + 'url': f"https://www.imdb.com/title/tt{meta.get('imdb_id').replace('tt', '')}/", + 'tube': meta.get('youtube', '') } headers = { - 'User-Agent' : f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } - #If pronfo fails, put mediainfo into THR parser + # If pronfo fails, put mediainfo into THR parser if meta.get('is_disc', '') != 'BDMV': files['nfo'] = ("MEDIAINFO.txt", mi_file) if subs != []: payload['subs[]'] = tuple(subs) - - if meta['debug'] == False: + if meta['debug'] is False: thr_upload_prompt = True else: thr_upload_prompt = cli_ui.ask_yes_no("send to takeupload.php?", default=False) - if thr_upload_prompt == True: + if thr_upload_prompt is True: await asyncio.sleep(0.5) response = session.post(url=url, files=files, data=payload, headers=headers) try: @@ -105,18 +101,16 @@ async def upload(self, session, meta): console.print(response.text) if response.url.endswith('uploaded=1'): console.print(f'[green]Successfully Uploaded at: {response.url}') - #Check if actually uploaded - except: + # Check if actually uploaded + except Exception: if meta['debug']: console.print(response.text) console.print("It 
may have uploaded, go check") return else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(payload) - - async def get_cat_id(self, meta): if meta['category'] == "MOVIE": if meta.get('is_disc') == "BMDV": @@ -133,7 +127,7 @@ async def get_cat_id(self, meta): cat = '7' else: cat = '34' - elif meta.get('anime') != False: + elif meta.get('anime') is not False: cat = '31' return cat @@ -156,19 +150,15 @@ def get_subtitles(self, meta): if sub_langs != []: subs = [] sub_lang_map = { - 'hr' : 1, 'en' : 2, 'bs' : 3, 'sr' : 4, 'sl' : 5, - 'Croatian' : 1, 'English' : 2, 'Bosnian' : 3, 'Serbian' : 4, 'Slovenian' : 5 + 'hr': 1, 'en': 2, 'bs': 3, 'sr': 4, 'sl': 5, + 'Croatian': 1, 'English': 2, 'Bosnian': 3, 'Serbian': 4, 'Slovenian': 5 } for sub in sub_langs: language = sub_lang_map.get(sub) - if language != None: + if language is not None: subs.append(language) return subs - - - - async def edit_torrent(self, meta): if os.path.exists(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent"): THR_torrent = Torrent.read(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent") @@ -213,11 +203,11 @@ async def edit_desc(self, meta): for image in image_glob: url = "https://img2.torrenthr.org/api/1/upload" data = { - 'key' : self.config['TRACKERS']['THR'].get('img_api'), + 'key': self.config['TRACKERS']['THR'].get('img_api'), # 'source' : base64.b64encode(open(image, "rb").read()).decode('utf8') } - files = {'source' : open(image, 'rb')} - response = requests.post(url, data = data, files=files) + files = {'source': open(image, 'rb')} + response = requests.post(url, data=data, files=files) try: response = response.json() # med_url = response['image']['medium']['url'] @@ -239,18 +229,18 @@ async def edit_desc(self, meta): # ProNFO pronfo_url = f"https://www.pronfo.com/api/v1/access/upload/{self.config['TRACKERS']['THR'].get('pronfo_api_key', '')}" data = { - 'content' : open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r').read(), - 'theme' : self.config['TRACKERS']['THR'].get('pronfo_theme', 'gray'), - 'rapi' : self.config['TRACKERS']['THR'].get('pronfo_rapi_id') + 'content': open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r').read(), + 'theme': self.config['TRACKERS']['THR'].get('pronfo_theme', 'gray'), + 'rapi': self.config['TRACKERS']['THR'].get('pronfo_rapi_id') } response = requests.post(pronfo_url, data=data) try: response = response.json() - if response.get('error', True) == False: + if response.get('error', True) is False: mi_img = response.get('url') desc.write(f"\n[img]{mi_img}[/img]\n") pronfo = True - except: + except Exception: console.print('[bold red]Error parsing pronfo response, using THR parser instead') if meta['debug']: console.print(f"[red]{response}") @@ -267,9 +257,6 @@ async def edit_desc(self, meta): desc.close() return pronfo - - - def search_existing(self, session, imdb_id): from bs4 import BeautifulSoup imdb_id = imdb_id.replace('tt', '') @@ -288,14 +275,14 @@ def search_existing(self, session, imdb_id): def login(self, session): url = 'https://www.torrenthr.org/takelogin.php' payload = { - 'username' : self.username, - 'password' : self.password, - 'ssl' : 'yes' + 'username': self.username, + 'password': self.password, + 'ssl': 'yes' } headers = { - 'User-Agent' : f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' + 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } resp = session.post(url, headers=headers, data=payload) if resp.url == 
"https://www.torrenthr.org/index.php": console.print('[green]Successfully logged in') - return session + return session \ No newline at end of file diff --git a/src/trackers/TL.py b/src/trackers/TL.py index 6573fe98..87d748d3 100644 --- a/src/trackers/TL.py +++ b/src/trackers/TL.py @@ -83,7 +83,7 @@ async def upload(self, meta): open_desc = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'a+') - info_filename = 'BD_SUMMARY_00' if meta['bdinfo'] != None else 'MEDIAINFO_CLEANPATH' + info_filename = 'BD_SUMMARY_00' if meta['bdinfo'] is not None else 'MEDIAINFO_CLEANPATH' open_info = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/{info_filename}.txt", 'r', encoding='utf-8') open_desc.write('\n\n') open_desc.write(open_info.read()) @@ -96,23 +96,23 @@ async def upload(self, meta): 'torrent': (self.get_name(meta) + '.torrent', open_torrent) } data = { - 'announcekey' : self.announce_key, - 'category' : cat_id + 'announcekey': self.announce_key, + 'category': cat_id } headers = { 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})' } - if meta['debug'] == False: + if meta['debug'] is False: response = requests.post(url=self.upload_url, files=files, data=data, headers=headers) if not response.text.isnumeric(): console.print(f'[red]{response.text}') else: - console.print(f"[cyan]Request Data:") + console.print("[cyan]Request Data:") console.print(data) open_torrent.close() open_desc.close() def get_name(self, meta): path = Path(meta['path']) - return path.stem if path.is_file() else path.name + return path.stem if path.is_file() else path.name \ No newline at end of file diff --git a/src/trackers/TTG.py b/src/trackers/TTG.py index c03daef4..0f11f797 100644 --- a/src/trackers/TTG.py +++ b/src/trackers/TTG.py @@ -4,15 +4,12 @@ import asyncio import re import os -from pathlib import Path -import traceback -import json import cli_ui from str2bool import str2bool from unidecode import unidecode -from urllib.parse import urlparse, quote +from urllib.parse import urlparse from src.trackers.COMMON import COMMON -from src.exceptions import * +from src.exceptions import * # noqa #F405 from src.console import console @@ -32,7 +29,6 @@ def __init__(self, config): self.signature = None self.banned_groups = [""] - async def edit_name(self, meta): ttg_name = meta['name'] @@ -48,45 +44,44 @@ async def get_type_id(self, meta): if meta['category'] == "MOVIE": # 51 = DVDRip if meta['resolution'].startswith("720"): - type_id = 52 # 720p + type_id = 52 # 720p if meta['resolution'].startswith("1080"): - type_id = 53 # 1080p/i + type_id = 53 # 1080p/i if meta['is_disc'] == "BDMV": - type_id = 54 # Blu-ray disc + type_id = 54 # Blu-ray disc elif meta['category'] == "TV": if meta.get('tv_pack', 0) != 1: # TV Singles if meta['resolution'].startswith("720"): - type_id = 69 # 720p TV EU/US + type_id = 69 # 720p TV EU/US if lang in ('ZH', 'CN', 'CMN'): - type_id = 76 # Chinese + type_id = 76 # Chinese if meta['resolution'].startswith("1080"): - type_id = 70 # 1080 TV EU/US + type_id = 70 # 1080 TV EU/US if lang in ('ZH', 'CN', 'CMN'): - type_id = 75 # Chinese + type_id = 75 # Chinese if lang in ('KR', 'KO'): - type_id = 75 # Korean + type_id = 75 # Korean if lang in ('JA', 'JP'): - type_id = 73 # Japanese + type_id = 73 # Japanese else: # TV Packs - type_id = 87 # EN/US + type_id = 87 # EN/US if lang in ('KR', 'KO'): - type_id = 99 # Korean + type_id = 99 # Korean if lang in ('JA', 'JP'): - type_id = 88 # Japanese + type_id = 88 # Japanese if lang in ('ZH', 'CN', 'CMN'): 
- type_id = 90 # Chinese - + type_id = 90 # Chinese if "documentary" in meta.get("genres", "").lower().replace(' ', '').replace('-', '') or 'documentary' in meta.get("keywords", "").lower().replace(' ', '').replace('-', ''): if meta['resolution'].startswith("720"): - type_id = 62 # 720p + type_id = 62 # 720p if meta['resolution'].startswith("1080"): - type_id = 63 # 1080 + type_id = 63 # 1080 if meta.get('is_disc', '') == 'BDMV': - type_id = 64 # BDMV + type_id = 64 # BDMV if "animation" in meta.get("genres", "").lower().replace(' ', '').replace('-', '') or 'animation' in meta.get("keywords", "").lower().replace(' ', '').replace('-', ''): if meta.get('sd', 1) == 0: @@ -104,16 +99,12 @@ async def get_type_id(self, meta): return type_id async def get_anon(self, anon): - if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False: + if anon == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False: anon = 'no' else: anon = 'yes' return anon - ############################################################### - ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ###### - ############################################################### - async def upload(self, meta): common = COMMON(config=self.config) await common.edit_torrent(meta, self.tracker, self.source_flag) @@ -121,16 +112,16 @@ async def upload(self, meta): ttg_name = await self.edit_name(meta) # FORM - # type = category dropdown - # name = name - # descr = description - # anonymity = "yes" / "no" - # nodistr = "yes" / "no" (exclusive?) not required - # imdb_c = tt123456 - # + # type = category dropdown + # name = name + # descr = description + # anonymity = "yes" / "no" + # nodistr = "yes" / "no" (exclusive?) not required + # imdb_c = tt123456 + # # POST > upload/upload - if meta['bdinfo'] != None: + if meta['bdinfo'] is not None: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8') else: mi_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt", 'r', encoding='utf-8') @@ -143,20 +134,19 @@ async def upload(self, meta): else: torrentFileName = unidecode(os.path.basename(meta['path']).replace(' ', '.')) files = { - 'file' : (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorent"), - 'nfo' : ("torrent.nfo", mi_dump) + 'file': (f"{torrentFileName}.torrent", torrentFile, "application/x-bittorrent"), + 'nfo': ("torrent.nfo", mi_dump) } data = { - 'MAX_FILE_SIZE' : '4000000', - 'team' : '', - 'hr' : 'no', - 'name' : ttg_name, - 'type' : await self.get_type_id(meta), - 'descr' : ttg_desc.rstrip(), + 'MAX_FILE_SIZE': '4000000', + 'team': '', + 'hr': 'no', + 'name': ttg_name, + 'type': await self.get_type_id(meta), + 'descr': ttg_desc.rstrip(), - - 'anonymity' : await self.get_anon(meta['anon']), - 'nodistr' : 'no', + 'anonymity': await self.get_anon(meta['anon']), + 'nodistr': 'no', } url = "https://totheglory.im/takeupload.php" @@ -184,10 +174,9 @@ async def upload(self, meta): console.print(data) console.print("\n\n") console.print(up.text) - raise UploadException(f"Upload to TTG Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') + raise UploadException(f"Upload to TTG Failed: result URL {up.url} ({up.status_code}) was not expected", 'red') # noqa #F405 return - async def search_existing(self, meta): dupes = [] with requests.Session() as session: @@ -218,18 +207,15 @@ async def search_existing(self, meta): return dupes - - - async def validate_credentials(self, meta):
         cookiefile = os.path.abspath(f"{meta['base_dir']}/data/cookies/TTG.pkl")
         if not os.path.exists(cookiefile):
             await self.login(cookiefile)
         vcookie = await self.validate_cookies(meta, cookiefile)
-        if vcookie != True:
+        if vcookie is not True:
             console.print('[red]Failed to validate cookies. Please confirm that the site is up and your passkey is valid.')
             recreate = cli_ui.ask_yes_no("Log in again and create new session?")
-            if recreate == True:
+            if recreate is True:
                 if os.path.exists(cookiefile):
                     os.remove(cookiefile)
                 await self.login(cookiefile)
@@ -259,7 +245,7 @@ async def validate_cookies(self, meta, cookiefile):

     async def login(self, cookiefile):
         url = "https://totheglory.im/takelogin.php"
-        data={
+        data = {
             'username': self.username,
             'password': self.password,
             'passid': self.passid,
@@ -270,11 +256,11 @@ async def login(self, cookiefile):
             await asyncio.sleep(0.5)
             if response.url.endswith('2fa.php'):
                 soup = BeautifulSoup(response.text, 'html.parser')
-                auth_token = soup.find('input', {'name' : 'authenticity_token'}).get('value')
+                auth_token = soup.find('input', {'name': 'authenticity_token'}).get('value')
                 two_factor_data = {
-                    'otp' : console.input('[yellow]TTG 2FA Code: '),
-                    'authenticity_token' : auth_token,
-                    'uid' : self.uid
+                    'otp': console.input('[yellow]TTG 2FA Code: '),
+                    'authenticity_token': auth_token,
+                    'uid': self.uid
                 }
                 two_factor_url = "https://totheglory.im/take2fa.php"
                 response = session.post(two_factor_url, data=two_factor_data)
@@ -290,8 +276,6 @@ async def login(self, cookiefile):
                 console.print(response.url)
         return

-
-
     async def edit_desc(self, meta):
         base = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/DESCRIPTION.txt", 'r').read()
         with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt", 'w') as descfile:
@@ -304,7 +288,7 @@ async def edit_desc(self, meta):
                 descfile.write(ptgen)

             # Add This line for all web-dls
-            if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) == None:
+            if meta['type'] == 'WEBDL' and meta.get('service_longname', '') != '' and meta.get('description', None) is None:
                 descfile.write(f"[center][b][color=#ff00ff][size=3]{meta['service_longname']}ηš„ζ— ζŸREMUXη‰‡ζΊοΌŒζ²‘ζœ‰θ½¬η /This release is sourced from {meta['service_longname']} and is not transcoded, just remuxed from the direct {meta['service_longname']} stream[/size][/color][/b][/center]")
             bbcode = BBCODE()
             if meta.get('discs', []) != []:
@@ -337,7 +321,7 @@ async def edit_desc(self, meta):
                     img_url = images[each]['img_url']
                     descfile.write(f"[url={web_url}][img]{img_url}[/img][/url]")
                 descfile.write("[/center]")
-            if self.signature != None:
+            if self.signature is not None:
                 descfile.write("\n\n")
                 descfile.write(self.signature)
             descfile.close()
@@ -350,4 +334,4 @@ async def download_new_torrent(self, id, torrent_path):
                 tor.write(r.content)
         else:
             console.print("[red]There was an issue downloading the new .torrent from TTG")
-            console.print(r.text)
+            console.print(r.text)
\ No newline at end of file
diff --git a/src/trackers/UNIT3D_TEMPLATE.py b/src/trackers/UNIT3D_TEMPLATE.py
index 93d8d316..301c1ff4 100644
--- a/src/trackers/UNIT3D_TEMPLATE.py
+++ b/src/trackers/UNIT3D_TEMPLATE.py
@@ -2,7 +2,6 @@
 # import discord
 import asyncio
 import requests
-import os
 import platform
 from str2bool import str2bool
@@ -20,7 +19,7 @@ class UNIT3D_TEMPLATE():
     """

     ###############################################################
-    ######## EDIT ME ########
+    ######## EDIT ME ########  # noqa: E266
     ###############################################################
     # ALSO EDIT CLASS NAME ABOVE

@@ -39,7 +38,7 @@ async def get_cat_id(self, category_name):
         category_id = {
             'MOVIE': '1',
             'TV': '2',
-        }.get(category_name, '0')
+        }.get(category_name, '0')
         return category_id

     async def get_type_id(self, type):
@@ -50,27 +49,27 @@ async def get_type_id(self, type):
             'WEBRIP': '5',
             'HDTV': '6',
             'ENCODE': '3'
-        }.get(type, '0')
+        }.get(type, '0')
         return type_id

     async def get_res_id(self, resolution):
         resolution_id = {
-            '8640p':'10',
+            '8640p': '10',
             '4320p': '1',
             '2160p': '2',
-            '1440p' : '3',
+            '1440p': '3',
             '1080p': '3',
-            '1080i':'4',
+            '1080i': '4',
             '720p': '5',
             '576p': '6',
             '576i': '7',
             '480p': '8',
             '480i': '9'
-        }.get(resolution, '10')
+        }.get(resolution, '10')
         return resolution_id

     ###############################################################
-    ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ######
+    ###### STOP HERE UNLESS EXTRA MODIFICATION IS NEEDED ######  # noqa: E266
     ###############################################################

     async def upload(self, meta):
@@ -82,12 +81,12 @@ async def upload(self, meta):
         await common.unit3d_edit_desc(meta, self.tracker, self.signature)
         region_id = await common.unit3d_region_ids(meta.get('region'))
         distributor_id = await common.unit3d_distributor_ids(meta.get('distributor'))
-        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
             anon = 0
         else:
             anon = 1

-        if meta['bdinfo'] != None:
+        if meta['bdinfo'] is not None:
             mi_dump = None
             bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
         else:
@@ -97,31 +96,31 @@ async def upload(self, meta):
         open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent", 'rb')
         files = {'torrent': open_torrent}
         data = {
-            'name' : meta['name'],
-            'description' : desc,
-            'mediainfo' : mi_dump,
-            'bdinfo' : bd_dump,
-            'category_id' : cat_id,
-            'type_id' : type_id,
-            'resolution_id' : resolution_id,
-            'tmdb' : meta['tmdb'],
-            'imdb' : meta['imdb_id'].replace('tt', ''),
-            'tvdb' : meta['tvdb_id'],
-            'mal' : meta['mal_id'],
-            'igdb' : 0,
-            'anonymous' : anon,
-            'stream' : meta['stream'],
-            'sd' : meta['sd'],
-            'keywords' : meta['keywords'],
-            'personal_release' : int(meta.get('personalrelease', False)),
-            'internal' : 0,
-            'featured' : 0,
-            'free' : 0,
-            'doubleup' : 0,
-            'sticky' : 0,
+            'name': meta['name'],
+            'description': desc,
+            'mediainfo': mi_dump,
+            'bdinfo': bd_dump,
+            'category_id': cat_id,
+            'type_id': type_id,
+            'resolution_id': resolution_id,
+            'tmdb': meta['tmdb'],
+            'imdb': meta['imdb_id'].replace('tt', ''),
+            'tvdb': meta['tvdb_id'],
+            'mal': meta['mal_id'],
+            'igdb': 0,
+            'anonymous': anon,
+            'stream': meta['stream'],
+            'sd': meta['sd'],
+            'keywords': meta['keywords'],
+            'personal_release': int(meta.get('personalrelease', False)),
+            'internal': 0,
+            'featured': 0,
+            'free': 0,
+            'doubleup': 0,
+            'sticky': 0,
         }
         # Internal
-        if self.config['TRACKERS'][self.tracker].get('internal', False) == True:
+        if self.config['TRACKERS'][self.tracker].get('internal', False) is True:
             if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):
                 data['internal'] = 1

@@ -136,35 +135,31 @@ async def upload(self, meta):
         headers = {
             'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'
         }
         params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip()
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
         }

-        if meta['debug'] == False:
+        if meta['debug'] is False:
             response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)
             try:
                 console.print(response.json())
-            except:
+            except Exception:
                 console.print("It may have uploaded, go check")
                 return
         else:
-            console.print(f"[cyan]Request Data:")
+            console.print("[cyan]Request Data:")
             console.print(data)
         open_torrent.close()

-
-
-
-
     async def search_existing(self, meta):
         dupes = []
         console.print("[yellow]Searching for existing torrents on site...")
         params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(),
-            'tmdbId' : meta['tmdb'],
-            'categories[]' : await self.get_cat_id(meta['category']),
-            'types[]' : await self.get_type_id(meta['type']),
-            'resolutions[]' : await self.get_res_id(meta['resolution']),
-            'name' : ""
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdbId': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category']),
+            'types[]': await self.get_type_id(meta['type']),
+            'resolutions[]': await self.get_res_id(meta['resolution']),
+            'name': ""
         }
         if meta.get('edition', "") != "":
             params['name'] = params['name'] + f" {meta['edition']}"
@@ -176,8 +171,8 @@ async def search_existing(self, meta):
                 # difference = SequenceMatcher(None, meta['clean_name'], result).ratio()
                 # if difference >= 0.05:
                 dupes.append(result)
-        except:
+        except Exception:
             console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
             await asyncio.sleep(5)

-        return dupes
+        return dupes
\ No newline at end of file
diff --git a/src/trackers/UTP.py b/src/trackers/UTP.py
index ca90b5b9..e8bbc5f5 100644
--- a/src/trackers/UTP.py
+++ b/src/trackers/UTP.py
@@ -2,13 +2,13 @@
 # import discord
 import asyncio
 import requests
-import distutils.util
-import os
+from str2bool import str2bool
 import platform

 from src.trackers.COMMON import COMMON
 from src.console import console

+
 class UTP():
     """
     Edit for Tracker:
@@ -37,12 +37,12 @@ async def upload(self, meta):
         resolution_id = await self.get_res_id(meta['resolution'])
         region_id = await common.unit3d_region_ids(meta.get('region'))
         distributor_id = await common.unit3d_distributor_ids(meta.get('distributor'))
-        if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) == False:
+        if meta['anon'] == 0 and bool(str2bool(str(self.config['TRACKERS'][self.tracker].get('anon', "False")))) is False:
             anon = 0
         else:
             anon = 1

-        if meta['bdinfo'] != None:
+        if meta['bdinfo'] is not None:
             mi_dump = None
             bd_dump = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt", 'r', encoding='utf-8').read()
         else:
@@ -52,31 +52,31 @@ async def upload(self, meta):
         open_torrent = open(f"{meta['base_dir']}/tmp/{meta['uuid']}/[UTOPIA]{meta['clean_name']}.torrent", 'rb')
         files = {'torrent': ("placeholder.torrent", open_torrent, "application/x-bittorrent")}
         data = {
-            'name' : meta['name'],
-            'description' : desc,
-            'mediainfo' : mi_dump,
-            'bdinfo' : bd_dump,
-            'category_id' : cat_id,
-            'type_id' : type_id,
-            'resolution_id' : resolution_id,
-            'tmdb' : meta['tmdb'],
-            'imdb' : meta['imdb_id'].replace('tt', ''),
-            'tvdb' : meta['tvdb_id'],
-            'mal' : meta['mal_id'],
-            'igdb' : 0,
-            'anonymous' : anon,
-            'stream' : meta['stream'],
-            'sd' : meta['sd'],
-            'keywords' : meta['keywords'],
-            'personal_release' : int(meta.get('personalrelease', False)),
-            'internal' : 0,
-            'featured' : 0,
-            'free' : 0,
-            'doubleup' : 0,
-            'sticky' : 0,
+            'name': meta['name'],
+            'description': desc,
+            'mediainfo': mi_dump,
+            'bdinfo': bd_dump,
+            'category_id': cat_id,
+            'type_id': type_id,
+            'resolution_id': resolution_id,
+            'tmdb': meta['tmdb'],
+            'imdb': meta['imdb_id'].replace('tt', ''),
+            'tvdb': meta['tvdb_id'],
+            'mal': meta['mal_id'],
+            'igdb': 0,
+            'anonymous': anon,
+            'stream': meta['stream'],
+            'sd': meta['sd'],
+            'keywords': meta['keywords'],
+            'personal_release': int(meta.get('personalrelease', False)),
+            'internal': 0,
+            'featured': 0,
+            'free': 0,
+            'doubleup': 0,
+            'sticky': 0,
         }
         # Internal
-        if self.config['TRACKERS'][self.tracker].get('internal', False) == True:
+        if self.config['TRACKERS'][self.tracker].get('internal', False) is True:
             if meta['tag'] != "" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):
                 data['internal'] = 1

@@ -94,29 +94,25 @@ async def upload(self, meta):
             'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()
         }

-        if meta['debug'] == False:
+        if meta['debug'] is False:
             response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)
             try:
                 console.print(response.json())
-            except:
+            except Exception:
                 console.print("It may have uploaded, go check")
                 return
         else:
-            console.print(f"[cyan]Request Data:")
+            console.print("[cyan]Request Data:")
             console.print(data)
         open_torrent.close()

-
-
-
-
     async def get_cat_id(self, category_name, edition):
         category_id = {
             'MOVIE': '1',
             'TV': '2',
             'FANRES': '3'
-        }.get(category_name, '0')
+        }.get(category_name, '0')
         if category_name == 'MOVIE' and 'FANRES' in edition:
             category_id = '3'
         return category_id
@@ -129,7 +125,7 @@ async def get_type_id(self, type):
             'WEBRIP': '5',
             'HDTV': '6',
             'ENCODE': '3'
-        }.get(type, '0')
+        }.get(type, '0')
         return type_id

     async def get_res_id(self, resolution):
@@ -138,22 +134,19 @@ async def get_res_id(self, resolution):
         resolution_id = {
             '2160p': '2',
             '1080p': '3',
             '1080i': '4'
-        }.get(resolution, '1')
+        }.get(resolution, '1')
         return resolution_id

-
-
-
     async def search_existing(self, meta):
         dupes = []
         console.print("[yellow]Searching for existing torrents on site...")
         params = {
-            'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(),
-            'tmdbId' : meta['tmdb'],
-            'categories[]' : await self.get_cat_id(meta['category'], meta.get('edition', '')),
-            'types[]' : await self.get_type_id(meta['type']),
-            'resolutions[]' : await self.get_res_id(meta['resolution']),
-            'name' : ""
+            'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip(),
+            'tmdbId': meta['tmdb'],
+            'categories[]': await self.get_cat_id(meta['category'], meta.get('edition', '')),
+            'types[]': await self.get_type_id(meta['type']),
+            'resolutions[]': await self.get_res_id(meta['resolution']),
+            'name': ""
         }
         if meta['category'] == 'TV':
             params['name'] = params['name'] + f" {meta.get('season', '')}{meta.get('episode', '')}"
@@ -167,8 +160,8 @@ async def search_existing(self, meta):
                 # difference = SequenceMatcher(None, meta['clean_name'], result).ratio()
                 # if difference >= 0.05:
                 dupes.append(result)
-        except:
+        except Exception:
             console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')
             await asyncio.sleep(5)

-        return dupes
+        return dupes
\ No newline at end of file
diff --git a/src/vs.py b/src/vs.py
index fe25bf95..768f333f 100644
--- a/src/vs.py
+++ b/src/vs.py
@@ -1,15 +1,12 @@
 import vapoursynth as vs
-core = vs.core
-from awsmfunc import ScreenGen, DynamicTonemap, FrameInfo, zresize
+from awsmfunc import ScreenGen, DynamicTonemap, zresize
 import random
-import argparse
-from typing import Union, List
-from pathlib import Path
-import os, sys
-import platform
-import multiprocessing
+import os
 from functools import partial
+core = vs.core
+
+
 def CustomFrameInfo(clip, text):
     def FrameProps(n, f, clip):
         # Modify the frame properties extraction here to avoid the decode issue
@@ -20,6 +17,7 @@ def FrameProps(n, f, clip):
     # Apply FrameProps to each frame
     return core.std.FrameEval(clip, partial(FrameProps, clip=clip), prop_src=clip)

+
 def optimize_images(image, config):
     import platform  # Ensure platform is imported here
     if config.get('optimize_images', True):
@@ -27,7 +25,7 @@ def optimize_images(image, config):
         try:
             pyver = platform.python_version_tuple()
             if int(pyver[0]) == 3 and int(pyver[1]) >= 7:
-                import oxipng
+                import oxipng
                 if os.path.getsize(image) >= 16000000:
                     oxipng.optimize(image, level=6)
                 else:
@@ -36,6 +34,7 @@ def optimize_images(image, config):
             print(f"Image optimization failed: {e}")
     return

+
def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir=".", config=None):
     if config is None:
         config = {'optimize_images': True}  # Default configuration
@@ -87,10 +86,10 @@ def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir=".", conf
     if not frames:
         for _ in range(num):
             frames.append(random.randint(start, end))
-        frames = sorted(frames)
-        frames = [f"{x}\n" for x in frames]
+        frames = sorted(frames)
+        frames = [f"{x}\n" for x in frames]

-        # Write the frame numbers to a file for reuse
+        # Write the frame numbers to a file for reuse
         with open(screens_file, "w") as txt:
             txt.writelines(frames)
         print(f"Generated and saved new frame numbers to {screens_file}")
@@ -115,7 +114,7 @@ def vs_screengn(source, encode=None, filter_b_frames=False, num=5, dir=".", conf
         tonemapped = True
         src = DynamicTonemap(src, src_fmt=False, libplacebo=True, adjust_gamma=True)
         if encode:
-            enc = DynamicTonemap(enc, src_fmt=False, libplacebo=True, adjust_gamma=True)
+            enc = DynamicTonemap(enc, src_fmt=False, libplacebo=True, adjust_gamma=True)

     # Use the custom FrameInfo function
     if tonemapped:
diff --git a/upload.py b/upload.py
index 17a8f6bd..9a6dd34b 100755
--- a/upload.py
+++ b/upload.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-
 import requests
 from src.args import Args
 from src.clients import Clients
@@ -45,26 +43,23 @@
 import os
 import sys
 import platform
-import multiprocessing
-import logging
 import shutil
 import glob
 import cli_ui
+import traceback

 from src.console import console
 from rich.markdown import Markdown
 from rich.style import Style

-
 cli_ui.setup(color='always', title="L4G's Upload Assistant")
-import traceback

 base_dir = os.path.abspath(os.path.dirname(__file__))

 try:
     from data.config import config
-except:
+except Exception:
     if not os.path.exists(os.path.abspath(f"{base_dir}/data/config.py")):
         try:
             if os.path.exists(os.path.abspath(f"{base_dir}/data/config.json")):
@@ -79,11 +74,11 @@
                 from data.config import config
             else:
                 raise NotImplementedError
-        except:
+        except Exception:
             cli_ui.info(cli_ui.red, "We have switched from .json to .py for config to have a much more lenient experience")
cli_ui.info(cli_ui.red, "Looks like the auto updater didnt work though") cli_ui.info(cli_ui.red, "Updating is just 2 easy steps:") - cli_ui.info(cli_ui.red, "1: Rename", cli_ui.yellow, os.path.abspath(f"{base_dir}/data/config.json"), cli_ui.red, "to", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py") ) + cli_ui.info(cli_ui.red, "1: Rename", cli_ui.yellow, os.path.abspath(f"{base_dir}/data/config.json"), cli_ui.red, "to", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py")) cli_ui.info(cli_ui.red, "2: Add", cli_ui.green, "config = ", cli_ui.red, "to the beginning of", cli_ui.green, os.path.abspath(f"{base_dir}/data/config.py")) exit() else: @@ -91,6 +86,7 @@ client = Clients(config=config) parser = Args(config) + async def do_the_thing(base_dir): meta = dict() meta['base_dir'] = base_dir @@ -112,8 +108,8 @@ async def do_the_thing(base_dir): path = path[:-1] queue = [] if os.path.exists(path): - meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) - queue = [path] + meta, help, before_args = parser.parse(tuple(' '.join(sys.argv[1:]).split(' ')), meta) + queue = [path] else: # Search glob if dirname exists if os.path.exists(os.path.dirname(path)) and len(paths) <= 1: @@ -139,11 +135,11 @@ async def do_the_thing(base_dir): p1 = split_path[0] for i, each in enumerate(split_path): try: - if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i+1]}"): + if os.path.exists(p1) and not os.path.exists(f"{p1} {split_path[i + 1]}"): queue.append(p1) - p1 = split_path[i+1] + p1 = split_path[i + 1] else: - p1 += f" {split_path[i+1]}" + p1 += f" {split_path[i + 1]}" except IndexError: if os.path.exists(p1): queue.append(p1) @@ -157,10 +153,9 @@ async def do_the_thing(base_dir): else: # Add Search Here - console.print(f"[red]There was an issue with your input. If you think this was not an issue, please make a report that includes the full command used.") + console.print("[red]There was an issue with your input. 
If you think this was not an issue, please make a report that includes the full command used.") exit() - base_meta = {k: v for k, v in meta.items()} for path in queue: meta = {k: v for k, v in base_meta.items()} @@ -172,7 +167,7 @@ async def do_the_thing(base_dir): for key, value in saved_meta.items(): overwrite_list = [ 'trackers', 'dupe', 'debug', 'anon', 'category', 'type', 'screens', 'nohash', 'manual_edition', 'imdb', 'tmdb_manual', 'mal', 'manual', - 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_title', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'region', 'freeleech', + 'hdb', 'ptp', 'blu', 'no_season', 'no_aka', 'no_year', 'no_dub', 'no_tag', 'no_seed', 'client', 'desclink', 'descfile', 'desc', 'draft', 'region', 'freeleech', 'personalrelease', 'unattended', 'season', 'episode', 'torrent_creation', 'qbit_tag', 'qbit_cat', 'skip_imghost_upload', 'imghost', 'manual_source', 'webdv', 'hardcoded-subs' ] if meta.get(key, None) != value and key in overwrite_list: @@ -182,7 +177,7 @@ async def do_the_thing(base_dir): except FileNotFoundError: pass console.print(f"[green]Gathering info for {os.path.basename(path)}") - if meta['imghost'] == None: + if meta['imghost'] is None: meta['imghost'] = config['DEFAULT']['img_host_1'] if not meta['unattended']: ua = config['DEFAULT'].get('auto_mode', False) @@ -193,41 +188,41 @@ async def do_the_thing(base_dir): meta = await prep.gather_prep(meta=meta, mode='cli') meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta) - if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) == False: + if meta.get('image_list', False) in (False, []) and meta.get('skip_imghost_upload', False) is False: return_dict = {} - meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'],[], return_dict) + meta['image_list'], dummy_var = prep.upload_screens(meta, meta['screens'], 1, 0, meta['screens'], [], return_dict) if meta['debug']: console.print(meta['image_list']) # meta['uploaded_screens'] = True - elif meta.get('skip_imghost_upload', False) == True and meta.get('image_list', False) == False: + elif meta.get('skip_imghost_upload', False) is True and meta.get('image_list', False) is False: meta['image_list'] = [] if not os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")): reuse_torrent = None - if meta.get('rehash', False) == False: + if meta.get('rehash', False) is False: reuse_torrent = await client.find_existing_torrent(meta) - if reuse_torrent != None: + if reuse_torrent is not None: prep.create_base_from_existing_torrent(reuse_torrent, meta['base_dir'], meta['uuid']) - if meta['nohash'] == False and reuse_torrent == None: + if meta['nohash'] is False and reuse_torrent is None: prep.create_torrent(meta, Path(meta['path']), "BASE") if meta['nohash']: meta['client'] = "none" - elif os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) and meta.get('rehash', False) == True and meta['nohash'] == False: + elif os.path.exists(os.path.abspath(f"{meta['base_dir']}/tmp/{meta['uuid']}/BASE.torrent")) and meta.get('rehash', False) is True and meta['nohash'] is False: prep.create_torrent(meta, Path(meta['path']), "BASE") if int(meta.get('randomized', 0)) >= 1: prep.create_random_torrents(meta['base_dir'], meta['uuid'], meta['randomized'], meta['path']) - if meta.get('trackers', None) != None: + if meta.get('trackers', None) is not None: 
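+            # A tracker list passed via --trackers takes precedence here; the else
+            # branch below falls back to default_trackers from the config.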
             trackers = meta['trackers']
         else:
             trackers = config['TRACKERS']['default_trackers']
         if "," in trackers:
             trackers = trackers.split(',')
-        with open (f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f:
+        with open(f"{meta['base_dir']}/tmp/{meta['uuid']}/meta.json", 'w') as f:
             json.dump(meta, f, indent=4)
             f.close()
         confirm = get_confirmation(meta)
-        while confirm == False:
+        while confirm is False:
             # help.print_help()
             editargs = cli_ui.ask_string("Input args that need correction e.g.(--tag NTb --category tv --tmdb 12345)")
             editargs = (meta['path'],) + tuple(editargs.split())
@@ -240,24 +235,20 @@ async def do_the_thing(base_dir):
             meta['name_notag'], meta['name'], meta['clean_name'], meta['potential_missing'] = await prep.get_name(meta)
             confirm = get_confirmation(meta)

-        if isinstance(trackers, list) == False:
+        if isinstance(trackers, list) is False:
             trackers = [trackers]
         trackers = [s.strip().upper() for s in trackers]
         if meta.get('manual', False):
             trackers.insert(0, "MANUAL")
-
-
-        ####################################
-        #######  Upload to Trackers  #######
-        ####################################
+        # Upload to Trackers
         common = COMMON(config=config)
-        api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'RF', 'ACM','LCD','LST','HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'ULCX', 'AL']
-        http_trackers = ['HDB', 'TTG', 'FL', 'PTER', 'HDT', 'MTV']
+        api_trackers = ['BLU', 'AITHER', 'STC', 'R4E', 'RF', 'ACM', 'LCD', 'LST', 'HUNO', 'SN', 'LT', 'NBL', 'ANT', 'JPTV', 'OE', 'BHDTV', 'RTF', 'OTW', 'FNP', 'CBR', 'UTP', 'ULCX', 'AL', 'HDB']
+        http_trackers = ['TTG', 'FL', 'PTER', 'HDT', 'MTV']
         tracker_class_map = {
-            'BLU' : BLU, 'BHD': BHD, 'AITHER' : AITHER, 'STC' : STC, 'R4E' : R4E, 'THR' : THR, 'HP' : HP, 'PTP' : PTP, 'RF' : RF,
-            'SN' : SN, 'ACM' : ACM, 'HDB' : HDB, 'LCD': LCD, 'TTG' : TTG, 'LST' : LST, 'HUNO': HUNO, 'FL' : FL, 'LT' : LT, 'NBL' : NBL,
-            'ANT' : ANT, 'PTER': PTER, 'JPTV' : JPTV, 'TL' : TL, 'HDT' : HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF,
+            'BLU': BLU, 'BHD': BHD, 'AITHER': AITHER, 'STC': STC, 'R4E': R4E, 'THR': THR, 'HP': HP, 'PTP': PTP, 'RF': RF,
+            'SN': SN, 'ACM': ACM, 'HDB': HDB, 'LCD': LCD, 'TTG': TTG, 'LST': LST, 'HUNO': HUNO, 'FL': FL, 'LT': LT, 'NBL': NBL,
+            'ANT': ANT, 'PTER': PTER, 'JPTV': JPTV, 'TL': TL, 'HDT': HDT, 'MTV': MTV, 'OE': OE, 'BHDTV': BHDTV, 'RTF': RTF,
             'OTW': OTW, 'FNP': FNP, 'CBR': CBR, 'UTP': UTP, 'ULCX': ULCX, 'AL':AL}

         for tracker in trackers:
@@ -285,7 +276,7 @@ async def do_the_thing(base_dir):
                 dupes = await common.filter_dupes(dupes, meta)
                 # note BHDTV does not have search implemented.
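+                # dupe_check() prints any potential matches and records the decision
+                # in meta['upload']; unattended runs refuse to continue unless
+                # --skip-dupe-check was passed.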
                 meta = dupe_check(dupes, meta)
-                if meta['upload'] == True:
+                if meta['upload'] is True:
                     await tracker_class.upload(meta)
                     if tracker == 'SN':
                         await asyncio.sleep(16)
@@ -301,11 +292,11 @@ async def do_the_thing(base_dir):
                 console.print(f"Uploading to {tracker}")
                 if check_banned_group(tracker_class.tracker, tracker_class.banned_groups, meta):
                     continue
-                if await tracker_class.validate_credentials(meta) == True:
+                if await tracker_class.validate_credentials(meta) is True:
                     dupes = await tracker_class.search_existing(meta)
                     dupes = await common.filter_dupes(dupes, meta)
                     meta = dupe_check(dupes, meta)
-                    if meta['upload'] == True:
+                    if meta['upload'] is True:
                         await tracker_class.upload(meta)
                         await client.add_to_client(meta, tracker_class.tracker)

@@ -313,7 +304,7 @@ async def do_the_thing(base_dir):
                 if meta['unattended']:
                     do_manual = True
                 else:
-                    do_manual = cli_ui.ask_yes_no(f"Get files for manual upload?", default=True)
+                    do_manual = cli_ui.ask_yes_no("Get files for manual upload?", default=True)
                 if do_manual:
                     for manual_tracker in trackers:
                         if manual_tracker != 'MANUAL':
@@ -324,7 +315,7 @@ async def do_the_thing(base_dir):
                             else:
                                 await tracker_class.edit_desc(meta)
                     url = await prep.package(meta)
-                    if url == False:
+                    if url is False:
                         console.print(f"[yellow]Unable to upload prep files, they can be found at `tmp/{meta['uuid']}`")
                     else:
                         console.print(f"[green]{meta['name']}")
@@ -348,7 +339,7 @@ async def do_the_thing(base_dir):
                     dupes = await bhd.search_existing(meta)
                     dupes = await common.filter_dupes(dupes, meta)
                     meta = dupe_check(dupes, meta)
-                    if meta['upload'] == True:
+                    if meta['upload'] is True:
                         await bhd.upload(meta)
                         await client.add_to_client(meta, "BHD")

@@ -359,11 +350,11 @@ async def do_the_thing(base_dir):
                 upload_to_thr = cli_ui.ask_yes_no(f"Upload to THR? {debug}", default=meta['unattended'])
                 if upload_to_thr:
                     console.print("Uploading to THR")
-                    #Unable to get IMDB id/Youtube Link
+                    # Unable to get IMDB id/Youtube Link
                     if meta.get('imdb_id', '0') == '0':
                         imdb_id = cli_ui.ask_string("Unable to find IMDB id, please enter e.g.(tt1234567)")
                         meta['imdb_id'] = imdb_id.replace('tt', '').zfill(7)
-                    if meta.get('youtube', None) == None:
+                    if meta.get('youtube', None) is None:
                         youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)")
                         meta['youtube'] = youtube
                     thr = THR(config=config)
@@ -375,10 +366,10 @@ async def do_the_thing(base_dir):
                         dupes = thr.search_existing(session, meta.get('imdb_id'))
                         dupes = await common.filter_dupes(dupes, meta)
                         meta = dupe_check(dupes, meta)
-                        if meta['upload'] == True:
+                        if meta['upload'] is True:
                             await thr.upload(session, meta)
                             await client.add_to_client(meta, "THR")
-                    except:
+                    except Exception:
                         console.print(traceback.print_exc())

                 if tracker == "PTP":
@@ -397,9 +388,9 @@ async def do_the_thing(base_dir):
                     try:
                         console.print("[yellow]Searching for Group ID")
                         groupID = await ptp.get_group_by_imdb(meta['imdb_id'])
-                        if groupID == None:
+                        if groupID is None:
                             console.print("[yellow]No Existing Group found")
-                            if meta.get('youtube', None) == None or "youtube" not in str(meta.get('youtube', '')):
+                            if meta.get('youtube', None) is None or "youtube" not in str(meta.get('youtube', '')):
                                 youtube = cli_ui.ask_string("Unable to find youtube trailer, please link one e.g.(https://www.youtube.com/watch?v=dQw4w9WgXcQ)", default="")
                                 meta['youtube'] = youtube
                         meta['upload'] = True
@@ -410,12 +401,12 @@ async def do_the_thing(base_dir):
                         meta = dupe_check(dupes, meta)
                         if meta.get('imdb_info', {}) == {}:
                             meta['imdb_info'] = await prep.get_imdb_info(meta['imdb_id'], meta)
-                        if meta['upload'] == True:
+                        if meta['upload'] is True:
                             ptpUrl, ptpData = await ptp.fill_upload_form(groupID, meta)
                             await ptp.upload(meta, ptpUrl, ptpData)
                             await asyncio.sleep(5)
                             await client.add_to_client(meta, "PTP")
-                    except:
+                    except Exception:
                         console.print(traceback.print_exc())

                 if tracker == "TL":
@@ -433,7 +424,7 @@

 def get_confirmation(meta):
-    if meta['debug'] == True:
+    if meta['debug'] is True:
         console.print("[bold red]DEBUG: True")
         console.print(f"Prep material saved to {meta['base_dir']}/tmp/{meta['uuid']}")
         console.print()
@@ -455,7 +446,7 @@ def get_confirmation(meta):
     if int(meta.get('freeleech', '0')) != 0:
         cli_ui.info(f"Freeleech: {meta['freeleech']}")
     if meta['tag'] == "":
-        tag = ""
+        tag = ""
     else:
         tag = f" / {meta['tag'][1:]}"
     if meta['is_disc'] == "DVD":
@@ -464,14 +455,14 @@ def get_confirmation(meta):
         res = meta['resolution']

     cli_ui.info(f"{res} / {meta['type']}{tag}")
-    if meta.get('personalrelease', False) == True:
+    if meta.get('personalrelease', False) is True:
         cli_ui.info("Personal Release!")
     console.print()
-    if meta.get('unattended', False) == False:
+    if meta.get('unattended', False) is False:
         get_missing(meta)
-        ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) == True else "" # \a rings the bell
+        ring_the_bell = "\a" if config['DEFAULT'].get("sfx_on_prompt", True) is True else ""  # \a rings the bell
         cli_ui.info(ring_the_bell)
-
+
     # Handle the 'keep_folder' logic based on 'is disc' and 'isdir'
     if meta.get('is disc', False):
         meta['keep_folder'] = False  # Ensure 'keep_folder' is False if 'is disc' is True
@@ -479,25 +470,27 @@ def get_confirmation(meta):
     if meta['isdir']:
         if 'keep_folder' in meta:
             if meta['keep_folder']:
-                cli_ui.info_section(cli_ui.yellow, f"Uploading with --keep-folder")
+                cli_ui.info_section(cli_ui.yellow, "Uploading with --keep-folder")
                 kf_confirm = cli_ui.ask_yes_no("You specified --keep-folder. Uploading in folders might not be allowed. Are you sure you want to proceed?", default=False)
                 if not kf_confirm:
                     cli_ui.info('Aborting...')
                     exit()
-
-        cli_ui.info_section(cli_ui.yellow, f"Is this correct?")
+
+        cli_ui.info_section(cli_ui.yellow, "Is this correct?")
         cli_ui.info(f"Name: {meta['name']}")
         confirm = cli_ui.ask_yes_no("Correct?", default=False)
     else:
         cli_ui.info(f"Name: {meta['name']}")
         confirm = True
+
     return confirm

+
def dupe_check(dupes, meta):
     if not dupes:
-        console.print("[green]No dupes found")
-        meta['upload'] = True
-        return meta
+        console.print("[green]No dupes found")
+        meta['upload'] = True
+        return meta
     else:
         console.print()
         dupe_text = "\n".join(dupes)
@@ -505,7 +498,7 @@ def dupe_check(dupes, meta):
         cli_ui.info_section(cli_ui.bold, "Check if these are actually dupes!")
         cli_ui.info(dupe_text)
         if meta['unattended']:
-            if meta.get('dupe', False) == False:
+            if meta.get('dupe', False) is False:
                 console.print("[red]Found potential dupes. Aborting. If this is not a dupe, or you would like to upload anyways, pass --skip-dupe-check")
                 upload = False
             else:
@@ -513,11 +506,11 @@ def dupe_check(dupes, meta):
                 upload = True
         console.print()
         if not meta['unattended']:
-            if meta.get('dupe', False) == False:
+            if meta.get('dupe', False) is False:
                 upload = cli_ui.ask_yes_no("Upload Anyways?", default=False)
             else:
                 upload = True
-        if upload == False:
+        if upload is False:
             meta['upload'] = False
         else:
             meta['upload'] = True
@@ -549,14 +542,15 @@ def check_banned_group(tracker, banned_group_list, meta):
                     return True
     return False

+
def get_missing(meta):
     info_notes = {
-        'edition' : 'Special Edition/Release',
-        'description' : "Please include Remux/Encode Notes if possible (either here or edit your upload)",
-        'service' : "WEB Service e.g.(AMZN, NF)",
-        'region' : "Disc Region",
-        'imdb' : 'IMDb ID (tt1234567)',
-        'distributor' : "Disc Distributor e.g.(BFI, Criterion, etc)"
+        'edition': 'Special Edition/Release',
+        'description': "Please include Remux/Encode Notes if possible (either here or edit your upload)",
+        'service': "WEB Service e.g.(AMZN, NF)",
+        'region': "Disc Region",
+        'imdb': 'IMDb ID (tt1234567)',
+        'distributor': "Disc Distributor e.g.(BFI, Criterion, etc)"
     }
     missing = []
     if meta.get('imdb_id', '0') == '0':
@@ -579,6 +573,7 @@ def get_missing(meta):
     console.print()
     return

+
if __name__ == '__main__':
     pyver = platform.python_version_tuple()
     if int(pyver[0]) != 3: