import numpy as np
import argparse
import os, sys
from gui import GuiMain, GuiImage, GuiTag
import cv2
import logging
import magic
from tmsu import *
from util import *
from predictor import *
from PIL import Image
import datetime


def walk(tmsu, args):
    '''
    Walk over all files for the given base directory and all subdirectories recursively.

    Parameters:
        tmsu: TMSU wrapper instance.
        args: Argument dict.
    '''
    logger = logging.getLogger(__name__)
    logger.info("Walking files ...")
    mime = magic.Magic(mime=True)

    # Collect all files below the file directory (recursively) as absolute paths.
    files = [os.path.abspath(os.path.join(dp, f))
             for dp, dn, filenames in os.walk(args["file_dir"])
             for f in filenames]
    logger.debug("Files: {}".format(files))
    logger.info("Number of files found: {}".format(len(files)))
    if args["index"] >= len(files):
        logger.error("Invalid start index. index = {}, number of files = {}".format(args["index"], len(files)))
        return

    # Set up the prediction backend only if prediction is requested.
    if args["predict_images"] or args["predict_videos"]:
        backend = {
            "torch": Predictor.BackendTorch,
            "tensorflow": Predictor.BackendTensorflow,
            "keras": Predictor.BackendTensorflow
        }.get(args["predict_images_backend"])
        if backend == Predictor.BackendTorch:
            predictor = Predictor(Predictor.BackendTorch(top=args["predict_images_top"]))
        elif backend == Predictor.BackendTensorflow:
            predictor = Predictor(Predictor.BackendTensorflow(
                top=args["predict_images_top"],
                detail=(not args["predict_images_skip_detail"]),
                detail_factor=args["predict_images_detail_factor"]))

    for i in range(args["index"], len(files)):
        file_path = files[i]
        logger.info("Handling file {}, {}".format(i, file_path))
        tags = tmsu.tags(file_path)
        not_empty = bool(tags)
        logger.info("Existing tags: {}".format(tags))
        if not_empty and args["skip_tagged"]:
            logger.info("Already tagged, skipping.")
            continue
        if args["open_system"]:
            open_system(file_path)

        if args["tag_metadata"]:
            # Base name and extension
            base = os.path.splitext(os.path.basename(file_path))
            if base[1]:
                tags.update({base[0], base[1]})
            else:
                tags.update({base[0]})
            # File creation and modification time
            time_c = datetime.datetime.fromtimestamp(os.path.getctime(file_path))
            time_m = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
            tags.update({time_c.strftime("%Y-%m-%d"), time_c.strftime("%Y"), time_c.strftime("%B"),
                         time_c.strftime("%A"), time_c.strftime("%Hh")})
            if time_c != time_m:
                tags.update({time_m.strftime("%Y-%m-%d"), time_m.strftime("%Y"), time_m.strftime("%B"),
                             time_m.strftime("%A"), time_m.strftime("%Hh")})

        # Detect MIME type for file
        mime_type = mime.from_file(file_path).split("/")
        tags.update(mime_type)

        # Handle images
        if mime_type[0] == "image":
            logger.debug("File is image")
            if args["predict_images"] or args["gui_tag"]:
                img = cv2.imread(file_path)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                if args["predict_images"]:
                    logger.info("Predicting image tags ...")
                    tags_predict = predictor.predict(img)
                    logger.info("Predicted tags: {}".format(tags_predict))
                    tags.update(tags_predict)
                if args["gui_tag"]:
                    while True:  # For GUI inputs (rotate, ...)
                        logger.debug("Showing image GUI ...")
                        img_show = (image_resize(img, width=args["gui_image_length"])
                                    if img.shape[1] > img.shape[0]
                                    else image_resize(img, height=args["gui_image_length"]))
                        ret = GuiImage(i, file_path, img_show, tags).loop()
                        tags = set(ret[1]).difference({''})
                        if ret[0] == GuiImage.RETURN_ROTATE_90_CLOCKWISE:
                            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
                        elif ret[0] == GuiImage.RETURN_ROTATE_90_COUNTERCLOCKWISE:
                            img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
                        elif ret[0] == GuiImage.RETURN_NEXT:
                            break
                        elif ret[0] == GuiImage.RETURN_ABORT:
                            return
        # Handle videos
        elif mime_type[0] == "video":
            logger.debug("File is video")
            if args["predict_videos"] or args["gui_tag"]:
                cap = cv2.VideoCapture(file_path)
                n_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
                step = n_frames / args["predict_videos_key_frames"]
                logger.debug("Key frame step: {}".format(step))
                preview = None
                for frame in np.arange(0, n_frames, step):
                    cap.set(cv2.CAP_PROP_POS_FRAMES, max(0, round(frame - 1)))
                    ok, f = cap.read()
                    if not ok:
                        break
                    f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
                    if frame == 0:
                        preview = f
                    if args["predict_videos"]:
                        logger.info("Predicting video frame {} of {}".format(frame, n_frames))
                        tags_predict = predictor.predict(f)
                        logger.info("Predicted tags: {}".format(tags_predict))
                        tags.update(tags_predict)
                    else:
                        break
                cap.release()
                if args["gui_tag"] and preview is not None:
                    while True:  # For GUI inputs (rotate, ...)
                        logger.debug("Showing image GUI ...")
                        img_show = (image_resize(preview, width=args["gui_image_length"])
                                    if preview.shape[1] > preview.shape[0]
                                    else image_resize(preview, height=args["gui_image_length"]))
                        ret = GuiImage(i, file_path, img_show, tags).loop()
                        tags = set(ret[1]).difference({''})
                        if ret[0] == GuiImage.RETURN_ROTATE_90_CLOCKWISE:
                            preview = cv2.rotate(preview, cv2.ROTATE_90_CLOCKWISE)
                        elif ret[0] == GuiImage.RETURN_ROTATE_90_COUNTERCLOCKWISE:
                            preview = cv2.rotate(preview, cv2.ROTATE_90_COUNTERCLOCKWISE)
                        elif ret[0] == GuiImage.RETURN_NEXT:
                            break
                        elif ret[0] == GuiImage.RETURN_ABORT:
                            return
        # Handle all other file types
        else:
            if args["gui_tag"]:
                while True:
                    logger.debug("Showing generic tagging GUI ...")
                    ret = GuiTag(i, file_path, tags).loop()
                    tags = set(ret[1]).difference({''})
                    if ret[0] == GuiTag.RETURN_NEXT:
                        break
                    elif ret[0] == GuiTag.RETURN_ABORT:
                        return

        if (not args["gui_tag"]) and (not args["skip_prompt"]):
            tags = set(input_with_prefill("\nTags for file {}:\n".format(file_path), ','.join(tags)).split(","))

        logger.info("Tagging {}".format(tags))
        tmsu.tag(file_path, tags, untag=not_empty)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tag multiple files using TMSU.')
    parser.add_argument('-b', '--base', nargs='?', default='.', type=dir_path,
                        help='Base directory with database (default: %(default)s)')
    parser.add_argument('-f', '--file-dir', nargs='?', default='.', type=dir_path,
                        help='File directory for walking (default: %(default)s)')
    parser.add_argument('-g', '--gui', nargs='?', const=1, default=False, type=bool,
                        help='Show main GUI (default: %(default)s)')
    parser.add_argument('--tmsu-command', nargs='?', const=1, default="tmsu", type=str,
                        help='TMSU command override (default: %(default)s)')
    parser.add_argument('--tag-metadata', nargs='?', const=1, default=True, type=bool,
                        help='Use metadata as default tags (default: %(default)s)')
    parser.add_argument('--predict-images', nargs='?', const=1, default=False, type=bool,
                        help='Use prediction for image tagging (default: %(default)s)')
    parser.add_argument('--predict-images-backend', nargs='?', const=1, choices=["torch", "tensorflow", "keras"],
                        default="torch", type=str.lower,
                        help='Determines which backend should be used for keyword prediction (default: %(default)s)')
    parser.add_argument('--predict-images-top', nargs='?', const=1, default=10, type=int,
                        help='Defines how many top prediction keywords should be used (default: %(default)s)')
    parser.add_argument('--predict-images-detail-factor', nargs='?', const=1, default=2, type=int,
                        help='Width factor for detail scan, multiplied by 224 for ResNet50 (default: %(default)s)')
    parser.add_argument('--predict-images-skip-detail', nargs='?', const=1, default=False, type=bool,
                        help='Skip detail scan in image prediction (default: %(default)s)')
    parser.add_argument('--predict-videos', nargs='?', const=1, default=False, type=bool,
                        help='Use prediction for video tagging (default: %(default)s)')
    parser.add_argument('--predict-videos-key-frames', nargs='?', const=1, default=5, type=int,
                        help='Defines how many key frames are used to predict videos (default: %(default)s)')
    parser.add_argument('--gui-tag', nargs='?', const=1, default=False, type=bool,
                        help='Show GUI for tagging (default: %(default)s)')
    parser.add_argument('--gui-image-length', nargs='?', const=1, default=800, type=int,
                        help='Length of longest side for preview (default: %(default)s)')
    parser.add_argument('--open-system', nargs='?', const=1, default=False, type=bool,
                        help='Open all files with system default (default: %(default)s)')
    parser.add_argument('--skip-prompt', nargs='?', const=1, default=False, type=bool,
                        help='Skip prompt for file tags (default: %(default)s)')
    parser.add_argument('--skip-tagged', nargs='?', const=1, default=False, type=bool,
                        help='Skip already tagged files (default: %(default)s)')
    parser.add_argument('-i', '--index', nargs='?', const=1, default=0, type=int,
                        help='Start tagging at the given file index (default: %(default)s)')
    parser.add_argument('-v', '--verbose', action="count", default=0, help="Verbosity level")
    args = parser.parse_args()

    # Map verbosity count to log level.
    if args.verbose == 0:
        log_level = logging.WARNING
    elif args.verbose == 1:
        log_level = logging.INFO
    else:
        log_level = logging.DEBUG
    logging.basicConfig(stream=sys.stdout, level=log_level)
    logger = logging.getLogger(__name__)

    # Convert the parsed namespace into a plain dict so the GUI can modify it.
    args = {
        "base": args.base,
        "file_dir": args.file_dir,
        "gui": args.gui,
        "tmsu_command": args.tmsu_command,
        "tag_metadata": args.tag_metadata,
        "predict_images": args.predict_images,
        "predict_images_backend": args.predict_images_backend,
        "predict_images_top": args.predict_images_top,
        "predict_images_detail_factor": args.predict_images_detail_factor,
        "predict_images_skip_detail": args.predict_images_skip_detail,
        "predict_videos": args.predict_videos,
        "predict_videos_key_frames": args.predict_videos_key_frames,
        "gui_tag": args.gui_tag,
        "gui_image_length": args.gui_image_length,
        "open_system": args.open_system,
        "skip_prompt": args.skip_prompt,
        "skip_tagged": args.skip_tagged,
        "index": args.index,
        "verbosity": args.verbose
    }
    logger.debug("args = {}".format(args))

    if args["gui"]:
        logger.debug("Starting main GUI ...")
        args = GuiMain(args).loop()

    tmsu = TMSU(args["base"], args["tmsu_command"])
    if tmsu.status:
        walk(tmsu, args)