#!/usr/bin/env python3
# -*- coding: utf-8 -*-
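"""Generate Kaldi or kraken HTR training data from transcriptions on Arkindex.

Example invocation (all ids are hypothetical placeholders; the script name is
assumed):

    python kaldi_data_generator.py -f kaldi -n my_dataset -o /tmp/kaldi_data \
        --volumes <volume-uuid> --accepted_worker_version_ids manual
"""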

import argparse
import getpass
import os
import random
from collections import Counter, defaultdict
from enum import Enum
from pathlib import Path
from typing import List

import cv2
import numpy as np
import tqdm
from apistar.exceptions import ErrorResponse
from arkindex import options_from_env

from kaldi_data_generator.image_utils import (
    determine_rotate_angle,
    download_image,
    extract_min_area_rect_image,
    extract_polygon_image,
    resize_transcription_data,
    rotate,
    trim,
)
from kaldi_data_generator.utils import (
    CachedApiClient,
    TranscriptionData,
    logger,
    write_file,
)

SEED = 42
random.seed(SEED)
MANUAL = "manual"
TEXT_LINE = "text_line"
WHITE = 255
DEFAULT_RESCALE = 1.0

# Maps rotation classification names (attached to text_line elements) to the
# angle in degrees used to undo that rotation (see _save_line_image)
ROTATION_CLASSES_TO_ANGLES = {
    "rotate_0": 0,
    "rotate_left_90": 90,
    "rotate_180": 180,
    "rotate_right_90": -90,
}


def create_api_client(cache_dir=None):
    logger.info("Creating API client")
    return CachedApiClient(cache_root=cache_dir, **options_from_env())


class Extraction(Enum):
    boundingRect = 0
    polygon = 1
    # minimum containing rectangle with an angle (cv2.minAreaRect)
    min_area_rect = 2
    deskew_polygon = 3
    deskew_min_area_rect = 4
    skew_polygon = 5
    skew_min_area_rect = 6
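
# Extraction modes are selected by name on the command line via
# `-e`/`--extraction_mode`, e.g. `-e deskew_polygon` resolves to
# Extraction.deskew_polygon through an `Extraction[name]` lookup.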


class HTRDataGenerator:
    def __init__(
        self,
        format,
        dataset_name="foo",
        out_dir_base="/tmp/kaldi_data",
        grayscale=True,
        extraction=Extraction.boundingRect,
        accepted_classes=None,
        filter_printed=False,
        skip_vertical_lines=False,
        accepted_worker_version_ids=None,
        transcription_type=TEXT_LINE,
        max_deskew_angle=45,
        skew_angle=0,
        should_rotate=False,
        scale_x=None,
        scale_y_top=None,
        scale_y_bottom=None,
        cache_dir=None,
        api_client=None,
    ):

        self.format = format
        self.out_dir_base = out_dir_base
        self.dataset_name = dataset_name
        self.grayscale = grayscale
        self.extraction_mode = extraction
        self.accepted_classes = accepted_classes
        self.should_filter_by_class = bool(self.accepted_classes)
        self.accepted_worker_version_ids = accepted_worker_version_ids
        self.should_filter_by_worker = bool(self.accepted_worker_version_ids)
        self.should_filter_printed = filter_printed
        self.transcription_type = transcription_type
        self.skip_vertical_lines = skip_vertical_lines
        self.skipped_pages_count = 0
        self.skipped_vertical_lines_count = 0
        self.accepted_lines_count = 0
        self.max_deskew_angle = max_deskew_angle
        self.skew_angle = skew_angle
        self.should_rotate = should_rotate
        if any(s is not None for s in (scale_x, scale_y_top, scale_y_bottom)):
            self.should_resize_polygons = True
            # use 1.0 as default - no resize, if not specified
            self.scale_x = scale_x or DEFAULT_RESCALE
            self.scale_y_top = scale_y_top or DEFAULT_RESCALE
            self.scale_y_bottom = scale_y_bottom or DEFAULT_RESCALE
        else:
            self.should_resize_polygons = False
        self.api_client = api_client

        # the API marks manual transcriptions with worker_version_id=None
        if self.accepted_worker_version_ids and MANUAL in self.accepted_worker_version_ids:
            self.accepted_worker_version_ids[
                self.accepted_worker_version_ids.index(MANUAL)
            ] = None

        if self.format == "kraken":
            self.out_line_dir = out_dir_base
            os.makedirs(self.out_line_dir, exist_ok=True)
        else:
            self.out_line_text_dir = os.path.join(
                self.out_dir_base, "Transcriptions", self.dataset_name
            )
            os.makedirs(self.out_line_text_dir, exist_ok=True)
            self.out_line_img_dir = os.path.join(
                self.out_dir_base, "Lines", self.dataset_name
            )
            os.makedirs(self.out_line_img_dir, exist_ok=True)

        self.cache_dir = cache_dir
        logger.info(f"Setting up cache in {self.cache_dir}")
        self.img_cache_dir = self.cache_dir / "images"
        self.img_cache_dir.mkdir(exist_ok=True, parents=True)
        # when the cache is empty, per-image existence checks can be skipped
        self._cache_is_empty = not any(self.img_cache_dir.iterdir())
        if self._cache_is_empty:
            logger.info("Image cache is empty")

        if self.grayscale:
            self._color = "grayscale"
            self._cv2_flag = cv2.IMREAD_GRAYSCALE
        else:
            self._color = "rgb"
            self._cv2_flag = cv2.IMREAD_COLOR

    def get_image(self, image_url: str, page_id: str) -> np.ndarray:
        # id is last part before full/full/0/default.jpg
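        # e.g. for a (hypothetical) URL ".../iiif/2/ark%2F123%2Ff1/full/full/0/default.jpg",
        # the id is "ark%2F123%2Ff1", stored on disk as "ark/123/f1"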
        img_id = image_url.split("/")[-5].replace("%2F", "/")

        cached_img_path = self.img_cache_dir / self._color / img_id
        if not self._cache_is_empty and cached_img_path.exists():
            logger.info(f"Cached image exists: {cached_img_path} - {page_id}")
        else:
            logger.info(f"Image not in cache: {cached_img_path} - {page_id}")
            cached_img_path.parent.mkdir(exist_ok=True, parents=True)
            pil_img = download_image(image_url)
            if self.grayscale:
                pil_img = pil_img.convert("L")
            pil_img.save(cached_img_path, format="jpeg")

        img = cv2.imread(str(cached_img_path), self._cv2_flag)
        return img

    def get_accepted_zones(self, page_id: str):
        try:
            accepted_zones = []
            for elt in self.api_client.cached_paginate(
                "ListElementChildren", id=page_id, with_best_classes=True
            ):
                class_names = [
                    classification["ml_class"]["name"]
                    for classification in elt["best_classes"]
                ]
                printed = "handwritten" not in class_names
                # accept the zone if one of its classes is accepted, unless
                # printed lines are filtered out and this line is printed
                if any(name in self.accepted_classes for name in class_names):
                    if not (self.should_filter_printed and printed):
                        accepted_zones.append(elt["zone"]["id"])
            logger.info(
                f"Number of accepted zones for page {page_id}: {len(accepted_zones)}"
            )
            return accepted_zones
        except ErrorResponse as e:
            logger.info(
                f"ListElementChildren failed {e.status_code} - {e.title} - {e.content} - {page_id}"
            )
            raise e

    def _validate_transcriptions(self, page_id: str, lines: List[TranscriptionData]):
        if not lines:
            return

        # each line element should have exactly one transcription
        line_elem_counter = Counter([trans.element_id for trans in lines])
        most_common = line_elem_counter.most_common(10)
        if most_common[0][1] > 1:
            logger.error("Line elements have multiple transcriptions! Showing top 10:")
            logger.error(f"{most_common}")
            raise ValueError(f"Multiple transcriptions: {most_common[0]}")

        worker_version_counter = Counter([trans.worker_version_id for trans in lines])
        if len(worker_version_counter) > 1:
            logger.warning(
                f"There are transcriptions from multiple worker versions on this page: {page_id}:"
            )
            logger.warning(
                f"Top 10 worker versions: {worker_version_counter.most_common(10)}"
            )

    def _choose_best_transcriptions(
        self, lines: List[TranscriptionData]
    ) -> List[TranscriptionData]:
        """
        Get the best transcription based on the order of accepted worker version ids.
        :param lines:
        :return:
        """
        if not lines:
            return []

        trans_by_element = defaultdict(list)
        for line in lines:
            trans_by_element[line.element_id].append(line)

        best_transcriptions = []
        for elem, trans_list in trans_by_element.items():
            tmp_dict = {t.worker_version_id: t for t in trans_list}

            for wv in self.accepted_worker_version_ids:
                if wv in tmp_dict:
                    best_transcriptions.append(tmp_dict[wv])
                    break
            else:
                logger.info(f"No suitable trans found for {elem}")
        return best_transcriptions

    def get_transcriptions(self, page_id: str, accepted_zones):
        lines = []
        try:
            for res in self.api_client.cached_paginate(
                "ListTranscriptions", id=page_id, recursive=True
            ):
                if (
                    self.should_filter_by_worker
                    and res["worker_version_id"] not in self.accepted_worker_version_ids
                ):
                    continue
                if (
                    self.should_filter_by_class
                    and res["element"]["zone"]["id"] not in accepted_zones
                ):
                    continue
                if res["element"]["type"] != self.transcription_type:
                    continue

                text = res["text"]
                if not text or not text.strip():
                    continue

                if "zone" in res:
                    polygon = res["zone"]["polygon"]
                elif "element" in res:
                    polygon = res["element"]["zone"]["polygon"]
                else:
                    raise ValueError(f"Data problem with polygon :: {res}")

                trans_data = TranscriptionData(
                    element_id=res["element"]["id"],
                    polygon=polygon,
                    text=text,
                    trans_id=res["id"],
                    worker_version_id=res["worker_version_id"],
                )

                lines.append(trans_data)

            if self.accepted_worker_version_ids:
                # if accepted worker versions have been defined then use them
                lines = self._choose_best_transcriptions(lines)
            else:
                # if no accepted worker versions have been defined
                # then check that there aren't multiple transcriptions
                # on the same text line
                self._validate_transcriptions(page_id, lines)

            if self.should_rotate:
                classes_by_elem = self.get_children_classes(page_id)

                for trans in lines:
                    rotation_classes = [
                        c
                        for c in classes_by_elem[trans.element_id]
                        if c in ROTATION_CLASSES_TO_ANGLES
                    ]
                    if len(rotation_classes) > 0:
                        if len(rotation_classes) > 1:
                            logger.warning(
                                f"Several rotation classes = {len(rotation_classes)} - {trans.element_id}"
                            )
                        trans.rotation_class = rotation_classes[0]
                    else:
                        logger.warning(f"No rotation classes on {trans.element_id}")

            count_skipped = 0
            if self.skip_vertical_lines:
                filtered_lines = []
                for line in lines:
                    if line.is_vertical:
                        count_skipped += 1
                        continue
                    filtered_lines.append(line)

                lines = filtered_lines

            count = len(lines)

            return lines, count, count_skipped

        except ErrorResponse as e:
            logger.info(
                f"ListTranscriptions failed {e.status_code} - {e.title} - {e.content} - {page_id}"
            )
            raise e

    def get_children_classes(self, page_id):
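        # Map each text_line child element id to its non-rejected best class
        # names, e.g. (hypothetical) {"1234-abcd": ["rotate_0", "handwritten"]}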
        return {
            elem["id"]: [
                best_class["ml_class"]["name"]
                for best_class in elem["best_classes"]
                if best_class["state"] != "rejected"
            ]
            for elem in self.api_client.cached_paginate(
                "ListElementChildren",
                id=page_id,
                recursive=True,
                type=TEXT_LINE,
                with_best_classes=True,
            )
        }

    def _save_line_image(
        self, page_id, i, line_img, manifest_fp=None, trans: TranscriptionData = None
    ):
        if self.should_rotate and trans is not None and trans.rotation_class:
            rotate_angle = ROTATION_CLASSES_TO_ANGLES[trans.rotation_class]
            line_img = self.rotate_and_trim(line_img, rotate_angle)
        if self.format == "kraken":
            cv2.imwrite(f"{self.out_line_dir}/{page_id}_{i}.png", line_img)
            manifest_fp.write(f"{page_id}_{i}.png\n")
        else:
            cv2.imwrite(f"{self.out_line_img_dir}/{page_id}_{i}.jpg", line_img)

    def extract_lines(self, page_id: str, image_data: dict):
        if self.should_filter_by_class:
            accepted_zones = self.get_accepted_zones(page_id)
        else:
            accepted_zones = []
        lines, count, count_skipped = self.get_transcriptions(page_id, accepted_zones)

        if count == 0:
            self.skipped_pages_count += 1
            logger.info(f"Page {page_id} skipped, because it has no lines")
            return

        logger.debug(f"Total num of lines {count + count_skipped}")
        logger.debug(f"Num of accepted lines {count}")
        logger.debug(f"Num of skipped lines {count_skipped}")

        self.skipped_vertical_lines_count += count_skipped
        self.accepted_lines_count += count

        full_image_url = image_data["s3_url"]
        if full_image_url is None:
            full_image_url = image_data["url"] + "/full/full/0/default.jpg"

        img = self.get_image(full_image_url, page_id=page_id)

        # sort vertically then horizontally
        sorted_lines = sorted(lines, key=lambda key: (key.rect.y, key.rect.x))

        if self.should_resize_polygons:
            sorted_lines = [
                resize_transcription_data(
                    line,
                    image_data["width"],
                    image_data["height"],
                    self.scale_x,
                    self.scale_y_top,
                    self.scale_y_bottom,
                )
                for line in sorted_lines
            ]

        if self.format == "kraken":
            manifest_fp = open(f"{self.out_line_dir}/manifest.txt", "a")
            # append to file, not re-write it
        else:
            # not needed for kaldi
            manifest_fp = None

        if self.extraction_mode == Extraction.boundingRect:
            for i, trans in enumerate(sorted_lines):
                (x, y, w, h) = trans.rect
                cropped = img[y : y + h, x : x + w].copy()
                self._save_line_image(page_id, i, cropped, manifest_fp, trans)

        elif self.extraction_mode == Extraction.polygon:
            for i, trans in enumerate(sorted_lines):
                polygon_img = extract_polygon_image(
                    img, polygon=trans.polygon, rect=trans.rect
                )
                self._save_line_image(page_id, i, polygon_img, manifest_fp, trans)

        elif self.extraction_mode == Extraction.min_area_rect:
            for i, trans in enumerate(sorted_lines):
                min_rect_img = extract_min_area_rect_image(
                    img, polygon=trans.polygon, rect=trans.rect
                )

                self._save_line_image(page_id, i, min_rect_img, manifest_fp, trans)

        elif self.extraction_mode == Extraction.deskew_polygon:
            for i, trans in enumerate(sorted_lines):
                # get angle from min area rect
                rotate_angle = determine_rotate_angle(trans.polygon)

                if abs(rotate_angle) > self.max_deskew_angle:
                    logger.warning(
                        f"Deskew angle ({rotate_angle}) over the limit ({self.max_deskew_angle}), won't rotate"
                    )
                    rotate_angle = 0

                # get polygon image
                polygon_img = extract_polygon_image(
                    img, polygon=trans.polygon, rect=trans.rect
                )

                trimmed_img = self.rotate_and_trim(polygon_img, rotate_angle)

                self._save_line_image(page_id, i, trimmed_img, manifest_fp, trans)

        elif self.extraction_mode == Extraction.deskew_min_area_rect:
            for i, trans in enumerate(sorted_lines):
                # get angle from min area rect
                rotate_angle = determine_rotate_angle(trans.polygon)

                if abs(rotate_angle) > self.max_deskew_angle:
                    logger.warning(
                        f"Deskew angle ({rotate_angle}) over the limit ({self.max_deskew_angle}), won't rotate"
                    )
                    rotate_angle = 0

                min_rect_img = extract_min_area_rect_image(
                    img, polygon=trans.polygon, rect=trans.rect
                )

                trimmed_img = self.rotate_and_trim(min_rect_img, rotate_angle)

                self._save_line_image(page_id, i, trimmed_img, manifest_fp, trans)

        elif self.extraction_mode == Extraction.skew_polygon:
            for i, trans in enumerate(sorted_lines):
                rotate_angle = self.skew_angle

                # get polygon image
                polygon_img = extract_polygon_image(
                    img, polygon=trans.polygon, rect=trans.rect
                )

                trimmed_img = self.rotate_and_trim(polygon_img, rotate_angle)

                self._save_line_image(page_id, i, trimmed_img, manifest_fp, trans)

        elif self.extraction_mode == Extraction.skew_min_area_rect:
            for i, trans in enumerate(sorted_lines):
                rotate_angle = self.skew_angle

                min_rect_img = extract_min_area_rect_image(
                    img, polygon=trans.polygon, rect=trans.rect
                )

                trimmed_img = self.rotate_and_trim(min_rect_img, rotate_angle)

                self._save_line_image(page_id, i, trimmed_img, manifest_fp, trans)

        else:
            raise ValueError(f"Unsupported extraction mode: {self.extraction_mode}")

        if self.format == "kraken":
            manifest_fp.close()

        for i, trans in enumerate(sorted_lines):
            if self.format == "kraken":
                write_file(f"{self.out_line_dir}/{page_id}_{i}.gt.txt", trans.text)
            else:
                write_file(f"{self.out_line_text_dir}/{page_id}_{i}.txt", trans.text)

    def rotate_and_trim(self, img, rotate_angle):
        """
        Rotate image by given an angle and trim extra whitespace left after rotating
        """
        if self.grayscale:
            background = WHITE
        else:
            background = (WHITE, WHITE, WHITE)

        # rotate polygon image
        deskewed_img = rotate(img, rotate_angle, background)
        # trim extra whitespace left after rotating
        trimmed_img = trim(deskewed_img, background)
        trimmed_img = np.array(trimmed_img)

        return trimmed_img

    def run_pages(self, pages: list):
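        # `pages` is either a list of page ids (strings, from --pages) or a
        # list of page element dicts coming from the pagination helpers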
        if all(isinstance(n, str) for n in pages):
            for page in pages:
                elt = self.api_client.request("RetrieveElement", id=page)
                page_id = elt["id"]
                image_data = elt["zone"]["image"]
                logger.debug(f"Page {page_id}")
                self.extract_lines(page_id, image_data)
        else:
            for page in tqdm.tqdm(pages):
                page_id = page["id"]
                image_data = page["zone"]["image"]
                logger.debug(f"Page {page_id}")
                self.extract_lines(page_id, image_data)

    def run_volumes(self, volume_ids: list):
        for volume_id in tqdm.tqdm(volume_ids):
            logger.info(f"Volume {volume_id}")
            pages = list(
                self.api_client.cached_paginate(
                    "ListElementChildren", id=volume_id, recursive=True, type="page"
                )
            )
            self.run_pages(pages)

    def run_folders(self, element_ids: list, volume_type: str):
        for elem_id in tqdm.tqdm(element_ids):
            logger.info(f"Folder {elem_id}")
            vol_ids = [
                vol["id"]
                for vol in self.api_client.cached_paginate(
                    "ListElementChildren", id=elem_id, recursive=True, type=volume_type
                )
            ]
            self.run_volumes(vol_ids)

    def run_corpora(self, corpus_ids: list, volume_type: str):
        for corpus_id in tqdm.tqdm(corpus_ids):
            logger.info(f"Corpus {corpus_id}")
            vol_ids = [
                vol["id"]
                for vol in self.api_client.cached_paginate(
                    "ListElements", corpus=corpus_id, type=volume_type
                )
            ]
            self.run_volumes(vol_ids)


class Split(Enum):
    Train = 0
    Test = 1
    Validation = 2

    @property
    def short_name(self) -> str:
        if self == self.Validation:
            return "val"
        return self.name.lower()


class KaldiPartitionSplitter:
    def __init__(
        self,
        out_dir_base="/tmp/kaldi_data",
        split_train_ratio=0.8,
        split_test_ratio=0.1,
        use_existing_split=False,
    ):
        self.out_dir_base = out_dir_base
        self.split_train_ratio = split_train_ratio
        self.split_test_ratio = split_test_ratio
        self.split_val_ratio = 1 - self.split_train_ratio - self.split_test_ratio
        self.use_existing_split = use_existing_split

    def page_level_split(self, line_ids: list) -> dict:
        # strip the trailing line index to recover page ids; sort for
        # determinism, since `set` iteration order is arbitrary
        page_ids = sorted({"_".join(line_id.split("_")[:-1]) for line_id in line_ids})
        random.Random(SEED).shuffle(page_ids)
        page_count = len(page_ids)
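        # e.g. (hypothetical ids) line "page1_3" belongs to page "page1"; with
        # 100 pages and ratios 0.8/0.1 this yields 80 train, 10 test and 10 val pages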

        train_page_ids = page_ids[: round(page_count * self.split_train_ratio)]
        page_ids = page_ids[round(page_count * self.split_train_ratio) :]

        test_page_ids = page_ids[: round(page_count * self.split_test_ratio)]
        page_ids = page_ids[round(page_count * self.split_test_ratio) :]

        val_page_ids = page_ids

        page_dict = {page_id: Split.Train.value for page_id in train_page_ids}
        page_dict.update({page_id: Split.Test.value for page_id in test_page_ids})
        page_dict.update({page_id: Split.Validation.value for page_id in val_page_ids})
        return page_dict

    def existing_split(self, line_ids: list) -> list:
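        # Line ids are expected to look like "<split>/<page>_<n>", e.g. the
        # (hypothetical) id "train/page1_3" goes into the train bucket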
        split_dict = {split.short_name: [] for split in Split}
        for line_id in line_ids:
            split_prefix = line_id.split("/")[0].lower()
            split_dict[split_prefix].append(line_id)
        splits = [split_dict[split.short_name] for split in Split]
        return splits

    def create_partitions(self):
        logger.info("Creating partitions")
        lines_path = Path(f"{self.out_dir_base}/Lines")
        line_ids = [
            str(file.relative_to(lines_path).with_suffix(""))
            for file in sorted(lines_path.glob("**/*.jpg"))
        ]

        if self.use_existing_split:
            logger.info("Using existing split")
            datasets = self.existing_split(line_ids)
        else:
            page_dict = self.page_level_split(line_ids)
            datasets = [[] for _ in range(3)]
            for line_id in line_ids:
                page_id = "_".join(line_id.split("_")[:-1])
                split_id = page_dict[page_id]
                datasets[split_id].append(line_id)

        partitions_dir = os.path.join(self.out_dir_base, "Partitions")
        os.makedirs(partitions_dir, exist_ok=True)
        for i, dataset in enumerate(datasets):
            if not dataset:
                logger.info(f"Partition {Split(i).name} is empty! Skipping..")
                continue
            file_name = f"{partitions_dir}/{Split(i).name}Lines.lst"
            write_file(file_name, "\n".join(dataset) + "\n")


def create_parser():
    user_name = getpass.getuser()

    parser = argparse.ArgumentParser(
        description="Script to generate Kaldi or kraken training data from annotations from Arkindex",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "-f",
        "--format",
        type=str,
        choices=["kaldi", "kraken"],
        help="Format of the generated dataset: kaldi or kraken",
    )
    parser.add_argument(
        "-n",
        "--dataset_name",
        type=str,
        help="Name of the dataset being created for kaldi or kraken "
        "(useful for distinguishing different datasets when in Lines or Transcriptions directory)",
    )
    parser.add_argument(
        "-o", "--out_dir", type=str, required=True, help="output directory"
    )
    parser.add_argument(
        "--train_ratio",
        type=float,
        default=0.8,
        help="Ratio of pages to be used in train (between 0 and 1)",
    )
    parser.add_argument(
        "--test_ratio",
        type=float,
        default=0.1,
        help="Ratio of pages to be used in test (between 0 and 1 - train_ratio)",
    )
    parser.add_argument(
        "--use_existing_split",
        action="store_true",
        default=False,
        help="Use an existing split instead of random. "
        "Expecting line_ids to be prefixed with (train, val and test)",
    )
    parser.add_argument(
        "--split_only",
        "--no_download",
        action="store_true",
        default=False,
        help="Create the split from already downloaded lines, don't download the lines",
    )
    parser.add_argument(
        "--no_split",
        action="store_true",
        default=False,
        help="No splitting of the data to be done just download the line in the right format",
    )

    parser.add_argument(
        "-e",
        "--extraction_mode",
        type=lambda x: Extraction[x],
        default=Extraction.boundingRect,
        help=f"Mode for extracting the line images: {[e.name for e in Extraction]}",
    )

    parser.add_argument(
        "--max_deskew_angle",
        type=int,
        default=45,
        help="Maximum angle by which deskewing is allowed to rotate the line image. "
        "If the angle determined by deskew tool is bigger than max "
        "then that line won't be deskewed/rotated.",
    )

    parser.add_argument(
        "--skew_angle",
        type=int,
        default=0,
        help="Angle by which the line image will be rotated. Useful for data augmnetation"
        " - creating skewed text lines for a more robust model."
        " Only used with skew_* extraction modes.",
    )

    parser.add_argument(
        "--should_rotate",
        action="store_true",
        default=False,
        help="Use text line rotation class to rotate lines if possible",
    )

    parser.add_argument(
        "--transcription_type",
        type=str,
        default="text_line",
        help="Which type of elements' transcriptions to use? (page, paragraph, text_line, etc)",
    )

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "--grayscale",
        action="store_true",
        dest="grayscale",
        help="Convert images to grayscale (By default grayscale)",
    )
    group.add_argument(
        "--color", action="store_false", dest="grayscale", help="Use color images"
    )
    group.set_defaults(grayscale=True)

    parser.add_argument(
        "--corpora",
        nargs="*",
        help="List of corpus ids to be used, separated by spaces",
    )
    parser.add_argument(
        "--folders",
        type=str,
        nargs="*",
        help="List of folder ids to be used, separated by spaces. "
        "Elements of `volume_type` will be searched recursively in these folders",
    )
    parser.add_argument(
        "--volumes",
        nargs="*",
        help="List of volume ids to be used, separated by spaces",
    )
    parser.add_argument(
        "--pages", nargs="*", help="List of page ids to be used, separated by spaces"
    )
    parser.add_argument(
        "-v",
        "--volume_type",
        type=str,
        default="volume",
        help="Volumes (1 level above page) may have a different name on corpora",
    )
    parser.add_argument(
        "--skip_vertical_lines",
        action="store_true",
        default=False,
        help="skips vertical lines when downloading",
    )

    parser.add_argument(
        "--accepted_classes",
        nargs="*",
        help="List of accepted ml_class names. Filter lines by class of related elements",
    )

    parser.add_argument(
        "--accepted_worker_version_ids",
        nargs="*",
        default=[],
        help="List of accepted worker version ids. Filter transcriptions by worker version ids."
        "The order is important - only up to one transcription will be chosen per element (text_line)"
        " and the worker version order defines the precedence. If there exists a transcription for"
        " the first worker version then it will be chosen, otherwise will continue on to the next"
        " worker version."
        " Use `--accepted_worker_version_ids manual` to get only manual transcriptions",
    )

    parser.add_argument(
        "--filter_printed",
        action="store_true",
        help="Filter lines annotated as printed",
    )

    parser.add_argument(
        "--scale_x",
        type=float,
        default=None,
        help="Ratio of how much to scale the polygon horizontally (1.0 means no rescaling)",
    )
    parser.add_argument(
        "--scale_y_top",
        type=float,
        default=None,
        help="Ratio of how much to scale the polygon vertically on the top (1.0 means no rescaling)",
    )

    parser.add_argument(
        "--scale_y_bottom",
        type=float,
        default=None,
        help="Ratio of how much to scale the polygon vertically on the bottom (1.0 means no rescaling)",
    )

    parser.add_argument(
        "--cache_dir",
        type=Path,
        default=Path(f"/tmp/kaldi_data_generator_{user_name}/cache/"),
        help="Cache dir where to save the full size downloaded images. Change it to force redownload.",
    )

    return parser


def main():
    parser = create_parser()
    args = parser.parse_args()

    if not args.dataset_name and not args.split_only and args.format != "kraken":
        parser.error("--dataset_name must be specified (unless --split_only is used)")

    logger.info(f"ARGS {args} \n")

    api_client = create_api_client(args.cache_dir)

    if not args.split_only:
        data_generator = HTRDataGenerator(
            format=args.format,
            dataset_name=args.dataset_name,
            out_dir_base=args.out_dir,
            grayscale=args.grayscale,
            extraction=args.extraction_mode,
            accepted_classes=args.accepted_classes,
            filter_printed=args.filter_printed,
            skip_vertical_lines=args.skip_vertical_lines,
            transcription_type=args.transcription_type,
            accepted_worker_version_ids=args.accepted_worker_version_ids,
            max_deskew_angle=args.max_deskew_angle,
            skew_angle=args.skew_angle,
            should_rotate=args.should_rotate,
            scale_x=args.scale_x,
            scale_y_top=args.scale_y_top,
            scale_y_bottom=args.scale_y_bottom,
            cache_dir=args.cache_dir,
            api_client=api_client,
        )

        # extract all the lines and transcriptions
        if args.pages:
            data_generator.run_pages(args.pages)
        if args.volumes:
            data_generator.run_volumes(args.volumes)
        if args.folders:
            data_generator.run_folders(args.folders, args.volume_type)
        if args.corpora:
            data_generator.run_corpora(args.corpora, args.volume_type)
        if data_generator.skipped_vertical_lines_count > 0:
            logger.info(
                f"Number of skipped pages: {data_generator.skipped_pages_count}"
            )
            _skipped_vertical_count = data_generator.skipped_vertical_lines_count
            _total_count = _skipped_vertical_count + data_generator.accepted_lines_count
            skipped_ratio = _skipped_vertical_count / _total_count * 100

            logger.info(
                f"Skipped {_skipped_vertical_count} vertical lines ({round(skipped_ratio, 2)}%)"
            )
    else:
        logger.info("Creating a split from already downloaded files")
    if not args.no_split:
        kaldi_partitioner = KaldiPartitionSplitter(
            out_dir_base=args.out_dir,
            split_train_ratio=args.train_ratio,
            split_test_ratio=args.test_ratio,
            use_existing_split=args.use_existing_split,
        )

        # create partitions from all the extracted data
        kaldi_partitioner.create_partitions()
    else:
        logger.info("No split to be done")

    logger.info("DONE")


if __name__ == "__main__":
    main()