#!/usr/bin/env python3
# -*- coding: utf-8 -*-
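"""
Generate Kaldi or kraken HTR training data (line images and transcriptions)
from Arkindex annotations, then optionally split it into train / test /
validation partitions.

Example invocation (ids are hypothetical placeholders):
    kaldi_data_generator.py -f kaldi -n my_dataset -o /tmp/kaldi_data \
        --volumes <volume_id> --accepted_worker_version_ids manual
"""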
import argparse
import logging
import os
import random
from enum import Enum
from io import BytesIO
from pathlib import Path
from typing import Tuple

import cv2
import numpy as np
import requests
import tqdm
from apistar.exceptions import ErrorResponse
from arkindex import ArkindexClient, options_from_env
from PIL import Image

Box = Tuple[int, int, int, int]

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s %(levelname)s/%(name)s: %(message)s"
)
logger = logging.getLogger(os.path.basename(__file__))

api_client = ArkindexClient(**options_from_env())

SEED = 42
random.seed(SEED)
MANUAL = "manual"
TEXT_LINE = "text_line"


def download_image(url):
    """
    Download an image and open it with Pillow
    """
    assert url.startswith("http"), "Image URL must be HTTP(S)"
    # Download the image
    # Cannot use stream=True as urllib's responses do not support the seek(int) method,
    # which is explicitly required by Image.open on file-like objects
    resp = requests.get(url)
    resp.raise_for_status()

    # Preprocess the image and prepare it for classification
    image = Image.open(BytesIO(resp.content))
    logger.debug(
        "Downloaded image {} - size={}x{}".format(url, image.size[0], image.size[1])
    )

    return image


def write_file(file_name, content):
    with open(file_name, "w") as f:
        f.write(content)


class Extraction(Enum):
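    """
    How a line image is cropped out of the page image:
    - boundingRect: crop the axis-aligned bounding box of the line polygon
    - polygon: additionally mask pixels outside the polygon (white background)
    """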
    boundingRect: int = 0
    polygon: int = 1


class HTRDataGenerator:
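    """
    Downloads line images and transcriptions from Arkindex and writes them in
    the layout expected by the target module: kraken gets a flat directory
    with a manifest.txt and .gt.txt files, Kaldi gets separate Lines/ and
    Transcriptions/ directories.
    """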
    def __init__(
        self,
        module,
        dataset_name="foo",
        out_dir_base="/tmp/kaldi_data",
        grayscale=True,
        extraction=Extraction.boundingRect,
        accepted_slugs=None,
        accepted_classes=None,
        filter_printed=False,
        skip_vertical_lines=False,
        accepted_worker_version_ids=None,
        transcription_type=TEXT_LINE,
    ):
        self.module = module
        self.out_dir_base = out_dir_base
        self.dataset_name = dataset_name
        self.grayscale = grayscale
        self.extraction_mode = extraction
        self.accepted_slugs = accepted_slugs
        self.should_filter_by_slug = bool(self.accepted_slugs)
        self.accepted_classes = accepted_classes
        self.should_filter_by_class = bool(self.accepted_classes)
        self.accepted_worker_version_ids = accepted_worker_version_ids
        self.should_filter_by_worker = bool(self.accepted_worker_version_ids)
        self.should_filter_printed = filter_printed
        self.transcription_type = transcription_type
        self.skip_vertical_lines = skip_vertical_lines
        self.skipped_pages_count = 0
        self.skipped_vertical_lines_count = 0
        self.accepted_lines_count = 0
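        # The "manual" sentinel is mapped to None so that it matches
        # transcriptions without a worker version, i.e. manual ones.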
        if MANUAL in (self.accepted_worker_version_ids or []):
            self.accepted_worker_version_ids[
                self.accepted_worker_version_ids.index(MANUAL)
            ] = None
        if self.module == "kraken":
            self.out_line_dir = out_dir_base
            os.makedirs(self.out_line_dir, exist_ok=True)
        else:
            self.out_line_text_dir = os.path.join(
                self.out_dir_base, "Transcriptions", self.dataset_name
            )
            os.makedirs(self.out_line_text_dir, exist_ok=True)
            self.out_line_img_dir = os.path.join(
                self.out_dir_base, "Lines", self.dataset_name
            )
            os.makedirs(self.out_line_img_dir, exist_ok=True)
    def get_image(self, image_url: str, page_id: str) -> "np.ndarray":
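        """
        Download the full page image, cache it on disk as JPEG and reload it
        with OpenCV, in grayscale or in color depending on the configuration.
        """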
        out_full_img_dir = os.path.join(self.out_dir_base, "full", page_id)
        os.makedirs(out_full_img_dir, exist_ok=True)
        out_full_img_path = os.path.join(out_full_img_dir, "full.jpg")
        if self.grayscale:
            download_image(image_url).convert("L").save(
                out_full_img_path, format="jpeg"
            )
            img = cv2.imread(out_full_img_path, cv2.IMREAD_GRAYSCALE)
        else:
            download_image(image_url).save(out_full_img_path, format="jpeg")
            img = cv2.imread(out_full_img_path)
        return img
    def get_accepted_zones(self, page_id: str):
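        """
        Return the zone ids of the page's children whose best class is one of
        the accepted classes, optionally keeping handwritten elements only.
        """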
        try:
            accepted_zones = []
            for elt in api_client.paginate(
                "ListElementChildren", id=page_id, with_best_classes=True
            ):
                printed = True
                for classification in elt["best_classes"]:
                    if classification["ml_class"]["name"] == "handwritten":
                        printed = False
                for classification in elt["best_classes"]:
                    if classification["ml_class"]["name"] in self.accepted_classes:
                        if self.should_filter_printed:
                            if not printed:
                                accepted_zones.append(elt["zone"]["id"])
                        else:
                            accepted_zones.append(elt["zone"]["id"])
            logger.info(
                "Number of accepted zones for page {}: {}".format(
                    page_id, len(accepted_zones)
                )
            )
            return accepted_zones
        except ErrorResponse as e:
            logger.info(
                f"ListElementChildren failed {e.status_code} - {e.title} - {e.content} - {page_id}"
            )
            raise e
    def get_transcriptions(self, page_id: str, accepted_zones):
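        """
        Fetch the page's transcriptions and keep those matching the slug,
        worker version, class and element type filters.
        Returns (lines, accepted_count, skipped_count) where each line is
        ((x, y, w, h), polygon, text).
        """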
        count = 0
        count_skipped = 0
        lines = []
        try:
            for res in api_client.paginate(
                "ListTranscriptions", id=page_id, recursive=True
            ):
                if (
                    self.should_filter_by_slug
                    and res["source"]["slug"] not in self.accepted_slugs
                ):
                    continue
                if (
                    self.should_filter_by_worker
                    and res["worker_version_id"] not in self.accepted_worker_version_ids
                ):
                    continue
                if (
                    self.should_filter_by_class
                    and res["element"]["zone"]["id"] not in accepted_zones
                ):
                    continue
                if res["element"]["type"] != self.transcription_type:
                    continue

                text = res["text"]
                if not text or not text.strip():
                    continue
                if "zone" in res:
                    polygon = res["zone"]["polygon"]
                elif "element" in res:
                    polygon = res["element"]["zone"]["polygon"]
                else:
                    raise ValueError(f"Data problem with polygon :: {res}")

                polygon = np.asarray(polygon).clip(0)
                [x, y, w, h] = cv2.boundingRect(polygon)
                if self.skip_vertical_lines:
                    if h > w:
                        count_skipped += 1
                        continue
                lines.append(((x, y, w, h), polygon, text))
                count += 1
            return (lines, count, count_skipped)

        except ErrorResponse as e:
            logger.info(
                f"ListTranscriptions failed {e.status_code} - {e.title} - {e.content} - {page_id}"
            )
            raise e

    def extract_lines(self, page_id: str, image_data: dict):
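        """
        Download the page image and write one cropped image (and its text)
        per accepted transcription line.
        """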
        if self.should_filter_by_class:
            accepted_zones = self.get_accepted_zones(page_id)
        else:
            accepted_zones = []
        lines, count, count_skipped = self.get_transcriptions(page_id, accepted_zones)

        if count == 0:
            self.skipped_pages_count += 1
            logger.info(f"Page {page_id} skipped, because it has no lines")
            return

        logger.debug(f"Total num of lines {count + count_skipped}")
        logger.debug(f"Num of accepted lines {count}")
        logger.debug(f"Num of skipped lines {count_skipped}")

        self.skipped_vertical_lines_count += count_skipped
        self.accepted_lines_count += count

        full_image_url = image_data["s3_url"]
        if full_image_url is None:
            full_image_url = image_data["url"] + "/full/full/0/default.jpg"

        img = self.get_image(full_image_url, page_id=page_id)

        # sort vertically then horizontally
        sorted_lines = sorted(lines, key=lambda key: (key[0][1], key[0][0]))

        if self.module == "kraken":
            f = open(f"{self.out_line_dir}/manifest.txt", "a")
            # append to file, not re-write it

        if self.extraction_mode == Extraction.boundingRect:
            for i, ((x, y, w, h), polygon, text) in enumerate(sorted_lines):
                cropped = img[y : y + h, x : x + w].copy()
                if self.module == "kraken":
                    cv2.imwrite(f"{self.out_line_dir}/{page_id}_{i}.png", cropped)
                    f.write(f"{page_id}_{i}.png\n")
                else:
                    cv2.imwrite(f"{self.out_line_img_dir}/{page_id}_{i}.jpg", cropped)

        elif self.extraction_mode == Extraction.polygon:
            for i, (rect, polygon, text) in enumerate(sorted_lines):
                polygon_img = self.extract_polygon_image(
                    img, polygon=polygon, rect=rect
                )
                if self.module == "kraken":
                    cv2.imwrite(f"{self.out_line_dir}/{page_id}_{i}.png", polygon_img)
                    f.write(f"{page_id}_{i}.png\n")
                else:
                    cv2.imwrite(
                        f"{self.out_line_img_dir}/{page_id}_{i}.jpg", polygon_img
                    )
        else:
            raise ValueError("Unsupported extraction mode")
        if self.module == "kraken":
            f.close()
        for i, (rect, polygon, text) in enumerate(sorted_lines):
            if self.module == "kraken":
                write_file(f"{self.out_line_dir}/{page_id}_{i}.gt.txt", text)
            else:
                write_file(f"{self.out_line_text_dir}/{page_id}_{i}.txt", text)

    @staticmethod
    def extract_polygon_image(
        img: "np.ndarray", polygon: "np.ndarray", rect: Box
    ) -> "np.ndarray":
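        """
        Crop the bounding rect, draw the polygon as a mask, keep the pixels
        inside it with bitwise_and, and paste the result on a white background.
        """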
        pts = polygon.copy()
        [x, y, w, h] = rect
        cropped = img[y : y + h, x : x + w].copy()
        pts = pts - pts.min(axis=0)
        mask = np.zeros(cropped.shape[:2], np.uint8)
        cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA)
        dst = cv2.bitwise_and(cropped, cropped, mask=mask)
        bg = np.ones_like(cropped, np.uint8) * 255
        cv2.bitwise_not(bg, bg, mask=mask)
        dst2 = bg + dst
        return dst2

    def run_pages(self, pages: list):
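        # Pages may be given either as ids (each one is then retrieved from
        # the API) or as already-fetched element dicts.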
        if all(isinstance(n, str) for n in pages):
            for page in pages:
                elt = api_client.request("RetrieveElement", id=page)
                page_id = elt["id"]
                image_data = elt["zone"]["image"]
                logger.debug(f"Page {page_id}")
                self.extract_lines(page_id, image_data)
        else:
            for page in tqdm.tqdm(pages):
                page_id = page["id"]
                image_data = page["zone"]["image"]
                logger.debug(f"Page {page_id}")
                self.extract_lines(page_id, image_data)
    def run_volumes(self, volume_ids: list):
        for volume_id in tqdm.tqdm(volume_ids):
            logger.info(f"Volume {volume_id}")
            pages = [
                page
                for page in api_client.paginate(
                    "ListElementChildren", id=volume_id, recursive=True, type="page"
                )
            ]
            self.run_pages(pages)
    def run_folders(self, element_ids: list, volume_type: str):
        for elem_id in tqdm.tqdm(element_ids):
            logger.info(f"Folder {elem_id}")
            vol_ids = [
                page["id"]
                for page in api_client.paginate(
                    "ListElementChildren", id=elem_id, recursive=True, type=volume_type
                )
            ]
            self.run_volumes(vol_ids)

    def run_corpora(self, corpus_ids: list, volume_type: str):
        for corpus_id in tqdm.tqdm(corpus_ids):
            logger.info(f"Corpus {corpus_id}")
            vol_ids = [
                vol["id"]
                for vol in api_client.paginate(
                    "ListElements", corpus=corpus_id, type=volume_type
                )
            ]
            self.run_volumes(vol_ids)

class Split(Enum):
    Train: int = 0
    Test: int = 1
    Validation: int = 2

    @property
    def short_name(self) -> str:
        if self == self.Validation:
            return "val"
        return self.name.lower()


class KaldiPartitionSplitter:
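    """
    Split the extracted line ids into Train/Test/Validation partition lists,
    either randomly at page level or by reusing an existing split prefix.
    """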
    def __init__(
        self,
        out_dir_base="/tmp/kaldi_data",
        split_train_ratio=0.8,
        split_test_ratio=0.1,
        use_existing_split=False,
    ):
        self.out_dir_base = out_dir_base
        self.split_train_ratio = split_train_ratio
        self.split_test_ratio = split_test_ratio
        self.split_val_ratio = 1 - self.split_train_ratio - self.split_test_ratio
        self.use_existing_split = use_existing_split
    def page_level_split(self, line_ids: list) -> dict:
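        """
        Deterministically shuffle the page ids (fixed SEED) and map each page
        id to a Split value according to the configured ratios.
        """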
        # need to sort again, because `set` will lose the order
        page_ids = sorted({"_".join(line_id.split("_")[:-1]) for line_id in line_ids})
        random.Random(SEED).shuffle(page_ids)
        page_count = len(page_ids)

        train_page_ids = page_ids[: round(page_count * self.split_train_ratio)]
        page_ids = page_ids[round(page_count * self.split_train_ratio) :]
        test_page_ids = page_ids[: round(page_count * self.split_test_ratio)]
        page_ids = page_ids[round(page_count * self.split_test_ratio) :]

        val_page_ids = page_ids

        page_dict = {page_id: Split.Train.value for page_id in train_page_ids}
        page_dict.update({page_id: Split.Test.value for page_id in test_page_ids})
        page_dict.update({page_id: Split.Validation.value for page_id in val_page_ids})
        return page_dict

    def existing_split(self, line_ids: list) -> list:
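        """
        Group line ids into [train, test, val] lists based on the
        "train/", "test/" or "val/" prefix of each id.
        """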
        split_dict = {split.short_name: [] for split in Split}
        for line_id in line_ids:
            split_prefix = line_id.split("/")[0].lower()
            split_dict[split_prefix].append(line_id)
        splits = [split_dict[split.short_name] for split in Split]
        return splits

    def create_partitions(self):
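        """
        List the extracted line images, split them into datasets and write
        one <Split>Lines.lst file per non-empty partition.
        """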
        logger.info("Creating partitions")
        lines_path = Path(f"{self.out_dir_base}/Lines")
        line_ids = [
            str(file.relative_to(lines_path).with_suffix(""))
            for file in sorted(lines_path.glob("**/*.jpg"))
        ]

        if self.use_existing_split:
            logger.info("Using existing split")
            datasets = self.existing_split(line_ids)
        else:
            page_dict = self.page_level_split(line_ids)
            datasets = [[] for _ in range(3)]
            for line_id in line_ids:
                page_id = "_".join(line_id.split("_")[:-1])
                split_id = page_dict[page_id]
                datasets[split_id].append(line_id)
        partitions_dir = os.path.join(self.out_dir_base, "Partitions")
        os.makedirs(partitions_dir, exist_ok=True)
        for i, dataset in enumerate(datasets):
            if not dataset:
                logger.info(f"Partition {Split(i).name} is empty! Skipping...")
                continue
            file_name = f"{partitions_dir}/{Split(i).name}Lines.lst"
            write_file(file_name, "\n".join(dataset) + "\n")


def create_parser():
    parser = argparse.ArgumentParser(
        description="Script to generate Kaldi or kraken training data from annotations from Arkindex",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "-f",
        "--format",
        type=str,
        help="is the data generated going to be used for kaldi or kraken",
    )
    parser.add_argument(
        "-n",
        "--dataset_name",
        type=str,
        help="Name of the dataset being created for kaldi or kraken "
        "(useful for distinguishing different datasets when in Lines or Transcriptions directory)",
    )
    parser.add_argument(
        "-o", "--out_dir", type=str, required=True, help="output directory"
    )
    parser.add_argument(
        "--train_ratio",
        type=float,
        default=0.8,
        help="Ratio of pages to be used in train (between 0 and 1)",
    )
    parser.add_argument(
        "--test_ratio",
        type=float,
        default=0.1,
        help="Ratio of pages to be used in test (between 0 and 1 - train_ratio)",
    )
    parser.add_argument(
        "--use_existing_split",
        action="store_true",
        default=False,
        help="Use an existing split instead of random. "
        "Expecting line_ids to be prefixed with (train, val and test)",
    )
    parser.add_argument(
        "--split_only",
        "--no_download",
        action="store_true",
        default=False,
        help="Create the split from already downloaded lines, don't download the lines",
    )
    parser.add_argument(
        "--no_split",
        action="store_true",
        default=False,
        help="No splitting of the data to be done just download the line in the right format",
    )

    parser.add_argument(
        "-e",
        "--extraction_mode",
        type=lambda x: Extraction[x],
        default=Extraction.boundingRect,
        help=f"Mode for extracting the line images: {[e.name for e in Extraction]}",
    )
    parser.add_argument(
        "--transcription_type",
        type=str,
        default="text_line",
        help="Which type of elements' transcriptions to use? (page, paragraph, text_line, etc)",
    )

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "--grayscale", action="store_true", help="Convert images to grayscale"
    )
    group.add_argument(
        "--color", dest="grayscale", action="store_false", help="Use color images"
    )
    parser.set_defaults(grayscale=True)

    parser.add_argument(
        "--corpora",
        nargs="*",
        help="List of corpus ids to be used, separated by spaces",
    )
    parser.add_argument(
        "--folders",
        type=str,
        nargs="*",
        help="List of folder ids to be used, separated by spaces. "
        "Elements of `volume_type` will be searched recursively in these folders",
    )
    parser.add_argument(
        "--volumes",
        nargs="*",
        help="List of volume ids to be used, separated by spaces",
    )
    parser.add_argument(
        "--pages", nargs="*", help="List of page ids to be used, separated by spaces"
    )
    parser.add_argument(
        "-v",
        "--volume_type",
        type=str,
        default="volume",
        help="Volumes (1 level above page) may have a different name on corpora",
    )
    parser.add_argument(
        "--skip_vertical_lines",
        action="store_true",
        default=False,
        help="skips vertical lines when downloading",
    )

    parser.add_argument(
        "--accepted_slugs",
        nargs="*",
        help="List of accepted slugs for downloading transcriptions",
    )

    parser.add_argument(
        "--accepted_classes",
        nargs="*",
        help="List of accepted ml_class names. Filter lines by class of related elements",
    )

    parser.add_argument(
        "--accepted_worker_version_ids",
        nargs="*",
        default=[],
        help="List of accepted worker version ids. Filter lines by worker version ids of related elements"
        "Use `--accepted_worker_version_ids manual` to get only manual transcriptions",
    )

    parser.add_argument(
        "--filter_printed",
        action="store_true",
        help="Filter lines annotated as printed",
    )
    return parser


def main():
    parser = create_parser()
    args = parser.parse_args()

    if not args.dataset_name and not args.split_only and args.format != "kraken":
        parser.error("--dataset_name must be specified (unless --split_only)")

    logger.info(f"ARGS {args} \n")

    if not args.split_only:
        data_generator = HTRDataGenerator(
            module=args.format,
            dataset_name=args.dataset_name,
            out_dir_base=args.out_dir,
            grayscale=args.grayscale,
            extraction=args.extraction_mode,
            accepted_slugs=args.accepted_slugs,
            accepted_classes=args.accepted_classes,
            filter_printed=args.filter_printed,
            skip_vertical_lines=args.skip_vertical_lines,
            transcription_type=args.transcription_type,
            accepted_worker_version_ids=args.accepted_worker_version_ids,
        )
        if args.pages:
            data_generator.run_pages(args.pages)
        if args.volumes:
            data_generator.run_volumes(args.volumes)
        if args.folders:
            data_generator.run_folders(args.folders, args.volume_type)
        if args.corpora:
            data_generator.run_corpora(args.corpora, args.volume_type)
        if data_generator.skipped_vertical_lines_count > 0:
            logger.info(
                f"Number of skipped pages: {data_generator.skipped_pages_count}"
            )
            skipped_ratio = data_generator.skipped_vertical_lines_count / (
                data_generator.skipped_vertical_lines_count
                + data_generator.accepted_lines_count
            )
            logger.info(
                f"Skipped {data_generator.skipped_vertical_lines_count} vertical lines ({skipped_ratio}/1.0)"
            )
    else:
        logger.info("Creating a split from already downloaded files")
    if not args.no_split:
        kaldi_partitioner = KaldiPartitionSplitter(
            out_dir_base=args.out_dir,
            split_train_ratio=args.train_ratio,
            split_test_ratio=args.test_ratio,
            use_existing_split=args.use_existing_split,
        )
        # create partitions from all the extracted data
        kaldi_partitioner.create_partitions()
    else:
        logger.info("No split to be done")

    logger.info("DONE")


if __name__ == "__main__":
    main()