#!/usr/bin/env python3
# -*- coding: utf-8 -*-
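"""Generate Kaldi training data (line images and transcriptions) from Arkindex annotations."""
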
import argparse
import logging
import os
import random
from enum import Enum
from io import BytesIO
from pathlib import Path
from typing import Tuple

import cv2
import numpy as np
import requests
import tqdm
from PIL import Image
from apistar.exceptions import ErrorResponse
from arkindex import ArkindexClient, options_from_env

# (x, y, width, height), as returned by cv2.boundingRect
Box = Tuple[int, int, int, int]

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s/%(name)s: %(message)s"
)
logger = logging.getLogger(os.path.basename(__file__))

api_client = ArkindexClient(**options_from_env())


def download_image(url):
    '''
    Download an image and open it with Pillow
    '''
    assert url.startswith('http'), 'Image URL must be HTTP(S)'
    # Download the image
    # Cannot use stream=True as urllib's responses do not support the seek(int) method,
    # which is explicitly required by Image.open on file-like objects
    resp = requests.get(url)
    resp.raise_for_status()

    # Preprocess the image and prepare it for classification
    image = Image.open(BytesIO(resp.content))
    logger.debug(f'Downloaded image {url} - size={image.size[0]}x{image.size[1]}')

    return image


def write_file(file_name, content):
    with open(file_name, 'w') as f:
        f.write(content)


class Extraction(Enum):
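    """Line image extraction modes.

    boundingRect crops the axis-aligned bounding box of the line polygon;
    polygon additionally masks out pixels outside the polygon with white
    (see KaldiDataGenerator.extract_polygon_image).
    """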
    boundingRect: int = 0
    polygon: int = 1


class KaldiDataGenerator:

    def __init__(self, dataset_name='foo', out_dir_base='/tmp/kaldi_data', grayscale=True,
                 extraction=Extraction.boundingRect, accepted_slugs=None):
        self.out_dir_base = out_dir_base
        self.dataset_name = dataset_name
        self.grayscale = grayscale
        self.extraction_mode = extraction
        self.accepted_slugs = accepted_slugs
        self.should_filter_by_slug = bool(self.accepted_slugs)

        self.out_line_text_dir = os.path.join(self.out_dir_base, 'Transcriptions', self.dataset_name)
        os.makedirs(self.out_line_text_dir, exist_ok=True)
        self.out_line_img_dir = os.path.join(self.out_dir_base, 'Lines', self.dataset_name)
        os.makedirs(self.out_line_img_dir, exist_ok=True)

    def get_image(self, image_url: str, page_id: str) -> 'np.ndarray':
        out_full_img_dir = os.path.join(self.out_dir_base, 'full', page_id)
        os.makedirs(out_full_img_dir, exist_ok=True)
        out_full_img_path = os.path.join(out_full_img_dir, 'full.jpg')
        if self.grayscale:
            download_image(image_url).convert('L').save(
                out_full_img_path, format='jpeg')
            img = cv2.imread(out_full_img_path, cv2.IMREAD_GRAYSCALE)
        else:
            download_image(image_url).save(
                out_full_img_path, format='jpeg')
            img = cv2.imread(out_full_img_path)
        return img

    def extract_lines(self, page_id: str):
        count = 0
        lines = []
        try:
            for res in api_client.paginate('ListTranscriptions', id=page_id, type='line'):
                if self.should_filter_by_slug and res['source']['slug'] not in self.accepted_slugs:
                    continue
                text = res['text']
                if not text or not text.strip():
                    continue
                polygon = np.asarray(res['zone']['polygon']).clip(0)
                [x, y, w, h] = cv2.boundingRect(polygon)
                lines.append(((x, y, w, h), polygon, text))
                count += 1
        except ErrorResponse as e:
            logger.info(f"ListTranscriptions failed {e.status_code} - {e.title} - {e.content} - {page_id}")
Martin's avatar
Martin committed
            raise e
        logger.debug(f"Num of lines {count}")
        if count == 0:
            logger.info(f"Page {page_id} skipped because it has no lines")
            return

        # Every line zone references the same page image; take the image from
        # the last transcription to build the full-page image URL.
        full_image_url = res['zone']['image']['s3_url']
        if full_image_url is None:
            full_image_url = res['zone']['image']['url'] + '/full/full/0/default.jpg'

        img = self.get_image(full_image_url, page_id=page_id)

        # sort vertically then horizontally
        sorted_lines = sorted(lines, key=lambda key: (key[0][1], key[0][0]))

        if self.extraction_mode == Extraction.boundingRect:
            for i, ((x, y, w, h), polygon, text) in enumerate(sorted_lines):
                cropped = img[y:y + h, x:x + w].copy()
                cv2.imwrite(f'{self.out_line_img_dir}/{page_id}_{i}.jpg', cropped)

        elif self.extraction_mode == Extraction.polygon:
            for i, (rect, polygon, text) in enumerate(sorted_lines):
                polygon_img = self.extract_polygon_image(img, polygon=polygon, rect=rect)
                cv2.imwrite(f'{self.out_line_img_dir}/{page_id}_{i}.jpg', polygon_img)

        else:
            raise ValueError("Unsupported extraction mode")

        for i, (rect, polygon, text) in enumerate(sorted_lines):
            write_file(f"{self.out_line_text_dir}/{page_id}_{i}.txt", text)

    @staticmethod
    def extract_polygon_image(img: 'np.ndarray', polygon: 'np.ndarray', rect: Box) -> 'np.ndarray':
        pts = polygon.copy()
        [x, y, w, h] = rect
        # Crop the line's bounding rectangle out of the page image
        cropped = img[y:y + h, x:x + w].copy()
        # Shift the polygon into the cropped image's coordinate frame
        pts = pts - pts.min(axis=0)
        # Build a filled mask of the polygon
        mask = np.zeros(cropped.shape[:2], np.uint8)
        cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA)
        # Keep the pixels inside the polygon...
        dst = cv2.bitwise_and(cropped, cropped, mask=mask)
        # ...and fill everything outside it with white
        bg = np.ones_like(cropped, np.uint8) * 255
        cv2.bitwise_not(bg, bg, mask=mask)
        return bg + dst

    def run_pages(self, page_ids: list):
        for page_id in tqdm.tqdm(page_ids):
            logger.debug(f"Page {page_id}")
            self.extract_lines(page_id)

    def run_volumes(self, volume_ids: list):
        for volume_id in tqdm.tqdm(volume_ids):
            logger.info(f"Volume {volume_id}")
            page_ids = [page['id'] for page in api_client.paginate('ListElementChildren', id=volume_id)]
            self.run_pages(page_ids)

    def run_corpora(self, corpus_ids: list):
        for corpus_id in tqdm.tqdm(corpus_ids):
            logger.info(f"Corpus {corpus_id}")
            vol_ids = [vol['id'] for vol in api_client.paginate('ListElements', corpus=corpus_id, type='volume')]
            self.run_volumes(vol_ids)



class Split(Enum):
    Train: int = 0
    Test: int = 1
    Validation: int = 2


class KaldiPartitionSplitter:

    def __init__(self, out_dir_base='/tmp/kaldi_data', split_train_ratio=0.8, split_test_ratio=0.1):
        self.out_dir_base = out_dir_base
        self.split_train_ratio = split_train_ratio
        self.split_test_ratio = split_test_ratio
        self.split_val_ratio = 1 - self.split_train_ratio - self.split_test_ratio

    def page_level_split(self, line_ids: list) -> dict:
        # Line ids look like '{page_id}_{line_number}'; strip the trailing
        # number to recover the page id, so that all lines of a page land in
        # the same partition.
        page_ids = list({'_'.join(line_id.split('_')[:-1]) for line_id in line_ids})
        random.shuffle(page_ids)
        page_count = len(page_ids)

        train_page_ids = page_ids[:round(page_count * self.split_train_ratio)]
        page_ids = page_ids[round(page_count * self.split_train_ratio):]

        test_page_ids = page_ids[:round(page_count * self.split_test_ratio)]
        page_ids = page_ids[round(page_count * self.split_test_ratio):]

        val_page_ids = page_ids

        page_dict = {page_id: Split.Train.value for page_id in train_page_ids}
        page_dict.update({page_id: Split.Test.value for page_id in test_page_ids})
        page_dict.update({page_id: Split.Validation.value for page_id in val_page_ids})
        return page_dict

    def create_partitions(self):
        logger.info("Creating partitions")
        lines_path = Path(f'{self.out_dir_base}/Lines')
        line_ids = [str(file.relative_to(lines_path).with_suffix('')) for file in lines_path.glob('**/*.jpg')]

        page_dict = self.page_level_split(line_ids)
        datasets = [[] for _ in range(3)]
        for line_id in line_ids:
            page_id = '_'.join(line_id.split('_')[:-1])
            split_id = page_dict[page_id]
            datasets[split_id].append(line_id)

        partitions_dir = os.path.join(self.out_dir_base, 'Partitions')
        os.makedirs(partitions_dir, exist_ok=True)
        for i, dataset in enumerate(datasets):
            if not dataset:
                logger.info(f"Partition {Split(i).name} is empty! Skipping...")
                continue
            file_name = f"{partitions_dir}/{Split(i).name}Lines.lst"
            write_file(file_name, '\n'.join(dataset) + '\n')


def create_parser():
    parser = argparse.ArgumentParser(
        description="Script to generate Kaldi training data from annotations from Arkindex",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-n', '--dataset_name', type=str, required=True,
                        help='Name of the dataset being created for Kaldi '
                             '(used to distinguish datasets inside the Lines and Transcriptions directories)')
    parser.add_argument('-o', '--out_dir', type=str, required=True,
                        help='Output directory')
    parser.add_argument('--train_ratio', type=float, default=0.8,
                        help='Ratio of pages to be used for training (between 0 and 1)')
    parser.add_argument('--test_ratio', type=float, default=0.1,
                        help='Ratio of pages to be used for testing (between 0 and 1 - train_ratio)')
    parser.add_argument('-e', '--extraction_mode', type=lambda x: Extraction[x], default=Extraction.boundingRect,
                        help=f'Mode for extracting the line images: {[e.name for e in Extraction]}')

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--grayscale', action='store_true',
                       help='Convert images to grayscale')
    # dest='grayscale' so that --color actually overrides the default
    group.add_argument('--color', dest='grayscale', action='store_false',
                       help='Use color images')
    parser.set_defaults(grayscale=True)

    parser.add_argument('--corpora', nargs='*',
                        help='List of corpus ids to be used, separated by spaces')
    parser.add_argument('--volumes', nargs='*',
                        help='List of volume ids to be used, separated by spaces')
    parser.add_argument('--pages', nargs='*',
                        help='List of page ids to be used, separated by spaces')

    parser.add_argument('--accepted_slugs', nargs='*',
                        help='List of accepted slugs for downloading transcriptions')

    return parser


def main():
    args = create_parser().parse_args()

    logger.info(f"ARGS {args} \n")

    kaldi_data_generator = KaldiDataGenerator(dataset_name=args.dataset_name,
                                              out_dir_base=args.out_dir,
                                              grayscale=args.grayscale,
                                              extraction=args.extraction_mode,
                                              accepted_slugs=args.accepted_slugs)

    kaldi_partitioner = KaldiPartitionSplitter(out_dir_base=args.out_dir,
                                               split_train_ratio=args.train_ratio,
                                               split_test_ratio=args.test_ratio)
    # extract all the lines and transcriptions
    if args.pages:
        kaldi_data_generator.run_pages(args.pages)
    if args.volumes:
        kaldi_data_generator.run_volumes(args.volumes)
    if args.corpora:
        kaldi_data_generator.run_corpora(args.corpora)

    # create partitions from all the extracted data
    kaldi_partitioner.create_partitions()

    logger.info("DONE")


if __name__ == '__main__':
    main()
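
# Example invocation (the volume ID is a placeholder; Arkindex API credentials
# are read from the environment via options_from_env):
#
#   python kaldi_data_generator.py --dataset_name my_dataset \
#       --out_dir /tmp/kaldi_data --volumes <volume-uuid> \
#       --extraction_mode polygon --grayscale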