#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import random
from io import BytesIO
from pathlib import Path

import cv2
import numpy as np
import requests
from PIL import Image

from apistar.exceptions import ErrorResponse
from arkindex import ArkindexClient, options_from_env

api_client = ArkindexClient(**options_from_env())
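# Note: the client above is configured entirely from the environment;
# options_from_env typically picks up the endpoint and credentials from
# ARKINDEX_API_URL / ARKINDEX_API_TOKEN, so no secrets live in this script.
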
def download_image(url):
    '''
    Download an image and open it with Pillow.
    '''
    assert url.startswith('http'), 'Image URL must be HTTP(S)'
    # Download the image.
    # Cannot use stream=True as urllib's responses do not support the seek(int) method,
    # which is explicitly required by Image.open on file-like objects.
    resp = requests.get(url)
    resp.raise_for_status()
    # Open the downloaded bytes as a Pillow image
    image = Image.open(BytesIO(resp.content))
    print('Downloaded image {} - size={}x{}'.format(url, image.size[0], image.size[1]))
    return image

def write_file(file_name, content):
    with open(file_name, 'w') as f:
        f.write(content)

def get_image(image_url, page_id, grayscale, out_dir):
    '''
    Download the full page image, save it to disk, and reload it
    with OpenCV, optionally as grayscale.
    '''
    # page_id is now an explicit parameter instead of a leaked global
    out_full_img_dir = os.path.join(out_dir, 'full', page_id)
    os.makedirs(out_full_img_dir, exist_ok=True)
    out_full_img_path = os.path.join(out_full_img_dir, 'full.jpg')
    if grayscale:
        download_image(image_url).convert('L').save(out_full_img_path, format='jpeg')
        img = cv2.imread(out_full_img_path, cv2.IMREAD_GRAYSCALE)
    else:
        download_image(image_url).save(out_full_img_path, format='jpeg')
        img = cv2.imread(out_full_img_path)
    return img

def extract_lines(page_id, grayscale=True, out_dir='/tmp'):
    '''
    Fetch the line transcriptions of a page, crop each line out of the
    full page image, and save the crops and texts to disk.
    '''
    line_bounding_rects = []
    line_polygons = []
    line_transcriptions = []
    res = None
    try:
        for res in api_client.paginate('ListTranscriptions', id=page_id, type='line'):
            text = res['text']
            if not text or not text.strip():
                continue
            line_transcriptions.append(text)
            polygon = res['zone']['polygon']
            line_polygons.append(polygon)
            # boundingRect expects int32 (or float32) points
            [x, y, w, h] = cv2.boundingRect(np.asarray(polygon, dtype=np.int32))
            line_bounding_rects.append([x, y, w, h])
    except ErrorResponse as e:
        print('ListTranscriptions failed', e.status_code, e.title, e.content, page_id)
        raise
    if res is None:
        print(f'No line transcriptions found for page {page_id}')
        return
    # Every transcription of the page points to the same full page image
    full_image_url = res['zone']['image']['s3_url']
    img = get_image(full_image_url, page_id, grayscale=grayscale, out_dir=out_dir)
    # Crops are named '<page_id>_<i>.jpg' so the page ID can be recovered
    # later when splitting at the page level
    out_line_img_dir = os.path.join(out_dir, 'Lines')
    os.makedirs(out_line_img_dir, exist_ok=True)
    for i, [x, y, w, h] in enumerate(line_bounding_rects):
        cropped = img[y:y + h, x:x + w].copy()
        cv2.imwrite(os.path.join(out_line_img_dir, f'{page_id}_{i}.jpg'), cropped)
    out_line_text_dir = os.path.join(out_dir, 'Transcriptions')
    os.makedirs(out_line_text_dir, exist_ok=True)
    for i, text in enumerate(line_transcriptions):
        write_file(os.path.join(out_line_text_dir, f'{page_id}_{i}.txt'), text)

split_train_ratio = 0.8
split_test_ratio = 0.1
split_val_ratio = 1 - split_train_ratio - split_test_ratio  # remainder goes to validation

TRAIN, TEST, VAL = 0, 1, 2
out_file_dict = {TRAIN: 'Train', TEST: 'Test', VAL: 'Validation'}

def page_level_split(line_ids):
    '''
    Split pages (not individual lines) into train/test/validation,
    so that lines from the same page never cross partitions.
    '''
    # Line IDs follow '<page_id>_<line_index>'; drop the index to get the page ID
    # (page IDs are UUIDs, so they never contain underscores themselves)
    page_ids = list({'_'.join(line_id.split('_')[:-1]) for line_id in line_ids})
    random.shuffle(page_ids)
    page_count = len(page_ids)
    train_page_ids = page_ids[:round(page_count * split_train_ratio)]
    page_ids = page_ids[round(page_count * split_train_ratio):]
    test_page_ids = page_ids[:round(page_count * split_test_ratio)]
    page_ids = page_ids[round(page_count * split_test_ratio):]
    val_page_ids = page_ids
    page_dict = {page_id: TRAIN for page_id in train_page_ids}
    page_dict.update({page_id: TEST for page_id in test_page_ids})
    page_dict.update({page_id: VAL for page_id in val_page_ids})
    return (train_page_ids, val_page_ids, test_page_ids), page_dict

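# A minimal sketch of the naming assumption above (hypothetical line IDs):
# ['pageA_0', 'pageA_1', 'pageB_0'] reduces to the page set {'pageA', 'pageB'},
# and page_dict then sends every line of a page to a single partition,
# e.g. {'pageA': TRAIN, 'pageB': VAL}.
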
def create_partitions(line_ids, out_dir):
    (train_page_ids, val_page_ids, test_page_ids), page_dict = page_level_split(line_ids)
    datasets = [[] for _ in range(3)]
    for line_id in line_ids:
        # Recover the page ID from '<page_id>_<line_index>'
        page_id = '_'.join(line_id.split('_')[:-1])
        split_id = page_dict[page_id]
        datasets[split_id].append(line_id)
    partitions_dir = os.path.join(out_dir, 'Partitions')
    os.makedirs(partitions_dir, exist_ok=True)
    for i, dataset in enumerate(datasets):
        file_name = f"{partitions_dir}/{out_file_dict[i]}Lines.lst"
        with open(file_name, 'w') as f:
            f.write('\n'.join(dataset) + '\n')

out_dir_base = '/tmp/foo2'
page_ids = ['bf23cc96-f6b2-4182-923e-6c163db37eba',
            '7c51e648-370e-43b7-9340-3b1a17c13828',
            '56521074-59f4-4173-bfc1-4b1384ff8139']
for page_id in page_ids:
    extract_lines(page_id, out_dir=out_dir_base)

# Collect line IDs ('<page_id>_<line_index>') from the crops written above
lines_path = Path(f'{out_dir_base}/Lines')
line_ids = [str(file.relative_to(lines_path).with_suffix(''))
            for file in lines_path.glob('**/*.jpg')]
create_partitions(line_ids, out_dir=out_dir_base)
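
# Expected layout under out_dir_base after a run (paths derived from the code above):
#   full/<page_id>/full.jpg            full page images
#   Lines/<page_id>_<i>.jpg            cropped line images
#   Transcriptions/<page_id>_<i>.txt   line transcriptions
#   Partitions/TrainLines.lst          line IDs per partition
#   Partitions/TestLines.lst
#   Partitions/ValidationLines.lst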