diff --git a/dan/manager/ocr.py b/dan/manager/ocr.py
index 30ca845ff3aa57b3a2a0b21957f4e54f2f53d3bb..533d074cc72495b10f38a72b492a00ca4f7a1d06 100644
--- a/dan/manager/ocr.py
+++ b/dan/manager/ocr.py
@@ -44,11 +44,6 @@ class OCRDatasetManager(DatasetManager):
         for key in datasets.keys():
             with open(os.path.join(datasets[key], "charset.pkl"), "rb") as f:
                 charset = charset.union(set(pickle.load(f)))
-        if (
-            "\n" in charset
-            and "remove_linebreaks" in self.params["config"]["constraints"]
-        ):
-            charset.remove("\n")
         if "" in charset:
             charset.remove("")
         return sorted(list(charset))
@@ -167,13 +162,9 @@ class OCRDataset(GenericDataset):
 
     def convert_sample_labels(self, sample):
         label = sample["label"]
-        if "remove_linebreaks" in self.params["config"]["constraints"]:
-            full_label = label.replace("\n", " ").replace("  ", " ")
-        else:
-            full_label = label
-        sample["label"] = full_label
-        sample["token_label"] = token_to_ind(self.charset, full_label)
+        sample["label"] = label
+        sample["token_label"] = token_to_ind(self.charset, label)
         sample["token_label"].append(self.tokens["end"])
         sample["label_len"] = len(sample["token_label"])
         sample["token_label"].insert(0, self.tokens["start"])
 
diff --git a/dan/ocr/document/train.py b/dan/ocr/document/train.py
index 7d755e7112ca319e81eaf87cd8da9b562477e852..d0a5fc479236911debe3ac5cfc3cf57d120e5d33 100644
--- a/dan/ocr/document/train.py
+++ b/dan/ocr/document/train.py
@@ -109,7 +109,6 @@ def get_config():
             "height_divisor": 32,  # Image height will be divided by 32
             "padding_value": 0,  # Image padding value
             "padding_token": None,  # Label padding value
-            "constraints": [],
             "preprocessings": [
                 {
                     "type": "to_RGB",
diff --git a/docs/usage/train/parameters.md b/docs/usage/train/parameters.md
index 9674f06ca764c6dcf8e37553ba189fbc3d4cabcc..8d97ae637cb1a47bd467f07f1f6b4ae743d9f714 100644
--- a/docs/usage/train/parameters.md
+++ b/docs/usage/train/parameters.md
@@ -18,7 +18,6 @@ All hyperparameters are specified and editable in the training scripts (meaning
 | `dataset_params.config.height_divisor`  | Factor to reduce the height of the feature vector before feeding the decoder.   | `int`  | `32`                                           |
 | `dataset_params.config.padding_value`   | Image padding value.                                                             | `int`  | `0`                                            |
 | `dataset_params.config.padding_token`   | Transcription padding value.                                                     | `int`  | `None`                                         |
-| `dataset_params.config.constraints`     | Whether to add end-of-transcription and start-of-transcription tokens in labels. | `list` | `[]`                                           |
 | `dataset_params.config.preprocessings`  | List of pre-processing functions to apply to input images.                      | `list` | (see [dedicated section](#data-preprocessing)) |
 | `dataset_params.config.augmentation`    | Configuration for data augmentation.                                             | `dict` | (see [dedicated section](#data-augmentation))  |
 
diff --git a/tests/conftest.py b/tests/conftest.py
index fb83a1867028d2f6428079549a7e9dd5f311075a..e660869c0f5c0fb80b79a2d8f133ee246e66da52 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -72,7 +72,6 @@ def training_config():
         "height_divisor": 32,  # Image height will be divided by 32
         "padding_value": 0,  # Image padding value
         "padding_token": None,  # Label padding value
-        "constraints": [],
         "preprocessings": [
             {
                 "type": "to_RGB",