diff --git a/dan/encoder.py b/dan/encoder.py
index 6057caddb1e96bcb6a6d0c1e2e38cb637e2757a0..0e38f6330b2035b75f1ca4ed87ff446ccb3c0015 100644
--- a/dan/encoder.py
+++ b/dan/encoder.py
@@ -92,9 +92,7 @@ class FCN_Encoder(Module):
         self.init_blocks = ModuleList(
             [
-                ConvBlock(
-                    params["input_channels"], 16, stride=(1, 1), dropout=self.dropout
-                ),
+                ConvBlock(3, 16, stride=(1, 1), dropout=self.dropout),
                 ConvBlock(16, 32, stride=(2, 2), dropout=self.dropout),
                 ConvBlock(32, 64, stride=(2, 2), dropout=self.dropout),
                 ConvBlock(64, 128, stride=(2, 2), dropout=self.dropout),
                 ConvBlock(128, 128, stride=(2, 1), dropout=self.dropout),
diff --git a/dan/ocr/document/train.py b/dan/ocr/document/train.py
index ad4643b92506fe65befca106430d36ea1bcfa146..97cae76166000d011095d5b50d58c38263b272b0 100644
--- a/dan/ocr/document/train.py
+++ b/dan/ocr/document/train.py
@@ -138,7 +138,6 @@ def get_config():
             },
             "transfered_charset": True,  # Transfer learning of the decision layer based on charset of the line HTR model
             "additional_tokens": 1,  # for decision layer = [<eot>, ], only for transferred charset
-            "input_channels": 3,  # number of channels of input image
             "dropout": 0.5,  # dropout rate for encoder
             "enc_dim": 256,  # dimension of extracted features
             "nb_layers": 5,  # encoder
diff --git a/docs/get_started/training.md b/docs/get_started/training.md
index 598eba396917d267d5847ebee28857490146334d..71cdd974da602e45d95e16213bbc39d15eb801ac 100644
--- a/docs/get_started/training.md
+++ b/docs/get_started/training.md
@@ -53,7 +53,6 @@ parameters:
   std: [float, float, float]
   max_char_prediction: int
   encoder:
-    input_channels: int
     dropout: float
   decoder:
     enc_dim: int
diff --git a/docs/usage/train/parameters.md b/docs/usage/train/parameters.md
index d659a1e0d4ce6b25bed4f6b128bf1e808e7f15cb..5dd48610b9098ed44aa9fceaef82186482c68c41 100644
--- a/docs/usage/train/parameters.md
+++ b/docs/usage/train/parameters.md
@@ -132,7 +132,6 @@ For a detailed description of all augmentation transforms, see the [dedicated pa
 | `model_params.transfer_learning.decoder` | Model to load for the decoder [state_dict_name, checkpoint_path, learnable, strict]. | `list` | `["encoder", "pretrained_models/dan_rimes_page.pt", True, False]` |
 | `model_params.transfered_charset` | Transfer learning of the decision layer based on charset of the model to transfer. | `bool` | `True` |
 | `model_params.additional_tokens` | For decision layer = [<eot>, ], only for transferred charset. | `int` | `1` |
-| `model_params.input_channels` | Number of channels of input image. | `int` | `3` |
 | `model_params.dropout` | Dropout probability in the encoder. | `float` | `0.5` |
 | `model_params.enc_dim` | Dimension of features extracted by the encoder. | `int` | `256` |
 | `model_params.nb_layers` | Number of layers in the encoder. | `int` | `5` |
diff --git a/tests/conftest.py b/tests/conftest.py
index e804cb36734d4d7eb8af90ea2057b8d14e308ff0..4a365be7b90aefcab64fcf500da664ccea4c0b20 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -86,7 +86,6 @@ def training_config():
         "transfer_learning": None,
         "transfered_charset": True,  # Transfer learning of the decision layer based on charset of the line HTR model
         "additional_tokens": 1,  # for decision layer = [<eot>, ], only for transferred charset
-        "input_channels": 3,  # number of channels of input image
         "dropout": 0.5,  # dropout rate for encoder
         "enc_dim": 256,  # dimension of extracted features
         "nb_layers": 5,  # encoder
diff --git a/tests/data/prediction/parameters.yml b/tests/data/prediction/parameters.yml
index c299406c9861aa7509012fb43dd4c693cbccff00..f07baaecb8a44ed36eb56fced287d5a76ad138cf 100644
--- a/tests/data/prediction/parameters.yml
+++ b/tests/data/prediction/parameters.yml
@@ -5,7 +5,6 @@ parameters:
   std: [34.084189571536385, 34.084189571536385, 34.084189571536385]
   max_char_prediction: 200
   encoder:
-    input_channels: 3
     dropout: 0.5
   decoder:
     enc_dim: 256
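
Net effect of the patch: FCN_Encoder no longer reads `input_channels` from the model
config and always builds its first ConvBlock for 3-channel (RGB) input. Below is a
minimal sketch of that assumption; `ConvBlock` here is a hypothetical stand-in (the
real one in dan/encoder.py also applies normalization, activation, and dropout, which
are omitted), kept only to show the channel handling.

    import torch
    from torch import nn

    class ConvBlock(nn.Module):
        # Simplified stand-in: only the in/out channel wiring matters here.
        def __init__(self, in_c, out_c, stride=(1, 1), dropout=0.5):
            super().__init__()
            self.conv = nn.Conv2d(in_c, out_c, kernel_size=3, stride=stride, padding=1)

        def forward(self, x):
            return self.conv(x)

    # First block is now hardcoded to 3 input channels, as in the patch.
    first_block = ConvBlock(3, 16, stride=(1, 1), dropout=0.5)

    rgb = torch.randn(1, 3, 128, 1024)   # (batch, channels, height, width)
    print(first_block(rgb).shape)        # torch.Size([1, 16, 128, 1024])

    gray = torch.randn(1, 1, 128, 1024)
    try:
        first_block(gray)                # single-channel input now raises
    except RuntimeError as err:
        print("grayscale input rejected:", err)

In other words, grayscale pages would now need to be converted to 3 channels
(e.g. by repeating the single channel) before being fed to the encoder.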