From 4fe23d7a816e1ab4ed3482c67b33327a90eaab88 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Tue, 24 Dec 2024 14:32:13 +0100
Subject: [PATCH 01/17] Antora setup

---
 .gitlab-ci.yml      | 28 ++++++++++++++++++++++++++++
 Makefile            |  5 ++++-
 antora-playbook.yml | 12 ++++++++++++
 antora/antora.yml   |  5 +++++
 4 files changed, 49 insertions(+), 1 deletion(-)
 create mode 100644 antora-playbook.yml
 create mode 100644 antora/antora.yml

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8b4db1cb..372535c2 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -191,3 +191,31 @@ docs-stop-surge:
 
   script:
     - surge teardown ${CI_ENVIRONMENT_URL}
+
+antora-generate:
+  stage: build
+  image: registry.gitlab.teklia.com/internal/mkdocs-to-antora:latest
+
+  script:
+    - teklia-antora
+
+  artifacts:
+    paths:
+      - antora
+
+antora-build:
+  stage: deploy
+  image: node:20
+
+  dependencies:
+    - antora-generate
+
+  before_script:
+    - npm install
+
+  script:
+    - npx antora antora-playbook.yml
+
+  artifacts:
+    paths:
+      - build
diff --git a/Makefile b/Makefile
index f9322fd1..3dcf7922 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,7 @@
-.PHONY: release
+.PHONY: release antora
+
+antora:
+	npx antora antora-playbook.yml
 
 release:
 	# Grep the version from pyproject.toml, squeeze multiple spaces, delete double and single quotes, get 3rd val.
diff --git a/antora-playbook.yml b/antora-playbook.yml
new file mode 100644
index 00000000..59fc7bd1
--- /dev/null
+++ b/antora-playbook.yml
@@ -0,0 +1,12 @@
+site:
+  title: Teklia Documentation
+  start_page: pylaia::index.adoc
+content:
+  sources:
+  - url: .
+    branches: HEAD
+    start_path: antora
+ui:
+  bundle:
+    url: https://gitlab.com/antora/antora-ui-default/-/jobs/artifacts/HEAD/raw/build/ui-bundle.zip?job=bundle-stable
+    snapshot: true
diff --git a/antora/antora.yml b/antora/antora.yml
new file mode 100644
index 00000000..ba9d333b
--- /dev/null
+++ b/antora/antora.yml
@@ -0,0 +1,5 @@
+name: pylaia
+version: ~
+title: PyLaia
+nav:
+- modules/ROOT/nav.adoc
-- 
GitLab


From 5a90437664bc8aeb59b4fddf22e50dae8ef2e51a Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Sat, 28 Dec 2024 11:01:01 +0100
Subject: [PATCH 02/17] Publish on another branch

---
 .gitlab-ci.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 372535c2..74fad59a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -198,6 +198,7 @@ antora-generate:
 
   script:
     - teklia-antora
+    - teklia-push-antora-ci
 
   artifacts:
     paths:
-- 
GitLab


From 06a8eef5eca90afd306220cbdf97a1e548140642 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <bastien@nextcairn.com>
Date: Mon, 6 Jan 2025 16:55:20 +0100
Subject: [PATCH 03/17] Install antora

---
 .gitlab-ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 74fad59a..71bcf1bf 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -212,7 +212,7 @@ antora-build:
     - antora-generate
 
   before_script:
-    - npm install
+    - npm install antora
 
   script:
     - npx antora antora-playbook.yml
-- 
GitLab


From 0cba3135a9dfc2f797a85d491572e401b96203a5 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Wed, 5 Feb 2025 16:14:53 +0100
Subject: [PATCH 04/17] Promote to normal build

---
 .gitlab-ci.yml | 35 +++--------------------------------
 1 file changed, 3 insertions(+), 32 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 71bcf1bf..66bd0671 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -112,16 +112,16 @@ bump-python-deps:
 
 # Make sure docs still build correctly
 .docs:
-  image: python:3.10
+  image: node:20
   artifacts:
     paths:
       - public
 
   before_script:
-    - pip install -e .[docs]
+    - npm install antora
 
   script:
-    - mkdocs build --strict --verbose
+    - npx antora antora-playbook.yml
 
 docs-build:
   extends: .docs
@@ -191,32 +191,3 @@ docs-stop-surge:
 
   script:
     - surge teardown ${CI_ENVIRONMENT_URL}
-
-antora-generate:
-  stage: build
-  image: registry.gitlab.teklia.com/internal/mkdocs-to-antora:latest
-
-  script:
-    - teklia-antora
-    - teklia-push-antora-ci
-
-  artifacts:
-    paths:
-      - antora
-
-antora-build:
-  stage: deploy
-  image: node:20
-
-  dependencies:
-    - antora-generate
-
-  before_script:
-    - npm install antora
-
-  script:
-    - npx antora antora-playbook.yml
-
-  artifacts:
-    paths:
-      - build
-- 
GitLab


From 4fe20b3d565aa7cf0f82f52acc4ad94d012ba4fd Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Wed, 5 Feb 2025 16:32:57 +0100
Subject: [PATCH 05/17] Base conversion

---
 .gitignore                                    |   4 +
 antora-playbook.yml                           |   4 +
 antora/modules/ROOT/nav.adoc                  |  15 +
 .../ROOT/pages/get_started/development.adoc   |  49 ++
 .../modules/ROOT/pages/get_started/index.adoc |  38 ++
 antora/modules/ROOT/pages/index.adoc          |  26 +
 antora/modules/ROOT/pages/original_paper.adoc |  21 +
 antora/modules/ROOT/pages/releases.adoc       | 250 +++++++++
 antora/modules/ROOT/pages/usage/index.adoc    |  22 +
 .../pages/usage/initialization/index.adoc     | 260 +++++++++
 .../pages/usage/language_models/index.adoc    | 249 ++++++++
 .../ROOT/pages/usage/netout/index.adoc        | 236 ++++++++
 .../ROOT/pages/usage/prediction/index.adoc    | 530 ++++++++++++++++++
 .../ROOT/pages/usage/training/index.adoc      | 411 ++++++++++++++
 antora/ui/partials/header-content.hbs         |  66 +++
 package.json                                  |   5 +
 16 files changed, 2186 insertions(+)
 create mode 100644 antora/modules/ROOT/nav.adoc
 create mode 100644 antora/modules/ROOT/pages/get_started/development.adoc
 create mode 100644 antora/modules/ROOT/pages/get_started/index.adoc
 create mode 100644 antora/modules/ROOT/pages/index.adoc
 create mode 100644 antora/modules/ROOT/pages/original_paper.adoc
 create mode 100644 antora/modules/ROOT/pages/releases.adoc
 create mode 100644 antora/modules/ROOT/pages/usage/index.adoc
 create mode 100644 antora/modules/ROOT/pages/usage/initialization/index.adoc
 create mode 100644 antora/modules/ROOT/pages/usage/language_models/index.adoc
 create mode 100644 antora/modules/ROOT/pages/usage/netout/index.adoc
 create mode 100644 antora/modules/ROOT/pages/usage/prediction/index.adoc
 create mode 100644 antora/modules/ROOT/pages/usage/training/index.adoc
 create mode 100644 antora/ui/partials/header-content.hbs
 create mode 100644 package.json

diff --git a/.gitignore b/.gitignore
index c5908ea7..6bf199e2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -90,3 +90,7 @@ test-resources/
 benchmarks/basic
 benchmarks/distributed
 benchmarks/half
+
+# JS
+node_modules/
+package-lock.json
diff --git a/antora-playbook.yml b/antora-playbook.yml
index 59fc7bd1..02694ac8 100644
--- a/antora-playbook.yml
+++ b/antora-playbook.yml
@@ -10,3 +10,7 @@ ui:
   bundle:
     url: https://gitlab.com/antora/antora-ui-default/-/jobs/artifacts/HEAD/raw/build/ui-bundle.zip?job=bundle-stable
     snapshot: true
+  supplemental_files: ./antora/ui
+
+output:
+  dir: ./public
diff --git a/antora/modules/ROOT/nav.adoc b/antora/modules/ROOT/nav.adoc
new file mode 100644
index 00000000..09db71be
--- /dev/null
+++ b/antora/modules/ROOT/nav.adoc
@@ -0,0 +1,15 @@
+* xref:original_paper.adoc[Original paper]
+* Get started
+** xref:get_started/index.adoc[]
+** xref:get_started/development.adoc[Development]
+* Usage
+** xref:usage/index.adoc[]
+** Dataset
+*** xref:usage/datasets/index.adoc[]
+*** xref:usage/datasets/format.adoc[Dataset formatting]
+** xref:usage/initialization/index.adoc[Model initialization]
+** xref:usage/training/index.adoc[Training]
+** xref:usage/prediction/index.adoc[Prediction]
+** xref:usage/netout/index.adoc[Netout]
+** xref:usage/language_models/index.adoc[Explicit language modeling]
+* xref:releases.adoc[Releases]
diff --git a/antora/modules/ROOT/pages/get_started/development.adoc b/antora/modules/ROOT/pages/get_started/development.adoc
new file mode 100644
index 00000000..61ff2e38
--- /dev/null
+++ b/antora/modules/ROOT/pages/get_started/development.adoc
@@ -0,0 +1,49 @@
+[#development]
+= Development
+
+PyLaia uses different tools during its development.
+
+[#linter]
+== Linter
+
+Code syntax is analyzed before submitting the code.
+
+To run the linter tools suite you may use https://pre-commit.com[pre-commit].
+
+[,shell]
+----
+pip install pre-commit
+pre-commit run -a
+----
+
+[#tests]
+== Tests
+
+[#unit-tests]
+=== Unit tests
+
+Tests are executed using https://tox.wiki/en/latest/[tox].
+
+[,shell]
+----
+pip install .[test]
+tox
+----
+
+[#documentation]
+== Documentation
+
+This documentation was generated using https://mkdocs.org/[MkDocs] and https://mkdocstrings.github.io/[mkdocstrings].
+
+[#setup]
+=== Setup
+
+Add the `docs` extra when installing `pylaia`:
+
+[,shell]
+----
+# In a clone of the Git repository
+pip install .[docs]
+----
+
+Build the documentation using `mkdocs serve -v`. You can then write in https://www.markdownguide.org/[Markdown] in the relevant `docs/*.md` files, and see live output on http://localhost:8000.
diff --git a/antora/modules/ROOT/pages/get_started/index.adoc b/antora/modules/ROOT/pages/get_started/index.adoc
new file mode 100644
index 00000000..55e7ffea
--- /dev/null
+++ b/antora/modules/ROOT/pages/get_started/index.adoc
@@ -0,0 +1,38 @@
+[#installation]
+= Installation
+
+To use PyLaia in your own environment, you can install it from PyPI or from source.
+
+[#from-pypi]
+== From PyPI
+
+To install PyLaia from https://pypi.org/project/pylaia/[PyPI], use this command:
+
+[,shell]
+----
+pip install pylaia
+----
+
+[#from-source]
+== From source
+
+To install PyLaia manually, you need to first clone via:
+
+[,shell]
+----
+git clone git@gitlab.teklia.com:atr/pylaia.git
+----
+
+Then you can install it via pip:
+
+[,shell]
+----
+pip install .
+----
+
+'''
+
+Get started with:
+
+* xref:./development.adoc[Development]
+* xref:usage/index.adoc[Usage]
diff --git a/antora/modules/ROOT/pages/index.adoc b/antora/modules/ROOT/pages/index.adoc
new file mode 100644
index 00000000..12269ef8
--- /dev/null
+++ b/antora/modules/ROOT/pages/index.adoc
@@ -0,0 +1,26 @@
+[#pylaia]
+= PyLaia
+
+[#what-is-pylaia]
+== What is PyLaia?
+
+PyLaia is a toolkit for Automatic Text Recognition (ATR) and Keyword Spotting (KWS).
+
+PyLaia is flexible, open-source, device-agnostic, and can be used to express a wide variety of experiments, including (but not limited to) training and inference over Convolutional and Recurrent based deep Neural Network models.
+The software is extensible and easily configurable and provides a rich set of functional layers with a particular focus on ATR.
+
+[#history]
+== History
+
+PyLaia is the successor of https://github.com/jpuigcerver/Laia[Laia]. It was developed by 3 members (https://github.com/jpuigcerver[@jpuigcerver], https://github.com/mauvilsa[@mauvilsa], https://github.com/dmartinalbo[@dmartinalbo]) of the Pattern Recognition and Human Language Technology (PRHLT) research center in 2016.
+
+The toolkit was originally developed using Torch. When Torch's development was discontinued in 2017, it became clear that building PyLaia as a second-generation system using PyTorch as its foundation was a logical step. PyLaia was written in 2018 by https://github.com/jpuigcerver[@jpuigcerver] as a Ph.D. thesis experiment and by https://github.com/carmocca[@carmocca] as an undergraduate final project.
+
+Since 2022, three members of https://teklia.com/[TEKLIA] (https://gitlab.teklia.com/babadie[@babadie], https://gitlab.teklia.com/yschneider[@yschneider], https://gitlab.teklia.com/starride[@starride]) maintain and improve the toolkit.
+
+[#get-started]
+== Get started
+
+Click xref:./original_paper.adoc[here] to learn more about the original paper.
+
+xref:./get_started/index.adoc[Get started with PyLaia] now!
diff --git a/antora/modules/ROOT/pages/original_paper.adoc b/antora/modules/ROOT/pages/original_paper.adoc
new file mode 100644
index 00000000..516802f7
--- /dev/null
+++ b/antora/modules/ROOT/pages/original_paper.adoc
@@ -0,0 +1,21 @@
+[#original-paper]
+= Original paper
+
+The original PyLaia model was presented in the paper entitled: https://ieeexplore.ieee.org/document/8269951[_Are Multidimensional Recurrent Layers Really Necessary for Handwritten Text Recognition?_ from Joan Puigcerver, published in the 14th IAPR International Conference on Document Analysis and Recognition (ICDAR 2017)].
+
+The full text is available on this http://www.jpuigcerver.net/pubs/jpuigcerver_icdar2017.pdf[page].
+
+Recommended citation:
+
+[,bibtex]
+----
+@INPROCEEDINGS{PyLaia,
+  author={Puigcerver, Joan},
+  booktitle={2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)},
+  title={Are Multidimensional Recurrent Layers Really Necessary for Handwritten Text Recognition?},
+  year={2017},
+  volume={01},
+  number={},
+  pages={67-72},
+  doi={10.1109/ICDAR.2017.20}}
+----
diff --git a/antora/modules/ROOT/pages/releases.adoc b/antora/modules/ROOT/pages/releases.adoc
new file mode 100644
index 00000000..1536cce3
--- /dev/null
+++ b/antora/modules/ROOT/pages/releases.adoc
@@ -0,0 +1,250 @@
+[#releases]
+= Releases
+
+[#1-1-1]
+== 1.1.1
+
+Released on *12 August 2024* &bull; View on https://gitlab.teklia.com/atr/pylaia/-/releases/1.1.1[Gitlab]
+
+[#breaking-changes]
+=== Breaking changes
+
+* The https://gitlab.teklia.com/atr/nnutils/[nnutils] library is no longer maintained and is only compatible with Python 3.6, 3.7, 3.8. As such its dependency has been removed. The `crnn.use_masks` parameter has been removed. It is still supported to keep the compatibility with older training configuration but will be ignored.
+
+[#feature]
+=== Feature
+
+* The number of worker processes created in dataloaders is now exposed through the `data.num_workers`  parameter.
+* There is a new command to run basic checks and compute statistics on your training dataset. Learn more about it in https://atr.pages.teklia.com/pylaia/usage/datasets/[the documentation].
+* Pretraining is now available. Load the weights of a previous checkpoint using the `train.pretrain` parameter when fine-tuning a model on a new dataset. Learn more about it in https://atr.pages.teklia.com/pylaia/usage/training/#resume-training-from-a-checkpoint[the documentation].
+* When training on a small dataset, freezing some of the layers can help with model convergence. The `train.freeze_layers` parameter supports freezing:
+ ** convolutional layers,
+ ** recurrent layers,
+ ** linear layers.
+* Proper support for right-to-left (RTL) languages is now available. Enable it using the `data.reading_order` argument both during https://atr.pages.teklia.com/pylaia/usage/training/#train-on-right-to-left-reading-order[training] and https://atr.pages.teklia.com/pylaia/usage/prediction/#predict-on-right-to-left-data[decoding].
+
+[#dependencies]
+=== Dependencies
+
+* Bumped https://pypi.org/project/pytorch-lightning/[pytorch-lightning] to version `1.4.2`.
+* Bumped https://pypi.org/project/textdistance/[textdistance] to version `4.6.1`.
+
+[#misc]
+=== Misc
+
+* A deprecation warning from jsonargparse was fixed.
+* The package's metadata are now stored in `pyproject.toml` as per https://peps.python.org/pep-0621/[PEP-0621].
+* PyLaia now uses https://docs.astral.sh/ruff/[ruff] for linting and formatting.
+
+[#1-1-0]
+== 1.1.0
+
+Released on *22 December 2023* &bull; View on https://gitlab.teklia.com/atr/pylaia/-/releases/1.1.0[Gitlab]
+
+[#breaking-changes-2]
+=== Breaking changes
+
+* Official support for Python3.8 has been dropped. This doesn't mean that the current code doesn't run on python3.8, we simply do not test that compatibility anymore. This decision was made since active support of python 3.8 has stopped for a while now and many libraries in the ML world have stopped supporting it as well.
+
+[#feature-2]
+=== Feature
+
+* A Docker image with the needed code to use this library is now built on every tag.
+* The coverage of our tests suite is displayed again as a GitLab badge on the repository as well as in the README.md file.
+
+[#documentation]
+=== Documentation
+
+* Many sections were added to the documentation:
+ ** for the https://atr.pages.teklia.com/pylaia/usage/initialization/[pylaia-htr-create-model] command,
+ ** for https://atr.pages.teklia.com/pylaia/usage/datasets/[dataset formatting],
+ ** for the https://atr.pages.teklia.com/pylaia/usage/training/[pylaia-htr-train-ctc] command and https://atr.pages.teklia.com/pylaia/usage/training/#resume-training-from-a-checkpoint[fine-tuning],
+ ** for the https://atr.pages.teklia.com/pylaia/usage/prediction/[pylaia-htr-decode-ctc] command,
+ ** for the https://atr.pages.teklia.com/pylaia/usage/netout/[pylaia-htr-netout] command,
+ ** to https://atr.pages.teklia.com/pylaia/usage/language_models/[train] https://kheafield.com/code/kenlm/[KenLM] language models,
+ ** the full Python code reference.
+* A contribution guide and a code of conduct were added for new contributors.
+
+[#dependencies-2]
+=== Dependencies
+
+* Bumped https://pypi.org/project/pytorch-lightning/[pytorch-lightning] to version `1.3.0`
+* Some dependencies were pinned to a version to avoid breakage:
+ ** https://pypi.org/project/natsort/[natsort] was pinned to version `8.4.0`,
+ ** https://pypi.org/project/textdistance/[textdistance] was pinned to version `4.6.0`,
+ ** https://pypi.org/project/scipy/[scipy] was pinned to version `1.11.3`,
+ ** https://pypi.org/project/matplotlib/[matplotlib] was pinned to version `3.8.2`,
+ ** https://pypi.org/project/numpy/[numpy] direct dependency was removed since it's installed through `scipy` and `matplotlib`.
+* PyLaia dropped support for python 3.8 so the https://pypi.org/project/dataclasses/[dataclasses] dependency was dropped.
+
+[#misc-2]
+=== Misc
+
+* The `torch.testing.assert_allclose` has been replaced by `torch.testing.assert_close` since it became deprecated in https://github.com/pytorch/pytorch/issues/61844[PyTorch 1.12.0].
+
+[#1-0-7]
+== 1.0.7
+
+Released on *18 October 2023* &bull; View on https://gitlab.teklia.com/atr/pylaia/-/releases/1.0.7[Gitlab]
+
+[#feature-3]
+=== Feature
+
+* When using a language model, a confidence score is now returned based on the log-likelihood of the hypothesis.
+
+[#documentation-2]
+=== Documentation
+
+A public documentation is now available on https://atr.pages.teklia.com/pylaia/. It's still under construction but next releases will add more and more content.
+
+[#dependencies-3]
+=== Dependencies
+
+* Bumped https://pypi.org/project/pytorch-lightning/[pytorch-lightning] to version `1.1.7`
+* Bumped GitHub action https://github.com/codecov/codecov-action[codecov/codecov-action] to version `3`
+* Bumped GitHub action https://github.com/actions/setup-python[actions/setup-python] to version `4`
+* Bumped GitHub action https://github.com/actions/checkout[actions/checkout] to version `4`
+
+[#development]
+=== Development
+
+* Releases are now built more easily through a Makefile.
+* The documentation is also redeployed after each push on `master` branch.
+* Fixed a test that behaved differently locally and during CI.
+
+[#1-0-6]
+== 1.0.6
+
+Released on *12 September 2023* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.6[Github]
+
+[#feature-4]
+=== Feature
+
+* During training, too small images are now padded to be able to pass the multiple convolution layers.
+
+[#documentation-3]
+=== Documentation
+
+* Fixed typos.
+
+[#dependencies-4]
+=== Dependencies
+
+* Replaced https://pillow.readthedocs.io/en/stable/releasenotes/2.7.0.html#antialias-renamed-to-lanczos[deprecated Pillow resampling method] `Image.ANTIALIAS` to `Image.Resample.Lanczos`.
+
+[#development-2]
+=== Development
+
+* Pre-commit hooks were updated.
+
+[#1-0-5]
+== 1.0.5
+
+Released on *29 March 2023* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.5[Github]
+
+[#dependencies-5]
+=== Dependencies
+
+* Requires `torch` version `1.13.0` or `1.13.1`.
+* Requires `torchvision` version `0.14.0` or `0.14.1` (depending on `torch` version).
+
+[#1-0-4]
+== 1.0.4
+
+Released on *4 January 2023* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.4[Github]
+
+[#dependencies-6]
+=== Dependencies
+
+* Requires `torch` version `1.13.0`.
+
+[#1-0-3]
+== 1.0.3
+
+Released on *12 December 2022* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.3[Github]
+
+[#feature-5]
+=== Feature
+
+* Now able to decode using a trained Language model through beam search decoding.
+* Exposes https://pytorch.org/docs/stable/data.html#multi-process-data-loading[torch Dataloaders's num_workers] parameter on the Python training function to limit resource usage when needed.
+
+[#dependencies-7]
+=== Dependencies
+
+* Added dependency to `torchaudio` version `0.13.0`.
+
+[#development-3]
+=== Development
+
+* Package version is now tracked through the `VERSION` file.
+
+[#1-0-2]
+== 1.0.2
+
+Released on *7 December 2022* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.2[Github]
+
+[#dependencies-8]
+=== Dependencies
+
+* Pinned dependency to `pytorch-lightning` to version `1.1.0`.
+
+[#1-0-1]
+== 1.0.1
+
+Released on *7 December 2022* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.1[Github]
+
+[#1-0-0]
+== 1.0.0
+
+Released on *2 December 2020* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.0[Github]
+
+[#added]
+=== Added
+
+* Support distributed training
+* Scripts can now be configured using yaml configuration files
+* Add support for the SGD and Adam optimizers
+* Support color images
+* Log the installed version of each module when scripts are called from shell
+* Add char/word segmentation to the decode script
+* Add several badges to the README
+* Support using a `ReduceLROnPlateau` scheduler during training
+* A CSV file (metrics.csv) is now created with the results obtained during training
+* Add CONTRIBUTING file
+* Training now can include GPU stats in the progress bar
+* Add isort to pre-commit to keep consistent imports throughout the codebase
+* Users can run the PyLaia scripts using Python now
+* Support half-precision training for fixed height models.
+* Add script to visualize the segmentation output
+* Use Codecov to produce test coverage reports
+* Code is now analyzed using CodeFactor
+
+[#changed]
+=== Changed
+
+* Make Python 3.6 the minimum supported version
+* Make PyTorch 1.4.0 the minimum supported version
+* Remove `ImageToTensor` in favor of vision transform `ToImageTensor`
+* Remove all of the internal logic (`engine`, `actions`, `hooks`, etc) in favor of pytorch-lightning's constructs
+* Change Travis CI for GitHub actions
+* Greatly improve the progress bar. It is used now in all scripts
+* The entire shell API has changed for the better (thanks to jsonargparse). Arguments are now separated into groups and help messages are clearer.
+* Drastically improve our test suite, we now have a 91% coverage
+
+[#removed]
+=== Removed
+
+* Remove egs directory. These live now at https://github.com/carmocca/PyLaia-examples
+* Remove Baidu's CTC loss in favor of PyTorch's
+* Remove PHOC code. Please open an issue if you were using it
+* Remove Dortmund code. Please open an issue if you were using it
+* Remove CTCLatticeGenerator. Please open an issue if you were using it
+* We no longer support saving checkpoints for more than one metric. Will be added back in a future version
+
+[#fixed]
+=== Fixed
+
+* Fix WER calculation when long delimiters are used
+* Exit training if a delimiter is not present in the vocabulary
+* Hundreds of other minor fixes and refactors to improve the code quality!
diff --git a/antora/modules/ROOT/pages/usage/index.adoc b/antora/modules/ROOT/pages/usage/index.adoc
new file mode 100644
index 00000000..ab241800
--- /dev/null
+++ b/antora/modules/ROOT/pages/usage/index.adoc
@@ -0,0 +1,22 @@
+[#usage]
+= Usage
+
+Once the dataset is formatted and `pylaia` is installed and in your environment, you may use the following commands:
+
+* {blank}
+`pylaia-htr-create-model`:: To create a new PyLaia model. More details in the xref:./initialization/index.adoc[dedicated page].
+* {blank}
+`pylaia-htr-dataset-validate`:: To compute statistics and run validation checks on a dataset. More details in the xref:./datasets/index.adoc[dedicated page].
+* {blank}
+`pylaia-htr-train-ctc`:: To train a PyLaia model. More details in the xref:./training/index.adoc[dedicated page].
+* {blank}
+`pylaia-htr-decode-ctc`:: To predict using a trained PyLaia model. More details in the xref:./prediction/index.adoc[dedicated page].
+* {blank}
+`pylaia-htr-netout`:: To dump features from a PyLaia model. More details in the xref:./netout/index.adoc[dedicated page].
+
+'''
+
+Related pages:
+
+* Learn how to format a xref:./datasets/format.adoc[dataset in PyLaia format]
+* Learn how to use PyLaia with an xref:./language_models/index.adoc[explicit language model]
diff --git a/antora/modules/ROOT/pages/usage/initialization/index.adoc b/antora/modules/ROOT/pages/usage/initialization/index.adoc
new file mode 100644
index 00000000..ae2d11f1
--- /dev/null
+++ b/antora/modules/ROOT/pages/usage/initialization/index.adoc
@@ -0,0 +1,260 @@
+[#model-initialization]
+= Model initialization
+
+The `pylaia-htr-create-model` command can be used to create a PyLaia model. To know more about the options of this command, use `pylaia-htr-create-model --help`.
+
+[#purpose]
+== Purpose
+
+The general architecture of PyLaia is composed of convolutional blocks followed by a set of bi-directional recurrent layers and a linear layer. PyLaia is fully configurable by the user, including:
+
+* Number of convolutional blocks,
+* Number of recurrent layers,
+* Batch normalization,
+* Pooling layers,
+* Activation function,
+* ...
+
+This command will create a pickled file (named `model` by default), which is required to initialize the `LaiaCRNN` class before training.
+
+[#parameters]
+== Parameters
+
+The full list of parameters is detailed in this section.
+
+[#general-parameters]
+=== General parameters
+
+|===
+| Parameter | Description | Type | Default
+
+| `syms`
+| Positional argument. Path to a file mapping characters to integers. The CTC symbol must be mapped to integer 0.
+| `str`
+|
+
+| `config`
+| Path to a JSON configuration file
+| `json`
+|
+
+| `fixed_input_height`
+| Height of the input images. If set to 0, a variable height model will be used (see `adaptive_pooling`). This will be used to compute the model output height at the end of the convolutional layers.
+| `int`
+| 0
+
+| `adaptive_pooling`
+| Use custom adaptive pooling layers to enable training with variable height images. Takes into account the size of each individual image within the batch (before padding). Should be in `{avg,max}pool-N`.
+| `str`
+| `avgpool-16`
+
+| `save_model`
+| Whether to save the model to a file.
+| `bool`
+| `True`
+|===
+
+[#common-parameters]
+=== Common parameters
+
+|===
+| Name | Description | Type | Default
+
+| `common.train_path`
+| Directory where the model will be saved
+| `str`
+| `.`
+
+| `common.model_filename`
+| Filename of the model.
+| `str`
+| `model`
+|===
+
+[#logging-arguments]
+=== Logging arguments
+
+|===
+| Name | Description | Type | Default
+
+| `logging.fmt`
+| Logging format.
+| `str`
+| `%(asctime)s %(levelname)s %(name)s] %(message)s`
+
+| `logging.level`
+| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+| `Level`
+| `INFO`
+
+| `logging.filepath`
+| Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname`
+| `Optional[str]`
+|
+
+| `logging.overwrite`
+| Whether to overwrite the logfile or to append.
+| `bool`
+| `False`
+
+| `logging.to_stderr_level`
+| If filename is set, use this to log also to stderr at the given level.
+| `Level`
+| `ERROR`
+|===
+
+[#architecture-arguments]
+=== Architecture arguments
+
+|===
+| Name | Description | Type | Default
+
+| `crnn.num_input_channels`
+| Number of channels of the input images.
+| `int`
+| `1`
+
+| `crnn.vertical_text`
+| Whether the text is written vertically.
+| `bool`
+| `False`
+
+| `crnn.cnn_num_features`
+| Number of features in each convolutional layer.
+| `List`
+| `[16, 16, 32, 32]`
+
+| `crnn.cnn_kernel_size`
+| Kernel size of each convolutional layer (e.g. [n,n,...] or [[h1,w1],[h2,w2],...]).
+| `List`
+| `[3, 3, 3, 3]`
+
+| `crnn.cnn_stride`
+| Stride of each convolutional layer. (e.g. [n,n,...] or [[h1,w1],[h2,w2],...])
+| `List`
+| `[1, 1, 1, 1]`
+
+| `crnn.cnn_dilation`
+| Spacing between each convolutional layer kernel elements. (e.g. [n,n,...] or [[h1,w1],[h2,w2],...])
+| `List`
+| `[1, 1, 1, 1]`
+
+| `crnn.cnn_activation`
+| Type of activation function in each convolutional layer (from `torch.nn`).
+| `List`
+| `['LeakyReLU', 'LeakyReLU', 'LeakyReLU', 'LeakyReLU']`
+
+| `crnn.cnn_poolsize`
+| MaxPooling size after each convolutional layer. (e.g. [n,n,...] or [[h1,w1],[h2,w2],...]).
+| `List`
+| `[2, 2, 2, 0]`
+
+| `crnn.cnn_dropout`
+| Dropout probability at the input of each convolutional layer.
+| `List`
+| `[0.0, 0.0, 0.0, 0.0]`
+
+| `crnn.cnn_batchnorm`
+| Whether to do batch normalization before the activation in each convolutional layer.
+| `List`
+| `[False, False, False, False]`
+
+| `crnn.rnn_layers`
+| Number of recurrent layers.
+| `int`
+| `3`
+
+| `crnn.rnn_units`
+| Number of units in each recurrent layer.
+| `int`
+| `256`
+
+| `crnn.rnn_dropout`
+| Dropout probability at the input of each recurrent layer.
+| `float`
+| `0.5`
+
+| `crnn.rnn_type`
+| Type of recurrent layer (from `torch.nn`).
+| `str`
+| `LSTM`
+
+| `crnn.lin_dropout`
+| Dropout probability at the input of the final linear layer.
+| `float`
+| `0.5`
+|===
+
+[#examples]
+== Examples
+
+The model can be configured using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
+
+[#example-with-command-line-arguments-cli]
+=== Example with Command Line Arguments (CLI)
+
+Run the following command to create a model:
+
+[,sh]
+----
+pylaia-htr-create-model /path/to/syms.txt \
+   --fixed_input_height 128 \
+   --crnn.rnn_layers 4 \
+   --logging.filepath model.log \
+   --common.train_path my_experiments/
+----
+
+[#example-with-a-yaml-configuration-file]
+=== Example with a YAML configuration file
+
+Run the following command to create a model:
+
+[,sh]
+----
+pylaia-htr-create-model --config config_create_model.yaml
+----
+
+Where `config_create_model.yaml` is:
+
+[,yaml]
+----
+crnn:
+  cnn_activation:
+  - LeakyReLU
+  - LeakyReLU
+  - LeakyReLU
+  - LeakyReLU
+  cnn_batchnorm:
+  - true
+  - true
+  - true
+  - true
+  cnn_dilation:
+  - 1
+  - 1
+  - 1
+  - 1
+  cnn_kernel_size:
+  - 3
+  - 3
+  - 3
+  - 3
+  cnn_num_features:
+  - 12
+  - 24
+  - 48
+  - 48
+  cnn_poolsize:
+  - 2
+  - 2
+  - 0
+  - 2
+  lin_dropout: 0.5
+  rnn_dropout: 0.5
+  rnn_layers: 3
+  rnn_type: LSTM
+  rnn_units: 256
+fixed_input_height: 128
+save_model: true
+syms: /path/to/syms.txt
+----
diff --git a/antora/modules/ROOT/pages/usage/language_models/index.adoc b/antora/modules/ROOT/pages/usage/language_models/index.adoc
new file mode 100644
index 00000000..c87521a4
--- /dev/null
+++ b/antora/modules/ROOT/pages/usage/language_models/index.adoc
@@ -0,0 +1,249 @@
+[#explicit-language-modeling-with-n-grams]
+= Explicit language modeling with n-grams
+
+PyLaia supports lattice rescoring using a statistical language model.
+This documentation gives instructions to build a language model with https://kheafield.com/code/kenlm/[kenlm].
+
+NOTE: You can also use http://www.speech.sri.com/projects/srilm/[SRILM] to build an ARPA language model.
+
+To decode with a language model, you need:
+
+* <<build-the-language-model,a language model>>
+* <<list-of-tokens,a list of tokens>>
+* <<lexicon,a lexicon>>
+
+[#build-the-language-model]
+== Build the language model
+
+[#install-kenlm]
+=== Install kenlm
+
+To build the language model, you first need to install and compile https://github.com/kpu/kenlm[kenlm] by following the instructions detailed in the https://github.com/kpu/kenlm#compiling[README].
+
+[#generate-resources-to-train-the-language-model]
+=== Generate resources to train the language model
+
+To train a language model, you need to generate a corpus containing the training text tokenized at character, subword or word level.
+
+[#characters]
+==== Characters
+
+Here is a sample of text tokenized at character-level (`corpus_characters.txt`).
+
+[,text]
+----
+u d e <space> i <space> r e s t a u r a n t e r ,
+v æ r e t <space> u h y r e <space> m e g e t <space> s a m m e n , <space> o f t e <space> t i l <space> m a a l t i d e r <space> o g <space> t i l <space> t h e <space> h o s <space> O s s b a h r ,
+v i <space> s i d d e r <space> v e d <space> k a m i n e n <space> d e r <space> o g <space> s n a k k e r , <space> h v i l k e t <space> e r <space> m e g e t <space> m o r s o m t . <space> N u
+k o m m e r <space> d e r <space> m a n g e <space> r e i s e n d e <space> v e n n e r <space> e l l e r <space> s l æ g t <space> e l l e r <space> p r i n s e s s e r , <space> s o m
+O s s b a h r <space> m a a <space> v æ r e <space> s a m m e n <space> m e d <space> H e d b e r g <space> o f t e <space> o g s a a . <space> M e n <space> v i <space> k a n <space> l e v e
+----
+
+[#subwords]
+==== Subwords
+
+Here is a sample of text tokenized at subword-level (`corpus_subwords.txt`).
+
+[,text]
+----
+ud e <space> i <space> r e st au r ant er ,
+været <space> u h y r e <space> meget <space> sammen , <space> ofte <space> til <space> ma altid er <space> og <space> til <space> th e <space> hos <space> O s s ba h r ,
+vi <space> sidde r <space> ved <space> ka min en <space> der <space> og <space> snakke r , <space> hvilket <space> er <space> meget <space> morsomt . <space> Nu
+kommer <space> der <space> mange <space> r e i sende <space> venner <space> eller <space> s læg t <space> eller <space> pr in s e s ser , <space> som
+O s s ba h r <space> maa <space> være <space> sammen <space> med <space> H e d berg <space> ofte <space> ogsaa . <space> Men <space> vi <space> kan <space> lev e
+----
+
+[#words]
+==== Words
+
+Here is a sample of text tokenized at word-level (`corpus_words.txt`).
+
+[,text]
+----
+ude <space> i <space> restauranter <space> ,
+været <space> uhyre <space> meget <space> sammen <space> , <space> ofte <space> til <space> maaltider <space> og <space> til <space> the <space> hos <space> Ossbahr <space> ,
+vi <space> sidder <space> ved <space> kaminen <space> der <space> og <space> snakker <space> , <space> hvilket <space> er <space> meget <space> morsomt <space> . <space> Nu
+kommer <space> der <space> mange <space> reisende <space> venner <space> eller <space> slægt <space> eller <space> prinsesser <space> , <space> som
+Ossbahr <space> maa <space> være <space> sammen <space> med <space> Hedberg <space> ofte <space> ogsaa <space> . <space> Men <space> vi <space> kan <space> leve
+----
+
+[#train-the-language-model]
+=== Train the language model
+
+Once your corpus is created, you can estimate the n-gram model.
+
+[#characters-2]
+==== Characters
+
+At character-level, we recommend building a 6-gram model. Use the following command:
+
+[,sh]
+----
+bin/lmplz --order 6 \
+    --text my_dataset/language_model/corpus_characters.txt \
+    --arpa my_dataset/language_model/model_characters.arpa \
+    --discount_fallback
+----
+
+NOTE: The `--discount_fallback` option can be removed if your corpus is very large.
+
+The following message should be displayed if the language model was built successfully:
+
+[,sh]
+----
+=== 1/5 Counting and sorting n-grams ===
+Reading language_model/corpus.txt
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+Unigram tokens 111629 types 109
+=== 2/5 Calculating and sorting adjusted counts ===
+Chain sizes: 1:1308 2:784852864 3:1471599104 4:2354558464 5:3433731328 6:4709116928
+Statistics:
+1 109 D1=0.586207 D2=0.534483 D3+=1.5931
+2 1734 D1=0.538462 D2=1.09853 D3+=1.381
+3 7957 D1=0.641102 D2=1.02894 D3+=1.37957
+4 17189 D1=0.747894 D2=1.20483 D3+=1.41084
+5 25640 D1=0.812458 D2=1.2726 D3+=1.57601
+6 32153 D1=0.727411 D2=1.13511 D3+=1.42722
+Memory estimate for binary LM:
+type      kB
+probing 1798 assuming -p 1.5
+probing 2107 assuming -r models -p 1.5
+trie     696 without quantization
+trie     313 assuming -q 8 -b 8 quantization
+trie     648 assuming -a 22 array pointer compression
+trie     266 assuming -a 22 -q 8 -b 8 array pointer compression and quantization
+=== 3/5 Calculating and sorting initial probabilities ===
+Chain sizes: 1:1308 2:27744 3:159140 4:412536 5:717920 6:1028896
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+####################################################################################################
+=== 4/5 Calculating and writing order-interpolated probabilities ===
+Chain sizes: 1:1308 2:27744 3:159140 4:412536 5:717920 6:1028896
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+####################################################################################################
+=== 5/5 Writing ARPA model ===
+----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
+****************************************************************************************************
+Name:lmplz	VmPeak:12643224 kB	VmRSS:6344 kB	RSSMax:1969316 kB	user:0.196445	sys:0.514686	CPU:0.711161	real:0.682693
+----
+
+[#subwords-2]
+==== Subwords
+
+At subword-level, we recommend building a 6-gram model. Use the following command:
+
+[,sh]
+----
+bin/lmplz --order 6 \
+    --text my_dataset/language_model/corpus_subwords.txt \
+    --arpa my_dataset/language_model/model_subwords.arpa \
+    --discount_fallback
+----
+
+NOTE: The `--discount_fallback` option can be removed if your corpus is very large.
+
+[#words-2]
+==== Words
+
+At word-level, we recommend building a 3-gram model. Use the following command:
+
+[,sh]
+----
+bin/lmplz --order 3 \
+    --text my_dataset/language_model/corpus_words.txt \
+    --arpa my_dataset/language_model/model_words.arpa \
+    --discount_fallback
+----
+
+NOTE: The `--discount_fallback` option can be removed if your corpus is very large.
+
+[#predict-with-a-language-model]
+== Predict with a language model
+
+Once the language model is trained, you need to generate a list of tokens and a lexicon.
+
+[#list-of-tokens]
+=== List of tokens
+
+The list of tokens `tokens.txt` lists all the tokens that can be predicted by PyLaia.
+It should be similar to `syms.txt`, but without any index, and can be generated with this command:
+
+[,bash]
+----
+cut -d' ' -f 1 syms.txt > tokens.txt
+----
+
+NOTE: This file does not depend on the tokenization level.
+
+[,text]
+----
+<ctc>
+.
+,
+a
+b
+c
+...
+<space>
+----
+
+[#lexicon]
+=== Lexicon
+
+The lexicon lists all the words in the vocabulary and their decomposition in tokens.
+
+[#characters-3]
+==== Characters
+
+At character-level, words are simply characters, so the `lexicon_characters.txt` file should map characters to characters:
+
+[,text]
+----
+<ctc> <ctc>
+. .
+, ,
+a a
+b b
+c c
+...
+<space> <space>
+----
+
+[#subwords-3]
+==== Subwords
+
+At subword-level, the `lexicon_subwords.txt` file should map subwords with their character decomposition:
+
+[,text]
+----
+<ctc> <ctc>
+. .
+, ,
+altid a l t i d
+ant a n t
+au a u
+...
+<space> <space>
+----
+
+[#words-3]
+==== Words
+
+At word-level, the `lexicon_words.txt` file should map words with their character decomposition:
+
+[,text]
+----
+<ctc> <ctc>
+. .
+, ,
+der d e r
+er e r
+eller e l l e r
+...
+<space> <space>
+----
+
+[#predict-with-pylaia]
+=== Predict with PyLaia
+
+See the xref:usage/prediction/index.adoc#predict-with-a-language-model[dedicated example].
diff --git a/antora/modules/ROOT/pages/usage/netout/index.adoc b/antora/modules/ROOT/pages/usage/netout/index.adoc
new file mode 100644
index 00000000..a5713d48
--- /dev/null
+++ b/antora/modules/ROOT/pages/usage/netout/index.adoc
@@ -0,0 +1,236 @@
+[#netout]
+= Netout
+
+The `pylaia-htr-netout` command can be used to dump the features extracted by PyLaia for a set of text-lines. To know more about the options of this command, use `pylaia-htr-netout --help`.
+
+WARNING: This command was initially designed to combine PyLaia and Kaldi. Since December 2022, combining PyLaia with language models can be achieved more easily by xref:usage/language_models/index.adoc[building a language model with KenLM] and xref:usage/prediction/index.adoc#predict-with-a-language-model[predicting with `pylaia-htr-decode-ctc`].
+
+[#purpose]
+== Purpose
+
+This command outputs the feature matrix and lattice computed by PyLaia in Kaldi format for a given dataset.
+
+It requires:
+
+* a xref:usage/datasets/index.adoc#image-names[list of image ids],
+* the pickled `model` file created during xref:usage/initialization/index.adoc[model initialization],
+* the weights `*.ckpt` of the trained model created during xref:usage/training/index.adoc[model training].
+
+The files generated by this command are designed to combine PyLaia and Kaldi, but could also be used to predict with a custom decoder.
+
+[#parameters]
+== Parameters
+
+The full list of parameters is detailed in this section.
+
+[#general-parameters]
+=== General parameters
+
+|===
+| Parameter | Description | Type | Default
+
+| `img_list`
+| Positional argument. File containing the names of the images to decode (one image per line).
+| `str`
+|
+
+| `img_dirs`
+| Directories containing line images.
+| `str`
+|
+
+| `config`
+| Path to a JSON configuration file
+| `json`
+|
+|===
+
+[#common-parameters]
+=== Common parameters
+
+|===
+| Name | Description | Type | Default
+
+| `common.train_path`
+| Directory where the model will be saved
+| `str`
+| `.`
+
+| `common.model_filename`
+| Filename of the model.
+| `str`
+| `model`
+
+| `common.experiment_dirname`
+| Directory name of the experiment.
+| `str`
+| `experiment`
+
+| `common.checkpoint`
+| Checkpoint to load. Must be a filepath, a filename, a glob pattern or `None` (in this case, the best checkpoint will be loaded). Note that the checkpoint will be searched in `common.experiment_dirname`, unless you provide an absolute filepath.
+| `str`
+| `None`
+|===
+
+[#data-arguments]
+=== Data arguments
+
+|===
+| Name | Description | Type | Default
+
+| `data.batch_size`
+| Batch size.
+| `int`
+| `8`
+
+| `data.color_mode`
+| Color mode. Must be either `L`, `RGB` or `RGBA`.
+| `ColorMode`
+| `ColorMode.L`
+|===
+
+[#netout-arguments]
+=== Netout arguments
+
+|===
+| Name | Description | Type | Default
+
+| `netout.output_transform`
+| Transformation to apply at the end of the model. Should be `softmax` or `log_softmax`.
+| `str`
+| `None`
+
+| `netout.matrix`
+| Path to the output file containing a list of keys (image ids) and values (output matrix where rows represents timesteps and columns CTC labels). This file can be directly used with Kaldi.
+| `Optional[str]`
+| `None`
+
+| `netout.lattice`
+| Path to the output file containing a list of keys (image ids) and values (lattices representing the CTC output). This file can be directly used with Kaldi.
+| `Optional[str]`
+| `None`
+
+| `netout.digits`
+| Number of digits to be used for formatting
+| `int`
+| `10`
+|===
+
+[#logging-arguments]
+=== Logging arguments
+
+|===
+| Name | Description | Type | Default
+
+| `logging.fmt`
+| Logging format.
+| `str`
+| `%(asctime)s %(levelname)s %(name)s] %(message)s`
+
+| `logging.level`
+| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+| `Level`
+| `INFO`
+
+| `logging.filepath`
+| Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname`
+| `Optional[str]`
+|
+
+| `logging.overwrite`
+| Whether to overwrite the logfile or to append.
+| `bool`
+| `False`
+
+| `logging.to_stderr_level`
+| If filename is set, use this to log also to stderr at the given level.
+| `Level`
+| `ERROR`
+|===
+
+[#trainer-arguments]
+=== Trainer arguments
+
+Pytorch Lightning `Trainer` flags can also be set using the `--trainer` argument. See https://github.com/Lightning-AI/lightning/blob/1.7.0/docs/source-pytorch/common/trainer.rst#trainer-flags[the documentation].
+
+This flag is mostly useful to define whether to predict on CPU or GPU.
+
+* `--trainer.gpus 0` to run on CPU,
+* `--trainer.gpus n` to run on `n` GPUs (use with `--training.auto_select True` for auto-selection),
+* `--trainer.gpus -1` to run on all GPUs.
+
+[#examples]
+== Examples
+
+Dumping PyLaia's features can be done using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
+
+[#dumping-features-from-a-model-from-hugging-face]
+=== Dumping features from a model from Hugging Face
+
+First, clone a trained model from Hugging Face:
+
+[,bash]
+----
+git clone https://huggingface.co/Teklia/pylaia-huginmunin
+----
+
+List image names in `img_list.txt`:
+
+[,text]
+----
+docs/assets/219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f
+docs/assets/219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4
+----
+
+Dump PyLaia's output with:
+
+[,bash]
+----
+pylaia-htr-netout --common.experiment_dirname pylaia-huginmunin/ \
+                  --common.model_filename pylaia-huginmunin/model \
+                  --netout.matrix matrix.txt \
+                  --netout.lattice lattice.txt \
+                  --img_dir [docs/assets] \
+                  img_list.txt
+----
+
+Output files will be written in `--common.experiment_dirname`:
+
+----
+├── pylaia-huginmunin/
+    ├── matrix.txt
+    └── lattice.txt
+----
+
+[#dumping-features-using-a-yaml-configuration-file]
+=== Dumping features using a YAML configuration file
+
+Run the following command to dump PyLaia's output:
+
+[,bash]
+----
+pylaia-htr-netout --config config_netout.yaml
+----
+
+With the following configuration file:
+
+[,yaml]
+----
+common:
+  experiment_dirname: pylaia-huginmunin
+  model_filename: pylaia-huginmunin/model
+img_list: img_list.txt
+img_dirs:
+  - docs/assets/
+netout:
+  matrix: matrix.txt
+  lattice: lattice.txt
+----
+
+Output files will be written in `--common.experiment_dirname`:
+
+----
+├── pylaia-huginmunin/
+    ├── matrix.txt
+    └── lattice.txt
+----
diff --git a/antora/modules/ROOT/pages/usage/prediction/index.adoc b/antora/modules/ROOT/pages/usage/prediction/index.adoc
new file mode 100644
index 00000000..5bb889cc
--- /dev/null
+++ b/antora/modules/ROOT/pages/usage/prediction/index.adoc
@@ -0,0 +1,530 @@
+[#decoding]
+= Decoding
+
+The `pylaia-htr-decode-ctc` command can be used to predict using a trained PyLaia model. To know more about the options of this command, use `pylaia-htr-decode-ctc --help`.
+
+[#purpose]
+== Purpose
+
+This command uses a trained PyLaia model to predict on a dataset.
+
+It requires:
+
+* a xref:usage/datasets/index.adoc#image-names[list of image ids],
+* the pickled `model` file created during xref:usage/initialization/index.adoc[model initialization],
+* the weights `*.ckpt` of the trained model created during xref:usage/training/index.adoc[model training].
+
+[#parameters]
+== Parameters
+
+The full list of parameters is detailed in this section.
+
+[#general-parameters]
+=== General parameters
+
+|===
+| Parameter | Description | Type | Default
+
+| `syms`
+| Positional argument. Path to a file mapping characters to integers. The CTC symbol *must* be mapped to integer 0.
+| `str`
+|
+
+| `img_list`
+| Positional argument. File containing the names of the images to decode (one image per line).
+| `str`
+|
+
+| `img_dirs`
+| Directories containing line images.
+| `str`
+|
+
+| `config`
+| Path to a JSON configuration file
+| `json`
+|
+|===
+
+[#common-parameters]
+=== Common parameters
+
+|===
+| Name | Description | Type | Default
+
+| `common.train_path`
+| Directory where the model will be saved
+| `str`
+| `.`
+
+| `common.model_filename`
+| Filename of the model.
+| `str`
+| `model`
+
+| `common.experiment_dirname`
+| Directory name of the experiment.
+| `str`
+| `experiment`
+
+| `common.checkpoint`
+| Checkpoint to load. Must be a filepath, a filename, a glob pattern or `None` (in this case, the best checkpoint will be loaded). Note that the checkpoint will be searched in `common.experiment_dirname`, unless you provide an absolute filepath.
+| `str`
+| `None`
+|===
+
+[#data-arguments]
+=== Data arguments
+
+|===
+| Name | Description | Type | Default
+
+| `data.batch_size`
+| Batch size.
+| `int`
+| `8`
+
+| `data.color_mode`
+| Color mode. Must be either `L`, `RGB` or `RGBA`.
+| `ColorMode`
+| `ColorMode.L`
+
+| `data.num_workers`
+| Number of worker processes created in dataloaders
+| `int`
+| `None`
+
+| `data.reading_order`
+| Reading order on the input lines: LTR (Left-to-Right) or RTL (Right-to-Left).
+| `ReadingOrder`
+| `LTR`
+|===
+
+[#decode-arguments]
+=== Decode arguments
+
+|===
+| Name | Description | Type | Default
+
+| `decode.include_img_ids`
+| Include the associated image ids in the decoding/segmentation output
+| `bool`
+| `True`
+
+| `decode.separator`
+| String to use as a separator between the image ids and the decoding/segmentation output.
+| `str`
+| ` `
+
+| `decode.join_string`
+| String to use to join the decoding output.
+| `Optional[str]`
+| ` `
+
+| `decode.use_symbols`
+| Convert the decoding output to symbols instead of symbol index.
+| `bool`
+| `True`
+
+| `decode.convert_spaces`
+| Whether or not to convert spaces.
+| `bool`
+| `False`
+
+| `decode.input_space`
+| Replace the space by this symbol if `convert_spaces` is set. Used for word segmentation and confidence score computation.
+| `str`
+| `<space>`
+
+| `decode.output_space`
+| Space symbol to display during decoding.
+| `str`
+| ` `
+
+| `decode.segmentation`
+| Use CTC alignment to estimate character or word segmentation. Should be `char` or `word`.
+| `Optional[str]`
+| `None`
+
+| `decode.temperature`
+| Temperature parameters used to scale the logits.
+| `float`
+| `1.0`
+
+| `decode.print_line_confidence_scores`
+| Whether to print line confidence scores.
+| `bool`
+| `False`
+
+| `decode.print_word_confidence_scores`
+| Whether to print word confidence scores.
+| `bool`
+| `False`
+
+| `decode.use_language_model`
+| Whether to decode with an external language model.
+| `bool`
+| `False`
+
+| `decode.language_model_path`
+| Path to a KenLM or ARPA n-gram language model.
+| `str`
+| `None`
+
+| `decode.language_model_weight`
+| Weight of the language model.
+| `float`
+| `None`
+
+| `decode.tokens_path`
+| Path to a file containing valid tokens. If using a file, the expected format is for tokens mapping to the same index to be on the same line. The `ctc` symbol should be at index 0.
+| `str`
+| `None`
+
+| `decode.lexicon_path`
+| Path to a lexicon file containing the possible words and corresponding spellings.
+| `str`
+| `None`
+
+| `decode.unk_token`
+| String representing unknown characters.
+| `str`
+| `<unk>`
+
+| `decode.blank_token`
+| String representing the blank/ctc symbol.
+| `str`
+| `<ctc>`
+|===
+
+[#logging-arguments]
+=== Logging arguments
+
+|===
+| Name | Description | Type | Default
+
+| `logging.fmt`
+| Logging format.
+| `str`
+| `%(asctime)s %(levelname)s %(name)s] %(message)s`
+
+| `logging.level`
+| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+| `Level`
+| `INFO`
+
+| `logging.filepath`
+| Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname`
+| `Optional[str]`
+|
+
+| `logging.overwrite`
+| Whether to overwrite the logfile or to append.
+| `bool`
+| `False`
+
+| `logging.to_stderr_level`
+| If filename is set, use this to log also to stderr at the given level.
+| `Level`
+| `ERROR`
+|===
+
+[#trainer-arguments]
+=== Trainer arguments
+
+Pytorch Lightning `Trainer` flags can also be set using the `--trainer` argument. See https://github.com/Lightning-AI/lightning/blob/1.7.0/docs/source-pytorch/common/trainer.rst#trainer-flags[the documentation].
+
+This flag is mostly useful to define whether to predict on CPU or GPU.
+
+* `--trainer.gpus 0` to run on CPU,
+* `--trainer.gpus n` to run on `n` GPUs (use with `--training.auto_select True` for auto-selection),
+* `--trainer.gpus -1` to run on all GPUs.
+
+[#examples]
+== Examples
+
+The prediction can be done using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
+
+We provide some images to try out our models. They can be found in `docs/assets`, on the https://gitlab.teklia.com/atr/pylaia/-/tree/master/docs/assets?ref_type=heads[Gitlab repository]. To test the prediction commands, make sure to download them on your end.
+
+[,shell]
+----
+mkdir images
+wget https://user-images.githubusercontent.com/100838858/219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f.jpg -P images
+wget https://user-images.githubusercontent.com/100838858/219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4.jpg -P images
+----
+
+[#predict-using-a-model-from-hugging-face]
+=== Predict using a model from Hugging Face
+
+First, clone a trained model from Hugging Face:
+
+[,bash]
+----
+git clone https://huggingface.co/Teklia/pylaia-huginmunin
+----
+
+[NOTE]
+====
+Some files are stored through https://git-lfs.com/[Git-LFS]. Make sure all files are correctly pulled using the following command, from the cloned folder.
+
+[,bash]
+----
+git lfs ls-files
+----
+
+You should see three files:
+
+* the language model (`language_model.arpa.gz`),
+* the model architecture (`model`),
+* the weights (`weights.ckpt`).
+====
+
+List image names in `img_list.txt`:
+
+[,text]
+----
+219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f
+219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4
+----
+
+Predict with:
+
+[,bash]
+----
+pylaia-htr-decode-ctc --common.experiment_dirname pylaia-huginmunin/ \
+                      --common.model_filename pylaia-huginmunin/model \
+                      --img_dir [images] \
+                      pylaia-huginmunin/syms.txt \
+                      img_list.txt
+----
+
+Expected output:
+
+[,text]
+----
+219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f o g <space> V a l s t a d <space> k a n <space> v i <space> v i s t
+219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 i k k e <space> g j ø r e <space> R e g n i n g <space> p a a ,
+----
+
+Note that by default, each token is separated by a space, and the space symbol is represented by `--decode.input_space` (default: `"<space>"`).
+
+[#predict-with-a-yaml-configuration-file]
+=== Predict with a YAML configuration file
+
+Run the following command to predict a model on CPU using:
+
+[,bash]
+----
+pylaia-htr-decode-ctc --config config_decode_model.yaml
+----
+
+With the following configuration file:
+
+[,yaml]
+----
+syms: pylaia-huginmunin/syms.txt
+img_list: img_list.txt
+img_dirs:
+  - images/
+common:
+  experiment_dirname: pylaia-huginmunin
+  model_filename: pylaia-huginmunin/model
+decode:
+  join_string: ""
+  convert_spaces: true
+trainer:
+  gpus: 0
+----
+
+Expected output:
+
+[,text]
+----
+219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f og Valstad kan vi vist
+219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 ikke gjøre Regning paa,
+----
+
+Note that setting `--decode.join_string ""` and `--decode.convert_spaces True` will display the text well formatted.
+
+[#predict-with-confidence-scores]
+=== Predict with confidence scores
+
+PyLaia estimates character probabilities for each timestep. It is possible to print the probability at line or word level.
+
+[#line-confidence-scores]
+==== Line confidence scores
+
+Run the following command to predict with line confidence scores:
+
+[,bash]
+----
+pylaia-htr-decode-ctc --config config_decode_model.yaml \
+                      --decode.print_line_confidence_score True
+----
+
+Expected output:
+
+[,text]
+----
+219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f 0.99 og Valstad kan vi vist
+219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 0.98 ikke gjøre Regning paa,
+----
+
+[#word-confidence-scores]
+==== Word confidence scores
+
+Run the following command to predict with word confidence scores:
+
+[,bash]
+----
+pylaia-htr-decode-ctc --config config_decode_model.yaml \
+                      --decode.print_word_confidence_score True
+----
+
+Expected output:
+
+[,text]
+----
+219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f ['1.00', '1.00', '1.00', '1.00', '1.00'] og Valstad kan vi vist
+219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 ['1.00', '0.91', '1.00', '0.99'] ikke gjøre Regning paa,
+----
+
+[#temperature-scaling]
+==== Temperature scaling
+
+PyLaia tends to output overly confident probabilities. https://arxiv.org/pdf/1706.04599.pdf[Temperature scaling] can be used to improve the reliability of confidence scores. The best temperature can be determined with a grid search algorithm by maximizing the correlation between 1-CER and confidence scores.
+
+Run the following command to predict calibrated word confidence scores with `temperature=3.0`:
+
+[,bash]
+----
+pylaia-htr-decode-ctc --config config_decode_model.yaml \
+                      --decode.print_word_confidence_score True \
+                      --decode.temperature 3.0
+----
+
+Expected output:
+
+[,text]
+----
+219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f ['0.93', '0.85', '0.87', '0.93', '0.85'] og Valstad kan vi vist
+219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 ['0.93', '0.84', '0.86', '0.83'] ikke gjøre Regning paa,
+----
+
+[#predict-with-a-language-model]
+=== Predict with a language model
+
+PyLaia supports KenLM and ARPA language models.
+
+Once the n-gram model is built, run the following command to combine it to your PyLaia model:
+
+[,bash]
+----
+pylaia-htr-decode-ctc --config config_decode_model_lm.yaml
+----
+
+With the following configuration file:
+
+[,yaml]
+----
+syms: pylaia-huginmunin/syms.txt
+img_list: img_list.txt
+img_dirs:
+  - images/
+common:
+  experiment_dirname: pylaia-huginmunin
+  model_filename: pylaia-huginmunin/model
+decode:
+  join_string: ""
+  convert_spaces: true
+  use_language_model: true
+  language_model_path: pylaia-huginmunin/language_model.arpa.gz
+  tokens_path: pylaia-huginmunin/tokens.txt
+  lexicon_path: pylaia-huginmunin/lexicon.txt
+  language_model_weight: 1.5
+  print_line_confidence_score: true
+trainer:
+  gpus: 0
+----
+
+Expected output:
+
+[,text]
+----
+219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f 0.90 og Valstad kan vi vist
+219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 0.89 ikke gjøre Regning paa,
+----
+
+[#predict-with-ctc-alignment]
+=== Predict with CTC alignment
+
+It is possible to estimate text localization based on CTC alignments with the `--decode.segmentation` option. It returns a list of texts with their estimated coordinates: `(text, x1, y1, x2, y2)`.
+
+[#character-level]
+==== Character level
+
+To output character localization, use the `--decode.segmentation char` option:
+
+[,bash]
+----
+pylaia-htr-decode-ctc --common.experiment_dirname pylaia-huginmunin/ \
+                      --common.model_filename pylaia-huginmunin/model \
+                      --decode.segmentation char \
+                      --img_dir [images] \
+                      pylaia-huginmunin/syms.txt \
+                      img_list.txt
+----
+
+Expected output:
+
+[,text]
+----
+219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f [('o', 1, 1, 31, 128), ('g', 32, 1, 79, 128), ('<space>', 80, 1, 143, 128), ('V', 144, 1, 167, 128), ('a', 168, 1, 223, 128), ('l', 224, 1, 255, 128), ('s', 256, 1, 279, 128), ('t', 280, 1, 327, 128), ('a', 328, 1, 367, 128), ('d', 368, 1, 407, 128), ('<space>', 408, 1, 496, 128), ('k', 497, 1, 512, 128), ('a', 513, 1, 576, 128), ('n', 577, 1, 624, 128), ('<space>', 625, 1, 712, 128), ('v', 713, 1, 728, 128), ('i', 729, 1, 776, 128), ('<space>', 777, 1, 808, 128), ('v', 809, 1, 824, 128), ('i', 825, 1, 872, 128), ('s', 873, 1, 912, 128), ('t', 913, 1, 944, 128)]
+219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 [('i', 1, 1, 23, 128), ('k', 24, 1, 71, 128), ('k', 72, 1, 135, 128), ('e', 136, 1, 191, 128), ('<space>', 192, 1, 248, 128), ('g', 249, 1, 264, 128), ('j', 265, 1, 312, 128), ('ø', 313, 1, 336, 128), ('r', 337, 1, 376, 128), ('e', 377, 1, 408, 128), ('<space>', 409, 1, 481, 128), ('R', 482, 1, 497, 128), ('e', 498, 1, 545, 128), ('g', 546, 1, 569, 128), ('n', 570, 1, 601, 128), ('i', 602, 1, 665, 128), ('n', 666, 1, 706, 128), ('g', 707, 1, 762, 128), ('<space>', 763, 1, 794, 128), ('p', 795, 1, 802, 128), ('a', 803, 1, 850, 128), ('a', 851, 1, 890, 128), (',', 891, 1, 914, 128)]
+----
+
+[#word-level]
+==== Word level
+
+To output word localization, use the `--decode.segmentation word` option:
+
+[,bash]
+----
+pylaia-htr-decode-ctc --common.experiment_dirname pylaia-huginmunin/ \
+                      --common.model_filename pylaia-huginmunin/model \
+                      --decode.segmentation word \
+                      --img_dir [images] \
+                      pylaia-huginmunin/syms.txt \
+                      img_list.txt
+----
+
+Expected output:
+
+[,text]
+----
+219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f [('og', 1, 1, 79, 128), ('<space>', 80, 1, 143, 128), ('Valstad', 144, 1, 407, 128), ('<space>', 408, 1, 496, 128), ('kan', 497, 1, 624, 128), ('<space>', 625, 1, 712, 128), ('vi', 713, 1, 776, 128), ('<space>', 777, 1, 808, 128), ('vist', 809, 1, 944, 128)]
+219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 [('ikke', 1, 1, 191, 128), ('<space>', 192, 1, 248, 128), ('gjøre', 249, 1, 408, 128), ('<space>', 409, 1, 481, 128), ('Regning', 482, 1, 762, 128), ('<space>', 763, 1, 794, 128), ('paa,', 795, 1, 914, 128)]
+----
+
+[#predict-on-right-to-left-data]
+=== Predict on Right-To-Left data
+
+To predict on Right-To-Left data, use the `--data.reading_order` option:
+
+[,bash]
+----
+pylaia-htr-decode-ctc --common.experiment_dirname pylaia-khatt/ \
+                      --common.model_filename pylaia-khatt/model \
+                      --data.reading_order RTL \
+                      --img_dir [images] \
+                      pylaia-khatt/syms.txt \
+                      img_list.txt
+----
+
+Expected output:
+
+[,text]
+----
+text_line_1302 العلماء على فهم هذه الكتابات بالدراسات اللغوية السامية مثل العبرانية، وباللغة العربية التي
+----
diff --git a/antora/modules/ROOT/pages/usage/training/index.adoc b/antora/modules/ROOT/pages/usage/training/index.adoc
new file mode 100644
index 00000000..32d0523c
--- /dev/null
+++ b/antora/modules/ROOT/pages/usage/training/index.adoc
@@ -0,0 +1,411 @@
+[#training]
+= Training
+
+The `pylaia-htr-train-ctc` command can be used to train a PyLaia model. To know more about the options of this command, use `pylaia-htr-train-ctc --help`.
+
+[#purpose]
+== Purpose
+
+This command trains a PyLaia architecture on a dataset.
+
+It requires:
+
+* a xref:usage/datasets/index.adoc[formatted dataset],
+* the pickled `model` file created during xref:usage/initialization/index.adoc[model initialization].
+
+NOTE: The xref:usage/datasets/index.adoc[`pylaia-htr-dataset-validate`] command can help you analyze your dataset and point out issues.
+
+[#parameters]
+== Parameters
+
+The full list of parameters is detailed in this section.
+
+[#general-parameters]
+=== General parameters
+
+|===
+| Parameter | Description | Type | Default
+
+| `syms`
+| Positional argument. Path to a file mapping characters to integers. The CTC symbol *must* be mapped to integer 0.
+| `str`
+|
+
+| `img_dirs`
+| Positional argument. Directories containing line images.
+| `str`
+|
+
+| `tr_txt_table`
+| Positional argument. Path to a file mapping training image ids and tokenized transcription.
+| `str`
+|
+
+| `va_txt_table`
+| Positional argument. Path to a file mapping validation image ids and tokenized transcription.
+| `str`
+|
+
+| `config`
+| Path to a JSON configuration file
+| `json`
+|
+|===
+
+[#common-parameters]
+=== Common parameters
+
+|===
+| Name | Description | Type | Default
+
+| `common.seed`
+| Seed for random number generators.
+| `int`
+| `74565`
+
+| `common.train_path`
+| Directory where the model will be saved
+| `str`
+| `.`
+
+| `common.model_filename`
+| Filename of the model.
+| `str`
+| `model`
+
+| `common.experiment_dirname`
+| Directory name of the experiment.
+| `str`
+| `experiment`
+
+| `common.monitor`
+| Metric to monitor for early stopping and checkpointing.
+| `Monitor`
+| `Monitor.va_cer`
+
+| `common.checkpoint`
+| Checkpoint to load. Must be a filepath, a filename, a glob pattern or `None` (in this case, the best checkpoint will be loaded). Note that the checkpoint will be searched in `common.experiment_dirname`, unless you provide an absolute filepath.
+| `Optional[str]`
+| `None`
+|===
+
+[#data-arguments]
+=== Data arguments
+
+|===
+| Name | Description | Type | Default
+
+| `data.batch_size`
+| Batch size.
+| `int`
+| `8`
+
+| `data.color_mode`
+| Color mode. Must be either `L`, `RGB` or `RGBA`.
+| `ColorMode`
+| `ColorMode.L`
+
+| `data.num_workers`
+| Number of worker processes created in dataloaders
+| `int`
+| `None`
+
+| `data.reading_order`
+| Reading order on the input lines: LFT (Left-to-Right) or RTL (Right-to-Left).
+| `ReadingOrder`
+| `LFT`
+|===
+
+[#train-arguments]
+=== Train arguments
+
+|===
+| Name | Description | Type | Default
+
+| `train.delimiters`
+| List of symbols representing the word delimiters.
+| `List`
+| `["<space>"]`
+
+| `train.checkpoint_k`
+| Model saving mode: `-1` all models will be saved, `0`: no models are saved, `k` the `k` best models are saved.
+| `int`
+| `3`
+
+| `train.resume`
+| Whether to resume training with a checkpoint. This option can be used to continue training on the same dataset.
+| `bool`
+| `False`
+
+| `train.pretrain`
+| Whether to load pretrained weights from a checkpoint. This option can be used to load pretrained weights when fine-tuning a model on a new dataset.
+| `bool`
+| `False`
+
+| `train.freeze_layers`
+| List of layers to freeze during training: `"conv"` to freeze convolutional layers, `"rnn"` to freeze recurrent layers, `"linear"` to freeze the linear layer
+| `List[str]`
+| `None`
+
+| `train.early_stopping_patience`
+| Number of validation epochs with no improvement after which training will be stopped.
+| `int`
+| `20`
+
+| `train.gpu_stats`
+| Whether to include GPU stats in the training progress bar.
+| `bool`
+| `False`
+
+| `train.augment_training`
+| Whether to use data augmentation.
+| `bool`
+| `False`
+
+| `train.log_to_wandb`
+| Whether to log training metrics and parameters to Weights & Biases.
+| `bool`
+| `False`
+|===
+
+[#logging-arguments]
+=== Logging arguments
+
+|===
+| Name | Description | Type | Default
+
+| `logging.fmt`
+| Logging format.
+| `str`
+| `%(asctime)s %(levelname)s %(name)s] %(message)s`
+
+| `logging.level`
+| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+| `Level`
+| `INFO`
+
+| `logging.filepath`
+| Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname`
+| `Optional[str]`
+|
+
+| `logging.overwrite`
+| Whether to overwrite the logfile or to append.
+| `bool`
+| `False`
+
+| `logging.to_stderr_level`
+| If filename is set, use this to log also to stderr at the given level.
+| `Level`
+| `ERROR`
+|===
+
+[#optimizer-arguments]
+=== Optimizer arguments
+
+|===
+| Name | Description | Type | Default
+
+| `optimizers.name`
+| Optimization algorithm. Must be `SGD`, `RMSProp`, `Adam`.
+| `List`
+| `RMSProp`
+
+| `optimizers.learning_rate`
+| Learning rate.
+| `float`
+| `0.0005`
+
+| `optimizers.momentum`
+| Momentum.
+| `float`
+| `0.0`
+
+| `optimizers.weight_l2_penalty`
+| Apply this L2 weight penalty to the loss function.
+| `float`
+| `0.0`
+
+| `optimizers.nesterov`
+| Whether to use Nesterov momentum.
+| `bool`
+| `False`
+|===
+
+[#scheduler-arguments]
+=== Scheduler arguments
+
+|===
+| Name | Description | Type | Default
+
+| `scheduler.active`
+| Whether to use an on-plateau learning rate scheduler.
+| `bool`
+| `False`
+
+| `scheduler.monitor`
+| Metric for the scheduler to monitor.
+| `Monitor`
+| `Monitor.va_loss`
+
+| `scheduler.patience`
+| Number of epochs with no improvement after which learning rate will be reduced.
+| `int`
+| `5`
+
+| `scheduler.factor`
+| Factor by which the learning rate will be reduced.
+| `float`
+| `0.1`
+|===
+
+[#trainer-arguments]
+=== Trainer arguments
+
+PyTorch Lightning `Trainer` flags can also be set using the `--trainer` argument. See https://github.com/Lightning-AI/lightning/blob/1.7.0/docs/source-pytorch/common/trainer.rst#trainer-flags[the documentation].
+
+[#examples]
+== Examples
+
+The model can be trained using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
+
+[#train-from-scratch-with-command-line-arguments-cli]
+=== Train from scratch with Command Line Arguments (CLI)
+
+Run the following command to train a model:
+
+[,sh]
+----
+pylaia-htr-train-ctc /path/to/syms.txt \
+   `cat img_dirs_args.txt`\
+   /path/to/train.txt \
+   /path/to/val.txt \
+   --trainer.gpus 1 \
+   --data.batch_size 32
+----
+
+[#train-from-scratch-with-a-yaml-configuration-file]
+=== Train from scratch with a YAML configuration file
+
+Run the following command to train a model:
+
+[,sh]
+----
+pylaia-htr-train-ctc --config config_train_model.yaml
+----
+
+Where `config_train_model.yaml` is:
+
+[,yaml]
+----
+syms: /path/to/syms.txt
+img_dirs:
+  - /path/to/images/
+tr_txt_table: /path/to/train.txt
+va_txt_table: /path/to/val.txt
+common:
+  experiment_dirname: experiment-dataset
+logging:
+  filepath: pylaia_training.log
+scheduler:
+  active: true
+train:
+  augment_training: true
+  early_stopping_patience: 80
+trainer:
+  auto_select_gpus: true
+  gpus: 1
+  max_epochs: 600
+----
+
+[#resume-training-from-a-checkpoint]
+=== Resume training from a checkpoint
+
+Run the following command to continue training from a checkpoint for 200 epochs.
+
+[,sh]
+----
+pylaia-htr-train-ctc --config config_train_model.yaml --train.resume true --trainer.max_epochs 200
+----
+
+NOTE: If `common.checkpoint` is not set, PyLaia will select the best checkpoint from `common.experiment_dirname`
+
+[#fine-tune-from-a-checkpoint]
+=== Fine-tune from a checkpoint
+
+Run the following command to load pretrained weights and fine-tune on a new dataset for 200 epochs.
+
+[,sh]
+----
+pylaia-htr-train-ctc --config config_train_model.yaml --common.experiment_dirname experiment/ --common.checkpoint initial_checkpoint.ckpt --train.pretrain true --trainer.max_epochs 200
+----
+
+[WARNING]
+// ====
+This option requires that your model architecture `model` matches the one used to train `initial_checkpoint.ckpt`.
+The last linear layer will be reinitialized using the Xavier initialization to match the new vocabulary size.
+// ====
+
+[NOTE]
+// ====
+The initial checkpoint is expected to be in the following directory: `{common.experiment_dirname}/pretrained/`.
+If it is located in `common.experiment_dirname`, the subdirectory `pretrained` will be created and the checkpoint will be moved there automatically.
+// ====
+
+[#train-on-right-to-left-reading-order]
+=== Train on Right-To-Left reading order
+
+By default, PyLaia expects images with Left-to-Right reading order.
+To train a model on Right-To-Left data, use the following command:
+
+[,sh]
+----
+pylaia-htr-train-ctc --config config_train_model_rtl.yaml
+----
+
+Where `config_train_model_rtl.yaml` is:
+
+[,yaml]
+----
+syms: /path/to/syms.txt
+img_dirs:
+  - /path/to/images/
+tr_txt_table: /path/to/train.txt
+va_txt_table: /path/to/val.txt
+common:
+  experiment_dirname: experiment-dataset
+logging:
+  filepath: pylaia_training.log
+scheduler:
+  active: true
+train:
+  augment_training: true
+  early_stopping_patience: 80
+trainer:
+  auto_select_gpus: true
+  gpus: 1
+  max_epochs: 600
+data:
+  reading_order: RTL
+----
+
+[#train-and-log-to-weights-biases]
+=== Train and log to Weights & Biases
+
+By default, PyLaia logs metrics and losses to a local CSV file. You can choose to log into https://wandb.ai/home[Weights & Biases] instead.
+
+To set up Weights & Biases:
+
+* Run `pip install pylaia[wandb]` to install the required dependencies
+* Sign in to Weights & Biases using `wandb login`
+
+Then, start training with `pylaia-htr-train-ctc --config config_train_model.yaml --train.log_to_wandb true`.
+
+This will create a project called `PyLaia` in W&B with one run for each training. The following are monitored for each run:
+
+* Training and validation metrics (losses, CER, WER)
+* Model gradients
+* System metrics (GPU and CPU utilisation, temperature, allocated memory)
+* Hyperparameters (training configuration)
+
+A public dashboard is available https://wandb.ai/starride-teklia/PyLaia%20demo[here] as an example.
diff --git a/antora/ui/partials/header-content.hbs b/antora/ui/partials/header-content.hbs
new file mode 100644
index 00000000..da31973b
--- /dev/null
+++ b/antora/ui/partials/header-content.hbs
@@ -0,0 +1,66 @@
+<header class="header">
+  <nav class="navbar">
+    <div class="navbar-brand">
+      <div class="navbar-item">
+        <a href="https://antora.org">Antora</a>
+        <span class="separator">//</span>
+        <a href="{{or site.url (or siteRootUrl siteRootPath)}}">Docs</a>
+      </div>
+      {{#if (or env.ALGOLIA_API_KEY env.SITE_SEARCH_PROVIDER)}}
+      <div class="navbar-item search hide-for-print">
+        <input id="search-input" type="text" placeholder="Search the docs"{{#if page.home}} autofocus{{/if}}>
+      </div>
+      {{/if}}
+      <button class="navbar-burger" data-target="topbar-nav">
+        <span></span>
+        <span></span>
+        <span></span>
+      </button>
+    </div>
+    <div id="topbar-nav" class="navbar-menu">
+      <div class="navbar-end">
+        <div class="navbar-item has-dropdown is-hoverable">
+          <a class="navbar-link" href="https://gitlab.com/antora">Projects</a>
+          <div class="navbar-dropdown">
+            <div class="navbar-item"><strong>Core</strong></div>
+            <a class="navbar-item" href="https://gitlab.com/antora/antora">Repository</a>
+            <a class="navbar-item" href="https://gitlab.com/antora/antora/issues">Issue Tracker</a>
+            <hr class="navbar-divider">
+            <div class="navbar-item"><strong>Default UI</strong></div>
+            <a class="navbar-item" href="https://gitlab.com/antora/antora-ui-default">Repository</a>
+            <a class="navbar-item" href="https://gitlab.com/antora/antora-ui-default/issues">Issue Tracker</a>
+            <hr class="navbar-divider">
+            <a class="navbar-item" href="https://gitlab.com/antora/antora/blob/main/contributing.adoc">Contributing</a>
+          </div>
+        </div>
+        <div class="navbar-item has-dropdown is-hoverable">
+          <div class="navbar-link">Tooling</div>
+          <div class="navbar-dropdown is-right">
+            <div class="navbar-item"><strong>Build Automation</strong></div>
+            <a class="navbar-item has-label" href="https://hub.docker.com/r/antora/antora">Docker Image <small>OCI</small></a>
+            <a class="navbar-item has-label" href="https://gitlab.com/antora/antora-maven-plugin">Maven Plugin <small>Java</small></a>
+            <a class="navbar-item has-label" href="https://gitlab.com/antora/gradle-antora-plugin">Gradle Plugin <small>Java</small></a>
+            <hr class="navbar-divider">
+            <div class="navbar-item"><strong>Authoring / Preview</strong></div>
+            <a class="navbar-item has-label" href="https://intellij-asciidoc-plugin.ahus1.de/docs/users-guide/features/advanced/antora.html">AsciiDoc Plugin <small>IntelliJ</small></a>
+          </div>
+        </div>
+        <div class="navbar-item has-dropdown is-hoverable">
+          <a class="navbar-link" href="{{{relativize (resolvePageURL 'antora::project/get-help.adoc')}}}">Community</a>
+          <div class="navbar-dropdown is-right">
+            <a class="navbar-item has-label" href="https://chat.antora.org">Chat <small>Zulip</small></a>
+            <a class="navbar-item has-label" href="https://twitter.com/antoraproject">News <small>Twitter</small></a>
+            <a class="navbar-item" href="https://gitlab.com/antora/antora/-/blob/main/CODE-OF-CONDUCT.adoc">Code of Conduct</a>
+          </div>
+        </div>
+        <a class="navbar-item" href="https://twitter.com/antoraproject">
+          <span class="icon">
+            <svg aria-hidden="true" data-icon="twitter" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512">
+              <path fill="#57aaee" d="M459.37 151.716c.325 4.548.325 9.097.325 13.645 0 138.72-105.583 298.558-298.558 298.558-59.452 0-114.68-17.219-161.137-47.106 8.447.974 16.568 1.299 25.34 1.299 49.055 0 94.213-16.568 130.274-44.832-46.132-.975-84.792-31.188-98.112-72.772 6.498.974 12.995 1.624 19.818 1.624 9.421 0 18.843-1.3 27.614-3.573-48.081-9.747-84.143-51.98-84.143-102.985v-1.299c13.969 7.797 30.214 12.67 47.431 13.319-28.264-18.843-46.781-51.005-46.781-87.391 0-19.492 5.197-37.36 14.294-52.954 51.655 63.675 129.3 105.258 216.365 109.807-1.624-7.797-2.599-15.918-2.599-24.04 0-57.828 46.782-104.934 104.934-104.934 30.213 0 57.502 12.67 76.67 33.137 23.715-4.548 46.456-13.32 66.599-25.34-7.798 24.366-24.366 44.833-46.132 57.827 21.117-2.273 41.584-8.122 60.426-16.243-14.292 20.791-32.161 39.308-52.628 54.253z"></path>
+            </svg>
+          </span>
+        </a>
+      </div>
+    </div>
+  </nav>
+</header>
diff --git a/package.json b/package.json
new file mode 100644
index 00000000..8e2f43b0
--- /dev/null
+++ b/package.json
@@ -0,0 +1,5 @@
+{
+  "dependencies": {
+    "antora": "^3.1.10"
+  }
+}
-- 
GitLab


From fc9268abca219cb22159cfd62eb5e03a6d9d2fa3 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Wed, 5 Feb 2025 16:42:14 +0100
Subject: [PATCH 06/17] Update theme

---
 .gitlab-ci.yml                        |  4 ----
 antora-playbook.yml                   |  2 +-
 antora/ui/partials/footer-content.hbs |  3 +++
 antora/ui/partials/header-content.hbs | 16 +---------------
 4 files changed, 5 insertions(+), 20 deletions(-)
 create mode 100644 antora/ui/partials/footer-content.hbs

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 66bd0671..2a3ee6a8 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -123,10 +123,6 @@ bump-python-deps:
   script:
     - npx antora antora-playbook.yml
 
-  artifacts:
-    paths:
-      - public
-
 docs-build:
   extends: .docs
   stage: build
diff --git a/antora-playbook.yml b/antora-playbook.yml
index 02694ac8..dfdb1496 100644
--- a/antora-playbook.yml
+++ b/antora-playbook.yml
@@ -1,5 +1,5 @@
 site:
-  title: Teklia Documentation
+  title: PyLaia
   start_page: pylaia::index.adoc
 content:
   sources:
diff --git a/antora/ui/partials/footer-content.hbs b/antora/ui/partials/footer-content.hbs
new file mode 100644
index 00000000..21c3b010
--- /dev/null
+++ b/antora/ui/partials/footer-content.hbs
@@ -0,0 +1,3 @@
+<footer class="footer">
+  <p>Copyright © <a href="https://teklia.com" target="_blank">Teklia</a>
+</footer>
diff --git a/antora/ui/partials/header-content.hbs b/antora/ui/partials/header-content.hbs
index da31973b..3907e3ba 100644
--- a/antora/ui/partials/header-content.hbs
+++ b/antora/ui/partials/header-content.hbs
@@ -2,15 +2,8 @@
   <nav class="navbar">
     <div class="navbar-brand">
       <div class="navbar-item">
-        <a href="https://antora.org">Antora</a>
-        <span class="separator">//</span>
-        <a href="{{or site.url (or siteRootUrl siteRootPath)}}">Docs</a>
+        <a href="{{or site.url (or siteRootUrl siteRootPath)}}">{{ site.title }}</a>
       </div>
-      {{#if (or env.ALGOLIA_API_KEY env.SITE_SEARCH_PROVIDER)}}
-      <div class="navbar-item search hide-for-print">
-        <input id="search-input" type="text" placeholder="Search the docs"{{#if page.home}} autofocus{{/if}}>
-      </div>
-      {{/if}}
       <button class="navbar-burger" data-target="topbar-nav">
         <span></span>
         <span></span>
@@ -53,13 +46,6 @@
             <a class="navbar-item" href="https://gitlab.com/antora/antora/-/blob/main/CODE-OF-CONDUCT.adoc">Code of Conduct</a>
           </div>
         </div>
-        <a class="navbar-item" href="https://twitter.com/antoraproject">
-          <span class="icon">
-            <svg aria-hidden="true" data-icon="twitter" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512">
-              <path fill="#57aaee" d="M459.37 151.716c.325 4.548.325 9.097.325 13.645 0 138.72-105.583 298.558-298.558 298.558-59.452 0-114.68-17.219-161.137-47.106 8.447.974 16.568 1.299 25.34 1.299 49.055 0 94.213-16.568 130.274-44.832-46.132-.975-84.792-31.188-98.112-72.772 6.498.974 12.995 1.624 19.818 1.624 9.421 0 18.843-1.3 27.614-3.573-48.081-9.747-84.143-51.98-84.143-102.985v-1.299c13.969 7.797 30.214 12.67 47.431 13.319-28.264-18.843-46.781-51.005-46.781-87.391 0-19.492 5.197-37.36 14.294-52.954 51.655 63.675 129.3 105.258 216.365 109.807-1.624-7.797-2.599-15.918-2.599-24.04 0-57.828 46.782-104.934 104.934-104.934 30.213 0 57.502 12.67 76.67 33.137 23.715-4.548 46.456-13.32 66.599-25.34-7.798 24.366-24.366 44.833-46.132 57.827 21.117-2.273 41.584-8.122 60.426-16.243-14.292 20.791-32.161 39.308-52.628 54.253z"></path>
-            </svg>
-          </span>
-        </a>
       </div>
     </div>
   </nav>
-- 
GitLab


From 409f87bee0c667bf2edb2a25cb7cbc8c2237f90f Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Wed, 5 Feb 2025 16:48:59 +0100
Subject: [PATCH 07/17] Setup search with lunr

---
 .gitlab-ci.yml                        | 2 +-
 antora-playbook.yml                   | 5 +++++
 antora/ui/partials/header-content.hbs | 7 +++++++
 package.json                          | 1 +
 4 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 2a3ee6a8..434f3917 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -118,7 +118,7 @@ bump-python-deps:
       - public
 
   before_script:
-    - npm install antora
+    - npm install
 
   script:
     - npx antora antora-playbook.yml
diff --git a/antora-playbook.yml b/antora-playbook.yml
index dfdb1496..cb8b26da 100644
--- a/antora-playbook.yml
+++ b/antora-playbook.yml
@@ -12,5 +12,10 @@ ui:
     snapshot: true
   supplemental_files: ./antora/ui
 
+
+antora:
+  extensions:
+  - '@antora/lunr-extension'
+
 output:
   dir: ./public
diff --git a/antora/ui/partials/header-content.hbs b/antora/ui/partials/header-content.hbs
index 3907e3ba..a0b60d40 100644
--- a/antora/ui/partials/header-content.hbs
+++ b/antora/ui/partials/header-content.hbs
@@ -4,6 +4,13 @@
       <div class="navbar-item">
         <a href="{{or site.url (or siteRootUrl siteRootPath)}}">{{ site.title }}</a>
       </div>
+      {{#if env.SITE_SEARCH_PROVIDER}}
+      <div class="navbar-item search hide-for-print">
+        <div id="search-field" class="field">
+          <input id="search-input" type="text" placeholder="Search the docs"{{#if page.home}} autofocus{{/if}}>
+        </div>
+      </div>
+      {{/if}}
       <button class="navbar-burger" data-target="topbar-nav">
         <span></span>
         <span></span>
diff --git a/package.json b/package.json
index 8e2f43b0..5bc5ecab 100644
--- a/package.json
+++ b/package.json
@@ -1,5 +1,6 @@
 {
   "dependencies": {
+    "@antora/lunr-extension": "^1.0.0-alpha.9",
     "antora": "^3.1.10"
   }
 }
-- 
GitLab


From 7cdd74fbbe2068cefd5c151d377fac807f6c2e4a Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Wed, 5 Feb 2025 16:58:06 +0100
Subject: [PATCH 08/17] Simpler header

---
 antora/ui/partials/header-content.hbs | 33 +++------------------------
 1 file changed, 3 insertions(+), 30 deletions(-)

diff --git a/antora/ui/partials/header-content.hbs b/antora/ui/partials/header-content.hbs
index a0b60d40..4782603f 100644
--- a/antora/ui/partials/header-content.hbs
+++ b/antora/ui/partials/header-content.hbs
@@ -20,37 +20,10 @@
     <div id="topbar-nav" class="navbar-menu">
       <div class="navbar-end">
         <div class="navbar-item has-dropdown is-hoverable">
-          <a class="navbar-link" href="https://gitlab.com/antora">Projects</a>
+          <span class="navbar-link">Community</span>
           <div class="navbar-dropdown">
-            <div class="navbar-item"><strong>Core</strong></div>
-            <a class="navbar-item" href="https://gitlab.com/antora/antora">Repository</a>
-            <a class="navbar-item" href="https://gitlab.com/antora/antora/issues">Issue Tracker</a>
-            <hr class="navbar-divider">
-            <div class="navbar-item"><strong>Default UI</strong></div>
-            <a class="navbar-item" href="https://gitlab.com/antora/antora-ui-default">Repository</a>
-            <a class="navbar-item" href="https://gitlab.com/antora/antora-ui-default/issues">Issue Tracker</a>
-            <hr class="navbar-divider">
-            <a class="navbar-item" href="https://gitlab.com/antora/antora/blob/main/contributing.adoc">Contributing</a>
-          </div>
-        </div>
-        <div class="navbar-item has-dropdown is-hoverable">
-          <div class="navbar-link">Tooling</div>
-          <div class="navbar-dropdown is-right">
-            <div class="navbar-item"><strong>Build Automation</strong></div>
-            <a class="navbar-item has-label" href="https://hub.docker.com/r/antora/antora">Docker Image <small>OCI</small></a>
-            <a class="navbar-item has-label" href="https://gitlab.com/antora/antora-maven-plugin">Maven Plugin <small>Java</small></a>
-            <a class="navbar-item has-label" href="https://gitlab.com/antora/gradle-antora-plugin">Gradle Plugin <small>Java</small></a>
-            <hr class="navbar-divider">
-            <div class="navbar-item"><strong>Authoring / Preview</strong></div>
-            <a class="navbar-item has-label" href="https://intellij-asciidoc-plugin.ahus1.de/docs/users-guide/features/advanced/antora.html">AsciiDoc Plugin <small>IntelliJ</small></a>
-          </div>
-        </div>
-        <div class="navbar-item has-dropdown is-hoverable">
-          <a class="navbar-link" href="{{{relativize (resolvePageURL 'antora::project/get-help.adoc')}}}">Community</a>
-          <div class="navbar-dropdown is-right">
-            <a class="navbar-item has-label" href="https://chat.antora.org">Chat <small>Zulip</small></a>
-            <a class="navbar-item has-label" href="https://twitter.com/antoraproject">News <small>Twitter</small></a>
-            <a class="navbar-item" href="https://gitlab.com/antora/antora/-/blob/main/CODE-OF-CONDUCT.adoc">Code of Conduct</a>
+            <a class="navbar-item" target="_blank" href="https://gitlab.teklia.com/atr/pylaia/">Contribute</a>
+            <a class="navbar-item" target="_blank" href="https://support.teklia.com/c/machine-learning/pylaia/13">Support forum</a>
           </div>
         </div>
       </div>
-- 
GitLab


From 65e32d3444f90a5be81c3968728efa6aeed6c9fb Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Wed, 5 Feb 2025 17:07:16 +0100
Subject: [PATCH 09/17] Replace markdown !

---
 CONTRIBUTING.md                               |   6 +-
 Makefile                                      |   4 +-
 antora-playbook.yml                           |   4 +-
 {antora => docs}/antora.yml                   |   0
 ...4-f45433e7-99fd-43b0-bce6-93f63fa72a8f.jpg | Bin 26250 -> 0 bytes
 ...8-c0097bb4-c55a-4652-ad2e-bba350bee0e4.jpg | Bin 26616 -> 0 bytes
 docs/get_started/development.md               |  40 ---
 docs/get_started/index.md                     |  32 --
 docs/index.md                                 |  22 --
 {antora => docs}/modules/ROOT/nav.adoc        |   0
 .../ROOT/pages/get_started/development.adoc   |   0
 .../modules/ROOT/pages/get_started/index.adoc |   0
 .../modules/ROOT/pages/index.adoc             |   0
 .../modules/ROOT/pages/original_paper.adoc    |   0
 .../modules/ROOT/pages/releases.adoc          |   0
 .../modules/ROOT/pages/usage/index.adoc       |   0
 .../pages/usage/initialization/index.adoc     |   0
 .../pages/usage/language_models/index.adoc    |   0
 .../ROOT/pages/usage/netout/index.adoc        |   0
 .../ROOT/pages/usage/prediction/index.adoc    |   0
 .../ROOT/pages/usage/training/index.adoc      |   0
 docs/original_paper.md                        |  18 -
 docs/reference/callbacks/decode.md            |   1 -
 docs/reference/callbacks/learning_rate.md     |   1 -
 docs/reference/callbacks/meters/meter.md      |   1 -
 .../callbacks/meters/sequence_error.md        |   1 -
 docs/reference/callbacks/meters/timer.md      |   1 -
 docs/reference/callbacks/netout.md            |   1 -
 docs/reference/callbacks/progress_bar.md      |   1 -
 .../callbacks/progress_bar_gpu_stats.md       |   1 -
 docs/reference/callbacks/segmentation.md      |   1 -
 docs/reference/callbacks/training_timer.md    |   1 -
 docs/reference/common/arguments.md            |   1 -
 docs/reference/common/loader.md               |   1 -
 docs/reference/common/logging.md              |   1 -
 docs/reference/common/saver.md                |   1 -
 docs/reference/common/types.md                |   1 -
 docs/reference/data/image_dataset.md          |   1 -
 .../reference/data/image_from_list_dataset.md |   1 -
 docs/reference/data/padding_collater.md       |   1 -
 docs/reference/data/text_image_dataset.md     |   1 -
 .../text_image_from_text_table_dataset.md     |   1 -
 .../data/transforms/text/transforms.md        |   1 -
 docs/reference/data/transforms/transforms.md  |   1 -
 .../transforms/vision/random_beta_affine.md   |   1 -
 .../vision/random_beta_morphology.md          |   1 -
 .../vision/random_beta_perspective.md         |   1 -
 .../data/transforms/vision/transforms.md      |   1 -
 .../data/unpadded_distributed_sampler.md      |   1 -
 docs/reference/decoders/ctc_alignment.md      |   1 -
 docs/reference/decoders/ctc_greedy_decoder.md |   1 -
 .../decoders/ctc_language_decoder.md          |   1 -
 docs/reference/decoders/ctc_nbest_decoder.md  |   1 -
 .../dummies/data_modules/dummy_mnist.md       |   1 -
 .../dummies/data_modules/dummy_mnist_lines.md |   1 -
 docs/reference/dummies/dummy_model.md         |   1 -
 docs/reference/dummies/dummy_plugin.md        |   1 -
 docs/reference/dummies/dummy_trainer.md       |   1 -
 .../reference/dummies/modules/dummy_engine.md |   1 -
 .../dummies/modules/dummy_evaluator.md        |   1 -
 docs/reference/engine/data_module.md          |   1 -
 docs/reference/engine/engine_exception.md     |   1 -
 docs/reference/engine/engine_module.md        |   1 -
 docs/reference/engine/evaluator_module.md     |   1 -
 docs/reference/engine/feeder.md               |   1 -
 docs/reference/engine/htr_engine_module.md    |   1 -
 docs/reference/engine/index.md                |   1 -
 docs/reference/loggers/epoch_csv_logger.md    |   1 -
 docs/reference/losses/ctc_loss.md             |   1 -
 docs/reference/losses/loss.md                 |   1 -
 docs/reference/models/htr/conv_block.md       |   1 -
 docs/reference/models/htr/gated_crnn.md       |   1 -
 docs/reference/models/htr/laia_crnn.md        |   1 -
 docs/reference/models/index.md                |   1 -
 docs/reference/nn/adaptive_pool_2d.md         |   1 -
 docs/reference/nn/image_pooling_sequencer.md  |   1 -
 docs/reference/nn/image_to_sequence.md        |   1 -
 docs/reference/nn/pyramid_maxpool_2d.md       |   1 -
 docs/reference/nn/resnet.md                   |   1 -
 .../nn/temporal_pyramid_maxpool_2d.md         |   1 -
 docs/reference/scripts/htr/create_model.md    |   1 -
 docs/reference/scripts/htr/dataset/index.md   |   1 -
 .../reference/scripts/htr/dataset/validate.md |   1 -
 docs/reference/scripts/htr/decode_ctc.md      |   1 -
 docs/reference/scripts/htr/netout.md          |   1 -
 docs/reference/scripts/htr/train_ctc.md       |   1 -
 docs/reference/scripts/index.md               |   1 -
 docs/reference/utils/checks.md                |   1 -
 docs/reference/utils/kaldi.md                 |   1 -
 docs/reference/utils/mdutils.md               |   1 -
 docs/reference/utils/stats.md                 |   1 -
 docs/reference/utils/symbols_table.md         |   1 -
 .../reference/utils/visualize_segmentation.md |   1 -
 docs/releases.md                              | 205 -----------
 .../ui/partials/footer-content.hbs            |   0
 .../ui/partials/header-content.hbs            |   0
 docs/usage/datasets/format.md                 | 196 -----------
 docs/usage/datasets/index.md                  | 163 ---------
 docs/usage/index.md                           |  20 --
 docs/usage/initialization/index.md            | 136 --------
 docs/usage/language_models/index.md           | 217 ------------
 docs/usage/netout/index.md                    | 137 --------
 docs/usage/prediction/index.md                | 317 ------------------
 docs/usage/training/index.md                  | 222 ------------
 mkdocs.yml                                    |  89 -----
 pyproject.toml                                |   9 -
 106 files changed, 6 insertions(+), 1902 deletions(-)
 rename {antora => docs}/antora.yml (100%)
 delete mode 100644 docs/assets/219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f.jpg
 delete mode 100644 docs/assets/219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4.jpg
 delete mode 100644 docs/get_started/development.md
 delete mode 100644 docs/get_started/index.md
 delete mode 100644 docs/index.md
 rename {antora => docs}/modules/ROOT/nav.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/get_started/development.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/get_started/index.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/index.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/original_paper.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/releases.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/usage/index.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/usage/initialization/index.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/usage/language_models/index.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/usage/netout/index.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/usage/prediction/index.adoc (100%)
 rename {antora => docs}/modules/ROOT/pages/usage/training/index.adoc (100%)
 delete mode 100644 docs/original_paper.md
 delete mode 100644 docs/reference/callbacks/decode.md
 delete mode 100644 docs/reference/callbacks/learning_rate.md
 delete mode 100644 docs/reference/callbacks/meters/meter.md
 delete mode 100644 docs/reference/callbacks/meters/sequence_error.md
 delete mode 100644 docs/reference/callbacks/meters/timer.md
 delete mode 100644 docs/reference/callbacks/netout.md
 delete mode 100644 docs/reference/callbacks/progress_bar.md
 delete mode 100644 docs/reference/callbacks/progress_bar_gpu_stats.md
 delete mode 100644 docs/reference/callbacks/segmentation.md
 delete mode 100644 docs/reference/callbacks/training_timer.md
 delete mode 100644 docs/reference/common/arguments.md
 delete mode 100644 docs/reference/common/loader.md
 delete mode 100644 docs/reference/common/logging.md
 delete mode 100644 docs/reference/common/saver.md
 delete mode 100644 docs/reference/common/types.md
 delete mode 100644 docs/reference/data/image_dataset.md
 delete mode 100644 docs/reference/data/image_from_list_dataset.md
 delete mode 100644 docs/reference/data/padding_collater.md
 delete mode 100644 docs/reference/data/text_image_dataset.md
 delete mode 100644 docs/reference/data/text_image_from_text_table_dataset.md
 delete mode 100644 docs/reference/data/transforms/text/transforms.md
 delete mode 100644 docs/reference/data/transforms/transforms.md
 delete mode 100644 docs/reference/data/transforms/vision/random_beta_affine.md
 delete mode 100644 docs/reference/data/transforms/vision/random_beta_morphology.md
 delete mode 100644 docs/reference/data/transforms/vision/random_beta_perspective.md
 delete mode 100644 docs/reference/data/transforms/vision/transforms.md
 delete mode 100644 docs/reference/data/unpadded_distributed_sampler.md
 delete mode 100644 docs/reference/decoders/ctc_alignment.md
 delete mode 100644 docs/reference/decoders/ctc_greedy_decoder.md
 delete mode 100644 docs/reference/decoders/ctc_language_decoder.md
 delete mode 100644 docs/reference/decoders/ctc_nbest_decoder.md
 delete mode 100644 docs/reference/dummies/data_modules/dummy_mnist.md
 delete mode 100644 docs/reference/dummies/data_modules/dummy_mnist_lines.md
 delete mode 100644 docs/reference/dummies/dummy_model.md
 delete mode 100644 docs/reference/dummies/dummy_plugin.md
 delete mode 100644 docs/reference/dummies/dummy_trainer.md
 delete mode 100644 docs/reference/dummies/modules/dummy_engine.md
 delete mode 100644 docs/reference/dummies/modules/dummy_evaluator.md
 delete mode 100644 docs/reference/engine/data_module.md
 delete mode 100644 docs/reference/engine/engine_exception.md
 delete mode 100644 docs/reference/engine/engine_module.md
 delete mode 100644 docs/reference/engine/evaluator_module.md
 delete mode 100644 docs/reference/engine/feeder.md
 delete mode 100644 docs/reference/engine/htr_engine_module.md
 delete mode 100644 docs/reference/engine/index.md
 delete mode 100644 docs/reference/loggers/epoch_csv_logger.md
 delete mode 100644 docs/reference/losses/ctc_loss.md
 delete mode 100644 docs/reference/losses/loss.md
 delete mode 100644 docs/reference/models/htr/conv_block.md
 delete mode 100644 docs/reference/models/htr/gated_crnn.md
 delete mode 100644 docs/reference/models/htr/laia_crnn.md
 delete mode 100644 docs/reference/models/index.md
 delete mode 100644 docs/reference/nn/adaptive_pool_2d.md
 delete mode 100644 docs/reference/nn/image_pooling_sequencer.md
 delete mode 100644 docs/reference/nn/image_to_sequence.md
 delete mode 100644 docs/reference/nn/pyramid_maxpool_2d.md
 delete mode 100644 docs/reference/nn/resnet.md
 delete mode 100644 docs/reference/nn/temporal_pyramid_maxpool_2d.md
 delete mode 100644 docs/reference/scripts/htr/create_model.md
 delete mode 100644 docs/reference/scripts/htr/dataset/index.md
 delete mode 100644 docs/reference/scripts/htr/dataset/validate.md
 delete mode 100644 docs/reference/scripts/htr/decode_ctc.md
 delete mode 100644 docs/reference/scripts/htr/netout.md
 delete mode 100644 docs/reference/scripts/htr/train_ctc.md
 delete mode 100644 docs/reference/scripts/index.md
 delete mode 100644 docs/reference/utils/checks.md
 delete mode 100644 docs/reference/utils/kaldi.md
 delete mode 100644 docs/reference/utils/mdutils.md
 delete mode 100644 docs/reference/utils/stats.md
 delete mode 100644 docs/reference/utils/symbols_table.md
 delete mode 100644 docs/reference/utils/visualize_segmentation.md
 delete mode 100644 docs/releases.md
 rename {antora => docs}/ui/partials/footer-content.hbs (100%)
 rename {antora => docs}/ui/partials/header-content.hbs (100%)
 delete mode 100644 docs/usage/datasets/format.md
 delete mode 100644 docs/usage/datasets/index.md
 delete mode 100644 docs/usage/index.md
 delete mode 100644 docs/usage/initialization/index.md
 delete mode 100644 docs/usage/language_models/index.md
 delete mode 100644 docs/usage/netout/index.md
 delete mode 100644 docs/usage/prediction/index.md
 delete mode 100644 docs/usage/training/index.md
 delete mode 100644 mkdocs.yml

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index dea8c88b..3b0394fe 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -120,7 +120,5 @@ Enhancement suggestions are tracked as [GitLab issues](https://gitlab.teklia.com
 - Run the test suite (including a coverage report), through `tox`
 
 ### Improving The Documentation
-- Create a virtual environment, you can use [virtualenvwrapper](https://virtualenvwrapper.readthedocs.io/en/latest/) or [conda](https://docs.conda.io/en/latest/).
-- Install dependencies, through `pip install ".[doc]"`.
-- Visualize the current documentation, through `mkdocs serve -v`.
-- Make sure it builds without warning, through `mkdocs build --strict`.
+- Install dependencies, through `npm install`.
+- Visualize the current documentation, through `make docs`.
diff --git a/Makefile b/Makefile
index 3dcf7922..9c7b04b7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
-.PHONY: release antora
+.PHONY: release docs
 
-antora:
+docs:
 	npx antora antora-playbook.yml
 
 release:
diff --git a/antora-playbook.yml b/antora-playbook.yml
index cb8b26da..28f2e2e5 100644
--- a/antora-playbook.yml
+++ b/antora-playbook.yml
@@ -5,12 +5,12 @@ content:
   sources:
   - url: .
     branches: HEAD
-    start_path: antora
+    start_path: docs
 ui:
   bundle:
     url: https://gitlab.com/antora/antora-ui-default/-/jobs/artifacts/HEAD/raw/build/ui-bundle.zip?job=bundle-stable
     snapshot: true
-  supplemental_files: ./antora/ui
+  supplemental_files: ./docs/ui
 
 
 antora:
diff --git a/antora/antora.yml b/docs/antora.yml
similarity index 100%
rename from antora/antora.yml
rename to docs/antora.yml
diff --git a/docs/assets/219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f.jpg b/docs/assets/219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f.jpg
deleted file mode 100644
index 842a440c763f27636ab7927415f2113e9432d95a..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 26250
zcmeFYXH-*N*EX8ayAXPl5E4r0z4w-c9(t4BdlN*Zccq2Ur6-Wkq>2=!s}vO}7C@vc
z+<+88MTEoseDC+Y=lnQ7&hPWi>^;WZYtD78wXQwJ+;guzbM^6R6~KrxKpFsmKmY)E
z{R3RB0sQr2{Ja4GBoYpw0ssJ{01%M$U(<h1X&?!J=~|b$KK;_k{?n#+t^X^Qz9Es(
zUV(vNDG6CA328Y1fCNBNP5mDqed=qZ|I+3D1-q83HR?0p$jDF?Ny*>{2{+FWcP|N#
zkRZtzw@^uG2`Nc{nr2L>n@50GB-q`{$1hkN^1Xi$0`~J%huF%SNSTD{dinYxV#B>G
zV@<6*Vgo#sJt3MJV6_;Pn4r)guShp=Oi*BOgi4G$<Uiml*ZRNBk`VBJTp|P1A$BI_
zVBL^#FR+}1oP?D4_0n(4K&~;uJ-t=XdItZ6aDAl?`LC%)M@LIU%SwcV`$$SFD=SM%
z$w<n`h+lh%N8AaHbc+!Wju80Y2zp)-9^rnWk$xe;;C~U_+(V)w)gjm8|7T)e*F^sx
z-2Vp!|62aT!T)xR^z;6IasAiwU#|c5QZe_7@d~ul^9%9{j<_CI9U>#EC<_7qx6S_l
zW|Ngtf`I=;P(cQ{`FPp-c}DvF?;ii%jPUaF@x7k+e|wvS`*~e2_TPo6L!=>+*NjR2
z7iIsd|K|>9fd5DShk^fL;C~qS9|r!1f&XFP|9=epcMj(je4V&OUuS4nO8{K}DGABH
z$F-1MALLZz<YZ*zG?bJSRCF|SbhI?IwDb%hMtTM&23lH1Hby397FJePx*O~qY%Clg
z7FL#jL4c&!eaOhE$;qi%=xOO${y)pr5P*pakU+Xf3WNYin1G~Az^gF;&voKTLJIt^
zEcM?^^*ZS#C8MAur@FT4GG1Sjk&}{=kdu&+Qj%Tg#w1MFW`1%CkhC_Xg&Q*@OeT?v
zMJK-o-OeiDKFlWjjvXqe8=kag*>QLzs26b!!U_Ce@-O)R5+H!&dWdV27}NFWKr#~G
zzs&rXjf9Dx^cvj4EsPnGD3gDvgRXhkKD>6d2%saq?#M*S1keQh$wmO7sq{_-K$%l;
zl1uReXSk6tuPOy&njO{5&t+ylD#AI?5}J+xUyQ=YsXcdqJ*hAqdnmSjDF&&HB4h3i
zYSnO$iPUd0Yg5(CxZ`>^^&&kZeHMM53#(zo^P>bp#p<~CXpNXVuK-<eY-t&mVrhiX
z=*m?MZZki>%N(Q5$O+D`c@V-RjJ~Y+RLz-Xx)&37<c>f&g{*JOCc=&D#F`c_^pH*q
zGR;cQXI{a{bNh=jJTaz^{T;jX$g30gy`be8dg90mX}CeDAE4eNH%voKPlq)!6{I#w
z6x$_FaCOOOPAViN&VQYj!Q=9xq)6Qu*=W3<uCPMeC^oj`)3o?cVVPp%HC{*sG8w}I
z_f?9a3dyCv>*Ug4m#G(M<VPB>$}hHIR0n_Bx>IW=?AxJ<ZR=!1d&AH9LD=T0M`AQ4
zulL^hfeUsBBHuq%;}!W|4kuwjTL-4!$r;fgDiz0dF{)@aeb5YgC#d&3nH`%-E`-&J
zRTrn4OW1?l^=g|(F0wm@4KU&`TAjfV>tRQsd4od7JhvYenh4)Wyx{sq30g-Ra>;Kr
zVUJ>xFn9)NaOf7s9Jr71-7iIcC&Rf^9x<jdP#Gppi;%i6SJ>R-no-y7NM`TUC8}WU
z%A*%K`7D4E_K!h>5SxN1fMysFX;TDM{@A5#sS?dd#E$>el!iP3LX9NofBSm+lkR$e
zJfKYB(Fs<J`w;>9DK;b6wt2_9H{T&oqsY~JO;`0Z1>LEI2VakoPYY9%8+Im=<8;YD
zE#Frdg)?z%COv7i?ysJGC!ejY?~efe(4vKRD7AMfYt0xbjv-^cRH`E44tGquzVEMX
zSn;s>3EBt5FcOP*`mz$^++PEdn8I(zkW-h7Ydb>slutNob*5B?fu9cCK1ezwhH^0h
zWj<x}>@a$X74pui{!EpGa5H%jNIo!H)zuAUAYiAw&NW^IL~R?M2-ZAH7)sG5e^jrC
zMyG1|v8t{n<^A$8DN8>8K>deEM}E56HhHB(G7~T*aOgX^E$^=+TIi=Qzfytp!(?_V
zpVzyI!+@Z{nCO7rF1;O;JZLknbw(_&V+NZW!N+KY0A^esE~4w*|4xflvl1n~f5{*;
z+h@=gX}g1s+V}-*kJwU#64bi%$Ug7QSlA=#$@L1tp0t89eJqzcGs3%N{kZN9pYqsC
zav>q@kvL;Tq2X;%OgN`8$mz}llEk|t?%j}S4hF4t*_UKCFnFzn&J;AQ%Ygxl^Y1Ai
zF1jyel((!XMbh1CUH!H|o1t2puS=^yjFCgASDJ1pK{rENPKvwuE`18}_l?Balomy?
zvn~^e9hAf@gT^L15&FR45Me=A`}VKYYF+{XbU<f^C8GEr@U4geAa8(wSB2SVjS>`N
z&|dBYDTUb>sfV?Sk}G1G#}hX<sES<ozD|Py9C_q_Mp6hQ&UzeoOp&nXz?dLyzuBRi
zM~{vbpwAGYZCV0h^g96Rc`~N<{*0VLru~>1axN^jm|X;wJTEex8nPd@%^wBg%upSl
zVP(mqYExe~V&+NfIdnVC21^gRQSW9t((4r^Cvp@rn%9w$!z7%4OM^PJL($8lv1Jc#
z-Ww`HjoFz4do~lN;Wj5?1wBmBgxexvFEcEqVbCPWMff3{Kqn7c{yr^?&fxDk>ay#`
zr>^`NL#P-f)osfSNU_TNz-xd0Gi22t(p@{`L_({uKL$t8Y0tmRu9H)+loJt&#2D@I
zW1b<!HV^ddItl$fL#e3y<NOl&?e|w#`)E5X!!O-f)GPP@j0*RVjl%q9bP_+;Y)+u?
z3G%8vQjFQ;rX%mA{bsRJ64WU(Z;%>%zM{kU{K(~xJrDF|YpGx=ce>Z+Blm92U|!kI
zx@-;=2^$n8U}gl3LGw=Zx5%W?jvi+Ta&R{~g!)?JDg~+=>Tsx&pa4$v1wtvzX7_02
zOz4|18Z)0<%`J()k7!u5?JtkqIaQ`KmPV%R1=m)lN!yauK{QTDwYO%H&M|p=$`5{g
zJRr4S&m##GTVjjh`liK#D{uv@ey0<ZLCn3*NSmP+vW!VpG3<#L%5-IX5Jr4)1z4cc
zhNY&x<piA3-F_Os2r$61OQ;SzXT`t)+1X!8QbaAHl<rcwVxm0<rD{Yhd&3nXSbyl?
zK>MA6t~E5_MtRY`k!07;0meLl;OOv*I1b{xy!1!cNTT+u*_amk06;pXkk|F3h<93z
zv9CfQ{0?JH$bJ2E-rCsUdY!H#;@S&r>fNl2?$z5mtiAUggVsrtsG*fxtNq|7Uw@_`
zB!E8^W)yT2?3^xi_`QNxaltqFwC-~Mj7g>0k7>zAmRLC_pT)E(x_^FXD}hbj6_vPs
zx9b*G#fmEQMhwO`lswNI=o&4Z<C9?4A#U}AHa@CEPTNqtPT3tJhI-GaQ<Qwy(Ty(C
z^+7Ws=?cK)Zk~TvSWXY0+Y_vmL*$H0s(_c^34c?xu(dm|iEUAPG|y9?px!K)u*su#
zDZl>Z?_9&sPbHW!jOlAuf}dS9@X2>JiIGWQv#J&t3))*hY-_i7(81C&kcqM`c^}cG
z8yskTCnHkLN8eTc&cTG3MValc>ghy+&>R(Gb`1D1d=KNpgjy=sKgksD+P#$=$#r2c
z&vdtsV`w0y_C^V2uA0$Eagl}WzJeyO1?W4Em`7X|&@_15*WM%Jbf|}!z9Q|-i~*;G
z(>OrTB&PWu+q6!?KmujlA+^&pm>9ZcktKn~^l{Mlh((5k`vW1JBbnab8^{wjh{<rY
zK-?h|tj#Y5B%M}B1vqEjeg2ND#gsQ;RnMt5Z>$hVj|ODRG}FI~vG)hknbrnF$UwKo
zxV0AWQ0C~+brt|RBOIi-&1OG~G{27F*^<P+cEc)eNnCpl{)GyDb0&rYpH*rCjrbu~
zfcq);_7_^KqiQ>a=C@;`EIpU$L1V*uh5oIxe~9{GJkBYRX_e<e3u(J+_NTwtpag(x
zgJ~g@d0UxsEmvcma|2hRAE#*Jf23{+Gk(;5P>S~?aV*N|<Jb}DRs51MR$U{+M^f(p
z5&_OjvsJH7U`ax8Jo~uFr19A^&-$YS4X8V52p};6<|Rd-S#-k~Kvt>HLnSF<o4v#Z
zkOlxd4O}X*p=y^Y2_AiO*w~M2*8q#?krEQO$)7D|E__~PNdoN(zHf^F`u$W68)e`x
z*mp(qP2c(WLS46*P`|Lwit+WErIz&(gHit$oN*2J;T1uAp5TAA0yfQ4?KjL%^xl({
znve}$oRFU#<we`DaqoG2q=oLEC3OskxjBEpUFglOss<1DYc051$7f`UX<1p>NK+$%
zsoFC#V?kO!H6sS}g~23<2;a(@r|g<+OwADCD&ZEW5sdyk#Zg>E|Itzjykw}e_sBP^
zgei2WM@oh!O8Kn2C&nH#nWx*1$PnCR(B;d><ac-=zy4BR=Wbr5mO}dqmmPqyP&QhU
z(gIw>buWw##bN|-d|iu((bt2KJ2>lQe_p5eV_-qw{{28FbqZuKG}3rrv@~F4Bg0sj
zuBiUSHw3W*<?SDLh0;5eZ}@^c;+piH<%6gc)xyZbyKglAO*^7|V*e~xb!0+gmZW7<
zZ-A12MsD!CFLcLbf1MZhsrST#Y6G45hXY+pR&9j{&+l&WO>(3+*xx=J@$*ta#T_=<
zG<3?haC)X{gL}nK$c;%0jz<JAnCg%=CYYo{aPgn96pOH<708R~olk<R4je+uf7<@8
z8xIVI++75c7!vY7SM!S?El;LZ?2|>X7ZEf%SUkI1mnap9B_YSLH}y&0km##)9W>f`
zB*nI7wAkp*!!B^pJ=tKl62TYWKk8#<XgiWSzxbBv4}pJX8nUt3;C9_HLF=4ajnkaH
z(uof}td#H4S2?@b!Z8@F1)0dLlp28v=~0#RUzitC&Ezo$){~uKfzQNJcy6_O>*0lq
zxR_2wB+s8bt83;LQ4xJ{qdJ3*wLPRL>wW~mh#Ecr)AOSNgXQ>c*m6&r?R#{YN?D&@
zM7^c(3L2Y-G8>ES)AhH^|Fj`4?^@<Ti83i)XMs69U>tw`P99L@+s^$n4UR%{E){?v
zMp!IN5B6nLi`tWdJ&g{%HmP4S1lFwLF(3k+hS0hdXC#7~y7(-sP3CPHfKeyIQQ6)*
z?J~5p9uzdh#8&gM&<_y8n9v>-Ksm_B3>8rbTZFBW+ke;x?BCA9n`)QzYB4oLKg5FZ
zD6%ADlIjXWL`1KYt=9}o3Q@o!{(<2mb~!5+L5F8@2R&hDxd!9pk8-{<ff*+O?CT<e
zpKDFiFiAwmq?iG1k(^=YDXkm(5+KU6G||ZN?w@KIv$X&OIPLiiEWajoq5bYKGw0Ba
zCx?*$B%KBX!0A8x{)Pt)2+NEk`?b!~yrY_t=qDz=G2lWXz?z*9B2^6w&ZAaeA4f6R
zVIZ;Yo3z=}bg`_$<f@E@C0Q_V;>0?SMmZqJm7iva?)A0+I3#F1yNnSB)1uFfgUp_b
z$GKV6Vlg#~f~kNBp@^pfF;+}sEIWW-QfnxZjRS^5kX_!M;v!8eIkhwj-Hf?@HPXi7
zs=|Z<g4k<rtx^G^W4!uIQu_xSbtWsl>%l$Ij5}m5o=FEsk2mi80ZiFF;Y(eAipx@2
zJf80mZ2BzMFZJi;GjXqAvCk>~O_-OYVh<9w4$PP+-x2J7ZF8b#6PP=XX!4_k?Wm}n
zQL|%w17bVR%SgJ4zanDbC8cljBHMe@NDccfH1DrYLBTG&q;ZEU==@FxQLAAQm?=te
zsien2n|;=#<ODQKYKyDxRuzOW^ri+**v1r4K|c~zclMbS83I-H5`>W2u*9|6T1-2>
zYT|*-7qde!a3A3G<WN2^u$d%J$4)bh4lnhT;db&V4)?c`_NbqhgX*z40^NE^MuutA
zLOxqQhK%Uo^Dr(L2$_)4uH?rPu9{&7AfVkSoD9x#e>8b*MG`22w9r+!7^90T_~}b5
z#DZ+yb$@4=Bk1z}uK!3O0rq~UmezvW?eafaq#TAM*@3hc8S3gM&XywB$oSphf3;5N
zT3{oOiZ?PqUhXhI)9sh-+w2Qq;U$FXfI<pwvXA@BXdX6Oy?;EUTHlWhSFG?X`?=f4
zUw_won<BB*U&<Z8;fk8mY`&Am>m(S8&oz3l4VUYJ$-t%XN{4!5N8o+EK>pd<1`9U+
z@eRMc&bi&B(kkRSWvv`b-dgQUYHn=sT@H`_a#3%dar-vWVxr6f9?3(m>#I^iGVQ>I
zGnHY>^ryBrB7gEOTlY6s#UEq!`g@W3;0o;oELq8z1{nk!&^JKa)V8QtDW{{l*nrDp
zNVt7Sh!6KTvY(N2la~%<|0s+u$yV%XTU+Qm#vU5a?w_gXLIVDhPECeN<K6HfgoK!-
zI7@Mse0*=vm&%Up&p^Mnr{a!?&yCv&#E?#0B^M2vW$TsXAR~SjFJe;ne4idp!$Oj`
zix7X6hUDVVe9J`l)_PR9;k=BaddC%YSN~88g=y2#4t1a|#p}s3tuP58y<xf$!%5YJ
z#zFxhY_U+g(&rof&oE@vbAbC30zoxe;Jenx%BcuyBW1ANihN8pU2md{w$0v$Y}v*N
z<B{`#;sqCn)Ol-DL2F)?l)~4<vau8>?M0=)!W-rO8x!`odpO5gyewwx95VWHfg(JF
zB0#kCn?4`R5zkU?{Gvp7p69wKlEai;4kR@hZUoXi&tezKsQLRgsrBKllE~IUwsFmk
z=JQhP-by^bRafd?F&Si^7npA$d|NGk{LGDjn_#))!hk9LSm{YLp(8qCipkmz&JCAe
z-g)eRGk+_CKvIX|WpBVXwt`Ei#?Jrwm0I-(zDZr`BN2eFE34M=HZ{7AW|&RFSy$BD
zbS3T!-&mHNqs0#)<8XQj-3bl@YVcj(ojGw&iuX^Vnl_2kvAtC?vtCYuK~C?8+Q?!-
zRQ$GSN$o40dj*$&>)y|xtzK$(vggE0JTI5>rncLhDV_gO2(KQ-v-Ww<I~hN6T*cia
zvAUBZvh>8a2j6kR>vR#%G+mfzE!&tvjJa2Q@r(<dx1*Esv;0qC=Yt?#ki)a)t^O{z
zHLF=ak+D);6cCz(i~=DQS2kY+u%x?cYqF)Pf0D7OW<j6_eI-T_y<+dPwqFa`J{dh_
zq7`vNky=<%wN5_7CPg{sxFJ6(iQP%wr}`PRO<lwiSNCb!7+{4>SFd|9{X0BY-t{!*
z7EH_pN6^-p)w+Ak!zM5gyNTm>$mo$i`D%B*k^3mp^Ub5Vsd4G!NNXG7!!nq29<qn%
z<I&}3c(;{a$ll;5Ce|?}vRTl;5wEXCJfUecULZyYK}m){f%<i|$XnHggdOov&EL6I
zgS0U#v5yV>M>|_kO}n9eYo=ML&BiVS-D%UBAc3VPZ@CvtZiVA_HCXwT+y%Ergs2N;
z{CU!YG-}NP_Kx0Id?IW_@685Vtml+Jdlfcjl!SQtD^bK$D>U#6lU&{|HO-CAt{@IJ
z_fj&V7L>AWKrU5<N98VsFH~c=lE38cfKSIaY6Q(Z{dhGdb=mp*>d3ASo%Y^4=TQJM
zWL?yVEdO5D9+Gm*|C#=-1pS;^r*+PTY@{WUU!+(}WxIJVB?p<ER2xzwJnb|@qAkux
z9u=Xw!Iv4_VX=9^>dzrAcLFyznOXab-2KJy<>F<X4lUY#E!;h5-1U!7L+L>|;f^8Q
zGz<2`ck{#KDU)CfS3x-U>la5IX+F@9r`#`73p@62+72CFKr%_3)#+8BlZek4&MQFr
zpO-Bfco04*N<?Qmktax5;i;longcOWq6lc{olIa29Ii%*lz>*#NxsJ9$mMgeB&EvO
zwV8g%NJ&>~_>rMygJ&bpP97&BNw(IlUL}yI51uBClhLX{yhT69`*xX`v(m$ngN%LT
zi9c}M6gG#$Zt#!FS+jU@3`Yi(=waQ>9IhvlC{Me$;@&9&=hw>9u-`D>rL=%<0Zr29
zkV-jjW4uan+|WN?MSC^zq~Ucu?;k*XSWYFyJUEjTt=#4P%6d;Zti@`x;CCFT_!S4A
zCVaiWfiyg?px1DZ{$gURbLXkrP?3(aV%W3|M#u8|?JkE>q$Or3$Mh(xwnJKQa*#2@
z=-I1~MP3P7rk<FGG)_f~X!jCEpgjpkLaoEt0UcnXm;dPjgxx&ub{dAHcjcw|GH<%n
zD<m0fQtM%XIY=>Fp)*6H(Q7g)|E>Z0Oxq#3h|a_Ijr~D79I2NN4;FYZ552UcspD#x
zCs2wh@y-Vdm`{JU9h1IrkF$RjNf>MQ^FlO^3z7JS9&5Jf{$uNzI}rO>Wye~<>d^rD
z`Mv1~nb#2(59V`Es8wtDW^*Q(X0h?Mdrp<6taDD5O6x4Yw(k;{pRt~I_1;Exqb+aJ
zTmjCTcweXc9><oaH4&75N_Yk;O&YMLvA93&YE!R88=(qtj?2NH>uOG!6<$qnnz4-U
zG85%GtfipbghLIa8<*MIKa%cBw`uKjw@L8Z#-nn!)~u>iMPIgzo-d+rpRl;d1Ul~U
z#+gVJ@MpM^Qb^VKyR{>><@W;AXWUL*^6<><f#w3Xo4(Au;{2c9yPzm9YX#o!Tmj6t
zzBWGc&$e^+X0}hUqKPe)k44NIRF6ZGz5lK+o*0<S3(ae*@@05Jod&1%@Gmo5!qE-T
zGl7yf>`;v-q13@t+mJ|dB|1a(^{%7_cw3jl&bd>;m~bl9d@!vXvp7aK?}l%X%1p3s
z1Wh3&SYl&9PD_WmpD8$qfpuSS>~N-l(J8ou!~tLa7#;2I&cEQvW-bUXt0mss{kuK{
zS}aXF;I|@4509^xoq+Ue%#(}d)!&xdgy}kG)iUivJ9483Ao7BK{iLJzwRXd7DXhwZ
z`&|_DAAd_VMz$Dte9j1?soCY%hkI;f1TlVNC~pfWmZph(8t-x6LZ53JfPRKDJMa0E
z@l3m$5mWd4EFod+ad=RGZle1aTZL3+_I+*!D#mZ6IQF(UcLc(#-Tm?Up^iKr17^ff
z-CX}ON{V9?iysj20W*tde}8d=#{YR#<+rjtp7CDMOVgLEn#8G4^Ho+RE5Y96EeZTR
z4!grOb+#y0f+S1n{+%r5AId?9mtrItLM4#D&2XiL+QE9HntnpFTgIzX3uzmdI`v-_
zyqf`#2}es=jU}iPS7Q>0S=Vn+V62GtX*`pg8`Im^?swCojWJF{YcH#hh0-4kT?D5v
z4e?Jy#km=b^K&d$!#%gjW*?zkeg{}{W$F!y6|Su2hYPq#igjwfT49C5JSo~DXu6&>
zBn3~-vx@&6`jsX#0qBn&kohD=+8;uffCyhUP)J0XqcXYfaond7Im@kdpxHjKPaB!x
zMWV2h>pT!+n&L~lPNw!OlKNWTvs59URsCZhPRxP9A1WF|$tpJk&{sV><*!O5L!#94
zjvyL}Jluo9xS7{=orgC;tl{(KO_<L$c-6iG;3B6=7Ii3P22Dln8GLo0oUxIs=sqrt
zHKozv&Hku?gmfm4L|R9a_9H0LYM-jzlEnV)SSU5$gV@qjqoPO2+2?2k^R|&!tXEay
zr_w(!*4T;u>dYg38ks1~DF9)R!FheGle?X7I)bNV;e7g?O1==(8!n)RueU-Xuqk}=
zc>gx;pVD*aJZvvJWOKZGc#b?j&v=Y+HN6V}R3!{DqS1Nk%O_0!-4#C*GRzBa-d=&D
z8R`z^=^q&AIMMF8fjE90f}0qje$h9!MI2ZvjHzz}x^5+yuc~t3rl}=EuRqr4rJ&UH
zI+AjWd<D!EBO3xTLY(2MJ>2^=-XOcUjgqMtIT4kLsh<!k;Gpe3@Kq>f_WDpa1M?pP
zP>0x~LTZ{~&wku(sjkQ`#SNhO7HtI@iwFi#9j@(n0F`GA${y;N$g2gXHIGzWIaHrC
zTR6N)DwErrUUHvb1lXfGmSIjMQP(s$#ux={u)7Hc?mQq-Vr(ob1T_Gj946eCYI3>1
z+N-TVnc${XG6^)j6XS}7kt=>$uBfgxW)qp~U0Kp2-Gu@J?u{hzCaSj0R()q9Pn(Zf
zhs)U%G$&P$KVGM?!HNvQ&bp-BtDW})Lg)!lHU=eMLe})s&a6TNlhAGG?erOv?&u6+
zTn+zJVq2QV^uq^s!56x$j75~+sdw@eq1kK3ZLK4GNuqFA{BE$Z9Ttl*d3l!jgII8&
zVQig&YQA`SQHzhw;dw)8q<;2r<|M_bLfc>~U8<lc1#dIgFeX{w21F%DMjm<$y}<~&
zKlPXLkL{h84Fh>{Wdaw(0V=Q5KzUl4yIC`M?^Kz@`>wlWWu<uLV*{76>IB;Q=g(l7
z0$7x4Jn@gAbFmm+6ah{jWcs?b*Q;L?>TfKQR0<LiDcG4RdLZ@vWI}Dg$HuJK$V58A
zrZgOn!!)>pB#_cg9johKvTilZdkz#m6riRq&E4qKr<Wn0L<iz~j(%o;!QGL(B-y#V
zkt94NyMs&A@Whg(PAj4ZmcKu%EBVncKDGPO@~E}WJt=S*o)f*<aY1}srz1w(`7HrF
z_IUp0$BtjIR2~-0s_DHgPs|`lzA}9BT9wfI;CTMWQ&zPjGv~}*<B74{KhI<gf6qN1
zYCEo3WQVRrJ{{Y2vIeQsL9%RXQY?vF2wj<mm4j-I+a^CPazxD(IsSPO^%CmVxjixL
zBYE6y*Noc%eZ!oKY^6H2x}S=mxT(%V$Vc9#xpj4An$JNTyzt3Nz`;c!-luy7<DJr6
zXoRc5H2(p`5dQb7R#BD76gg?Hcdjhow9+m6kzHvXn^sdkU6{(YmDw#UH2(J(b>zc8
z>hc{JgZ(k{<Q?XHz)gYasnW+3uje6;$;WoW;I|q2{A!HXMxVS<lKxhv&zXWN!~_wB
zq3g#T7JhYL)AvPl>oh}TNexG=WX5_OK(b`_cTG{EYO_E72!Gg+@5EFY*n{wZ-3qBs
z!iY`GaE*PWV8g=R&&)S1_*cJ85LbXKX;0QTFW1tT`%0n2e}ZzUc?!4+HK<Kymm%f+
zp4h2z*@|!ztMf*@zYMZt_v=lLzaq;^(-G78l4_5YKCH*cO^SJ_Y$R%Kup+s3LhW6C
zhfk1aC52Sq`jA_v5-|ITlka4qV0U}{<2D5MdQ4h*kKMeDYt$1LN_M-<oNxuugi-px
zHLCx_P~6b#P)uDQH9xI8U1c-aS{weL)jbzlb(USO<rS#4bzp4211h|$lbhIODdY4O
zaqdUq)r@!CR(m^jlNMj(%5i6#QxuBL|M~DvxV4y&1-Vn62blgqTOnrFRT{uJ?HGe|
z_NTF4KThwgs92LDtNuLnr6dD@O`?k4)G{7R-2dFOPgBb^ly)hAJ#9LoEk4&wCs-3~
z+~>?q6nKUIY~Q5G{bqC=_m(TgY3m96qXOoMfBb{zO|K_@%iIE{O4?{<7eE}K#U+`J
zB3iVcf8>_Dz7YQ*lUr|ZV+yl2?JSYzM3!0+CxGEVzGKW1!)2Vn#5Wfc>4*Q6*e>uK
zLtl@HZsF~`sEz7#EY7WfAD=8QS4`jMgrmJup^W#`BC$_i?z}wNgR~sDwG~Hp=BR4_
zNXn~e7l;^x;0Y~yQuIAB7<3n|ANJ<m$cJQY&{$VfxMECqde0!+LmnegemZ)Z&`2mS
zGfnB+HnBsQt72|vvjhHlVmjTM&B4Z*gJ>P|#=u}e2XBRQ7G2R`IzJ)k4h6dpisDcI
z&6v!l#p)q_5OQAITwr)b0vfn^a>TltHHQb|r^Z2qETNV-BKR^-{0cDbndynKqs)Rg
ziG0IzcFW>lVahoRY(V+P#@@}X53T^n004J@LDRh0AH%sl@|aY#wT3?yI)_FLOqT9;
zz^cQMe|vJxz1372F^y63k(tMS7;E|P`XnrX2NQQw=}hOv;J%@})mNG_*$i`|^54cJ
zCA{tx)bVJ%hbL>^u)!Wj2)D(ZkQ9T3x#b$`q}Rmh-ZG!BoxzfL7=T2?Yq`GC*0bUC
zrpd;=5vd96alLU%S?NMSPH!r;C(apDMfzLfPG-7mZIb)f*2u|5V#>g$^c|y5)ZApb
zd@)mHrCFHTybEjr&(d?lhmh6kv{?=gQFBY)Tq#gqW^6hMCzyyOW&T@-0B}4NyTzIm
z_~zbdV((vbP+LpE>VTErGIxS15Gafw>mQ5(lVPaBwgGU*>><7`IjwShhUrEkkB7WR
zOoMH^|3w3~oz?B5KL3xSd*SFP(47X%?fEb0r#b2KQSRa45^_?RE+ymi@Hu|!c2NO@
z_x5)Vb=c09rb&k>U-BHLmtfXpDeq$Ssj+W9?(7$d@YAg`Wp%X{Y};~a;;T_9B6R~U
zuLv|-@fI$Gi565(#8J|Sh_Ad0b<km%=n8WgE4F8!bLy9PK#Th7EoQxGfzrCq!`dUZ
zmJX{<al#|Y$2%!CJbuNSpR6BdHM`qz7j`_Fur6uddTiWRmc2DGzc=9{5Z!e*Qob~~
zF+gumKBD8jEe%>zQJP%7`&CGzx#lq@o{;TarW9w)sVlVL+U0E20w3>&;FZLP&{w#!
z>#7YEqE<x&%POY9_uP`@O=kd{f)%@Ck+sgH<!qt(Q$>5l{pI{!enky|IU@Zd$%&GF
zDbd}~)-tyrzoSLnX1{I};>u#7gx!*OtKU<Kvx;!lJ;mIJhC95RqI8v|-7XblO+H+G
z=L=VWuW!tINfSr`s#K%6saY9$3Run?=JtER_p1B)7NFDx8EiU$b8{s`TXs3Hr98uJ
z77iD&MQV7q*$TRmqZj@{A1LL@1f&G#TCpJ&+-TPKCV!_Q=OXZC!)g4>O`d02b23c$
zd$PKG;bw6HZws>BPB|W3s8~DU-%lW%CL@h0+(EbDJoh(dfWxkOa#~iS1V9jZ0j^&&
zCb=Br4)%xNWm<%YGE>xx#n6cZNRXk_t6}5;aFqnoWMA@8(2a>t8xS(#fFBd?O`Sn(
zFpR^{#hCUy11?W3f+o|CS-!K#^$17m@8PPjEn0?!!RG^mPS_G633-aX&8@dO67G+z
zReM_O);ee*Mb&fGCG=_mWT{Hab~=Amu)3n2gT8*A>9Du%lAJo)V18{cW0M}=NH8NR
zkUn+2^jK4tA89c|-@H0H+10i-6*304ubnPrI_gs4s0O^=7D1r^^VRSC(bcrHyfu0(
z{37>-d~e{2lj;G-%2`ESU{<9NDGY~z-O7v>4L3GpyR@LWYyM<6^r9u*YWu<l!5>cG
zKw=GPAKkD1MOHEw&+ystO@;aR(WjDAdR{?ho%<|Ye*R={79w|{sQ}GI6+@G*tyRf)
zD+k+~H%e%XlxM&8NIl-c6^DT=R9bxcF-KDr8MtK(jXO9^p(m!({B~#K7pZE&HIZ3`
zE^9qx1gKUwD5NBu@r~2Xw;aT>tUEX&(u<qKe)^Ur@d3t+F~O#o!DMg%jz-sx#vL&f
zN(p%MFs?}~Gh7&0021zA0R{l5Je(}sA01;VF#h9{BWrc@QZeEIM*A*Rfvvv&N;wo0
z#rrA=#z!bGxL*2pX6E};)sGr8i_N^bFBxt0lI##r0M8LCj>Q<t=pgQ;IB&E+X4R_?
zf6sfyzg{ig@%dS!$&(c}r;h9YJV5u%ex_odSeQ0|gXksPN?4z@9BlK*&}@^DXw!Gm
z^BV^%_(*kvSR#wf#+1kv;=nZ>y~kujX#4E0&X=W{l9Nt9%yjJCv@pr|q3)&lfxGlR
zmvnkWwWlH7)r;k<Lc`YTD%V8?+Dj##)mUkXzb|G81ml_71ShtqZmiaVp+#hMczvz@
zFa}1>o28->PV_ZhAN#T%+%sSq&_Th+<tB)%v{8cQ5_S=sSqIc^D=d05XT4@6<`}W}
zi_4=^M)*e$)PE)P;zh#7Gc#L<^sfL#n576l$xFe$-?qt(w;U$rX)pHHPg#Fty|)M_
z!!ey_er{D*QPt2a@sF_G4sCws2zM$){WzjC{tRM1&llUX(M^vJ{_~P~XT`jFSJK`(
zW32<<AhZP*A#4GxN(&Bj693v>hg08ws_O<LpK7aqLhL(pjYtm&@w<h~l6y{GIHUjC
zpZE|8g{((zT;`1^x|4Rggt~7660~jrxr=-G7L{N3PE0(sS<pu!(TRVhM!(-6q&nx0
zd7R6t<+A6vdkF>@sbICZiQ3h0S2`rMHt+gKs;HaCa0V8pkImcg$)-<%!95eKZe-{Z
zs?*PdXC@&IMw&}8R;+?{;IOc1?N_ZlBy}~v7g@wb-jkSOzuaFtwZrTMkH~4Xe(mLC
zZ}^?dUaG_&85uAG>@6|C-l)(sDDq?PXzqW1QB&So;cYc4?WW3xLpn|PA*pRJlT<d7
zBAbR<sN_5sGqRWCRf%+|Cbz;K{*!^S+MTebC@TxI;BuCf%GrF%ce(NHR!dayV?nD`
zj+^9$0?gAfpUt**Jgtp|i?9h%;(E7xPIyn{*1LK5KO8+;UCIjFRq3jBPTY*3n6~39
zd4Vq@9(y(VYL((x{XVj-OXy69QH5_SiB%|VX4sVyhw1MQo8mrx7Vp=U$u8r}Ts$)W
zn2ZLA2XE@~d*X+$0KTF$0thiRt)dF*pX`WKPJYX#@cl)W#g#z!vEabSL`GrzSt{6L
zRw~B48@byw^MCraKJ+V{Fu_HP3RQB3Yk@QrOTd_u04XAC_Yc-i4wE{E1#zul7EIxd
zPlLXv_OsfEqeW3IB$8aZa02%YZ%tyamY_(X<g>0SyB>3V*jnj@Z(jekIsAKT0%^PD
z$BXUmnWg^PCjGv%(hWap9|C$IDp#A6rPCle6-bG(yT4Hs$8#8#`)>Wd{<z3jqn8mZ
z3ygwKK}1Y_5jOeUNcDs}Im|SqRYUg??_DvkU1MS~A+$`yw-9Lvlkvi<W4M2R`Oc$x
zB=o+_Si`9~J<(KNxtP0Dq7Yed1vuSmF-EGt{&=rsMWt?mb*s)O!4=J%<-KRJHDi96
z5?wBLi4}_8s{6WJGpC})eB_okJ8wNb*7WULj)1+bd4oJFTpOkpN>L5i6L^lb?ll^}
zD2aY|8mu+(E7)*yo<o5X2J$1~Q+5&O3+F|36R@e@+4)JsQ6vRRCGXoWz88mQlN4vI
z&zHoa#WD-DN&@$odxa9n{H*TG$)9q(EiMk-`RzjPUXZii!4;eHU9iG2`7f#`na2+w
zt{6T1B#O4NHvs=DjQH}1Wfta|DFgVR;J1f;P_@secseMzU_7CD)aqTl^WlA!uB2Dy
ztNAi?-4!6Ctk|z0(mBKPDT(}rrNXAYvs+VR^TmPdEs8w9xW=-F-n~nf%hbLBfZ>C8
zn`_ETY;WVUoSsxx&pwo}mU+7s+WYEGI8C0lF#Bc^v{EZKQm#oP^zo8Xh}C`iUsgNV
zfz-KD)+o8%g6#Dj6KfwrwH3yxMohsq&Z*=80KYV3+K$gy`)Dmvt`b;guchoXPA0T9
z9X>Gkt8fJ79uewPrEghrLStGrxt35WeB9d)jJv6pB|9G+xD%Gc^uFELw3xc4)Y?|V
zSzZlSXk4M}MvdUc7~g9t$1hw;bgqpY8l%uMH*ZymM9EWG7*7%#$rmJ+0a|w>Zq~=_
zya|0%YQ{1B*Ko{p4p~!OrBNVmUUdv>(kXdu`3FlI#95ViPr{UUR{uSW^h3+zGGdcz
z1syhb_44RbP>R#~e&ekU&LF|I+m}^8zLV{|a4AS}wW=*Dr9^j&T1_;m5>cBkCPYLL
z63)5bSs*o{3QKA0@;sm4^RyRuLn=E#dV2;zc()(%U#(14Wz5U_fBGKdHjfH|>Tb(o
zY_t`548?a}u5$}V=2P4fa{0$yPetn7>_kX(GcbS4gr|!0ALC1+$4RK%_O9t8bMn%F
zA|LW~%ZzAL?wrutM6M{ww@Q=U=eKcJ0Qv@2`vCGtoAo(=<B4CdwMoWJ&(R;OtK4o9
zb_hP!D#|@E4;13cwr|y5^0(iy`>{yu)f9O3s1S*-rPt^|Ri03V7qml`rE~Ew#{9g$
z-&AlM^6hA)HA-EZ{R>y|^eq%uq~@XOXM0^@daKUbR6G;ycFgSFS302;-<t!%cJuKi
z`w7>*r$LcVK*Kof(qKmEvu_$3A7%N03R)t(#b6?cWz21+w_l^Om6I$CP{aNwI@Ux;
zFSU&D%wt)b#lf@WoAi<f?bG|2rY*Y?GLIZG3%;###P!Ode@A`Z<|{XFSiJoHSFUA?
zHs{om=PgB)kl8yyQ%*6_6XhmQkv_Cq@rTQ$-^vvL5wua@#w#)P*KrpwH=xqI!NNf|
zHow7faRoTB&RhUdVHp;+c<jP>9Uoio6l00uH=Rx723({>FBFQf6vml+zYAzAJJ6IT
zeDbR$lIHQ$z0)?+wMzB$>LRSnerWvMTiw0=J-fBB*hqim*MNHh%ukz8AAG)Gt#<0M
zSST4;8VV!^cn}2$7P}30P`YJx;r8RF6avxQTApOxz5wsDzXBW>C3B0t9jh|aLVIni
z#Nz4mjWY{SRS6)>(fGGrT?W{5$<Bs;XrcXv&F&40(AGy=6ThB|D&y*w;KtP_K4O1`
zb)umqUq4;});07MZjH6835l<tV!P1O<MX!23oK?lgT{Fxj&nMqSqSREmw}!0G5+aj
zUEJnJOFKdkE@G;+lkg^bZ&-NS=4RA2xJ0gKM%d2UtBXyQNKIh!%VHmAAy_UmN(~F3
zNnMk0h5~tT-?rnajCS0+cYS7tMpeRxYTx>}j8%%93LlCxG~@vrbaE<;Ebj+Wx$Ofg
zUT<ta{F~+#_<7+Wac+-&Vf1*nQQmsNc%ii;EZUaP-!c-J8o1eYzrQ%tUyPEXZuIcC
zNPQh1TZ+bmi*)o;LM-(4Ri>^0RFgqZ`qaUBFWx+60^yz)GJYK}LC&HB3A23#3p|t!
zcD!D<gq->1g^R2q4~-EO%0ZrZRg6j9n}Xol<^^0!;i+mPwv;5T5-qarZnqFQEO28~
z<grYx+t(38p^UMiV+?e*f#pp%sC~fQZ=uFdu*|5S-l^2le#wHYVk0l|Cr9f@gU#2=
zn#>pt>5N@{p2Y>$H-%>Bo^zHDenXefa`N?sZoWnfS$z;}Ae{;y!CIRm#sU~gF}30_
zipzK5s%nv#Pl@(&rhPserP4GK#aq3Y%?}>3?*v<{2q9CJ!3~f%TI)k3N1r=x>JJ}N
zkea2e&S&I9(4a!`XD0|g^jo3J_LDDbWd)Zrv6<zapXv+E^q~!OqfmYzZjzl}C^xuR
zt&rm!gJyv}dAlyI-z_fx;iA<3iQQN2vs^}&Nc?G7mkw%l_Y$0NdE*Apn6>!q7}?%i
zW=DeZ4WKSM!RGTAIf)=U+gfc06_oF1+H4-j_11{CSeX0uj%wg)+D!<=pWGA_<eSy2
z@t%<xFGfDJsh?>s;=ccP-5!!HVh{M!x(|#%85GUF^Am~o@%?(=`yas@v&OxOOf8Vw
zMmL!qKTy5Z))k<Qr;^_yWM4nB<Ib~ye@xhoq&|UgQ%X7Rk2Q+;(u{t>-6w)jBx~UO
zleTP+hWkP<VXwwJi-Md6%+zcUXrj56gRgpn4(u7mDLv@Pcdjow=ZcOM?ZXRn|42vK
zRfNCRr{`K0thljfySHcvc)qPs4!SS(IwBgF5**_HiEm1HU`d0N@NGL{;<L<j$(ehr
z0lOwwV?(||B_86XH{a+Mj{l-R)z%v<us>a#_-@O%JADl+Y~1MJN2LSG8*1x&Zh8fX
z*{I4^Ubi(ChCy7J8{9iUnLelagfHu=mDF(G5yIa{sPT{dX6RE&ME0&}L=tx33Q&y{
zG8!(F*Qq8WDBVv@aTL)=!Q#lp1<qUDjKEUnMlU)``cm~1jh$ICEyDs`v>l`tDN2<R
z|7w4Ntnd1+=HtJ8^0pF0(_qng<rpI%kB{7`D$iTH-Qxz+Nfs<&YV3q9sh34_r|7oR
z+kLJgkH^y$x2G34QI*eclcv4HHnEgyq4Fg@<l);sNEL)qA%Z=^$0BZ<f0H0b79J{7
z^su-*DW<H^!0Y|4Uo46@s+h>h&slglc*t|?>VEpscI_XM>e044hHTTnP9u9&n(C_@
z8vU^(+tcPx5j?b%IXimYL~2=5;dC%Ni3aprz0kv`_#<9fmgz~<*VWnj*ZMMMzaw{P
zX0WiQNa&!T#u8@4&g-5*H~7cmgWJEoJ;XmG^OG@}ENj6`I&(aRUdleFEgmunx`%rU
zr_xs&ytk93N>(FL8}}~RC}Ue}vbQpu??P2KNOm`Jn&>jtl79twTClEsuS~t!g|Bva
zGugtt6lLKl6xDo&iWZdH%U{@4J1sdH5KK#JkvS|pw`}qu%k*~B)B<46ghmZB-#f*Y
z<0=%!k0w2*iQ^;WYGsXet?u<=c^KrXvKY7TR(zU7$L7P9JlNXDA}Op#LBs2&Mx8Bj
zqf%*<s+3AIS!4>&n#g#lS2tV!X<%tOa(PgyvyllZ;*a*4LPfEPwUHKZvGSp%d8S0K
z02{q7OeL8Qy=>OQuK=Q^ILu<Hp*?A*@&jTAYp?{lY4F$;VBCtf;YI_p&`{zmtnY0h
zUsrgLzUQJz$(<`ebB9ck)8p}P-v%`y`Owl6?@W+YD`O#di(!ZM@9YN1Mqz1{uv<(k
zzmC*yUoUlKpLuPK_yK7ZJ)erjI`xHi)VeoMg|3j7>@HV8Y!oZC1p>~^XtdOE@8!I&
zNP!stsNYkzu3&2pJgm4tgsAYrKnIO6lYCY!MwVdd#gWL5<U+=f)67H>tK7-QBD!_t
z>bwZ$(NYC0q)D|8x<>~z>lxFQI|tw_^qH_)a0N`!w0@ln37dbTz0HVWq16oCXJA2C
z%ziyfd1&kiDomS|h}=h_5CEtO)^vgt<59lOsVfu3%!oRg{PdK~5jDY~&o${Yk8k!d
zLAG51bmC)nPMCHT30MqmQoJ#I0Z&(jJIhpgRrj^>@wVAvpf1^tfnL!giGCI!^Y(0Y
z{AZD=UGu?tEojBZXzBS-m3#fOYK4aB@KzHBZf&8@8nH=HJ@cK*m~ADULHcLN(b&{%
zZzlmI^z6LN8*_c*O}OWr2<o;3hSW|fdr9PyoyeNe#QK(F+$LgdBDAcw#x&_*O)@??
zgz57$@l2tUoaw)RD(cFj{u~MAY-r{U%Y5dIqc+OqjDWf`qrFTUwJmR$s6mT*dvlwm
zqFQZO5m}{KrRp+yh12|5*x|R!FR3qdFL{rGYt}ggMT$jt2#ui=Iw+Ctq?81f9kdmB
z>66U#&73}Kak=i&5458@UQnQ=2}01g<X%$M_Dt{2TO{=C`KTv$x#&7lS!7sRSi+->
zHxby15AxdJdj(y-N^fn6^|o}HA~PnhttVBi`n>#bnA>*>F7uY!B5uY}&hW@v4l0+0
z85=5D6TwVbZ!9o3GPP22`(J7oB))S~a@joGMao$n&+7Gp7#UM4sYrc~dkr{qOf!9i
zrtav<c|0?_K-MNb=5d;qC84QODa)TT+J!8Fl(}-}n~HK<y)dx)gByYxEITA8n)>H0
zxz-z>99b)n_Pa*q8koWegvulam+ww}d31W9Kh2rTS7B+Jxf;J(;wW^C@p(-{C}H@=
zK|hbPRN~J-Y<FYr`T|<nV9u}g^-GkNn5GAn4Z1=j<cO{?;cMlLx6ym`k(WP8PPj?(
z3G51c(?^+*d=RVbvK3p4{;!8a_p?La`@WBO6I56r^Ep^V#o=osg2Y;_tM99^g(tuI
zgu=sxG8^lvzQm*-C%~%A^@Z>q;?@mcO{?*E9Ydk9@VN9#Xv1lM@;Zk@sOJpYs9qja
z55L{rW+{`loLy(Q8)rMln&1BL=e+*;$J$K^8_tB?D*z<bQmptApMCkR;FsXe;^D)7
z`?AKa^QTGAXd?5&L@2f{nyvs()Wg!s-aV>x;VZyCdQ|&wSi#}-Z++dCzu2lgR7*{Z
z&)E2deWXxquH`frCE7;vb^m5u<gN{K0aVaw!MJ6t!J&$_j%(MalEhqsvcQdx+vncG
z!_&KuKi(uS+l)_H>pCT1nw29cN)hK+wwA06SF=Uwn%Yw@<N2*_`=17MeJbak?koYP
z&;_zL%tjp;-L!c6n@L`N;e|3L2RGFgRT@>`=H^7Ju8urVu{kw0Fzw8DRyN%!)ahn;
z=pLKX#ay=Wxo~LYIsOVT-@dQ)I|t78SqyYL<YSJ!cG9G*XaIE9D6~N4-cU;KM`h_^
zL3e*xW`<2y`E>=U?!Z0Z2epZclbiQO!w-DR1q+!Q5G9Xf@?-Nw9TI@f^AtxvbI3)H
zBSD^wAQ-<eA3%Q4cVAtzfok$#ShXNK)N8V+3s(plw+&+_Bvcqf;Qo=Hv?>K`-&~hN
z0Vd|Ns<J)gVqyWILMA{x29jt^Hy(6Is&{M0TmM>Da!b^YsUqI%-9`)D?04=R&+J+G
zk;vIvq!Dk~(HJ3~@i;cWgyBa1HwJ!738X_~JBjD)r0E;WDEOHxncs0oj>kVj<~e+s
z8*}jROGfi%1239qDiIcgb+A?~I%;Ts%nq**yF*TTxc~ZnMCd1%NAd*ZX_j0e>C=5S
z%LVEwv_T7^BP*HoXhpi%9+~_O`lcky{|<to_W?BuP3lxKe+AgT8LX~h^{6S$cpPsG
zM~;0}YgCRk;*0QixqSCCM<hOkw3ItX=%K}`UEt)jL8~>ZOdPTEJ*arq<&ObQ^vLO)
zvC&eKVrp08_Z;H_D`7+H3NWR8<3LeO&f8+skJYaDSJCx1<KRYzB;q^s1St9naOwN$
z`ZhZK&J}I?Yud1L;AHiQ+W6uYUPqrE90d;?bp^14`OXbGjzD(GKW#g?8UuX$ob$IP
z>RTVHh_XX@SuiH^2%heVU)&}CIIFz+6>oan%|M%mf>c&Ia;AzAoBkTvVnVoH>i79R
ztVw<32rAy)Znld3Rp`0kf2$VN#V|pc_%r8Gp%@gmcLq@uft^WiNOcw|8%;$~Qii^m
z$r7OtFSmcm<g_#50EE5uS&^%(ic%?7|FH1`<5_CiR<?jPHf=%L@u7x!xR)}%PDZ_J
zJPz^W_p`)sXWZD#$V8*5)pX7WcTyf!I3)T!85{Q(H4;uY6BI9!7kRVlKy!a6Nn;!C
zYIA_$T=<wl;{8fwS<R`MIex1K;3=(`RZ{Z6{%4{%{%3B{6@dHc8-bL_<rzqsL))G~
zy@Q}NXi_JsJEcdmA?CcT4S9p-L+dl%`hRRpZ75}W?e0xP<XZ^hh2Mj^p1N0#F>}vi
zQZ|%BOXQXv3>7Y<+)IMBzZvk#vJ*JM$-3X0l$y`ZOj(F{v61B|tQ?J<#G@l@&10MN
z=L6e|n*KQyBS+TPJHS{||2oW%7Bpp-sNH^8{7a<})yH|~y2Q)-ktT!rMG;Fa4qXFY
zLL()Qc>hpOee|Y8WvTFJlw}pN>6P7aqc;0!7~euQiB4MnQb0rBSkQPF7TEu*vLfdd
z$hU*Sr*TX-AWtNvt#YhI4|vmR`scj=O1tN>5SQMg!L$#pL8!wr<;v#y$-?Lg+1JU?
zuKQemUIOSjaT?ovE;g&$hG}Qyw0$CqnV!n@2HY0MkPQ&`r%~)Hn%2arg*s$A>kHH`
zY*nzi5V*#YSH!g2#l8oOCcx?nR*z_sXfhR<QV7g##i3t%d^4dWKv()@ZHJdTU+#v{
z*jUP17HVcRBkv|)zD9ql_7!Faz8DdE54c;ET?6C~R@K)68;lG7y9GdrN*cE9pK{<3
z-d!?R%V_i*C*0MJbFY<{;aO(-Ske++<h1AVtOhSWN7UJ$cHM{>c*soC(%NiSY-A*2
zSud_%3I@5k=kUMJDm>Lxcm1qT=TMlAM2FFZ2X=bCGI=NBay}LROF)+R*?lzDOjo-|
zW}Ej?vgB)xjMN2JuAi=FVfTn(aD2%|1^=!@;M<t9&<bq$$wth={}uck1LJPGoO-oK
z^7M4c4J`n2KynA2qrdc@pxA#-rc~n9RZ}sn(^x<6c)dY&7cNqOew6<JCOgS3Hk7kn
zf3i;2Fky60q*Yl;eQsmSwQ0rkmMQ|PDi6^mJW7?=3qN?t&wqzpvD6&Saq8NPMu!s6
zge_DRCRU`T*I@fu#C3@@;2>BV^9*Ew>LdsLBZ8FM-FYR2M>@$}1IhU=r4@A8aILRG
z%*5B?Yc(^$J(R7Ze6OHIZ52f<!la4lRJAE8VgUpeJR8Nq3P2<=w=oozBnD^Z@Kbqg
zY~|fB{6A%WmHM;r+Evf4m`<wtvKKgqb_JOu;yfO!eiic9Rk8m7@yV67m{uvEAya7d
zw8EFtr31+B40g2z%q_ou!Yr3B(MpN=CM4w<Zcjc*wAD0lTo3SfmD55c<M^I4n5O9g
zMyj=GmgBNU-@J8fq0{`mkf=>A0fkdxvPc15f*DI-&ZD_LOl?KVl<Y%xf}ybS8k3a&
z08_A7;TooQ%F;8gS;R0(wJu#PbcIP;<=Cagi1;jH=WLVEey;S=qN@(e8M6yU2)V*$
zs;OmHJE%)3I!|^tkF4~Bk9P4|rCQFT+lZkl>E73dPi&PmeD=PI^)oi9a`RVG7b6Z*
zz0~+EKcZ$QwyP2R#%^HCIY02@)9iB^!srU|DOBWsdT=vm&6uYMB}Z>^xNApsRFx$A
z$v=IgPU?qFDLSW=>hk_0I(&**>z1=JoPeNGtwVc8_sP<m!6cU$K2gb?GQqm>m2fO4
z1;WKCm8P#SW@(ZSt1@|VsWQ}lr57ZffC4(k7x*yBm^8$$gBHwG3rG&CYVS?_*hg{7
znNttR7zHK~gwrCDhL%`FxpLA2_b0hKlOD<x<6=xtQ&GRNO)nNolFGJl+Hb<uI%<<L
zW_it%C{Qg){v<$cn^y6>V0{nCSP!D4U=-C=5||>QxSENi_bOR8jpc3y!xP#HL0!QB
zHqdXI{Y|o1;{|l|l=N~_B21J?l#!(}kdTz8;0>e8%sAyPS;4C@I{JiD(NfD)*|Qh2
zQX|__uu$(BMt;fFS>qGIC?_vQ0Zmc)+T<jMs8R?rr4h<?dyl{o%_V+`Ggav3TCL0&
zo++R4>T@Q)7XnrvO0?dNjU~`8a8~+^byZSJobuKJli5jdV<JLKf)Mu|#8{xiQj7Q<
zp_1I~l5HA3GtvB+nKhh8En#(PB&%QLl9fNmV;;_4$v8Gxpp!*};Z&3qvO^+VnILKV
zv%Guqz(FMnWcFwdqNQ>-9$-rpJDu6{H{!o0XPo~4iDm2$E#b2&@T01AOv5lHNthHg
ziI1=YZtoi&Q|Uf&%ya0c=j>jS{3?{oT#UDcOKMMOU?!3sU@*4Uj_IVUar?wfx8aY0
z8(Nc5*tj!F@3V>Zzwp^h)hgNyUWXBxf#Y;266DlMOQlH#&<Gnb*o~sR11j~a3Y9Mp
znVNF_Wi+$^D=KKFDsqov%;(H^0?AMW@3c*Jwad5=Msrcxo<0-(iKw@2Wal&;Jj|7|
zW-l{mO6u8UmZZX^iYZ!q5|>&1<3sBg;c{P4YcMrnm2xmCG?Ny(nX8tmljf4ltz^20
zWAzyN5?`>D5(?Y)g-V3dq8iq}F)7Z}lgb=bRt|lU`Z<JU8ris<>D74dF0hme{9a(6
zHdq0gTaREqBRi|Ogxn{ld7`dDbxf0o&M5<MbddM?joBashWbYD9(G~DWabPKyA7U@
zxiwN(A!+cSL}j<j$fwc6zXgAnPe){lV@%an)ET9j$RGzi*`w$bsFXX9JIBb~GR%0_
z;d>gd%K5s6s;4SXoMusCwDOA5f_Jds*pav*K83*g#gg&*G)|(-n0k<);R~J+v39yt
z#9zdEHDcVH<CM-(!rLQuD|EBBmW9+x{6s|5%t#?l8r`8Rv)#K#3pR@sryv0dFc!Oa
z0vss_ONYOffv?<Lb7)AX)mfM|Sv<p{V}cp^yf&Xu5Z`zzLP&BL!CjC63oUK9fiX$c
zPy(zO#5}2NqF6*{QEt%I0T?y4a`5^I^)t05T$Cze%R3|jN&SR;t<(IiJMksd%G~pg
zEdKz*7*$E9!*Kbs-7`SA)HZHa+>khrtyL)#C8}K1f`tYtP;f_R#G}LMFuGwPlAcv9
zI)0gQP!N!LU@a76;dR-pK1(U&B9>BR$wFnzN_3?u3;>IfFfG#m0L&s(<4*Bfr6mfO
ztsoZC-}Q_vA~-kV>!<mTK4j_-_<Ec3lPD`abyLc`$&LsLPkkos7}+zdokp(1C~(#E
z!n|T2qMl`SMDIqJkQl92a4hzC?n4GMl8~CJqy&W|1Q3uPYd0K6q={uR+4U(QomRYV
z8j_55G@9g^i+9<}Y4YBG%`>J9$&J!rSZ}6MDKhr2Rcd=`oUvfv(gyKnYsk2cUFn`J
zhU7{~RQSaCZ7Out{{WjXsQZ?win&ue6&UHtMSwbAcLvaek6kIVk#J)UO|n=%eCcmS
z^Y#^!>98DMFw)>Scv6*7D+rSHleHI7RY5EiNx1-@G1sz}OtS7yr(CQn4wjkI4ylz>
zkEuRs<f>wwMmEGs2X;Opze-w`R4jHesY+9~F8=@kzTOX*D1Qo>vnl7yQy$7$&PEe1
zsY_A>%5yP#RIOSTHX-@(9WM{)?mviAzGdnLIbStsC{(;TNurh?pI~$Mj<U!sKw<L*
zC<-7FU%8EaO;)MMsyB3Nd2b?MxmglFA%{ss`7*?zQmJ86)H|wSx{2hxY&w8`4`jvK
zI~Ohcz$ns}AUA-~m3@)p<G3XbWTBZwiBd_eq?u(&sFoCy*};z&F7(GLWi^SoMgc)Z
zP>{lOnF$L6pLX%N1O)@d;)TlBFf*SZ#%i*STdD+wjVDTCPL?M~AwzJ+EfT}Aaq$S`
z5H&Xz;vQ8nJ~o}xjQjX(&XxI_F48A6;MIjmpC(ciSD>p<3rM&lg$Itomp>Dka}>hq
z>hU^kO9PuVl0{t%!!0Uy{{Wa0#5{JTl*~=bS}{c|nTjYf(xfKf>9hPtN2unMW|M2a
z4DDBI2BC)ahXuy!DrI2Euf^s_4zWo^Fr=OtN&z?G@w`(_n>tb^DJE8=g=kx9O}0K_
zl_p&<DsY5b+7q;7$pDQ);0qYV+u)dHC@CW3-s)(^K~h#M<Gf^qfv)aZ-Vmf}H-240
z2OAVRQU|%NAZj7h58J#Hr&35NbF+v7SSlbm@Kb2AL<-fYDTDD2pmgbWPc0Rcgq07Z
zk<H=TR;^Bw&1h3004qTT&B=x=k?kQvpD__Bl0VGUkU0=S95@zukqV%l*cRd!OE~}l
zo;$-Nr9`E`Zt*p1vRIFZh^b&G76tnYz?U#F*bT21`fcOlC|s}|o6G`-63|~rDhd&7
zq$LSrFXkzfr7d3^Md=C-3<<9WTMdCA09?Ty)fV>=#6<!U<K5UoY4=#U<}_UlG=k@D
z<VFrBfY9S2oAb2DsYN{GR7w*A>6nsqtK>#_NK#67JU~l}A8+0=myy2*8~jLglNRd6
zSE9o(nyEDFkyGm~T=f*CVTjxVaC}Cuk-8<+tS2u};1!wfP8o$#OZ26u2&5jqlJ+wd
z+0DRs`YfbOcd_p91gj8}_c78slZ;$q@J%O{vS^tPq_`eJT~$Rax{BOJr~Y;&h)y%A
zCN~<6!@!RNIsO~6W_Qd-8^vg5n(2vUszmim3m`C21Cic3y3~-30F6%2r~x38?(xiG
z-tEccQ>9%T!h`VllPKjXT>2WENHY;%TQ+J>YZW!Pjo$^!m|g=%`CKZ3pAM%1v`CT=
z{{XamN|Y8H0~qFCr0W2<@41Z0xB8f`N6p=N{54}a*EpQfm#gGUtO=C#v)w-A8!0ON
ztsa&aH?I|hP}EZ8+94=E=H}{9nATttZ683%3Q|Z=EF#k^Y4!uz`$t8oH3bW_xV6Bw
z2|H^i7r?rC)NFRHVa^%ZRJeIa(?yrkB)NN~ODT44#qSz#U;0OrvTj4K#F2)sZGBxt
znIE63s9f2Sbu1F6Q9J|t$J2otLtK{q<HpUIHwn(zO7QGvlvYu#Nm8YfLlA61;vcCy
zO~3jwlu`1{e?~Bz)tE5Ubh2dpBvUeR41P@2{$EK?(vRy`cC>Unf%t63a!n%WX?ll>
zQ_#$uXR1vtJhT-kh9s#$TgP(4@R=A@EV^1rNT-o4AqbZ#ASDSK05}nvWdOqNV{cbc
zs!s+s#_g+Sor8440>fs?pE5+uO0rrC4OSyHQ|7JrCsAwo4I3<~I(lh?N)s)BNC^x8
z_~J5?Ev)|lFcg&lt{cP07Q1HCHN5$osIpdC#BrLSOo~Y+YLbQpU4Y@qVfT-hdR^3<
zwVAVZetge3?FB6QW%_t-A5eer&Tg<S<(LEA%zZP|Jc&cpJh@eYVoI8dN}W2DAn8?$
zi~PooLHJVX)g=UqbATdxrJa7X$*T1i{{X2^apE_nsR*fWPT!9OD7KY4Jp51bebdY|
zOca&XSe(>=(&=fH?ta9yU|{`v&Ye&-X~>y1D5aI9AD_l2`e_P1k~g;i$5g?(PnB}q
zv<y1{!>NizmMPP3xx8%D)H10+2$q>(S&|qJfFb&@ScE@!W?X#A&MhOR-B8UH+VduM
zm4#BIkNds>CrMgP<65phZyggN^y3D|7;32Sc?Aes%I8g(qN$UY%9NvWV{R%+L-XDg
zGDr;8y`z+zrOPz2ghcy$0}Kz5`@$>@AO{C$d&MJI4R6{D92qG}$F<Z*4C>g$jirK=
zScYh^DlACb%mOttZ_7fFkx-pVxh>ek1nDdbgRz2_L;2hot65JhKZxBBszF&dcI+UD
zTHeY5uKU4RbrPRzvqJ>{pbG478Kfo}BpP)RxyoB`)(}>Ot4gJoa^=OEGG<Fjb`NK_
zKWN6Q#AMO{sq>L0Shl680R6nfT@izru@&I76DpG?Wb%*#CCW)rQq7VZ$s!*sW%T9z
z9!(e_b5iS=w1x_pqExf@Q83hXl~{#xh^F*uB>;8JvX)NFI2_(R9bg3m@PF$9NiyiT
zX;8Tb$l@s;q_n7(Bfy8001V^*0CW@&s1-VU{{Yf707weCBfAJ>GK&@-dqrtVwGx(S
zv9?jOaRSK^S_Y*!0r|8jE(MwyNlKFJJiHQ}Xdng;-Xau@AQ56LVs1D{iJ*bKqA4tb
z#246`MMMx10l)7U^|TT(8d>!966Ms>PDw<`Q&XucN#KLHizQ4l)}*b^Vgtq6l2S_+
z9L35W3>qvhmk*XC($dPOl9TjFmL;TpfQ+FiCuF%i!zE#y0)9D&3P{w>_rCEYsI82d
zVJRWUn5hYLfaHee94M5L{Jc6yQbo4r8ZsoUT37~DHilxf-V3CB_kw`HDMX(T^hIo$
zXWi~9+887R5a+Na477pu8}MOPT&l#e<V93KBr<?5+=zqP1KqiS?n=l>vEYaxl0Dtv
zLO@ij*u)^~A=n+-843(aS`>m*$^pDYLWH{B;Ek^gCAw@MlKjCA>I`3l-W4x<-Qubs
ztqO1Sot_{;3FN~Gh)@Z419!9nO~7FGv_Aw=H4yFr9pR7_bE&lP4iug)J4Jq`l1F{x
zBmt>YPLf2h5AO^TRcE;E1dx;v06q*!Ug30rTHYeB0s<5RKHHXp6h({nyb~;i6>4vY
zDjF0B8omqwGC%;3UhQ})N|fc;k=`o>({K`BXqw8DecQrAr38nt5C|_|U*-(8O94Sg
zdj{em3=A|yDN2IW>T>a6Pqekk@eCAGGR|D{3>3;gtFu6r30RkXikqGx0#pNl-Xi^W
zR>|;Uu#&brwfEi{pb>~k3_(;$XaOWAfA@=lWU&5tgjB$h--E;eW6z_LKGP*7N3y^_
z!HgMMZwQrAXUoH>5k0i}iI9+{?Cl=A0v_ru6@rkZIQWe1kArsjB1V!zK^EXNMCnL<
zkC<YjtBt%u0xaouL%<NIumOn&kpfJeWRgy@T$r$(A<{@a+6#stec?bbKqP@;H)sVU
zh9qfzcz`D97qC2mc&$rIu^OHf@I}CGK_IsmXd18W4kD>bNYu`dEbK%FWs(`sJQ%E!
z9a^0)v)znExC$q4?FByMf{@2%fE@&lk6{PN0VoXO;Q87VN(m%}e=%rD0oA0A(E?b5
zx4?p|L_%9o4|}`q3b|@ik=dV^y(K-I_aYHW)IfH(czyuK^h<zN=e$&D78mN5){|%1
zxs0HxNg7hHkP8RiEeu`Vhw3qh8x+dqm1xvM7ufL(xnv!#V3g~7gW4bgiBZzgw1Ucg
z66A;P4w6}jAf3pIl9e{X-eE`y9?^e1!$o4VTmnei@KUAdbs&)YK}^Ls3IQSbyaJN9
zKTBQUPS62}W=*`r0i1-3?+)r_XC7drJGld}5Ybsu&~)nYw=jXoVl^>$<}NN=7Gbmt
zTK@nr2!v^~6&OP=Mxsu`Zqbm0x{oi&FwWZm{o+V0!xBXFSda+Vh*~5WN>r^YLJ{<+
z*dK_^y3e?2F+|g#xZ9BMR&044NLMV|I5AqJB`CQq<i&u6%YUSQaS#x;AOHoTqbmYS
zL6&S?JG?k0DZPm<@iim@Zv_b~?r*qgQJ?^#qQnvL66RsOp^}9YaTUx$Qb{Qf*mDtB
zQlWP*=e!UANlTDV5lX5OiHJ+FHcDdh616)WUlAfJC=TT*F&Q#}U=w(=Wdw_P?Es|;
zQ9F@m;v|B=#feGZh_BNlQFFG(h`v|}Nf!gWDZA~*-U<?#E|n4)xOgiiK!8Jc5Xwpl
zg^VI30-~Uz3AczcutR{!(vrk)76PD`A(&V;@m$78_c<NYyjYcm4nQ5_L@0Zc?tPjo
zT+o0{$M=R{o0lns63(y^03Kj$QbA1DcMO;TW!Z;@i<2VCdx)#he{l`)BP#14QBuXo
zxfcZS6-x?S&ClPM%}lZh<UyAxrPS9pzi6qmNs5&yC)|H%gnEf^_S*M?#Ho*M+8~l1
z#{0Z%$wI+E8!$H_EtfPY(k*Txx}Q>Al904{EYM`hOO=$fc#K|Y8dXL@R1bFkV3e>7
zN<rY>0MtgS5DYUpCH&epi~xd-iSG(EsmTZALm+do^UOq$#ViRYad<>mAT#;5`@tTf
zfz*Ax0{|&0)Es@chEg?Hm-7N5rA$hdq=KL~0wPip?|axC+8HU(qBPv|6_DpiAgiAO
zGRU&m_LUY4(5H(O@?yXO2z6Pw;vmwLn|wSdgw?D&6aMg_PO^ecgfU1>fiKz-V1jhG
zK6#1Yf{BSGKmu+)@cxoaK#;%(5j`aGp%Pw5b|4Z6ZXr@s9fOhBK`K!u$Fsa9wWY~X
zenY#(Yl1CRlkTxFi6kd+=FbHS3P~q$8s-*a$}XGi!V(!O)PrtpS}Oxnm25XI(2Jg#
zDJxQfQV(t8jc1On;#sF2smUEUNU6sI%df^2{{V)=5_xg;Ozb!@sK;<$M%kUZsUr!+
z5tlF;bmQ5&p${t$lQEGcKmAGTzovovo&-Kw$dj<ia`BvUs%AZz*p)9?Oo?QZ?2|0n
z2^J5zXC^YbZPSc5AYn<Qr<P{dOiR_*&t=c4StO}b7S5}Z!0j7wLRB%?dtHnYY2k2f
zD&>-nh*L=^dy-VAB)K;+Y_|mINH)1J;VA>!H3PdsrC>M#n0A7IB|5>`J`5ryCg}i6
z1NnvuPJ`}t?*vR_?i3EpA*4YF(Cx4na`84J>20DoRl<sY-T-i$r){F7xBwPa_<Of7
z#RE!<F$ZG+5akB^+8a?1=ZKPBkvAU@KmnY;#0Mnb+T&tkA80?z2xtVDk?v7RJ>C*h
z0ZDZb{r53sqUlg`<`#KOpq^Aq2+LiH9Vi+Y6A&bQOzRd9t^!Cm?gSrEHwh#M*us(_
zkbs?rm)t@9FK_lRB`;HuLG2NW$I{Gu#*zXBXm&65ygrwH144znfJ2TULc$9%FKCHm
zWQ~63e$dHN0-`MM6(wHcKp-?&l>!P$<=!GIDkWBOI2=Jr$Vt>M%s`M*>34XENJu-C
zKGBj2P?OwUfQ1Q3R-H^=Xmk}`*7<~Bv$%1zR4EoE7dML5f&z%Me=szX<Z1!$76lTL
z0S)3FLxE6%q`FwZNS2VmXd5{;h9QN8`G8ZXo*!^KMq05I7jg9gcV-ZMG4-?(;W~F5
z2(1%VgK+P#gKF$Y($_a9j$ug>g1#ZjQvB_B0#lGc4mdHc09j6^W@0xm<P?IdKLo{T
z)RINoEd?w<a$Y2esGzAKlt~<M1T3Oi3b&roY14KDo?sF*yORYFijsx3&)U#YDS;Zj
zqrKuGI!pfmFvr>+<L!7hu1E|U>vs&%0u;m&&1Y!fJx1#mD?``po071~gwxSZT4z_N
zr`<3sQzH8YOWT<5Qa$M>f+KrcN%;%QUJE{uQf}6UNF_f=OLl>&!3M%26bLWQ+(3v8
zVgP1<CGJ=PIWWm25F8%lKvF?TONNQ83K1$Ur0!Z2OG!aCzTx4NE|&gboheF~hkjs7
z86iS_F{!xX3Q!h2#8a$3$DYs;FVjBY4{tCeL2~42e}l9g8eCXepeaEh>Ey&p`8#nH
zWQY&Fif!f@>_8uJ6JErC2Z%sjI&SX@!4XI-eaw46v$Jj@z%98`j?lV81N=oqCbb12
zIuxXm#O&WN<;ejlH~t`N31Bz#42AojPVi9-2>^}lu!*oG&)O7Rma*lcC1fQ-jfgR#
zAOME<Hy$B$31E&%+`*ME)Ac{pUw8{DDJ3PFk}m-XI#;Svw2%rMgQehw6r%ec{Kh)!
zy*bI1=*&=1KJY!G<ixB`uS%eTscH;T#f)aqnW$4CSK1wlhVFOUXu^{<VJ@_UDE^RM
zG6*V3Vp2%@o0!^QI6A`z7kDa4fh>55Ehl$9n`i(k_btpOMD&ON5zlCS1;dR%9pUv5
zHg4d+Nnyhd@lqi{19sn-glQ(kj`0<whbeRO4B@#PUI9f(0U+Dr2?<Cp3Fgq(c5P6_
zFLu&*i6&N76lQ%Y)8vR*Nm)_VcI6?W*WH6R?FUmYXE0ET!747!e8n0TRHybAvtH48
zP%8HPM0G#ZLGB?A3XlLhmfHJ8f?3%`+i0q;ofQZPT3jJPNgiU!M3k$B*ucCI6ru|j
zE#3lDKqMDExQIel3T{>AD@`VXms0GarUisb#I+}#H|%#Xik78V9V}e`0MZy14yUz&
zjbeXPa-}9^8FpqhMvQS}0YtU3$u|S!_IT7{<%A8<u_~25u$Xri1e*x8DN#Wucib3c
ztSaeasv<?mZ{{;VF8cs}@e+k<Ew}cD33Kgb4n!2#5P(>34GoYT!ELT#NeOYd+75)5
z8xgT4Ax{0rcqpu@VvwdS{{ZeI*d?_80JJluSLehK$_(Yx{KafQm3wx$EbkB%6L?|u
zkj@*t320IhrC46kazS?t05KdSZGRj@Qo@J@HsHZZQXITM32qWg-^siva4t#UMa-0q
zLgo=^1nT`q`$R~lTEGMVSQvDYw{Tmt!9=hF2J!I@62p&}Rf$TJ%F+WCcxiJIpaKhl
zXNa(|s4#BuB~u9@OyCma1I%S#L^6)4iZ>wI@S{?crY8JFiAhVjE#3i03vgU8F;D?1
zI$cEC2~M9Qu;v|r0mHwTS&1a@8)(u%SGd~z4WT8eOrf{LK9t<*7TuxP1cne$p%59$
zhN8{1Fj>-eHth_LYJ-p1?-h})!6XB~JH^2vb*L9RZSw>yrR*D!V1$4HcL^Lp04n5M
z@M2fd3V9@IYrq630IT>TXkxUF58fb7pe$YO5P(c6EIq*32#A>Ek;ec;32i`@J;Ve`
h&=j8i%ofOpQQTS-0sR6j2x47CFo5IwU+obe|JgN2Q|SNz

diff --git a/docs/assets/219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4.jpg b/docs/assets/219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4.jpg
deleted file mode 100644
index 6be7c26391adf7975c8ba2390f8c39a4dee38bf6..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 26616
zcmeFXXIxWF*FKtr-XZi}1B4!$^p23wLq|}06OrD#^d^MRJ0w5=m8yU!AWeE#nn*_w
z6a)lO@NnP1=l#Fud^jJ@=krYV&TnScnrmjRy+2&D_WHZ{cO5{DxC_4v00IF3;O!6a
zcLU(59pd2%0Knlw0Ac_DfCm5r@%}OWd&7Vr0L^V(^7i&f!vD8V>$d(+F?D|&5bWaR
z#U&voB_Rfr1^_?+PzA}qK95Omk^WPM{{wbg{%(-`bPovdQ4kmR_7`(-e(dNX=JfcH
zc!-0KI801J9H68c;^W}-&?SJ&(Z$WfTbbwk>mD914`*c_OBn+R10PKncMsh#KNr(5
zLo=tahfea&JgO>ON+Aj%k9;1v1UPVoJo57PR|rw&`4?Q_w*F7II1ksqE&&gfd8`bK
zxila9xo}B~NsCE{-flfelIIq~&)HQ0sde{12)8X|p8qU0I5=1=SW4`%pPM*LUS3{Y
zLQ-5(QuNkC)IZcaz#&A`+n@J;5VTzUo%}p}0z4jjbNz$p;P^N&K$+)u{(l$da!d68
z!To<w@Q>wR4*thAz{B<b#q}S{e_a0=rC{t4;^Jkc<?+bH+y8c6Wgbasm<$is|Jtno
zZ#F4Oc^<BR5ES5#9Nb(iJ)8sF|7XPicI&!$xVhi1`+vQS{5)K4H~Y`VlzCu0;<t>6
z|A(@Fs{iK+sBrza{Fj0MGVosp{>#9B8Tc;)|Nmp)e>I$o_pNXXzEx;{R{)v-JP_!g
z>$c$EUIfGh1o-#_q(nr7#N?#p<Yc5|WE7NOY6?mkN-{EP25K5wI(m9~aw<k920A7%
z9X;JYAV9p^G57=|1Oy~>6l4^1|DVg>UH}a-AO^1y56A-m(E#yifPV)7?6<-dga-uv
zN2cB?)mzbvM?gq~PfT)a)}+2|;}hWFf$#_jh;Kb_n=}ACd~O0lFif4u#DSK_S2C8E
zE}K^aS=mg_;Mh0AC#7lXw{d&|k!Fm0f_irg!g34$Z~0dV|N937-SR+qYZIlpogV}O
z0*UYl{<DVL8Mt9!bvzS%S{_LU-|X1RV<}{F-^|J1WdJ!I@OJHZGyqk=?<P3Gh-u~!
z#k(g~tXibx>W&cu8cCu*pAPUn?P3l><T|c0k+^{I<i8WR=_<;;qm~e(8_uU`1gbMQ
zY9zTk#-pjT=+`fj-<bqorigm##oaB3mGvRPRjqaVy5G2XY%5X{$Gr__p6{-n>*RSt
zB~FA~qNHpQ9ulJ(<onP3tjaw#?GW}{v!<JJgCc0hSz7(hgpb-rG6R)2GEsj44<)+o
zyk1Mn@q?j44Z@bHr=~)5<@)H@{ZU{XkWF4-d&pKaPcx$I#Kr|8NQ{mjLqwrtdWqZg
zX34~jB{c1k##r`y#c~NchiMF-{qaS$I`MRuGNzV^Sr%|k#Y%d>Ag*tm=ZOUtpkzQ9
zr+W<kqC^iNPysohn8yjL0#slbgCZ;FK>)UB!Eo#<#jj*FHvrza8h=wZF7fdAy+vCb
zifWm%)azoqBNdqoU%+Ic?Bb=#Qpr@xyYa_n>Wa%80&#G1vL{gv1qu8+qTn$tH2_Ne
z_#zSdp()$7n?IL2vfA=7bszWPy{n<@tT6AP_2eML-2ox~=fJ;!Hk!x&2Z|~)NKifi
zL*+}!K^%}Yk0@@_h&tFLqt6QTU1K3|iCQNip>UiL#WXWKfdRQ7Q{*d=o7_-~8rfuG
zwCciR39^R)t49Rf>U~r<#<C{dq9<fuFeLWgHpIrNFc4lDr!PqzMxuF>g`hr<y*KKH
zn<1;IclDTu$|R*_KD^f$ieKzYI88VD^EJ`Kh(6<hn7B!%HZZEf`?}|xOq75tzsG&l
zvkwFUgR_{(<m2w5JIQU1u|yU%R1Ea7u7sjM00Cd7Y#KE}El4)D&O?zp`p0w(zqNuo
zAEh|JCYO)@cNy0h5TWuhvI@;<dQrqw?}$X`^lS8vQhs*_*&xQOIJQDS4&pdI!KZ&D
z9+=>FD!=Xr_=+3b+lC|?GaFj{3_&)jD__z8O|)MsZ?2P6SiycJN*-R7#}FsZg1kn5
zP)Se~7l1qfw#7lI?~Ug*Bl=k4+G(TN`b+|>M-B~7iUDdSaKC9lhofz93~5}q?DVoN
zVRJ$ZpgIUJpw&dT&pYBShz}xhi`!OS4NbWqh)JLs4l$?uotWRM(c~W<8oQ6v_}UZY
z<Pn=K(Mg~X+P?020b7ZajekVygJoc#k*2q89e8Pys?&D#9wt&zdDbumqwZq~_HP3-
z{ba9lk2hFe&r)0l7%ca_Omc27K1<dFX$}JPtA-~kNx?ZsEZv$=1R#qfChPeI<i2bI
z7Z)m5!Tc4uY@EkX%d_HWiI!SHeH(@kP4+q?Y3*LXfM1`sl#F?Z1Lc1Ag5N#6c6iVB
zP22%rAtlI!O?GC6=brFc0jiioH;IIN=bk@JzZ{EBa$_t`Kf8|N&rleHL>^6DSBi4F
z7Dn`r$?pMpYc|fVJVYVb=WvCos;`22U^OTPlL7FjL?Lwn#3qs2G2g+dg1NzwKcDO$
zD7Dcv_{csvfZ)G|C9OJ}o7O$Ss#69h<Ge`?MmUYBU7ksKPv)&Mxj75w>HbRONKS=j
zLB=FMnX>w|hIDcf)B#=J{Cp~PVC5FM=+F4}*(dImhXZOrh)tt7O&r16bpO=L>sK*k
zg85Ms^!9b>OxJwDG0jKh#X%wA!VmwHR*0%U0k$q5^K1DBQq%UbZ)9SygP%iwOiI!7
z6>`r9`L=~Y*aZQCD1ya-cY=eIV&ZF{*rhu}7Zv4x>WiT%jk(Y<4t%q0Vl;h4hid|o
z=qZgSoKn4;KcCcZXm|htMHsuRzy-p4^P1YjQR<N^(J<%UrWT6Bm3|P-jc~LaPC`ht
ze&hQ&kV*pB9}CpwHmbvf>=K(MhKHuo*ragx@_|0n7?YZos=sUtBY(vQ+G^9PKv2|t
z56XNbd(RBnmgh<?3+N?<;Q3sC=SV=y@o~nIss%mda5B>z@-SHZaX7QD-&VO2(uSXw
zRja6hqZDIK2Ujh|YO!{8|4jXWrYj>+Z45L0P8md0<{$)}k%I{7b@=?zXzJTTw_n9g
zDlwgdbiT{qWZQOnph?wD+LIa5zylIgkz25~yRbWW(Gpaz`%Y;t*s=k25Jkf8?8Jrm
zZzR`_77JL^fj8xBT8Lh__!XIYZk=rw@~BPA#4G@ULW%r-7mpru!F~4K6;)7kBiH~C
zK%OdQ11piwY+@;HjF;hybse9R1%V_g>s8uE<AAE0tz#Y+03BKoZzWl~Q&%kbb((gp
zHX81xn4GHn5V{geNB_yq*s{&KIK;7mOqGiKosI7NMe^0GM$F?q8oViK>dMzD@8s_w
zu>!wmWY?ocC|L&r7x+Ab8Cp-+a&Sm=_mo5i#Xgsv!J6{@(d8d(f67JKQxL#kd*SaA
zxKQgC_{D0AsQ$=hdZ;jjJBHEyhh3>&vXFn%y0)j4qYTZ%(70kizT<I2o>-N2X9f**
z9h8p@xT*u2{A6Bd;gXB`5yoff>Jd}adCH8G7<q8YpVal5dv%CW(T#j!p7d(<MHZ>Y
zHGOJ?5hpS>UyJNOzxns+y;rl>TOs@T`c2P^ceXd?!RHg!=bjG*!fVY86vu5k`DKfq
zuI7eMzfs*ENU!97<YI0+?k2Gi^Uyc7N!fb%Qs-EYjms)xXhYACR?_*1<lI#P`Q1mB
zdUiGol><{S?HRY`Tk8AS8ZiWY<+X68Enp(1PC<@Gi?JCGNlihZvj&LP-^~}ITyiT%
zWBn3?*(=Bn%}7>*sFw>s%WvhMDl5%MbNY&R1<yLnR?zjLGj~tSa)Ge{S-M2yYMfU1
zw8PYpi5!?V2@<iq@%@5&oFuq|d<x`cKVI3le5{z(CWLAYhyi?Wjzbx|3y6_r)o!9^
zRk<N3qZ^|IlZBTt`0^m;(GSg+#3n<iIBZz<TfRivZ1WVt1EJiHF`+P6rB3rGnH*lS
z8#1BTO`OA8YR4QZwKGmj-PUD+TF=}Rcdipe1+5X2uQNL8J}d~QW*cUmP!29m3``?>
z&vX~;E0i)w_8f;C)s}7{kp*q@DIfW=$3hTlB6%5YTb4`3%_z9vqZ0y={D&Fe3G^#p
zb5hgARA_d5r>;vgfd}J<ZZeCQMaidLi}C>|8utpIh8K=pOmt6zH;+LNO!+@c?32uz
znZG>|X}~mXYgK4vv3dUPhVqGIdC4Uqn6dyJAzE1V2j)-t3Djv3a00~wArhq3{Xc3p
z#BgGOaNJ|k16{!H+;Oe<QWCRP8Wh2REO~dwW<P@g9d1hR@R(ykQdFKB@8mA2t|{I3
zWx}}Tq2t-@eFDuh5WqqtC>myr&yEm(T-z3g%<k*BF|LPNZLE`s<hBCTk_%n8X?LEH
zq8DMWo-p7;8`-(qj4dD;LJk)+U^8~Ht;ypNB9m6g^J(x$^8U;dYUVRkG{&QIsy;+T
zcc@8(=dJUg<(9cdQJz-Yfr;OsPK!FvN#{O`8Rs|3lOH+{dSnDn6z4qBAkxcN5*MwP
zFRX2P{sL4-6+$*fJydad2cqTHmCkRHzPj^)48C_#qkC)2``=D5d<no6L~fxb4XCN9
z^*}_+0AMn~R6&th7Q*TkbxM(5lS1aM7>TBEe2mhd2hnQwIoV*U0>DonA5&`J;}~R8
z@Q|z7)ansku7`>0M|X$1YfGi&>HT1l0VWV#F3(lC4ILey1VxC5+MbyR8b!j@BY_7c
zQCd~MBXmf3j1x2H5B^SQ`^H1S>9ivig!=hNe4M|u_{o3*3Td4#WF}q5O4F?dMWc{d
zP(c01#eOVQ$ZxRH8lvMza72y<2EFz=#*gg3H&~HN02#WmCnQk&JRqdb(^oJE%tB<*
zRg5ae*b*a~P`#_C16Ciz{sJ)2yKM>uK$>_#kEEw}o~rJ)Er=m|wc}9mt8R^&JZ)}d
zA{vVjHt~<zmER5iI%zO+(^Q=HzVR7ax(K2Ebprl_wux<x&kT!^4#d|F=Kl2Zgyxf$
zhq3p)kS#ljKL$HWr;^$P=189sS9iQ0?P`@$bRinKJwqo;Srw7J36v}fqu`|RbpEm=
zApneF7eHbx#(LFfFxByh>;U+?@h@Ofsw#b?;|>UfvfAhV(~EdgMn#?AAV#MK13Ku%
z7#wv-G_nS%*mD~u`K)m#jOBR)sRbEDisc_u8<#Q!hbu`XdMl}s+<;kgNbxm5fp<=K
zfg}+1V8Zbkx)FiZT^js1PdY*#XcMc=BH2PWg*0~gs~pYHDI(Gh{yUpzF+eeVp`9*x
zxJ(%&jQU|#ui_D%;4@kP$vk_SH@?l)ClNIU30rPRQj*fUyrER=0n$!)y5*34z#2pQ
z`>mrCd&j2yN1K+P-s9F`=3@H8s!b<ahGe#bA{Aoy81-WGUTe&T&Un0GXGT?a@aApu
zsNuh6d^PS%SrQn`Hg?`-ZG>ZrSe0UYaJcRGjb6_xBrpx)_6oLQ{7j9NwqFOXtz-BR
zm%g7ka)(?+6d%CWNJT*nCVp8F5|Sc@wF>H7mS3NtY{;6XBkOGSwr{y;>Y-u#-46Vj
zs?ib`DnuPAYnk||QZvy<UxA(L?YE6B!i3rPeAFZD<I+u3A{V}38&n|StAa=kr{#Au
z7y{@cZIOrptrop+(P)t-+t!#M_qI}@)dGmCnu+7Yi#9V1xH9^Bu<Ol0#^f}>{*M19
zy^98qRp;KgHWm^qI?;o%t~(4~ro+X~4|Z$2TWct5!feMLhQRziO?=c(2!wR?(qh{y
z5;Ef8qMa!kgR^t8qPuL6SU?Pq$I7Z6Sw~!-S`ZDAD@Qk)bC14RjESi&pa1A*JY|Ju
z4t)%Xh85RE5=j8+2>bjPl=}8VNDebeC-B!+2-<`Urv^ZT@(t*}84CgNxK=vMHauzS
zmIGv6JZ8O%6vV0bWaFvLuQxA)=G7kKUi%Uv;M}X%>vdeP7NY^7Kz?^t;G50fK!qT@
z^SBmp%*tPYt}(S&WLH|>G}-GlQVW7k)<HefIGuU16VDjZv_$eynvtuFI#-Xk6FXH3
z%{Fmjz;Jrmld-D=q@kk~LL19v{b#s(>BSmT`DmcINR}paDp?SLjoa_x8+<uG@ygqQ
z8Zm3V=zU*lskIWhSEFGL(}M8&_h#zN-_$N_(xaE8c#U!`i<%`g4&LIdoD(mM3U}US
zk7TBmoI+#`#+Y8po8^j?#T5pUX?SBL7Ad!zeh)pJX3H>n;<EHBi8;4g(pA2eW&r&8
zTf1s4R5=dW_qy9#?y{gkHmY=6=zhr#DH}`1!;+$a9N9{bi>ZRDuLUoJ#GLFKUz}wX
zO6tuUw1m#etGfSu=4fMq&;t&Rb7^?JBoVdO-lF><8uYNURs03*C{rFg{GLg|ykTuY
zytyeup{N#ZU1-Q|PrQs1={{Tg=DC0UxusbcSy?;O2DDW2shxncJ7~1t+{|<moxmJb
zg+LT8^(%!)=2>7yem$n_t1Fo^f$L-FBN5(4DBJ5TUq2IHehz&!HY_CooAVxN)n7-r
zbmIuqfCFDArS2LMTF(<^g(kOY;J)S>ahvMux7Dti$!5wdWOlp|r?>A6F#Wi-0A^G&
z5Dul2RO6c}(QE?9vGt$3HJrCTtlf^t(1+P>C{xHI$42{iikWlhUsh1A`Y+3*C(IUF
zX*-vc^R%9}zn5%QnoDV|uXsIDgMYKn{r-k#SGJ<^MDOG;z(733e(^7W+?toEw~1Uw
z!qukLE@PKuq`_>q9jalGuO{9srbkg){R)Fg@;R|nz{ddC08BszZ(Uc#j^yp#Rcwoc
z0})cKJvJWEPFwRSp60A!b=ZwJ1PIl`us>kmbdK@Mwg<3pnNKc$qc_l&qO31JF#Eta
znh%HM$~t{R<_EEsiL%-*IXa_qd@6=FWg=8-<-{11*)+5g>YG%LY7NW=VH;zMzRNIS
z&$?BcrS~nv=DB%cY3Vzj^@X%esrc|(tTsK*tTQWxFqem;5$9J{Rhz5d@c}uYi}Hyr
zdO>4swMJrv&6wR`OtfA6gN4j`2Rp?%b!}bW^tt=lzx0I)S!xW-dxamy|AL*qFbLC2
z4>Wyll_^u?R!ftUFsXH@shQ$f?Tg6u(hxavr5F!BQIELM=W9Q3eVoVJ`tpY1hYBoi
z-KxfU!a$01_#j#qms4nJBKoGHjQvvwId4}*1*SN=&0If&M3+RDLj*s}?6bj2$XHIX
zU$#V3{4{9_KrYUkiZ3Vzr>#dShQUPg(-S=3!2h{gFhb-0Cas4WOKJ>R{K1uQfASqY
z+yoyqHI7{O$r|^H&=Letey1TkjGCsawFcocH}(gu(d(m?R4S0cuN>&>;#-M28Dq3P
zg)|<_+h4RPn4$7IH`z(Zg6;R(?gD5BTniD2Siwb!O$M*o*bcqCZtJsL!9`%3JM-5D
zrn@$chEzs+<^@u88})`%iM6)7pOIs2ZNJPs_}2)E&pPX-tLUSKND*-bOuNhuk;a3~
zq+#RcOoT*<<$lNe9)%{xt9;}5d<=)13VynheQzt*IB_~+C|#21Jr2)(-RIb1?ywnA
zhg57GRxO0r{Y=2b&svRQ7=}mJtbmk<5hv6FiFLbr0iKElJ}-Y3M`(jxu7piRO7HfT
zzU8^#D^_BNvBN{zfQq&Oj%2SNFN4&h&r{W~+G$CbG=n{0WyR9MCR=fiitJw{I$|HH
zx^>k8C((4|1CZxG@gsCRDWscjY&FK!g7%)!LUB6o^(}?F7~Hx0jsXc0*_IIBNUfQX
zyeUp9lo!w@p~rfY4J1~$`xh|Fn5CvhZgtOT+)R2@9atvHCDI)IfkqOEGGqI5LbFIO
zjOa7L+fGat%jc7jP1LZn-3P5F(`NDO+4lGal0hNKDm;NMJjtun(HMLK6G)ER>$|+7
z>!f1^(c6Q87ThBG$T`VpHXnd|`#~LS{>&O}BY~n`We9ZN>w!6_J0L;kdxr$Uo2j~>
zSpI;{btt|l{{~!)_=F>t&*PNkd5Vi#9lO5t@jJF0OrFzl0d)4M2jh2A#N%&VWjs}p
zy>=-=@dopM0mg@_lOj+ccR~`saGr=5>bhD{h{V!|U}Zq|g^y$2DfN{H{6j_TE&$3(
z5#`tbc!1CUTZv9E=HuF*0YIMb@jFOizwFQn0I^s`c~a-5QoFIFTZ0}gPjR9(wlqpG
zuAZ+eAretR{{^Jz^8)8psy0=ZYlXHuC4JczB&}`xrQ1?TqEGot%Uun3mHZ{;_iwbu
z<=ToC3?_2i8D$$-aMzf@2?R;_A|HE>1IsMQeR3Cmory@NDszjFM%-9|*+K$*?9%xY
zElB6juh%G@Z;2oUeyjaS+mXe`+sElFd6{~EXW&@miX)b_;X4cChMVM;buHGcBlC`(
z6iuU@{<^@V$~l*4(%5~4w*u)%d$j4-22UCTJ#GSj{s~hElojQ4Uu(YP-p6(A<<`Bw
zfWb+P-i5ia?(iS8`>-%(RC-Y%@}0#ii&Fep-(dFS%)Z%nN5TCYJErD>hkpUwe3QtV
zs%Lo7uyMh<`X1_~m-*o5)#dQHLw@^WZ3AT7lawjKxm1H|Sg+zu^U6V;S*7rtCB`Ld
zCxt)>+x>J_VkKQJ^6??!m8+UUtfi9L{0A|;Z@eS?Z_6U}N9FxZex*PWxqO^{NL!GO
z<ILAQE7<3zxFA6hYi$eGHPS48E>PB^PHH@8pZE!lWVvaBrHeKfE+2#6;m}G^8DrAA
zvPoF}P%-<>dVD0DaFdJi!tCPk9gp4NyVZV}Zj=8hX$!(Dk3IcvhSmCkH91l-yBkdS
zk_#|?a=*uAj98G;ZY*LDLW!2(|Fofb7@XY8|4kntV|N7N;`1aWA1FQn=sRk9?sY2w
zp?u|Sge;m?m$5$7h>~nf=E|Tpk7K$2YO=3{@UmVx5An|giQma+(Sp?n;?rAawC*P?
z^5$kanqp@2aa30kgNqi-0tEfrCL>aeJO~YH*T!f1vVpAS-Qjo*aXQJV8YTIUbKFz-
znnd?wg($0-KqAK)OcGrw!KrAv1jw7cSIN!7zJBAj1-M17d#K-;S{lqNA>2TlM%HmK
zQy#_R4`M?xY~>PI;NAU;;3WJ+3a7pVAt-BnXIf00$zy-MpD_&RwAGL`5@W}E$VyX3
zEO@I;O?}0;BQCokm`zg>k>WR8fUI@xU`*0tCUI<F*rbyanHoA~_{xP+WAx_2WaWP@
zNT>p50pPEfk5lpMB%*eKmYrH-CVJnvr%hpt)PTe3HskotCJuc948%4+fj0)OJnnAy
zc+xzSm)pXTPHIk^?xUC;TT@oGf`^rdJ^QL+`?AmkpLR|~TtJ>LP-V{#3|wUz+*z=g
z>=Sg|6x1(TLK%1dnAYQVmIr?8T|UXSZOXD9QS7#i6Ul5bcVh|f#?Rn-64Dr+Y*?&v
z;=skub6qk3KoFc^0O8s);P?@H0(6!pO~@HSr1<G9eS4afdLuoDHQZeKM09k#S1<nP
zl%q;BYdec4-`M7oprD^Qfv1<I{W87QicT__^xG!4hLwcFDV<*NfxcOdW7&h~<TvVR
zO|tFlxNVFk*U#`aHIUxwr35$^MfsT;i74$OSQ$6>A~kG2%a2n>cr#^eq~5I>_FmQS
z6e6W^ZGs8r@ejPF{tO$vN;2kig{Rm@yFT&XR4BO84dSF3Rd~v4=wdYwJj=TIxT$=9
z>{{=N`Bt>91M&78XbKav^6sz=sM!R+=}3#zWBff_%IjKKESztbVdBy@nhTRbU3<o~
z+eyatG92pi9<heG>^E_91!eHcJ1i^)`SwYPQqZj$fOmu`8*5RK`>1vTeovk6bal2t
zCK-A9MUC1>HzO&*ZM+vhev3x7m#Gk~!7_@%B%z4iPwL2bdU)@;`*8}noyc$?)1eb=
zqcAA-kt^gLPa?{c-mtC!4G{~^;aH;QiD-u5i{5@zfOl{WYo%Y41!ia~N)tY8?)VOb
zBl`1+!rvLzY4hbi_e7<@@pA>6@J5pCQ=xgqi+_ee2X`#}DSd}`ouB7(nLA2;KyS(|
zTO$S4MUW)TmRU!GKjnrZ2E)#u&hK~?D^ydAwjD^GD2+h*m@%v)SrLB$j5i$Bu`YjJ
z&5D$C2DEH%SjphG8<xsgLXtW?6>&PhhPB?T-Qjt}Vn~pEJejnkjp&E|1-z6B67116
zGweRNN&v%6R=LH@!hzvK8@WC#hQ&LnUFmw%`d3S%=B|)r1JSRBirMS3=-L=ShDDTT
zZLh%C#NrOSfuT$aW^O%%4fC*0t2z>@yQJ5WSM8ig<Ua2oY*z30Qw(GI(R3^HppDc^
z96H@LLPyJ>j;LwKB~us+p$I8)5xI?*94}x({MY=a?2uE^%u-^87MA^aIp&robu~?<
zaebzEQDc_hu;=V!I<p0KV4TENC~2{zZ;q(oxtlO@zOPgV6+ZNI_I{b+h?U5%%UqK|
zDFy+lQ>#+H!H?26Uk=e}z)b~^48F|!Z;tY{$Ip17kSq#q2R;hkkRU%VZnG+im#c$;
zxqj}rUBhx2u=AK9cA}sHj7~$rmnD<5bLcAx(l}{`6`m}lyEd?tqr<|J$lYLi4)<(Y
zR7I2Unr$*tf6e^`)SYa}4@(51y4ROD!j4z#uY^zUmIw4jBKKn*jW}xhGpZj~_Da6c
z@MC{ui{VeB!sfXxm>XE&7D(|9kWc4Zyi}a3F6~5G1fHa9E!};La7B(*))g0pJu9N?
za^7qw<9iRk+U4f#ihsJlC>><`BFR3IXn#<t#E3dqNSm#ytEls(xz_BC=Uf-s@ZFyK
z?vgl5Y0`(kfXL=6*uBzIg=;o40{xG6%G`iT(Z2u+itjdm0p}dwDmLi1D07TbpFd@v
z`W0WFM3sg1tm%i;ahkZpI}ph2#w5sFUc;5k^3xlh+K1C$?D!6Dm_M+oynRhv{j3Iq
zE#rIGkO$^x2-c%*3*f^X=5g+^3>!#liyQuMrH`5Q5X5Amg*;6$;}5=zWNxp?I@e_Z
z#D}WcRx;iyK#Us@g@(&I;@w;}5XeLdn|6^cb(Qom`BzHC%)OEY;aY18Tu7Lu@W-L%
z6w5yt)R71WLuLQ#I|bPp-TBq^V^g6)t`a2>xgAh5WJx*>OX#57_PQr|5<d$IhWpy{
zZg~~YkiUNLk&VkaCHI`^{{0ex%}dNurQd=vYCl)hz)$x=Dp8;<vLN2wFkgJ^?w{;b
zEW=*_Qg|X;1bIMEp6tx4mlx2jwR8l8T2!}|E=y$b=snOQ))IswbP-P|KfpYklNg++
z>%dHx4x~Y5yv;iueCni7^$w!EL}NFIr7FgMvLn#27+VN_%~S$9&^_~ju-6!H?GyF7
zso;Yj%`VE^Gza>%l03VM9F^XqpA4K$$>FgIn5}k_byx)Us;bIoW&{+n%k>J>*FOs>
zQ%-(yDwoMEcVN4e6k~9fNwg*xt=_te82RN|t?b(Es(s&g*mc}o!=-K#NAe+HRI9Di
zu=gf=`mSPm*C$O$3nHOtqn01nwbak@3<42)PxiT@UsH^ThaAbtt=?z+CB=N-Voag(
zgNWA$$I$57v*ZEaZUsc`CjDvepLP}vIMmN-T-14J;?jRgZrCL3L6Q^GGT=F~1|bE`
zqm47usasV(RJ>bV@*|IP?tT%{S>&auIMet!N&ldMI=}Odh|>O7MHDr~r7wFDze|`%
zX`as^D@Fc3a%MyyE#YY89g*XI3jRn54YHZycVo&5CqCVzZ;=?4KMyp;zOO+)k@Q+h
zwU19j8M(&ZvolIWPT}gyio!euKT3KH;$v?GpT2KIe>z8lvE#yJm+QtGSOpq4I!}&U
zY|gXI8FlV(ZMOF2B@t+;#)}W^XFjO46>Jj08rQQuOX4>DdXUdWZ_X`nVXJ_wQe(^K
zS4r2M7P0%4NqcEIM#@A9EBVUDY`&C=HFbXYi7rK8R%iGZko^-&@}t;m-uo6gb<b#o
z#@O+>1st7#Wv79ZcSk$&?%Z#yD$9fyRa}^`@`XZ@)};JxkZ?D=Y)*kdL=gAKx6c~{
zOef4r3iEeK3fd8tm88a9vC0>^NB#q@BzKS@%!rx8FTtC#^|f?T{x3!4=3Rvb#`>D}
z&EAfH&*={42~g%o_^<c*RWMq;ZPrS|y`5B&_EsM^U#33u$LRJsn~4=uye)+5ce;Sp
zqPFjioN|WF%~#oG!JRMo6@RU7@f|#4I=RT1kL>%Dyr|(}fwIMG%{r-fz5jfN4TJEt
zjY=)^Sk^6Kgy}SC*mm2y?R75|yB5luiWQMU?)(La7l1Goln6emux^!Xjws4!#fE3z
z%=xk#p_-()fo7W#X)_eq440FOxfi=y_HGwu2Rm?KG?yT7Vlo#sD^dMmR_HIZ6Cc(U
zjMOYve(@f5D!%zquoiDLB+^?xZDC73v#SCt<8R?IH$tq39z&IT>Kr`s=hQ#Tr>K3W
z;GYi_d<PlP#?Rp9jtu}AjqnwG()olFTbvTQgU=QI^viSw^unGi4qP2MxWQ<`UrK5_
zh$Ybtgu2B1%2e#Lpw11HtlNGbuz;_pHeY4d3+#mb2sV@AaQZqy$ZPV%2`O1?c)XvM
zt4jN<#N{SXZ8bg(4PqjCJ+yF{`&EVAWUf1yXsq27hS*OAd%A6mNb$|;30*Mf=(K@F
zCNI*1HyQ-&<@Stxyo{KJJWe_Tbmzp$@1v2^qGMn4qHy^FHm{eJCDJPyx1ZUMd33oB
zUMlzcmJrpp(xlZqBf;Tp51bcJKvXY87v6=mzSRiRBto4{x=UO4$2wmTjLXPKUuv6m
zogo2*t++xz;}`26M9keBn%o!3sBPsQ4fvu<Fgv0}@Y~_3^VZ*~u}|Bc?7U3UU&(_n
zdUMGkUVV;bYS0)I6gg0QL?hXI_b6PuX#RnH&L$oOpJbXkk2P|xz5g4K(vgp0&WG|M
zjpAGJD;g4!?U<0*O4`O(U5DdN#eolTf%2hdp%29n20?y<*KJ6u8Hkuht^Of;^_3`V
zrq6_#wUn46(<Ij$X|$iY%%iRXD=NOlh+(NOcdc>5p27+_1=yU7t?KJMt%0@7EY77d
zy&u{m+f1wA@j}kZu7J<A=p`!rKRxyJtA-U<3zzXL<Mky`SlZX}B61XL-%tEY2`%>x
zBI!EcmU;_m^Hr4BVb>h~0<NC!vA?2umXl;+W<GDAUlb}RTnKYpM#pU5WS_3#<+%5N
ztrlFp##{C##-zX6@slOx3K<uQa0Z;qOjd~~5@$cR-NTqivP^CK1)Si;JFv_2`|S`i
z+ubX~#byhW)#l>N5q{4c!XbA$6?JT?_a|ZV9_Y)==%SQ>*_x{szfYiZQ^G6#o#F}O
zBj-QET>CGP{eD!NUzz$O!h8*q^!pJz2+dMTv|!9{qL8aygobHkbx-kAG2^+Z?E!D-
z;F&4Ee!aEij$R&`8vx-V1W?4#2$+r58sZwjbED`Qyj}90N-(-XG8lGDWqVa!cr{YK
zyW{mn#cNggl&1`pS%SVAqa^(>bb+7LM8R}?`8m(lJKs5Hv+?&L@_iw)9tR;V!F;)W
z<%IwpR4dm`i|BG6zxy)x3wK;x^h@I%giNffvH7B&!a{+-Z`|Ks56x4?gsajFlYUtn
zRwrS=&vr{@kj|c*3`w`JpOCY#e79%0xo~zF$c8yE8%>D?eR^FEl1mUrwp!ZxOG1B$
zY8<mwtIMbs5A%<DwChA0BF&B--FUBXG(YiNOSe&ZGRp?LcCbL9vtJcRPCw&<u3E3j
zcwNuyQ`Lq@p6L5^N~6F5RaO^bjy(w^s|9;+cpvz?ooj$WPwJJVL!VF$3K(YV*cq70
z!sldd3vo8svQfaYisuyboZ{Z$iwvj0#9Hp{FnL;jZXpHXhA~OJn@vP=)8p>2zX1F~
zdaJv}FUH$>uWOl#X4gJ9=jjrCtE&E)IX2WGe+TXQ39B;)fI?5V3R1<G7Sq<)Ko_Ee
zRfTgq$V$%wB%1%gCJmreus%_vm4)?Y@+W#w(}d@tn9ZqlUVLHzNBL}azt+BPaTP^o
z+qTxA^$kYV*BtO7WtUGrba|w07VPTvHIsWt?+<w5=2SePwt9vY>ttobNhE@&VGh!t
zX&;m98DGX=@p(<cAB_Xx6mB=oJ1+)s$6fGt`arOU$27Zttq3>ocCk9CsQx+@iq3_(
z06SoQ{D6GJoK3l>CS894*ZFzA5jsK+V?%t?WBLo54+-cD&A=n~PbvNag4XP#m=$`G
z1C7KsbD0LN{8MJl3q5lT#5GJrFm!o--@kP~3fW%Yo8|S8&Rk;%#rsnQbmiZRB~Dq#
z92y?-J{bR`-Yc03_Ej=8P&)6>mQ&ahNo#Sz&Q~AvGKa}zPNXE>9_WK5kQFV+-j^q*
zNoIcmw3oZc+WcgPC50EiIsD$yByYapGJuFlSA-}11+*&OM(Cz*nrCjs{n!h*R=NiA
zjl!ri<Jrf4KXpbzh2Ncm;gjh<EQech_-XNVcid%-a{5o`Ydp2*HKiR|#U8!$E2XX<
z6y7lzfn*<W`i*Qr8BLDy{aR+w#d2by8gNp)RUH}!W8|YhTe~+^QDP?9QW_Fd-xdOv
z&E2hVb^Jy+k#B7kNxZFX!M4u|kN>R1nK%Z}&Y0+!is)<2Gvae7anjV13<*7Qh@XoH
zmwIwwzb#FT8s)kk*TgXD0Z*9z^cM*Ftx?=du{6?3MR?rx8cAKhO<XH)bQWnzA1jt<
zERQ!FI7!`?rbFs<d7Vo{0lq~LZ&_X3YaR0FPk3zD;&n9+OLMd;((17|`Yj(8%R90r
zxW7j6;A5i;^NI{YeN1}JOhTUq9_o_De5&)Ev5DwPC%U#O9|F|?XT<|R$UHz0NJ0Eb
zgM;aBCZ(*#z)(xF`z~kEujnB{^>iCqxj%@8X38SrKUcan#n-BK(%CtNbp1jz4OYA$
z>iEr~gRu$cPk_yMXNl9aU1`y}RbDOa>;#QB<Qj=ypW$RCNMxel0|z#7lKQ`ZQ%lRc
zqC7aMABkr7Qy-}s7<l-YN4^XHb64ku^}DJ^${gCgd;Es-nQ?`!@1E)Cs;(JWugjDS
z-66x4iwn)|{HZb|$Y~nUvT!9y@yPW6<Q8&5Mhe20wlGoj6F2b<mj+v~z}KQK=_^S)
zVW}w#1)q?p_&Jl&^Yj*j_U50<e*rYpG*ZS8k;mN;PcF^gqog>EPsWe<5tH;fk}fi!
zdROKgbBSv`A;nVuy+0=BKOk{zk~**+p>&qivyp<`MY9CE(Ffhs=n4YQ*HZ5qu_VY5
zV0^^Uhv<RvRU$eI(-9P*a6uk|-;jhXUq960_bA(&Bv?6{?amOS%OVogjVz=msg*(%
zj+;SnY9g6OueE3Q_?;^-oE}SC8<Ytqu7^h-%qN+S5VZ)ShEzx79zoscJQXI_OGXfQ
z)c%y_!mCrYl@+BJZS{5MB)rzrRmpG9%VzZU<;d|AnA9pn_TwIkWGQy@b+wse@9NKs
z;9>ikRNV^_ISrC$CTi<Yg)RJ;8fWaPV2}#$y@}9%{QZ}9rOBKAF0;lX<RIqx=mg#z
z4$FKvJF2gh+izMAr_c+iOKvjCs|rBZl?|yF4~3=8M>TyEaY=<1>IgPV!RmWlabAJo
z>ZZSd+VhOB70ijrA1z!;@F#~QN@O44EDLHU`HbqVyNs#GIm4+6j=q&Rt}K6<*!vP}
zNi0T*$r(u_4(5WL42X1k16FfhKO%2?VVI?vylWB&p~N!&*5m`(mG`Bdp=!mij>xr3
zsJygBG{ym~5<BsJ->URA{K2JgqeBx%6{lJfr}@u^PC4fsUaf_eIH5L-6eWu?$HZyj
zKcS(@3dz7|tJ|1%ty|&;be5V(gtlkVpiS(oy!+`=%K(&HEa-%Z{Iuk_J3IejbGijq
zdOo0Ke6YbUf^xeU$h?lOLs9-7R-JkyRXEQ!5l?R6-PoKYaEhoPUocllHo|=(F<3#_
z7k$@;pG7dwi&boudLFotHp|n~1q23yolU0YN%A3jKt51!E@=A#c4ZJ}jOcGVu<;O-
z+(zUTy6ZS&07PGSx67*6=`LSvQ9&JkOlOOgm1zx*sd0P)Ire&s(0b3DM+rQ2`1Jt4
zZr~)WGe8;%wv?>$Ys2#X1<36xt9XP@1)&<5;zMw5t)CxQ@+ayqr{ZR}k)!IiBWV^{
zw;>86fDT#1{X3UwgA}6B;`p;!t<A!A0NTv)%5da{S?hF6`v=x3I=QF|F<O4ZTl1yJ
ztO(+1U?BpItg*iyaZZ}Opjp9rU}na0c@VTbW^M6U5<LhM7{ea#w0#>q=);zlDG`Y|
zbUq}@PTDYQad*cE=y!rmvA5Bq$v?x?2b{(<MN)RM6x#aTj%8ZMMQmb-HwC#!vU=ya
zL21-LDZ27eweZJpILnPw_a7>TdG&`Y=^zw@^QI)nu|0_Y@HfE7eJKn+GkU+Y>iNt1
z&XBh<R%o6_&LbXbxuV?hg=&$4?D6+uaoaz&R$OCCPg2;4#}-0cr9L!B?xpL8NpBPq
z8xvqh^ui(7)zT?g+8>^?p{hvb$H-RF9m}nPgM8UC2abJRAq8{@fmURU`%$TWbwz%z
zuPri=UD?ipUENfn$ZKES6I~5eL%&@i?!VzU`6JA>&tX-Z@I?zJom^W8A-IO7khe87
zhkx#-D}Nohwq$2l%7j<Rg)M&NPT)fMO~;ApFj{7V`F{Gs=4FN~yJ2={yQv27+F%Pk
z^;PB0y!Lp`oGtk#m_}-@IAb$r1f#u(=KEbk(?gR$WI03o<%{*_x(+do{v7MH1(K+$
zI6hMZf&X6PVWB&t$z`U$L5`HTO@s4CH$HK^QT3WRml0D^w<+->bCV;tX~nfWBBisV
zN+7INnW$Cwa6wJ%fus2dDUu0A3spqu`a6H_%-DfQmZZgYx<erezvx20eA#arWj1U3
z;qtx~l`UqAgOebTvH*(eZSet)=y7(71BjT~NQ=n>UWz)cg9He-->wlnI{bG4@59A{
zLw3J|c30}cP*o^GE-`RW-9V|(o^xzW)oh`&y|m{(ulV%awCS^I(J*aj-OxddU8FB1
zuKu&pHCJ&|bBE04<=WL>K#k4z*XrCYbjN3{lM-ICF*7{HKq5(g=YvKg8lz6|Y(K&(
zR+NOV$%jeDao^3L-tV|HV;3>+6x!7;YMM;fgwEtcaCXK18FyFqnwWnB`^v93zF7V|
zoh6X|F1x}b1sM+r+*pX{;_l81_it7fP`%U{y?RVpY6>lwuCV1B@p{kAV+Pw2+Vd4L
zT1nk2vcCYqMi2O#&)tr`-78Oa>it#e2KL<{(ezsDNtc9sM=YLU&n!!X+)SJg!PV4)
z5Yj<kl83cbd1~M8VoMvdFES&19TeZ)@fs{>T74BiH2_l>p)*6`J@-0~uigA}`84~A
zNPy)l<Zc+MMBl-R1Cm@{OI=@Ec(Woe)N*@hsZ`Kj_(^j{K&3MzbswgGk`f(uaGTx-
z#qsM&*5Qb6NW?UrUgqsenD-<`@KTMRTc<&tm@<tA_eynsK!j`g@-1+l<myLFn+h5}
zsUAl^<z87U1R0H7-qq>dKW=2mv?Cyo89@+?JjChba=_EiUMKn1W}aX-m^dEQ)anjM
ziPA@0Yk%;WGEfL<x_?sm&gT%3={(YrDDp5hWey6a6|%uQ#>*Mnm0yk2%aK;&R$siJ
zz74RC;N@@xjB|fRi8Dg9#oI4V=&4g!?yjim0+uI(slSSm5%PmHmj{5|px$l|Zi}UA
zlLH;ZF=f5|kK^&MGUw$`Q?2bVQ&PHIfOc}mn<z5QTNO3_&Pw9@I6_@PnmlEiJbNy5
ztUPXgHruS?b75Jvf>RKwMgQiV&abzb2mTfs)ab6fs@r3`s{0uxW;<<|{VO3PdC%4}
zr<RG>>d&T$NyZ0=7O4%-3+(5Ef&t*t=0bN%&6&wu7wNaEK%Q51=JDjDTDit9zl0cV
z-#lomJ86s~@@2PV9sDBsf!;F3ksiG7?%`6HQ#)>o&*#r{_>jG?D>U~ytTVa)_(0y{
z_RPpA!VI3y?#k??*fM|j<2iI*-^Ng;XrEp?aZXUbzvXI|fRDjf0YfI_=4>l4l6LNV
zKPmGm<%_R2+nY>plvWk_M@s~d4*p0JMH;80VkOR+2T&cwf{$Upli5BMw0zcU+OOAW
zPN|B5NZMVqXep%XvU)yd3(l-9ms(?;YkZOHWL5NzZ?<Z7Hgb-^mYalq{?Tn5dhUuj
z`-BB*mH2o9C^W&G0Iw^_qtb))xN&jx!c?Mo^?rZ;V$r1##h7I35E79p<e_1pbSu7u
zteOyN_NwZPHTPrKdfZ)F<ssGOZeADn#KAXTcg*0=$Ni@*u==JW75RSwvi^^H+F4Pn
zcfrm}&!f1ydhYPA%XHkgt1VC2wMIp+&S1vPS?HZ73IL*h(wXo>-t@D374jFHV}Whf
zMO{(0eK6=~eM7zo)e-3qL)${Wv()0+$=Q_ARiGxy-BUwgasVc@RQ4taVi|gF8l9pO
zBTK>nlD%d@N+<Rx4b5NHGDU^c6~FXh`P>(6cmjAv7dY=pf-sdd@O;JDJ@_4u<PY}-
z3N(|tyE>RUO8`oBv8YFa#g9b&2anjVk-dDi-Zoq&j=*`UkkRsNq3zqRy>t64-p2b3
zMkC0x4JLYe>0K&i<iN<-4~trU*&PHI1JLMoae<=<F<bkqG&xcC-G!Z8srVbgE?7#S
zlkkiH5wtL6<1|;+HYEo{3lk!*xISdw(%GLfR(LhHN?1%dc;$O9eRdjXBo<K^D}^9H
z$&-4YoZlTda8VVd#CZsEVWezkIuHmpiI4L#jCsB`UV97UA0X+9{2NP-aBD>ER$y8E
zY}uv_>(!92GDCU6mz}Z6r5x(<^k!H<%hV2(&hD&G_bTTvzyvlMpJm~7XI&esADsT<
zG0Sgy<jji=Ybx>e4v|CtBoYj!pC81iCu9u%u#s6g_AA4%{CI0_IpS`hb4jb7$mG>_
zg7}O;XWmsbI$+6O9xwJ4h%`p>lBnB7TcmR2r{l3wPm;?4zOe(^PY?>>^h$Nc=TePj
z<`#sk5}x3}7mi+k)6$9~aeDimQ8y0DYZTkfo_IX4-)j5m_ka+-%5Z)HA2Appa-Yj+
zJ`ff|kSr^4v*4}rjyNHVRBdU>-JHT+7*4S%+RR}5SOVvx{W)1xqlEQDM)h=|lT2iu
zp8O8eka5u))_NOVZdE_0mxw=anY9ofQlx*Nu4=-O@v*CifDAX>FTCG>0kLzVwdG<@
z7^`skmg@H;E$jFU;K9&s>h<K0yJyv`le+o5O1pGhOcCvJ{Sr(3D6Mlm7ehzH@TDzP
z!7C1#{oi?I;}*PPtuNY71wC9RYWA;{KGFLpBN@}OBf(Q(O(Nr~j?B&^2fD?(to*3V
zGCO6Z6h|{0vi~0mh1X`Mrz1>NtCd2mpCXB(PH`tkW4r+C+JkBpU^NbW1JqP*vy!oc
zYDSopu@+{*VWc*%$_nhlfcLH7D(aDKkxk%ap}b^P%z+JhgwwyxudL&S6LWt51=*lr
zvy6SnE=RPBO+3A)t{6cw*At%y1vXUu^WBDplC6m@L#=UyIlsc{pYW@Bf6=^6#}4p&
z5^w4~TWOT@Q2+Rxwjtz$Q}(^MfED8VjsxWFJl=wuCUYRNmN9}CO$<aKzdj#N6-q>*
zT}8ZIAN4iE<=QqO@G`+%34|_RBQ;*!1)ncxh{Iw)Dk(gmg>NjE-zK1v*bp7N)R9~d
z*E2z4F%52tgHLnPZRP5tbHt!?Z`iUqQ^;i-pIZ>*Nr}mu(bn-8vf<#$Z^rD23ZRV#
zy8-p!omq!MT1wteCi0Fi>u$4jin&-be(TtuA@e>Du8TSHReGxd<OQC>a)oB~E+_<A
z_iixGjZ`(Ajt%UC^d=>(fsP66<cIcGm4UR%IO5zhE{a3h%#0baa*pDFQ;NRbeVJr<
zu~}O&^QZ!P4cO#8^L)Jp8dg}`^Y;7{)%_+;$Sa*+Yd#kG(4fW~6%hvx0d84OM4F7h
zu^nh=d0{{eF-BbHdb=0s=0Y#@YgS*WRzGGPGmnlwg9T)i<Jbc%7*lTJDL*1fPh~v|
zw0bnugtB$vYkq@C*W#x{0s#URyaJMLw@2phuFh&d>noyL=7@X>krU5WhlXdp^ETdy
zjX9B53fGmiGq=l_zL1UR??OjcY~DeS3+oZWqny|l%n-RQ<)Tq?{_*3g!VaN98XA<`
zauX$b9r@z+M7f+h?DuMaB?)b37OuDKI(38z;@lxz^J0|%PIv#e--6O#nTCnyhc)wO
zF#W-eN$W}RJ=dBORbMey?sH%&X}$E`|7rD1+m7Kuj%-AG!kD>D`gi@LE{#T2-Db39
zih6Buw9mpADw0vS!Cy+h6XnOjC?z?rHv7<3aBfEL&YEXZ&jwe&aljbOxC?#ZN7mxh
z0|cTe^-hjj0_YG)iOaa*8Ey-J)BW&pA@OsmK@!cP`S_@s(sD3dkQW1v*TB;Uo2``{
zAENq>Q%hd)n=LqKI2wSgS+dlw<a{P-$308gz7iR?M3ETf!NkfPFS`$AkuGDUqJ*{+
zl<&#rnG+|eXwH9Wn2|vJ9AAtHm_Qjbvwt8|RD)(89<ZdBi%s5d46c_O-yX$`Evh`g
z+Sr)~F4SQC{sL~K0ejzO4)SM7^<A4~3SUq+OvpgF-E*v-(V<o@`2?1?x=M!Ok|@;n
zgEk=sqHkF%Cv3mq>s>RAyCvC4t|d6T`~wIx9la!lytW+ibj}}M`@xQ<C4N&Itn%CX
z6Dlu+_dJ*ar3c)l#uJ!RS@)4*IX_O$3-kCz!1q6W2NE9qJQ~+UL@BNx^w;33vm8D#
zPRL~_?OW&6T`u?_&0@tV5rT73a9tsRkX6ZGogYK+veXh)GPLm7zFou**Lm?yDLT0S
zxObO~pz_lYZF--8y1th;u8h+kq}J5?T5lfIzk`MOs-S!*v#c;rT=4p6PKRhOjVI)(
zB+Nc+cOUzV_^Sa7vJj~c_Kg`#eQZ>okqErnCsK2}!^7vI%6=Re9dm%dG;X)C%S<gu
z&0Ig5_xOs{csJffO5(@RG>)_k>PcRe95(ccXlkFzOIbMo>LA(m)}vhc0jcHOF^lYo
zC)HWvWg;?`ASXImG2Gd#&0nQ9KwLD?cut%tTY9PnA5m9Z<noN`*oe0;gBSZbY1JS5
zD_hDZeMdPoU|gs)(Xf`K#v2fs#7OSc(=3oI_}J&si#DDe`XO<SQkF-`RfR<Quanlh
zBTg>U2M+wpf=88QFlEkjn>ngd_c4M=C{!f2!@2HHTO^z1p%`8W>oCz*!@GmW-50Te
z!oVsftOb?GXnjA53a4Pgr=S#LQ0-GU;rH=JkFOyd6BI#3mi@#bU|xDQho?yEHrqs$
zfkc;_=tugkBmvDNjQpxey0deje~-$<Eagk2(AD%abS@<wMRNMmG{6Dl?>=U>UubP5
z?1S>w^QE33mL(_;3C6dg`2Cnvc|C68lzB!CZc){XH|}9)abUqbM~*d3+mvF9tt!(n
z8!eT7g2nqY1(F@@8Vw_%b(=TPRw{%GBX^-FbjFB4@~^cVVS_(<drk!~Gn%<SVxHR$
zZOh3rqXhX9FGqZPxbB2BrRH3=C49(;u6V;gsF0mGd7$D~EE(ntGUthhVhuI3#*75X
znfV9H6!tcGs#@{Byi}1Wa`JTGszGQX(1y9xa)6-BN<BDFsuoTXS*V*=DyRYO<MIN;
zTEls|io8-&;HCVI0b(}&ShdAC)zLfK#l<}a;%D3@a*sDu8T+7NiiwCEYXhE$cKtug
zmM}p}Tw|yCZx6>1_x-?6#$#u5^m=+$jW%uUs=4o`JuMrWr`IFjrz5ZM=5{{b_{y77
z(iRYc-)r~u9!Be~9+z+=;a3~TP>3qi3ySc~A;8lr?`TV01cVk-u?FQ*=VLE^z0091
z&iU~ZsFFmF)6gxUdZGM(1yvrZ;VDqDN>Je^jm%NfAHz;5J1x&vZ<@MQR8BQ3qoyG<
zr;;ou&K~VA`+&E2`jJ?DF{)J2As(q3OvW0Uv+mL9MLH#<033H5L3t&^gi>Cwu}hVo
z7Ib2&v+(<$G5So)muuwF)|AVah*C&g<RB$Ph9h*4M%NMag(q1laWJ`fJ_C>A6M)nL
zaP`SUKk?1dBY#yg6yd=ghcM|tN&-M~{F_Fl<#WuINl9x*IK(k-pkngs7f*&z<+>zF
zQuWl#{N+&~5&}?`Kz19GA35auE~Dp+=qvMXPpiyS#Q`{Y#!sP1Ot%j;Xl<G70WtQW
zmX|t?_Ow={B@41caewC-kCIWkOC_kYg7qgN;*$DLnriXt$#u18rI|9GhY!^zjzF_A
zcUqb5uh2$c@TY`G!n!qr5g<sPQ|eSsQ(j_<%M<njI}Jq?nyC{d&Ip+)V5Ff*a6$1J
z-UrjV46RUugjChx^dUum+0thJ0JFI+NJ#{zeSrco8OcS#_fPO0N}r*%@aaA*B8k-^
zR-&>P2PD`Jm~ub_0$<F0@A$Lo7AKdmIvf`Os}iw1Qevdckc8@(xoR3f53`$hi4^B0
z1=&^;Z2A^d<jxwAr9qq#781w=yN=P%Iu(>Le3_7FXmPAYb|Hvq^%iEVs7i1eT1nwQ
z>m3hLPqe@K#c3wec14wJsE4}*lgPYsj-~pD%<Bmj^pUaoj<!fjK?2dqx|t<YGct%D
z@KLe(k4LkAP?A=TzLgCmq$G#k;!Ac@`-s1YCRiY`Vd4rvW4){#N1m+FNMcKeZI5Vo
z)Sq`76AqNQEG|4mYX(RS+(Iaa6Y8(wy&3a6Q0B}lFjQsCa+N4asE~l5q@}<+f)vD#
z&BSeTx=l@tBL_xe#BmJCD-3y((_W<dQ>N;1HewP0Q3A;Zz;PV~DQj51;EO|zM{5BZ
zw^evWDU~S_lE^}3q-+_4Xnk7he4d?QQjt$j{im351$r1>7cBd#CjS7EM&!pq(nxYZ
z`$m~t)0%m`MyJh%27PS0Psxv2ZixtTN|2WNl;9KE5ov7-vkTGAnEwEDXex1RRezq%
zc(T7Sh}SKOwHx|LJRv&);yPL-0vv}^!+?;R1|hB@02_TSV&&zLyC|O_H4>&tQc`8C
z6p*0cjhUn8>Mz0_HVuu>slhXrKY~zBo9e2Lj%Yv*O@SXaac?p7FhL}MSaA(34T`Vi
zZx%9?UB%HpjN}*(;gcv~ReznuQ(KxVfz&7Cbq1gV;ReI`$54?*nn_5LODPg$ATm)c
z0Vz+w5s^w%?nl}I3RzM^2JgH_Z!MPkMJSZtP-PxoH7=@Vdc2d7F^po0igqJWO(d#a
zG?W#UDA*DO^nzUY5xDHOySCfFQWCxF@tP_)&*AH*Q?UFcdBZTOWnOK^keZ6AwFzZN
zLAqq1AEh8QzB@-_N;H7{#i=Pu1KdKTDk&=}*w}cDHrs8P(xX=DCr@xrre*cv808=4
zX$pdnq#ZLQtN4(v40F62@TZq>N_mvDIM!yPqnJXKCPpf$X)5l?0y}t7Q#jOqA~8tV
z3)(DOj++L7lX@MMGFq2Dmku@6t5H#{#O4~Q+qeNsb_2w3GRlA@L=&@^u_T9acfW{b
zA?yc}6K>ePicQLxU(7HCB!hFnc(6z$wy0GhO1A^_fDu1ICdaf*Kuf$?EP$dN$n&&O
zRAz}Y45@yk!E~cjKi&YfOoReh{{WOS2vSQp2KS2eDL^^*5H$lHiD4jz40(#O1s?DV
zIfE=4>^s5IuW^0H5R%KJmSQb<0Sc0myWQXfolmef-UfwCRfD{FaV$bk4@*9#reKv%
zPc2hsO;*WB4o~PKDT80?MtYHvFj;j7L94|pRw;1$Bwcki3bIlMVYxg2V^_dwDEcv%
zEAd=+5?(Qwv5K!+p>ojEt11*35+tGS2FV-?mW?uAYnE}^ZlY#dTJP;DgEEIJ<;h7V
zbjfDku8tJ9a-(E|BckN)pH@}k=QU>aORUF~{{WPwP2^!{Ke;dXmVbGBnChu4l@#DT
zKCI0-l3r@gDibRt9awG<OUT2M+>Mtvy0#k#=;|qzEtEiRVUmL=S+tK4#D5YUO`U_$
z(B(WPeM)hxX=#@=bW#l2l$51fW=&+=79<b3h?ABG-4)QuPhGl-F)5OQ@>W@?RJw4W
z;}ES)jKU7`(QrGM?mAH|CtwHZ7)-H*VYxFaQBYB#G?;Y_I6^g+0U%vUCzH7*Ikk3c
z#yZQ1)?nFEqHY(Jq`a!!#)LYmp2k%vKX7JUxQp{!=o=P0pX&1tD^r%TT*Obr<dBN&
zO-R*GhmJ$+xuk5KH!-l~9FK=&x(Tu<Wvx@E%rmO0>J_eiOu|Vkn7&W)8wmDg%)5u>
zTppVZ!l`A^(a27eN|rxTPbBV1jHI-c20y0phVKY05h-J<P(B_VKeV!s=>W+}Lyg1(
zDN92Fq?7x=L|EAe2tqp*JQZErOLGdKet)bgS<dV?v>O8u=-=imGd(hG{Nlo=B_`?d
z3Z&{Jy_@xcMnat+kpPlZY~#3sfD+()j53SdCsFMZfT0?Qec;$?w*11&Qp(og1o7Sq
zrI0s25di>E4$wgg1RIz{i!-?}K{`Tr`^7+F95^I5^NPw1oB}s-2SHLuxbehSCs9an
z-NV2v76MysM}~p^>@x!K60IpDkWXmxO~mDT=#i<EqRw_Fv-j~CbW_+0bcao%wM{8f
ztQ)*U8)@DI#NCVl=_pZjm+uTxNBtrzLM-oN$cuDYhU0h;RF89{{D`n5BoZ8$sUhXL
z7kITP(&;-9XNr+o2BO6A5h(@19|Ua-B@3ID8wen<Pyv#A#*qryBoIgq5Dh7N#0g0R
zq=)%{qUr!8?aWF6NE#L&m_mTC%whDfw&#%%8jw34;uH|1q!Qp35z0Epn5NdN`>RP-
zyf#Gn3aG{LbL~Y982Y{21+VD?c5@kSwsm5Mr5QG>6{{MmR6Qy=0N!-s3lEM@i20YI
z`6D^?zM^GDZl6?geCo4^P6{Sjubp6!IWogHU;_y04;pFCT3@*?u(odVFg$nh1&53W
zEzg;ix@Shz;;+*pW+If>dTIXvmu_r%eWU0epX6LGr`c|k1;b>fRXsGYhhP;jwbrH=
zQD-JH_+A?$38IxrNhK8WB_JYPxj~YYjer~o^(AFpqdc2c(N+rz5|LwbYX|@_-H*f+
zxedp7DN~&#JAXKoK|mqw3WPG!N?bUG^oZu$k+Z=iOyBp3MJoqY$Yz4cQb~AHbtQ-?
zFvv>_pZmZFl(>8S;uL^4v)U!xf?~&Xw*LSypjV|O?gib%Dp^jQK%N>bQa@&F0g6%x
zai;$O#mqoDz(55qRSX8Vbh(8m_H0M`z!ORnYnTC&6m0(hse>^(2zDkA@nS%`0S~OG
zb};AxBq{BGm`RFt5G~!|(2~aQ@?aD`!OwPu0VVRq?+i<%s1N}24gpJ>5O!e*l>YE7
z+9d)Yr1Sa+flf~k%sLVhKdcpO><kjHB4w}K+YjdnI$AWTYuH?qu=5z~%bIBN-4LHv
zr73GtWl51JYMD7?utF2bKg)9%%uadCs$LH()TaRmU2N1FAtb+~6|qrk_DS*}(xRgu
z(oDT83&Js4$x_Uzc(x@`h)Y2QEQKkNZX6~4@w=%k<fm?4DoGN6N`0vS0i-Yj8AK?D
zJ4J5DKo&zij{g7<t5OgYOAR(8n?%$lr9_=0%zWL|&aYxUK+MSLzCoX=rlCreiQ-c3
znKJfRN$dl;`wih3xjI3$6>RqA%zk&`2R6*fxD7((<mx2@jpDKceqmtRo?5#WxgL1m
zyXCwNi!WhPF#JBAT|Ff_1i4aWhDuU45rXO0PVk<c<$6pT440xrkn5bG@0`EhTD+h8
zM&6WrY)KqJwB1Kv%#~w%71k634)=n^%3uEQ$a80Zm{cW1kl!XD1yr)`<bebwEX28s
zG_q6!{h>WqYccZ{s96F_jmL2iAU>^5`v@gun_10_1eB=nao?CwB7iotj?fXPt(<7`
z8W~Dtlc_5BFr{o;OSPZ~St=|av?9_J2_RT)7E4%Aaxdlq(xI@ih!Ck#f&hIlz!0bA
z?eP}N0S?9cc8q>2S6L!LKOsaa=Pi0kT+uBu$MF)y{srPPqY0h!jTTD5Cs)<$nKYkH
zo{~^`)iV#%E?Rj}{#(Z@$Xy!6F^ugus2SdB7m7?FR9I3DsdOo3NGFv6Zw2F~;W+^Z
z8~)zMY6dfmA;mn!OVcWmKf@^wSl;R`qZ@3bpgxqaHY9NcX+Aq<v8Gi|C(@J>E|KQ(
zVo45NyF(Q+k73>dR7oJQ{{T460=tClNPWz5-lP?Oc>Ssa_6Xto$5sJ}unoIxBbId{
zevHVV0#EEweLpej_I2@@dOF?0PX7RSDlU>uuXcbXl1OmfpeP5iym2VW3;|8Sxqe^*
z0DgPGDGmWqa|IVlqWysa3Y>x;fMBGxh;)|cV+$~%LKQM%(uAZrB}@SG9t;yPVR@2F
z`PglBJ#?uPRU%}{d79FhiQO(a4kFW{S&;;RacE~MPs}P*5(_c=?FyG5aQwi9h;$^q
z$bcpSlBQ!5NOY6#zVOLNWq7;53Y(X?9w4NLal2meQo_AJ5%lxK6%>ZuO4u|@Eew?@
zWZK(7ayTGF)1}?VFce8i2~&pHh@>0>lXf500n%N7By$J_$R^|YLy#5D@UcR(UCA*C
zAY57)Cd4>lyF)N3E=g|jQm_Op3%lA5qQdrogj|wBVTO`^rXEaHU<XKY0o4!>s_r-5
z3xYOiM370@FWM*x)KpkM%mf1Ps+yTo9ZCR^{R}Xe54krHEDEWCB%LpNn2B17NiF(C
zsw@ivr|}+odB&kj8dQ{|vPy$L+5mZB0ZL%R_x}LT5pJso7x>x{X(LhCfH>YJvJyqE
zEbvZ|2T0So_<{m%IrxQ^rD*pSFv)#)b~f)CSs__oly9fWkDdDC)f_7s%ts{2n?AaA
zIh8tY6^Ep%ZXrCY_H@NU$UtpV3E_u$=)HB%)!7><AviTO%G||@P81krJhbX6oS=Qj
z{*Vd3Xv1_*pj4eZl)PgSm~l+iT0L6KLU;;N6rt}z4`+8fn9@oslQes}XVWY~lNFhp
zb8S*_9MMxO%9kopKbOO#pZGFI^;az&JZJ*R@B|_dq?QWM8-cVkH0ia&$2)Ie#b#`J
zn=N2ChAUk(Nu;Ebsnb?fvQi#6cTBOoJ1pTi$5b=C`FM6Andb?VvZgwwOQiv%%0LdH
z;A$XvjpMJqIL7*~l<D&J4PQ@($eUJ$S4^L(@_-}&2L9$P{NVKe0HC-wOq)ktS3aXL
zV>GgpDph_azfj3Y3<RX?f$@JasJPa4DeTt~f@WU1tWU<jGnX<dh*X><CrwTwj`PU6
z5NvdEH;&DQVbU<#NmP{55l>GgArj?FNJ>(XumDBkR-gbZ{h|c~sG&rI!92!Xo7gN0
z2oGo}(jNLp^@T!1lc>A$A(VlC>LOjBAybySL!xOLADAgXXLp4t1>Eg%7a(O2K`sO!
zT0(dv@X+DNI*Y#z3Zy0N3Sg*}+_(MUYf_n;cE1o=Njo@g#7SThZV4faMnwfFaKx8*
zh)<?ScfFn*Ac3f;HqFdBkz!50V`%^bsIyoPs27BLZvIRlniZ=^C0-)bD!^%s><9@b
zYj+Sx8f*xM(o&@c8hgI*NeTJ`_z`FbY>$YoI#m2~c)w1o?038_jaLjjG$<9WT1d6N
zBga1vO!c92C8A=Gtw|~mrCbr%#z-Yh1A+;Gq+N@Yo+A`2^C1NYW(gi36rRPq$E`uh
zc&$mj-)OW7Y0b#K-!X7dQ7rGa(FC;VAX~%&fQBsXXdo0gNz@107#kf#KX@e!Z+KFI
z%KhW~;-Es}>9?3sFrRAA!$CuXb|bVEO9WUk-UtAtB|wp}v4Ewo+`#s{M82)<$c9^`
zOj-c0SQ51)4e#?FTwfHCgkkgRDkh0E@@1Ken6kkj0!jM__AX*&OHnppO{3+^<(V;W
z#Abb7Nx&B}MoYuyq~kc8m0F;W_-H?7ZcCko>Rhl@jF@~YFI9dkx|cs5#8o<9Pw<8M
z6p|I~H7X29Z#$^)7)RBV6cRA%NmP{bQ6ipFLM6+V87WAx0B|CVr<Cx_n}E|{SXCUl
z8VYm-N|P&L<o73XPVtx09gnoMSxb}pEfHX>CC=nF_TC~~1Ic~1h9nOsW)K9nPjwy-
zc*q$cfnq<>5&bog_wN!~h3?ndEYcKsJZ%I5m4+qnv_z0RdGiGz7X(|x1f}&2z%T(N
z$pd}M#6Hv(E^V|ffRz^Cv={a<AtXQxcOgpN4U3X3=J8a(q`HF>+7?oXB&~-GA|XYf
zl9Cf~!@{W~64r5h!AqbJ1^0qfR2;bZfC2@-i-s_Yk`nA#h>1`_i8o`hhZ8{zLq7mS
z0?9US=N1IquyMhJ2=^P1IJ8=VpbA~!fdX{B#41^2kQ{u$LP#Xs4?MsHkF;7S1;c_z
z>kO@}b{s;xumBZ)jSx1CN)7jj38jH>L;nC+*E61Zqsmm$5U{$uO08a%79S$>YG!sw
z;0?-+fZi_7xxR-aWsE}`#AbSQlyZ#Km$~eb<dz?J;=LvQ)6Y4S=IZqdjKNzV`YCNx
z_#GmTwFCI4FLFNS+wL*6<<bWK0E6K8qA^9(*Up!SW=fzvRUrQWvtd#$$d>!4_uWS2
zfQ|1D!zu9kKT1+Gf^{gh)T6qOa(RWqFo_sN3Tb4ma~kTKJzAEv9n_=AKlb81x(G@4
zY8{A<do6&6vpP&bl*>!(2k8uylW)?{7KfIreV!mbcl<>4wyO`Umyq&@sZ*KpBAGK6
zCwg?N_ec2DKS6lhdI`}QPL*IJ%@XQy+Q1cAR0H4sT2Jtg=3K>{HRz2tPt17EWyNX{
zjAl$qdZ#X^%l61gB!tU*i!dj9M^#6)AtbTlI`Uk2NA>!%EoQ_r%(Cshq4lyhyKM0S
zf=S=BC<KyJ2rt3nF`|Sj`if4NZ9t2H2X641!ctrjeatkqI<~)<CJ5~Q*7EQ*I!GIi
zS{cf0<L?M}+QXPIK%%D(KG0H!H{-B{OoXLck_aSjc#BJvlAYP$v#=>bN(dVc4$y_i
zx$nzF7fpdtExZF~bAIpv2@cO;v%-}7Tv!W`B3;PR<Gck0CrAaH@x%haWlWY`A&U`i
z&<wv({{V&d5E2lHb{xftUbK*H-K`lZR+SI{at9E=`d|d7!L%JZ3X6S1zR*%spzpTO
zAao}|?-c|k0F!TsXDN4bAOh}EAI}k86$qyUt8#f8G;)rj%qYx@1pffSLose6ttMeg
z89*!T9H*($b!KvP3&=+wG27YkJ1<9E)na|XByM34wJA@egq~U~N|KpHukv9;!Kt-6
zfZ{j<%ZS#^qChE;GJW=xmm)nB^Swzxkf~*XDOTY5ixkC}_&TFf&)DSpItf@#89YnI
z@j|H-sz`GvH@<TY5HIplIfBE<Og<$cT~)+taMaCItf@siMOx)JIE3VvGJ?P5C7EsY
z8*B_`vPK7nVA3%aVRWdWqp7M=lO{~CiGI)}x<Igf1jX1c4NsLawH^^b6qL%8Buggh
zDnC-sl1WiMV>?GnE{cgVP(NoaAt@Z%G@a<WLu&y{H$B7&Q;fx_B^q?L@KZB?X#<X4
z86Yhe{UR9=*2Fjfa}i2PC6{uN*AVFfW0eLm3R0j_*5>=cCIv!<lXmjaXS6k+v1k(X
z08%yqnc$#5EZjzr2vJbjFoe`h!d-(i4WRPCl3)@A&e0kmlIAIDDbuHoMlJ>H7#{n;
z2h>6F!7*4~qD6;r3@)}(^ruN;9Jb8`DRuU4ID`r#x$N;ZkO?;;HmJz!16MYM2(xxC
z421);5<J8ONpr_B0E;?@-<ZQ@oYO&+Fj|~b6RDR=M^1vNlV%nGb{)y!a~YYFQz->Y
z`apN>90NC1(r2o=%+Z--N>@*Cq^?#SM!96n#VQr-^+I_L=$1KsUSnw|u#7fM>lQiq
zhp){zP6;Aj1w}<E3Z$-`sdEdhh?tjQsUYpNcbvOWFsU@SW+RA7orsA#^nwyr1(Z^q
z*jxEVnb3}$Q{>FKKD6P;!!bN&X|AcI3I71=gY-!FLT&O@9UXXmJj%&+E2fmXsX(O@
ziUy<uxCe0U7@JN_#bBz^rh3vfIoq?rQb;6{EFd9mVxaKIKGJy{v=ShRI-P^}g5(!#
zM3l0PZUUcU0%C#zH(|s9RJG0%)vYNq$gy(oc(l(*H#=Xzf)%8DgMs~GsgmW)8dR4X
zv59q{Jj!`h3QDFWD^#<oB&0Y#4-sLqSuO#9kjWu|e(*rmxwmN00V+^C+5*@R+m6v_
zE$s!NR6YgT6atldNnj1OipgL}EJ@tMfC1XkRM|pozj8wY_wFD9m5ZHE5leL4&k)92
zqz`CG3|cFZOX$l|0dzm_T!9b_0MV(7L#29BZ1A9+3<ZQlt5XAS5CI8EAx=SUi~%u~
z^TaSZ0e1Vu)RizT8UmB<H@EeOnWakDw|*elxYEPPv{x}DE>auIxQ=bK%93ZGEyEqV
z7ytyg4(2GOsr<?{r3wJJ8iPlnDj<!XHl32FP?D68?&c7;xLvulIU?tC^Rz0~2Bd<{
zA9w@LFG^Ou3JD{2k3u|O<}o;S0*pGl5T7Y)2&-CVlqp2%Pz*?YorckoLm?zA{{T2x
zE^~5!Lkp~2c7;g+$#pwk90nzT{db9dfs-(V8wcV6Q8u$TXaiEecS-q%O1A{L8xGJ+
z1OlLMZX!w`1&7<1M5L&;lVkNh(P~wvf%6(fp>&Nyy|2t7R#h=c0f^tRhEW8mw=k*>
z>UL{}v=^X6DGAtt;KJMjQ|=>(lBOzRRjds_?G6YRDKFwPS)zay6$^*kc!hq`$Q;6{
z0YrYS%vc$_js#qSzfh1_FhME=7B_=rr1m@w;Gm~SvtA-h6o62*e()!lB}5ao`$gpz
zazO1BFg%BYUW7h<{8M#T6wA1FAC$7n)&1kgRwbcQOH4$vbo+d$+4umM-})ufNj);i
z^cYb}W>*pG*3SjZ$?Cd`_8SiZJ-C-huzXvo7=CuD6K3Lc6sDR?qDQ10mRk|^2OxaL
z>?NfjI!Gh5Tlf@f?rO7TX{wZ|s*!(GBq^I<fAs-LW4fEW^A}4<LY$;OeqoN2X1Tmg
zq7@`46J`PyaZm%>sp97jMZ@`kKqr#jq66nnyyJae!LwEzGDSD$u$id<rcE+tku(#i
zOebWJ-J@mc?sn>$u){kw<cVcPR{qbdLMG`OyrnCDg-2#Yq^(BQF%vB*dodlvZB9xY
zT2=dsjoDn8rE*ZEB}gS8l%3OUkC?HNY<VtP3Y_f16c7myVHp9~*t5YK5O(Ci3T;nw
zZQ_NmLjXm~8$<=G(vZc^m_VOV`5Q%amGsr3ccD2G{Uw=G47CChp}I?QJ4dBRN}Rh6
z0D%h0&<Rl^ZehR$p5x2~OVq6-9}mniNYVqhffj*Eq=hJ{ue3y!B|*{~<}Nc5Qs2%L
z=_w@d*`qX2Ay@GXS9Jjxe9M_|{HudiVz`}BDJUsWS2k+)0DoVB8lOxtY+pOfpPK6D
z&Y{996Y1gDLV#f?D75wSt$nruqfv`$!O%!5LVzS6h%(YbORMe$;son)a3X0axFCQ&
zV=H6{UX?!Nzz0&E!Qkmj-N|T$ECJwlii&7mUg<qQR&Z)eDx(po!|3JJCZcj=08HQk
zWZ!F-d&b9>vc(2Z!Kv`9E|nB?ROtzqDFgYEcPEl#G!^O<FlHcL6=0HYam-AmN#M8Y
zL==>QQpb@DkEH(qlp<17cQ57`AhEl@06`Fh>KmOSh;eIa3Hge^wV~3Iq>;H1P!%a+
zreuhkL%P_u&kjSl7KB(Jv9-5}pb(*8LmtrxQ=4gsl)m7T-T^@_0*;;_38b@@fD#lG
zr@ww+r2!yrN0@-`Yj}nL#4{Irc7=CkI|t$m7H5ADfT$+s!<eA3D^XUWNQ%n+mMk{7
zf=MZ5DU$od1KV&y9vcAC0znoVKuI9K)e}RTS-22XNCW|M5dglRNNe#GE?`(d79fTv
z<{n`QNN<8#6VXMKprsLDd_?ss2xNIKU>8ZX%fp64-N(!n5M0={;6Q?a9Fg}g1q>4E
z1Q;bjPBtG9x-}Lj+II}l&>~iqfH>L#Wbj#;prJYz>;N2*0P4RFv<g62ld%@Oa=xX8
p%xSSNPJ_41bpkzvrJQc!Ik!=fuP`L3Qvos&^li*`@Vc1j|JeZd$fN)O

diff --git a/docs/get_started/development.md b/docs/get_started/development.md
deleted file mode 100644
index a6f551b7..00000000
--- a/docs/get_started/development.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Development
-
-PyLaia uses different tools during its development.
-
-## Linter
-
-Code syntax is analyzed before submitting the code.
-
-To run the linter tools suite you may use [pre-commit](https://pre-commit.com).
-
-```shell
-pip install pre-commit
-pre-commit run -a
-```
-
-## Tests
-
-### Unit tests
-
-Tests are executed using [tox](https://tox.wiki/en/latest/).
-
-```shell
-pip install .[test]
-tox
-```
-
-## Documentation
-
-This documentation uses [Sphinx](http://www.sphinx-doc.org/) and was generated using [MkDocs](https://mkdocs.org/) and [mkdocstrings](https://mkdocstrings.github.io/).
-
-### Setup
-
-Add the `docs` extra when installing `pylaia`:
-
-```shell
-# In a clone of the Git repository
-pip install .[docs]
-```
-
-Build the documentation using `mkdocs serve -v`. You can then write in [Markdown](https://www.markdownguide.org/) in the relevant `docs/*.md` files, and see live output on http://localhost:8000.
diff --git a/docs/get_started/index.md b/docs/get_started/index.md
deleted file mode 100644
index 12c52bc1..00000000
--- a/docs/get_started/index.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# Installation
-
-To use PyLaia in your own environment, you need to install from PyPi or manually.
-
-## From PyPi
-
-To install PyLaia from [PyPi](https://pypi.org/project/pylaia/), use this command:
-
-```shell
-pip install pylaia
-```
-
-## From source
-
-To install PyLaia manually, you need to first clone via:
-
-```shell
-git clone git@gitlab.teklia.com:atr/pylaia.git
-```
-
-Then you can install it via pip:
-
-```shell
-pip install .
-```
-
----
-
-Get started with:
-
-- [Development](development.md)
-- [Usage](../usage/index.md)
diff --git a/docs/index.md b/docs/index.md
deleted file mode 100644
index 2f5b4aac..00000000
--- a/docs/index.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# PyLaia
-
-## What is PyLaia?
-
-PyLaia is a toolkit for Automatic Text Recognition (ATR) and Keyword Spotting (KWS).
-
-PyLaia is flexible, open-source, device-agnostic, and can be used to express a wide variety of experiments, including (but not limited to) training and inference over Convolutional and Recurrent based deep Neural Network models.
-The software is extensible and easily configurable and provides a rich set of functional layers with a particular focus on ATR.
-
-## History
-
-PyLaia is the successor of [Laia](https://github.com/jpuigcerver/Laia). It was developed by 3 members ([@jpuigcerver](https://github.com/jpuigcerver), [@mauvilsa](https://github.com/mauvilsa), [@dmartinalbo](https://github.com/dmartinalbo)) of the Pattern Recognition and Human Language Technology (PRHLT) research center in 2016.
-
-The toolkit was originally developed using Torch. When Torch's development was discontinued in 2017, it became clear that building PyLaia as a second-generation system using PyTorch as its foundation was a logical step. PyLaia was written in 2018 by [@jpuigcerver](https://github.com/jpuigcerver) as a Ph.D. thesis experiment and by [@carmocca](https://github.com/carmocca) as an undergraduate final project.
-
-Since 2022, three members of [TEKLIA](https://teklia.com/) ([@babadie](https://gitlab.teklia.com/babadie), [@yschneider](https://gitlab.teklia.com/yschneider), [@starride](https://gitlab.teklia.com/starride)) maintain and improve the toolkit.
-
-## Get started
-
-Click [here](original_paper.md) to learn more about the original paper.
-
-[Get started with PyLaia](get_started/index.md) now!
diff --git a/antora/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc
similarity index 100%
rename from antora/modules/ROOT/nav.adoc
rename to docs/modules/ROOT/nav.adoc
diff --git a/antora/modules/ROOT/pages/get_started/development.adoc b/docs/modules/ROOT/pages/get_started/development.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/get_started/development.adoc
rename to docs/modules/ROOT/pages/get_started/development.adoc
diff --git a/antora/modules/ROOT/pages/get_started/index.adoc b/docs/modules/ROOT/pages/get_started/index.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/get_started/index.adoc
rename to docs/modules/ROOT/pages/get_started/index.adoc
diff --git a/antora/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/index.adoc
rename to docs/modules/ROOT/pages/index.adoc
diff --git a/antora/modules/ROOT/pages/original_paper.adoc b/docs/modules/ROOT/pages/original_paper.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/original_paper.adoc
rename to docs/modules/ROOT/pages/original_paper.adoc
diff --git a/antora/modules/ROOT/pages/releases.adoc b/docs/modules/ROOT/pages/releases.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/releases.adoc
rename to docs/modules/ROOT/pages/releases.adoc
diff --git a/antora/modules/ROOT/pages/usage/index.adoc b/docs/modules/ROOT/pages/usage/index.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/usage/index.adoc
rename to docs/modules/ROOT/pages/usage/index.adoc
diff --git a/antora/modules/ROOT/pages/usage/initialization/index.adoc b/docs/modules/ROOT/pages/usage/initialization/index.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/usage/initialization/index.adoc
rename to docs/modules/ROOT/pages/usage/initialization/index.adoc
diff --git a/antora/modules/ROOT/pages/usage/language_models/index.adoc b/docs/modules/ROOT/pages/usage/language_models/index.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/usage/language_models/index.adoc
rename to docs/modules/ROOT/pages/usage/language_models/index.adoc
diff --git a/antora/modules/ROOT/pages/usage/netout/index.adoc b/docs/modules/ROOT/pages/usage/netout/index.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/usage/netout/index.adoc
rename to docs/modules/ROOT/pages/usage/netout/index.adoc
diff --git a/antora/modules/ROOT/pages/usage/prediction/index.adoc b/docs/modules/ROOT/pages/usage/prediction/index.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/usage/prediction/index.adoc
rename to docs/modules/ROOT/pages/usage/prediction/index.adoc
diff --git a/antora/modules/ROOT/pages/usage/training/index.adoc b/docs/modules/ROOT/pages/usage/training/index.adoc
similarity index 100%
rename from antora/modules/ROOT/pages/usage/training/index.adoc
rename to docs/modules/ROOT/pages/usage/training/index.adoc
diff --git a/docs/original_paper.md b/docs/original_paper.md
deleted file mode 100644
index b7133403..00000000
--- a/docs/original_paper.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Original paper
-
-The original PyLaia model was presented in the paper entitled: [*Are Multidimensional Recurrent Layers Really Necessary for Handwritten Text Recognition?* from Joan Puigcerver, published in the 14th IAPR International Conference on Document Analysis and Recognition (ICDAR 2017)](https://ieeexplore.ieee.org/document/8269951).
-
-The full text is available on this [page](http://www.jpuigcerver.net/pubs/jpuigcerver_icdar2017.pdf).
-
-Recommended citation:
-```bibtex
-@INPROCEEDINGS{PyLaia,
-  author={Puigcerver, Joan},
-  booktitle={2017 14th IAPR International Conference on Document Analysis and Recognition (ICDAR)},
-  title={Are Multidimensional Recurrent Layers Really Necessary for Handwritten Text Recognition?},
-  year={2017},
-  volume={01},
-  number={},
-  pages={67-72},
-  doi={10.1109/ICDAR.2017.20}}
-```
diff --git a/docs/reference/callbacks/decode.md b/docs/reference/callbacks/decode.md
deleted file mode 100644
index 654ccf88..00000000
--- a/docs/reference/callbacks/decode.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.decode
diff --git a/docs/reference/callbacks/learning_rate.md b/docs/reference/callbacks/learning_rate.md
deleted file mode 100644
index 0a0af357..00000000
--- a/docs/reference/callbacks/learning_rate.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.learning_rate
diff --git a/docs/reference/callbacks/meters/meter.md b/docs/reference/callbacks/meters/meter.md
deleted file mode 100644
index 455b4011..00000000
--- a/docs/reference/callbacks/meters/meter.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.meters.meter
diff --git a/docs/reference/callbacks/meters/sequence_error.md b/docs/reference/callbacks/meters/sequence_error.md
deleted file mode 100644
index 51dce4f9..00000000
--- a/docs/reference/callbacks/meters/sequence_error.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.meters.sequence_error
diff --git a/docs/reference/callbacks/meters/timer.md b/docs/reference/callbacks/meters/timer.md
deleted file mode 100644
index 79e892b5..00000000
--- a/docs/reference/callbacks/meters/timer.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.meters.timer
diff --git a/docs/reference/callbacks/netout.md b/docs/reference/callbacks/netout.md
deleted file mode 100644
index 3ce68ab8..00000000
--- a/docs/reference/callbacks/netout.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.netout
diff --git a/docs/reference/callbacks/progress_bar.md b/docs/reference/callbacks/progress_bar.md
deleted file mode 100644
index 251b23c5..00000000
--- a/docs/reference/callbacks/progress_bar.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.progress_bar
diff --git a/docs/reference/callbacks/progress_bar_gpu_stats.md b/docs/reference/callbacks/progress_bar_gpu_stats.md
deleted file mode 100644
index 15cabb7d..00000000
--- a/docs/reference/callbacks/progress_bar_gpu_stats.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.progress_bar_gpu_stats
diff --git a/docs/reference/callbacks/segmentation.md b/docs/reference/callbacks/segmentation.md
deleted file mode 100644
index 1c1d58fe..00000000
--- a/docs/reference/callbacks/segmentation.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.segmentation
diff --git a/docs/reference/callbacks/training_timer.md b/docs/reference/callbacks/training_timer.md
deleted file mode 100644
index d7ec51f4..00000000
--- a/docs/reference/callbacks/training_timer.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.callbacks.training_timer
diff --git a/docs/reference/common/arguments.md b/docs/reference/common/arguments.md
deleted file mode 100644
index 3a39fc7e..00000000
--- a/docs/reference/common/arguments.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.common.arguments
diff --git a/docs/reference/common/loader.md b/docs/reference/common/loader.md
deleted file mode 100644
index 83b47de0..00000000
--- a/docs/reference/common/loader.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.common.loader
diff --git a/docs/reference/common/logging.md b/docs/reference/common/logging.md
deleted file mode 100644
index 6d75685d..00000000
--- a/docs/reference/common/logging.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.common.logging
diff --git a/docs/reference/common/saver.md b/docs/reference/common/saver.md
deleted file mode 100644
index 0c8aa3c2..00000000
--- a/docs/reference/common/saver.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.common.saver
diff --git a/docs/reference/common/types.md b/docs/reference/common/types.md
deleted file mode 100644
index 0947b2ef..00000000
--- a/docs/reference/common/types.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.common.types
diff --git a/docs/reference/data/image_dataset.md b/docs/reference/data/image_dataset.md
deleted file mode 100644
index 8e935d1c..00000000
--- a/docs/reference/data/image_dataset.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.image_dataset
diff --git a/docs/reference/data/image_from_list_dataset.md b/docs/reference/data/image_from_list_dataset.md
deleted file mode 100644
index 77cd2205..00000000
--- a/docs/reference/data/image_from_list_dataset.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.image_from_list_dataset
diff --git a/docs/reference/data/padding_collater.md b/docs/reference/data/padding_collater.md
deleted file mode 100644
index 8998b5c8..00000000
--- a/docs/reference/data/padding_collater.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.padding_collater
diff --git a/docs/reference/data/text_image_dataset.md b/docs/reference/data/text_image_dataset.md
deleted file mode 100644
index 2dacfff6..00000000
--- a/docs/reference/data/text_image_dataset.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.text_image_dataset
diff --git a/docs/reference/data/text_image_from_text_table_dataset.md b/docs/reference/data/text_image_from_text_table_dataset.md
deleted file mode 100644
index a7abfdb3..00000000
--- a/docs/reference/data/text_image_from_text_table_dataset.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.text_image_from_text_table_dataset
diff --git a/docs/reference/data/transforms/text/transforms.md b/docs/reference/data/transforms/text/transforms.md
deleted file mode 100644
index 44a8d117..00000000
--- a/docs/reference/data/transforms/text/transforms.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.transforms.text.transforms
diff --git a/docs/reference/data/transforms/transforms.md b/docs/reference/data/transforms/transforms.md
deleted file mode 100644
index 663a8189..00000000
--- a/docs/reference/data/transforms/transforms.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.transforms.transforms
diff --git a/docs/reference/data/transforms/vision/random_beta_affine.md b/docs/reference/data/transforms/vision/random_beta_affine.md
deleted file mode 100644
index a1e7d9f3..00000000
--- a/docs/reference/data/transforms/vision/random_beta_affine.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.transforms.vision.random_beta_affine
diff --git a/docs/reference/data/transforms/vision/random_beta_morphology.md b/docs/reference/data/transforms/vision/random_beta_morphology.md
deleted file mode 100644
index 1b71a7ad..00000000
--- a/docs/reference/data/transforms/vision/random_beta_morphology.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.transforms.vision.random_beta_morphology
diff --git a/docs/reference/data/transforms/vision/random_beta_perspective.md b/docs/reference/data/transforms/vision/random_beta_perspective.md
deleted file mode 100644
index 1b3b7eb4..00000000
--- a/docs/reference/data/transforms/vision/random_beta_perspective.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.transforms.vision.random_beta_perspective
diff --git a/docs/reference/data/transforms/vision/transforms.md b/docs/reference/data/transforms/vision/transforms.md
deleted file mode 100644
index 6a165144..00000000
--- a/docs/reference/data/transforms/vision/transforms.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.transforms.vision.transforms
diff --git a/docs/reference/data/unpadded_distributed_sampler.md b/docs/reference/data/unpadded_distributed_sampler.md
deleted file mode 100644
index 19018eab..00000000
--- a/docs/reference/data/unpadded_distributed_sampler.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.data.unpadded_distributed_sampler
diff --git a/docs/reference/decoders/ctc_alignment.md b/docs/reference/decoders/ctc_alignment.md
deleted file mode 100644
index 29361041..00000000
--- a/docs/reference/decoders/ctc_alignment.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.decoders.ctc_alignment
diff --git a/docs/reference/decoders/ctc_greedy_decoder.md b/docs/reference/decoders/ctc_greedy_decoder.md
deleted file mode 100644
index 257f1460..00000000
--- a/docs/reference/decoders/ctc_greedy_decoder.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.decoders.ctc_greedy_decoder
diff --git a/docs/reference/decoders/ctc_language_decoder.md b/docs/reference/decoders/ctc_language_decoder.md
deleted file mode 100644
index 5c359dde..00000000
--- a/docs/reference/decoders/ctc_language_decoder.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.decoders.ctc_language_decoder
diff --git a/docs/reference/decoders/ctc_nbest_decoder.md b/docs/reference/decoders/ctc_nbest_decoder.md
deleted file mode 100644
index 7d7f86a8..00000000
--- a/docs/reference/decoders/ctc_nbest_decoder.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.decoders.ctc_nbest_decoder
diff --git a/docs/reference/dummies/data_modules/dummy_mnist.md b/docs/reference/dummies/data_modules/dummy_mnist.md
deleted file mode 100644
index 56861584..00000000
--- a/docs/reference/dummies/data_modules/dummy_mnist.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.dummies.data_modules.dummy_mnist
diff --git a/docs/reference/dummies/data_modules/dummy_mnist_lines.md b/docs/reference/dummies/data_modules/dummy_mnist_lines.md
deleted file mode 100644
index 2bb63744..00000000
--- a/docs/reference/dummies/data_modules/dummy_mnist_lines.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.dummies.data_modules.dummy_mnist_lines
diff --git a/docs/reference/dummies/dummy_model.md b/docs/reference/dummies/dummy_model.md
deleted file mode 100644
index 7ad268b5..00000000
--- a/docs/reference/dummies/dummy_model.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.dummies.dummy_model
diff --git a/docs/reference/dummies/dummy_plugin.md b/docs/reference/dummies/dummy_plugin.md
deleted file mode 100644
index 56acbc00..00000000
--- a/docs/reference/dummies/dummy_plugin.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.dummies.dummy_plugin
diff --git a/docs/reference/dummies/dummy_trainer.md b/docs/reference/dummies/dummy_trainer.md
deleted file mode 100644
index 09710003..00000000
--- a/docs/reference/dummies/dummy_trainer.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.dummies.dummy_trainer
diff --git a/docs/reference/dummies/modules/dummy_engine.md b/docs/reference/dummies/modules/dummy_engine.md
deleted file mode 100644
index e7e261d7..00000000
--- a/docs/reference/dummies/modules/dummy_engine.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.dummies.modules.dummy_engine
diff --git a/docs/reference/dummies/modules/dummy_evaluator.md b/docs/reference/dummies/modules/dummy_evaluator.md
deleted file mode 100644
index ead6cac4..00000000
--- a/docs/reference/dummies/modules/dummy_evaluator.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.dummies.modules.dummy_evaluator
diff --git a/docs/reference/engine/data_module.md b/docs/reference/engine/data_module.md
deleted file mode 100644
index 95e51e09..00000000
--- a/docs/reference/engine/data_module.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.engine.data_module
diff --git a/docs/reference/engine/engine_exception.md b/docs/reference/engine/engine_exception.md
deleted file mode 100644
index ac3fa804..00000000
--- a/docs/reference/engine/engine_exception.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.engine.engine_exception
diff --git a/docs/reference/engine/engine_module.md b/docs/reference/engine/engine_module.md
deleted file mode 100644
index 8ece90be..00000000
--- a/docs/reference/engine/engine_module.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.engine.engine_module
diff --git a/docs/reference/engine/evaluator_module.md b/docs/reference/engine/evaluator_module.md
deleted file mode 100644
index 9f914168..00000000
--- a/docs/reference/engine/evaluator_module.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.engine.evaluator_module
diff --git a/docs/reference/engine/feeder.md b/docs/reference/engine/feeder.md
deleted file mode 100644
index fe848e05..00000000
--- a/docs/reference/engine/feeder.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.engine.feeder
diff --git a/docs/reference/engine/htr_engine_module.md b/docs/reference/engine/htr_engine_module.md
deleted file mode 100644
index cf412c47..00000000
--- a/docs/reference/engine/htr_engine_module.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.engine.htr_engine_module
diff --git a/docs/reference/engine/index.md b/docs/reference/engine/index.md
deleted file mode 100644
index 05a8adcc..00000000
--- a/docs/reference/engine/index.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.engine
diff --git a/docs/reference/loggers/epoch_csv_logger.md b/docs/reference/loggers/epoch_csv_logger.md
deleted file mode 100644
index f1c9f10f..00000000
--- a/docs/reference/loggers/epoch_csv_logger.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.loggers.epoch_csv_logger
diff --git a/docs/reference/losses/ctc_loss.md b/docs/reference/losses/ctc_loss.md
deleted file mode 100644
index 1763816b..00000000
--- a/docs/reference/losses/ctc_loss.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.losses.ctc_loss
diff --git a/docs/reference/losses/loss.md b/docs/reference/losses/loss.md
deleted file mode 100644
index 207b0c1c..00000000
--- a/docs/reference/losses/loss.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.losses.loss
diff --git a/docs/reference/models/htr/conv_block.md b/docs/reference/models/htr/conv_block.md
deleted file mode 100644
index 293a8ba5..00000000
--- a/docs/reference/models/htr/conv_block.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.models.htr.conv_block
diff --git a/docs/reference/models/htr/gated_crnn.md b/docs/reference/models/htr/gated_crnn.md
deleted file mode 100644
index 696ccd09..00000000
--- a/docs/reference/models/htr/gated_crnn.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.models.htr.gated_crnn
diff --git a/docs/reference/models/htr/laia_crnn.md b/docs/reference/models/htr/laia_crnn.md
deleted file mode 100644
index 942ba12a..00000000
--- a/docs/reference/models/htr/laia_crnn.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.models.htr.laia_crnn
diff --git a/docs/reference/models/index.md b/docs/reference/models/index.md
deleted file mode 100644
index af6229fa..00000000
--- a/docs/reference/models/index.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.models
diff --git a/docs/reference/nn/adaptive_pool_2d.md b/docs/reference/nn/adaptive_pool_2d.md
deleted file mode 100644
index 0aa6f776..00000000
--- a/docs/reference/nn/adaptive_pool_2d.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.nn.adaptive_pool_2d
diff --git a/docs/reference/nn/image_pooling_sequencer.md b/docs/reference/nn/image_pooling_sequencer.md
deleted file mode 100644
index 46a7404c..00000000
--- a/docs/reference/nn/image_pooling_sequencer.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.nn.image_pooling_sequencer
diff --git a/docs/reference/nn/image_to_sequence.md b/docs/reference/nn/image_to_sequence.md
deleted file mode 100644
index 7eb166bc..00000000
--- a/docs/reference/nn/image_to_sequence.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.nn.image_to_sequence
diff --git a/docs/reference/nn/pyramid_maxpool_2d.md b/docs/reference/nn/pyramid_maxpool_2d.md
deleted file mode 100644
index b11c9470..00000000
--- a/docs/reference/nn/pyramid_maxpool_2d.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.nn.pyramid_maxpool_2d
diff --git a/docs/reference/nn/resnet.md b/docs/reference/nn/resnet.md
deleted file mode 100644
index 4be46ba0..00000000
--- a/docs/reference/nn/resnet.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.nn.resnet
diff --git a/docs/reference/nn/temporal_pyramid_maxpool_2d.md b/docs/reference/nn/temporal_pyramid_maxpool_2d.md
deleted file mode 100644
index 7b99bfca..00000000
--- a/docs/reference/nn/temporal_pyramid_maxpool_2d.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.nn.temporal_pyramid_maxpool_2d
diff --git a/docs/reference/scripts/htr/create_model.md b/docs/reference/scripts/htr/create_model.md
deleted file mode 100644
index 436a7738..00000000
--- a/docs/reference/scripts/htr/create_model.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.scripts.htr.create_model
diff --git a/docs/reference/scripts/htr/dataset/index.md b/docs/reference/scripts/htr/dataset/index.md
deleted file mode 100644
index 453b0152..00000000
--- a/docs/reference/scripts/htr/dataset/index.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.scripts.htr.dataset
diff --git a/docs/reference/scripts/htr/dataset/validate.md b/docs/reference/scripts/htr/dataset/validate.md
deleted file mode 100644
index 68d92fc1..00000000
--- a/docs/reference/scripts/htr/dataset/validate.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.scripts.htr.dataset.validate
diff --git a/docs/reference/scripts/htr/decode_ctc.md b/docs/reference/scripts/htr/decode_ctc.md
deleted file mode 100644
index 5189973f..00000000
--- a/docs/reference/scripts/htr/decode_ctc.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.scripts.htr.decode_ctc
diff --git a/docs/reference/scripts/htr/netout.md b/docs/reference/scripts/htr/netout.md
deleted file mode 100644
index f8c64c20..00000000
--- a/docs/reference/scripts/htr/netout.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.scripts.htr.netout
diff --git a/docs/reference/scripts/htr/train_ctc.md b/docs/reference/scripts/htr/train_ctc.md
deleted file mode 100644
index 9b900b88..00000000
--- a/docs/reference/scripts/htr/train_ctc.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.scripts.htr.train_ctc
diff --git a/docs/reference/scripts/index.md b/docs/reference/scripts/index.md
deleted file mode 100644
index 4e6118b1..00000000
--- a/docs/reference/scripts/index.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.scripts
diff --git a/docs/reference/utils/checks.md b/docs/reference/utils/checks.md
deleted file mode 100644
index b32df681..00000000
--- a/docs/reference/utils/checks.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.utils.checks
diff --git a/docs/reference/utils/kaldi.md b/docs/reference/utils/kaldi.md
deleted file mode 100644
index bf088688..00000000
--- a/docs/reference/utils/kaldi.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.utils.kaldi
diff --git a/docs/reference/utils/mdutils.md b/docs/reference/utils/mdutils.md
deleted file mode 100644
index 126b74b1..00000000
--- a/docs/reference/utils/mdutils.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.utils.mdutils
diff --git a/docs/reference/utils/stats.md b/docs/reference/utils/stats.md
deleted file mode 100644
index 1da41c79..00000000
--- a/docs/reference/utils/stats.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.utils.stats
diff --git a/docs/reference/utils/symbols_table.md b/docs/reference/utils/symbols_table.md
deleted file mode 100644
index 188abcf6..00000000
--- a/docs/reference/utils/symbols_table.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.utils.symbols_table
diff --git a/docs/reference/utils/visualize_segmentation.md b/docs/reference/utils/visualize_segmentation.md
deleted file mode 100644
index 8e689434..00000000
--- a/docs/reference/utils/visualize_segmentation.md
+++ /dev/null
@@ -1 +0,0 @@
-::: laia.utils.visualize_segmentation
diff --git a/docs/releases.md b/docs/releases.md
deleted file mode 100644
index b3adea9f..00000000
--- a/docs/releases.md
+++ /dev/null
@@ -1,205 +0,0 @@
-# Releases
-
-## 1.1.1
-
-Released on **12 August 2024** &bull; View on [Gitlab](https://gitlab.teklia.com/atr/pylaia/-/releases/1.1.1)
-
-### Breaking changes
-
-- The [nnutils](https://gitlab.teklia.com/atr/nnutils/) library is no longer maintained and is only compatible with Python 3.6, 3.7, 3.8. As such its dependency has been removed. The `crnn.use_masks` parameter has been removed. It is still supported to keep the compatibility with older training configuration but will be ignored.
-
-### Feature
-
-- The number of worker processes created in dataloaders is now exposed through the `data.num_workers`  parameter.
-- There is a new command to run basic checks and compute statistics on your training dataset. Learn more about it in [the documentation](https://atr.pages.teklia.com/pylaia/usage/datasets/).
-- Pretraining is now available. Load the weights of a previous checkpoint using the `train.pretrain` parameter when fine-tuning a model on a new dataset. Learn more about it in [the documentation](https://atr.pages.teklia.com/pylaia/usage/training/#resume-training-from-a-checkpoint).
-- When training on a small dataset, freezing some of the layers can help with model convergence. The `train.freeze_layers` parameter supports freezing:
-
-    - convolutional layers,
-    - recurrent layers,
-    - linear layers.
-
-- Proper support for right-to-left (RTL) languages is now available. Enable it using the `data.reading_order` argument both during [training](https://atr.pages.teklia.com/pylaia/usage/training/#train-on-right-to-left-reading-order) and [decoding](https://atr.pages.teklia.com/pylaia/usage/prediction/#predict-on-right-to-left-data).
-
-### Dependencies
-
-- Bumped [pytorch-lightning](https://pypi.org/project/pytorch-lightning/) to version `1.4.2`.
-- Bumped [textdistance](https://pypi.org/project/textdistance/) to version `4.6.1`.
-
-### Misc
-
-- A deprecation warning from jsonargparse was fixed.
-- The package's metadata are now stored in `pyproject.toml` as per [PEP-0621](https://peps.python.org/pep-0621/).
-- PyLaia now uses [ruff](https://docs.astral.sh/ruff/) for linting and formatting.
-
-## 1.1.0
-
-Released on **22 December 2023** &bull; View on [Gitlab](https://gitlab.teklia.com/atr/pylaia/-/releases/1.1.0)
-
-### Breaking changes
-
-- Official support for Python3.8 has been dropped. This doesn't mean that the current code doesn't run on python3.8, we simply do not test that compatibility anymore. This decision was made since active support of python 3.8 has stopped for a while now and many libraries in the ML world have stopped supporting it as well.
-
-### Feature
-
-- A Docker image with the needed code to use this library is now built on every tag.
-- The coverage of our tests suite is displayed again as a GitLab badge on the repository as well as in the README.md file.
-
-### Documentation
-
-- Many sections were added to the documentation:
-
-    - for the [pylaia-htr-create-model](https://atr.pages.teklia.com/pylaia/usage/initialization/) command,
-    - for [dataset formatting](https://atr.pages.teklia.com/pylaia/usage/datasets/),
-    - for the [pylaia-htr-train-ctc](https://atr.pages.teklia.com/pylaia/usage/training/) command and [fine-tuning](https://atr.pages.teklia.com/pylaia/usage/training/#resume-training-from-a-checkpoint),
-    - for the [pylaia-htr-decode-ctc](https://atr.pages.teklia.com/pylaia/usage/prediction/) command,
-    - for the [pylaia-htr-netout](https://atr.pages.teklia.com/pylaia/usage/netout/) command,
-    - to [train](https://atr.pages.teklia.com/pylaia/usage/language_models/) [KenLM](https://kheafield.com/code/kenlm/) language models,
-    - the full Python code reference.
-
-- A contribution guide and a code of conduct were added for new contributors.
-
-### Dependencies
-
-- Bumped [pytorch-lightning](https://pypi.org/project/pytorch-lightning/) to version `1.3.0`
-- Some dependencies were pinned to a version to avoid breakage:
-
-    - [natsort](https://pypi.org/project/natsort/) was pinned to version `8.4.0`,
-    - [textdistance](https://pypi.org/project/textdistance/) was pinned to version `4.6.0`,
-    - [scipy](https://pypi.org/project/scipy/) was pinned to version `1.11.3`,
-    - [matplotlib](https://pypi.org/project/matplotlib/) was pinned to version `3.8.2`,
-    - [numpy](https://pypi.org/project/numpy/) direct dependency was removed since it's installed through `scipy` and `matplotlib`.
-
-- PyLaia dropped support for python 3.8 so the [dataclasses](https://pypi.org/project/dataclasses/) dependency was dropped.
-
-### Misc
-
-- The `torch.testing.assert_allclose` has been replaced by `torch.testing.assert_close` since it became deprecated in [PyTorch 1.12.0](https://github.com/pytorch/pytorch/issues/61844).
-
-
-## 1.0.7
-
-Released on **18 October 2023** &bull; View on [Gitlab](https://gitlab.teklia.com/atr/pylaia/-/releases/1.0.7)
-
-### Feature
-- When using a language model, a confidence score is now returned based on the log-likelihood of the hypothesis.
-
-### Documentation
-A public documentation is now available on <https://atr.pages.teklia.com/pylaia/>. It's still under construction but next releases will add more and more content.
-
-### Dependencies
-- Bumped [pytorch-lightning](https://pypi.org/project/pytorch-lightning/) to version `1.1.7`
-- Bumped GitHub action [codecov/codecov-action](https://github.com/codecov/codecov-action) to version `3`
-- Bumped GitHub action [actions/setup-python](https://github.com/actions/setup-python) to version `4`
-- Bumped GitHub action [actions/checkout](https://github.com/actions/checkout) to version `4`
-
-### Development
-- Releases are now built more easily through a Makefile.
-- The documentation is also redeployed after each push on `master` branch.
-- Fixed a test that behaved differently locally and during CI.
-
-## 1.0.6
-
-Released on **12 September 2023** &bull; View on [Github](https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.6)
-
-### Feature
-- During training, too small images are now padded to be able to pass the multiple convolution layers.
-
-### Documentation
-- Fixed typos.
-
-### Dependencies
-- Replaced [deprecated Pillow resampling method](https://pillow.readthedocs.io/en/stable/releasenotes/2.7.0.html#antialias-renamed-to-lanczos) `Image.ANTIALIAS` to `Image.Resample.Lanczos`.
-
-### Development
-- Pre-commit hooks were updated.
-
-## 1.0.5
-
-Released on **29 March 2023** &bull; View on [Github](https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.5)
-
-### Dependencies
-- Requires `torch` version `1.13.0` or `1.13.1`.
-- Requires `torchvision` version `0.14.0` or `0.14.1` (depending on `torch` version).
-
-## 1.0.4
-
-Released on **4 January 2023** &bull; View on [Github](https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.4)
-
-### Dependencies
-- Requires `torch` version `1.13.0`.
-
-## 1.0.3
-
-Released on **12 December 2022** &bull; View on [Github](https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.3)
-
-### Feature
-- Now able to decode using a trained Language model through beam search decoding.
-- Exposes [torch Dataloaders's num_workers](https://pytorch.org/docs/stable/data.html#multi-process-data-loading) parameter on the Python training function to limit resource usage when needed.
-
-### Dependencies
-- Added dependency to `torchaudio` version `0.13.0`.
-
-### Development
-- Package version is now tracked through the `VERSION` file.
-
-## 1.0.2
-
-Released on **7 December 2022** &bull; View on [Github](https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.2)
-
-### Dependencies
-- Pinned dependency to `pytorch-lightning` to version `1.1.0`.
-
-## 1.0.1
-
-Released on **7 December 2022** &bull; View on [Github](https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.1)
-
-## 1.0.0
-
-Released on **2 December 2020** &bull; View on [Github](https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.0)
-
-### Added
-
-- Support distributed training
-- Scripts can now be configured using yaml configuration files
-- Add support for the SGD and Adam optimizers
-- Support color images
-- Log the installed version of each module when scripts are called from shell
-- Add char/word segmentation to the decode script
-- Add several badges to the README
-- Support using a `ReduceLROnPlateau` scheduler during training
-- A CSV file (metrics.csv) is now created with the results obtained during training
-- Add CONTRIBUTING file
-- Training now can include GPU stats in the progress bar
-- Add isort to pre-commit to keep consistent imports throughout the codebase
-- Users can run the PyLaia scripts using Python now
-- Support half-precision training for fixed height models.
-- Add script to visualize the segmentation output
-- Use Codecov to produce test coverage reports
-- Code is now analyzed using CodeFactor
-
-### Changed
-
-- Make Python 3.6 the minimum supported version
-- Make PyTorch 1.4.0 the minimum supported version
-- Remove `ImageToTensor` in favor of vision transform `ToImageTensor`
-- Remove all of the internal logic (`engine`, `actions`, `hooks`, etc) in favor of pytorch-lightning's constructs
-- Change Travis CI for GitHub actions
-- Greatly improve the progress bar. It is used now in all scripts
-- The entire shell API has changed for the better (thanks to jsonargparse). Arguments are now separated into groups and help messages are clearer.
-- Drastically improve our test suite, we now have a 91% coverage
-
-### Removed
-
-- Remove egs directory. These live now at https://github.com/carmocca/PyLaia-examples
-- Remove Baidu's CTC loss in favor of PyTorch's
-- Remove PHOC code. Please open an issue if you were using it
-- Remove Dortmund code. Please open an issue if you were using it
-- Remove CTCLatticeGenerator. Please open an issue if you were using it
-- We no longer support saving checkpoints for more than one metric. Will be added back in a future version
-
-### Fixed
-
-- Fix WER calculation when long delimiters are used
-- Exit training if a delimiter is not present in the vocabulary
-- Hundreds of other minor fixes and refactors to improve the code quality!
diff --git a/antora/ui/partials/footer-content.hbs b/docs/ui/partials/footer-content.hbs
similarity index 100%
rename from antora/ui/partials/footer-content.hbs
rename to docs/ui/partials/footer-content.hbs
diff --git a/antora/ui/partials/header-content.hbs b/docs/ui/partials/header-content.hbs
similarity index 100%
rename from antora/ui/partials/header-content.hbs
rename to docs/ui/partials/header-content.hbs
diff --git a/docs/usage/datasets/format.md b/docs/usage/datasets/format.md
deleted file mode 100644
index 599e2f0c..00000000
--- a/docs/usage/datasets/format.md
+++ /dev/null
@@ -1,196 +0,0 @@
-# Dataset formatting
-
-To train PyLaia, you need line images and their corresponding transcriptions. The dataset should be divided into three sets: training, validation and test sets.
-
-The dataset should be formatted as follows:
-```bash
-# Images
-├── images
-    ├── train/
-    ├── val/
-    └── test/
-# Image ids (used for prediction)
-├── train_ids.txt
-├── val_ids.txt
-├── test_ids.txt
-# Tokenized transcriptions (used for training)
-├── train.txt
-├── val.txt
-├── test.txt
-# Transcriptions (used for evaluation)
-├── train_text.txt
-├── val_text.txt
-├── test_text.txt
-# Symbol list
-└── syms.txt
-```
-
-## Images
-
-By default, images should be resized to a fixed height (recommended value: 128 pixels). This can be done using [ImageMagick's `mogrify`](https://imagemagick.org/script/mogrify.php) function:
-```
-mogrify -resize x128 images/*.jpg
-```
-
-Note that PyLaia can also support variable size images by setting `--fixed_input_height 0` during [model initialization](../initialization/index.md).
-
-
-## Ground truth
-
-### Tokenized transcriptions
-
-Two files `{train|val}.txt` are required to train the model. They should map image names and tokenized transcriptions for the training and validation sets.
-
-Example:
-
-```text title="train.txt"
-train/im01 f o r <space> d e t <space> t i l f æ l d e <space> d e t <space> s k u l d e <space> l y k k e s <space> D i g
-train/im02 a t <space> o p d r i v e <space> d e t <space> o m s k r e v n e <space> e x p l : <space> a f
-train/im03 « F r u <space> I n g e r » , <space> a t <space> s e n d e <space> m i g <space> s a m m e
-```
-
-### Transcriptions
-
-Three files `{train|val|test}_text.txt` are required to evaluate your models. They should map image names and non-tokenized transcriptions.
-
-Example:
-```text title="train_text.txt"
-train/im01 for det tilfælde det skulde lykkes Dig
-train/im02 at opdrive det omskrevne expl: af
-train/im03 «Fru Inger», at sende mig samme
-```
-
-### Image list
-
-Three files `{train|val|test}_ids.txt` are required to run predictions. They should list image names without transcriptions and can be obtained with:
-```bash
-cut -d' ' -f1 train_text.txt > train_ids.txt
-```
-
-Example:
-```text title="train_ids.txt"
-train/im01
-train/im02
-train/im03
-```
-
-### Symbol list
-
-Finally, a file named `syms.txt` is required, mapping tokens from the training set and their index, starting with the `<ctc>` token.
-
-Example:
-
-```text title="syms.txt"
-<ctc> 0
-! 1
-" 2
-& 3
-' 4
-( 5
-) 6
-+ 7
-, 8
-- 9
-. 10
-/ 11
-0 12
-1 13
-2 14
-3 15
-4 16
-5 17
-6 18
-7 19
-8 20
-9 21
-: 22
-; 23
-< 24
-= 25
-> 26
-? 27
-A 28
-B 29
-C 30
-D 31
-E 32
-F 33
-G 34
-H 35
-I 36
-J 37
-K 38
-L 39
-M 40
-N 41
-O 42
-P 43
-Q 44
-R 45
-S 46
-T 47
-U 48
-V 49
-W 50
-X 51
-Y 52
-Z 53
-[ 54
-] 55
-a 56
-b 57
-c 58
-d 59
-e 60
-f 61
-g 62
-h 63
-i 64
-j 65
-k 66
-l 67
-m 68
-n 69
-o 70
-p 71
-q 72
-r 73
-s 74
-t 75
-u 76
-v 77
-w 78
-x 79
-y 80
-z 81
-« 82
-¬ 83
-» 84
-¼ 85
-½ 86
-Å 87
-Æ 88
-Ø 89
-à 90
-á 91
-â 92
-ä 93
-å 94
-æ 95
-ç 96
-è 97
-é 98
-ê 99
-ö 100
-ø 101
-ù 102
-û 103
-ü 104
-– 105
-— 106
-’ 107
-„ 108
-… 109
-<unk> 110
-<space> 111
-```
diff --git a/docs/usage/datasets/index.md b/docs/usage/datasets/index.md
deleted file mode 100644
index ac16063a..00000000
--- a/docs/usage/datasets/index.md
+++ /dev/null
@@ -1,163 +0,0 @@
-# Dataset
-
-PyLaia datasets must be formatted following a specific format. Learn how to build a dataset by following this [page](./format.md).
-
-Once the dataset is created, you may use the `pylaia-htr-dataset-validate` command to compute statistics and make sure your dataset is valid. To know more about the options of this command, use `pylaia-htr-dataset-validate --help`.
-
-
-## Purpose
-
-This command will:
-
-* issue a warning if some images are missing (they will be ignored during training)
-* issue a warning if some images have an invalid width (they will be padded during training)
-* fail if images have variable height when `fixed_input_height>0`
-* fail if a character is missing in the list of symbols `syms.txt`
-
-If the dataset is valid, the script will:
-
-* display `Dataset is valid` and
-* save a summary of the dataset statistics in a Markdown file named after the argument provided in `--statistics_output`.
-
-## Parameters
-
-The full list of parameters is detailed in this section.
-
-### General parameters
-
-| Parameter            | Description                                                                                                                                                                                          | Type   | Default |
-| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | ------- |
-| `syms`               | Positional argument. Path to a file mapping characters to integers. The CTC symbol must be mapped to integer 0.                                                                                      | `str`  |         |
-| `img_dirs`           | Positional argument. Directories containing line images.                                                                                                                                             | `str`  |         |
-| `tr_txt_table`       | Positional argument. Path to a file mapping training image ids and tokenized transcription.                                                                                                          | `str`  |         |
-| `va_txt_table`       | Positional argument. Path to a file mapping validation image ids and tokenized transcription.                                                                                                        | `str`  |         |
-| `te_txt_table`       | Positional argument. Path to a file mapping test image ids and tokenized transcription.                                                                                                              | `str`  |         |
-| `fixed_input_height` | Height of the input images. If set to 0, a variable height model will be used (see `adaptive_pooling`). This will be used to compute the model output height at the end of the convolutional layers. | `int`  | 0       |
-| `statistics_output`  | Where the Markdown summary will be written.                                                                                                                                                          | `str`  | `"statistics.md"`       |
-| `config`             | Path to a JSON configuration file                                                                                                                                                                    | `json` |         |
-
-### Common parameters
-
-| Name                    | Description                             | Type  | Default |
-| ----------------------- | --------------------------------------- | ----- | ------- |
-| `common.train_path`     | Directory where the model will be saved | `str` | `.`     |
-| `common.model_filename` | Filename of the model.                  | `str` | `model` |
-
-### Logging arguments
-
-| Name                      | Description                                                                                                    | Type            | Default                                           |
-| ------------------------- | -------------------------------------------------------------------------------------------------------------- | --------------- | ------------------------------------------------- |
-| `logging.fmt`             | Logging format.                                                                                                | `str`           | `%(asctime)s %(levelname)s %(name)s] %(message)s` |
-| `logging.level`           | Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`                                       | `Level`         | `INFO`                                            |
-| `logging.filepath`        | Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname` | `Optional[str]` |                                                   |
-| `logging.overwrite`       | Whether to overwrite the logfile or to append.                                                                 | `bool`          | `False`                                           |
-| `logging.to_stderr_level` | If filename is set, use this to log also to stderr at the given level.                                         | `Level`         | `ERROR`                                           |
-
-### Train arguments
-
-| Name               | Description                                       | Type   | Default       |
-| ------------------ | ------------------------------------------------- | ------ | ------------- |
-| `train.delimiters` | List of symbols representing the word delimiters. | `List` | `["<space>"]` |
-
-## Examples
-
-These arguments can be passed using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
-
-### Example with Command Line Arguments (CLI)
-
-Run the following command to create a model:
-```sh
-pylaia-htr-dataset-validate /data/Esposalles/dataset/syms.txt \
-                            [/data/Esposalles/dataset/images/] \
-                            /data/Esposalles/dataset/train.txt \
-                            /data/Esposalles/dataset/val.txt \
-                            /data/Esposalles/dataset/test.txt \
-                            --common.experiment_dirname experiment-esposalles/ \
-                            --fixed_input_height 128 \
-                            --statistics_output statistics.md
-```
-
-Will output:
-```bash
-[2024-04-23 12:58:31,399 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
-[2024-04-23 12:58:32,010 INFO laia] Installed:
-[2024-04-23 12:58:32,050 INFO laia.common.loader] Loaded model model
-[2024-04-23 12:58:32,094 INFO laia] Dataset is valid
-[2024-04-23 12:58:32,094 INFO laia] Statistics written to statistics.md
-```
-
-### Example with a YAML configuration file
-
-Run the following command to validate a dataset:
-```sh
-pylaia-htr-dataset-validate --config config_dataset.yaml
-```
-
-Where `config_dataset.yaml` is:
-
-```yaml
-syms: /data/Esposalles/dataset/syms.txt
-img_dirs: [/data/Esposalles/dataset/images/]
-tr_txt_table: /data/Esposalles/dataset/train.txt
-va_txt_table: /data/Esposalles/dataset/val.txt
-te_txt_table: /data/Esposalles/dataset/test.txt
-fixed_input_height: 128
-statistics_output: statistics.md
-common:
-  experiment_dirname: experiment-esposalles
-```
-
-### Example with perfect dataset
-
-```bash
-[2024-04-23 12:58:31,399 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
-[2024-04-23 12:58:32,010 INFO laia] Installed:
-[2024-04-23 12:58:32,050 INFO laia.common.loader] Loaded model model
-[2024-04-23 12:58:32,094 INFO laia] Dataset is valid
-[2024-04-23 12:58:32,094 INFO laia] Statistics written to statistics.md
-```
-
-### Example with missing images
-
-```bash
-[2024-04-23 13:01:34,646 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
-[2024-04-23 13:01:35,200 INFO laia] Installed:
-[2024-04-23 13:01:35,232 INFO laia.common.loader] Loaded model model
-[2024-04-23 13:01:35,782 WARNING laia.data.text_image_from_text_table_dataset] No image file found for image ID '0d7cf548-742b-4067-9084-52478806091d_Line0_30af78fd-e15d-4873-91d1-69ad7c0623c3.jpg', ignoring example...
-[2024-04-23 13:01:35,783 WARNING laia.data.text_image_from_text_table_dataset] No image file found for image ID '0d7cf548-742b-4067-9084-52478806091d_Line0_b1fb9275-5d49-4266-9de0-e6a93fc6dfaf.jpg', ignoring example...
-[2024-04-23 13:01:35,894 INFO laia] Dataset is valid
-[2024-04-23 13:01:35,894 INFO laia] Statistics written to statistics.md
-```
-
-### Example with small images
-
-```sh
-[2024-04-23 13:01:34,646 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
-[2024-04-23 13:01:35,200 INFO laia] Installed:
-[2024-04-23 13:01:35,232 INFO laia.common.loader] Loaded model model
-[2024-04-23 13:01:36,052 ERROR laia] Issues found in the dataset.
-[2024-04-23 13:01:36,052 ERROR laia] train - Found some images too small for convolutions (width<8). They will be padded during training.
-```
-
-### Example with variable image height
-
-```sh
-[2024-04-23 13:01:34,646 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
-[2024-04-23 13:01:35,200 INFO laia] Installed:
-[2024-04-23 13:01:35,232 INFO laia.common.loader] Loaded model model
-[2024-04-23 13:01:36,052 ERROR laia] Issues found in the dataset.
-[2024-04-23 13:01:36,052 ERROR laia] train - Found images with variable heights: ['/data/Esposalles/dataset/images/f6d2b699-e910-4191-bc7d-f56e60fe979a_Line2_91b43b71-ea60-4f42-a896-880676aed723.jpg'].
-[2024-04-23 13:01:36,052 ERROR laia] test - Found images with variable heights: ['/data/Esposalles/dataset/images/fd1e6b3b-48cb-41c0-b1e9-2924b9562876_Line3_27e23ff1-f730-44ac-844f-479e5cc9e9aa.jpg'].
-```
-
-### Example with missing symbol
-
-```sh
-[2024-04-23 13:01:34,646 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
-[2024-04-23 13:01:35,200 INFO laia] Installed:
-[2024-04-23 13:01:35,232 INFO laia.common.loader] Loaded model model
-[2024-04-23 13:01:36,052 ERROR laia] Issues found in the dataset.
-[2024-04-23 13:01:36,052 ERROR laia] train - Found some unknown symbols: {'='}
-[2024-04-23 13:01:36,052 ERROR laia] val - Found some unknown symbols: {'='}
-[2024-04-23 13:01:36,052 ERROR laia] test - Found some unknown symbols: {'='}
-```
diff --git a/docs/usage/index.md b/docs/usage/index.md
deleted file mode 100644
index 3f14c12b..00000000
--- a/docs/usage/index.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Usage
-
-Once the dataset is formatted and `pylaia` is installed and in your environment, you may use the following commands:
-
-* `pylaia-htr-create-model`
-: To create a new PyLaia model. More details in the [dedicated page](./initialization/index.md).
-* `pylaia-htr-dataset-validate`
-: To compute statistics and run validation checks on a dataset. More details in the [dedicated page](./datasets/index.md).
-* `pylaia-htr-train-ctc`
-: To train a PyLaia model. More details in the [dedicated page](./training/index.md).
-* `pylaia-htr-decode-ctc`
-: To predict using a trained PyLaia model. More details in the [dedicated page](./prediction/index.md).
-* `pylaia-htr-netout`
-: To dump features from a PyLaia model. More details in the [dedicated page](./netout/index.md).
-
----
-Related pages:
-
-* Learn how to format a [dataset in PyLaia format](./datasets/format.md)
-* Learn how to use PyLaia with an [explicit language model](./language_models/index.md)
diff --git a/docs/usage/initialization/index.md b/docs/usage/initialization/index.md
deleted file mode 100644
index d55a2551..00000000
--- a/docs/usage/initialization/index.md
+++ /dev/null
@@ -1,136 +0,0 @@
-# Model initialization
-
-The `pylaia-htr-create-model` command can be used to create a PyLaia model. To know more about the options of this command, use `pylaia-htr-create-model --help`.
-
-## Purpose
-
-The general architecture of PyLaia is composed of convolutional blocks followed by a set a bi-directionnal recurrent layers and a linear layer. PyLaia is fully configurable by the user, including:
-
-- Number of convolutional blocks,
-- Number of recurrent layers,
-- Batch normalization,
-- Pooling layers,
-- Activation function,
-- ...
-
-This command will create a pickled file (named `model` by default), which is required to initialize the `LaiaCRNN` class before training.
-
-## Parameters
-
-The full list of parameters is detailed in this section.
-
-
-### General parameters
-
-| Parameter            | Description                                                                                                                                                                                                | Type   | Default      |
-| -------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | ------------ |
-| `syms`               | Positional argument. Path to a file mapping characters to integers. The CTC symbol must be mapped to integer 0.                                                                                            | `str`  |              |
-| `config`             | Path to a JSON configuration file                                                                                                                                                                          | `json` |              |
-| `fixed_input_height` | Height of the input images. If set to 0, a variable height model will be used (see `adaptive_pooling`). This will be used to compute the model output height at the end of the convolutional layers.       | `int`  | 0            |
-| `adaptive_pooling`   | Use custom adaptive pooling layers to enable training with variable height images. Takes into account the size of each individual image within the batch (before padding). Should be in `{avg,max}pool-N`. | `str`  | `avgpool-16` |
-| `save_model`         | Whether to save the model to a file.                                                                                                                                                                       | `bool` | `True`       |
-
-### Common parameters
-
-| Name                    | Description                             | Type  | Default |
-| ----------------------- | --------------------------------------- | ----- | ------- |
-| `common.train_path`     | Directory where the model will be saved | `str` | `.`     |
-| `common.model_filename` | Filename of the model.                  | `str` | `model` |
-
-### Logging arguments
-
-| Name                      | Description                                                                                                    | Type            | Default                                           |
-| ------------------------- | -------------------------------------------------------------------------------------------------------------- | --------------- | ------------------------------------------------- |
-| `logging.fmt`             | Logging format.                                                                                                | `str`           | `%(asctime)s %(levelname)s %(name)s] %(message)s` |
-| `logging.level`           | Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`                                       | `Level`         | `INFO`                                            |
-| `logging.filepath`        | Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname` | `Optional[str]` |                                                   |
-| `logging.overwrite`       | Whether to overwrite the logfile or to append.                                                                 | `bool`          | `False`                                           |
-| `logging.to_stderr_level` | If filename is set, use this to log also to stderr at the given level.                                         | `Level`         | `ERROR`                                           |
-
-### Architecture arguments
-
-
-| Name                      | Description                                                                                         | Type    | Default                                                |
-| ------------------------- | --------------------------------------------------------------------------------------------------- | ------- | ------------------------------------------------------ |
-| `crnn.num_input_channels` | Number of channels of the input images.                                                             | `int`   | `1`                                                    |
-| `crnn.vertical_text`      | Whether the text is written vertically.                                                             | `bool`  | `False`                                                |
-| `crnn.cnn_num_features`   | Number of features in each convolutional layer.                                                     | `List`  | `[16, 16, 32, 32]`                                     |
-| `crnn.cnn_kernel_size`    | Kernel size of each convolutional layer (e.g. [n,n,...] or [[h1,w1],[h2,w2],...]).                  | `List`  | `[3, 3, 3, 3]`                                         |
-| `crnn.cnn_stride`         | Stride of each convolutional layer. (e.g. [n,n,...] or [[h1,w1],[h2,w2],...])                       | `List`  | `[1, 1, 1, 1]`                                         |
-| `crnn.cnn_dilation`       | Spacing between each convolutional layer kernel elements. (e.g. [n,n,...] or [[h1,w1],[h2,w2],...]) | `List`  | `[1, 1, 1, 1]`                                         |
-| `crnn.cnn_activation`     | Type of activation function in each convolutional layer (from `torch.nn`).                          | `List`  | `['LeakyReLU', 'LeakyReLU', 'LeakyReLU', 'LeakyReLU']` |
-| `crnn.cnn_poolsize`       | MaxPooling size after each convolutional layer. (e.g. [n,n,...] or [[h1,w1],[h2,w2],...]).          | `List`  | `[2, 2, 2, 0]`                                         |
-| `crnn.cnn_dropout`        | Dropout probability at the input of each convolutional layer.                                       | `List`  | `[0.0, 0.0, 0.0, 0.0]`                                 |
-| `crnn.cnn_batchnorm`      | Whether to do batch normalization before the activation in each convolutional layer.                | `List`  | `[False, False, False, False]`                         |
-| `crnn.rnn_layers`         | Number of recurrent layers.                                                                         | `int`   | `3`                                                    |
-| `crnn.rnn_units`          | Number of units in each recurrent layer.                                                            | `int`   | `256`                                                  |
-| `crnn.rnn_dropout`        | Dropout probability at the input of each recurrent layer.                                           | `float` | `0.5`                                                  |
-| `crnn.rnn_type`           | Type of recurrent layer (from `torch.nn`).                                                          | `str`   | `LSTM`                                                 |
-| `crnn.lin_dropout`        | Dropout probability at the input of the final linear layer.                                         | `float` | `0.5`                                                  |
-
-## Examples
-
-The model can be configured using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
-
-
-### Example with Command Line Arguments (CLI)
-
-Run the following command to create a model:
-```sh
-pylaia-htr-create-model /path/to/syms.txt \
-   --fixed_input_height 128 \
-   --crnn.rnn_layers 4 \
-   --logging.filepath model.log \
-   --common.train_path my_experiments/
-```
-
-### Example with a YAML configuration file
-
-Run the following command to create a model:
-```sh
-pylaia-htr-create-model --config config_create_model.yaml
-```
-
-Where `config_create_model.yaml` is:
-
-```yaml
-crnn:
-  cnn_activation:
-  - LeakyReLU
-  - LeakyReLU
-  - LeakyReLU
-  - LeakyReLU
-  cnn_batchnorm:
-  - true
-  - true
-  - true
-  - true
-  cnn_dilation:
-  - 1
-  - 1
-  - 1
-  - 1
-  cnn_kernel_size:
-  - 3
-  - 3
-  - 3
-  - 3
-  cnn_num_features:
-  - 12
-  - 24
-  - 48
-  - 48
-  cnn_poolsize:
-  - 2
-  - 2
-  - 0
-  - 2
-  lin_dropout: 0.5
-  rnn_dropout: 0.5
-  rnn_layers: 3
-  rnn_type: LSTM
-  rnn_units: 256
-fixed_input_height: 128
-save_model: true
-syms: /path/to/syms.txt
-```
diff --git a/docs/usage/language_models/index.md b/docs/usage/language_models/index.md
deleted file mode 100644
index 11463ff1..00000000
--- a/docs/usage/language_models/index.md
+++ /dev/null
@@ -1,217 +0,0 @@
-# Explicit language modeling with n-grams
-
-PyLaia supports lattice rescoring using a statistical language model.
-This documentation gives instructions to build a language model with [kenlm](https://kheafield.com/code/kenlm/).
-
-!!! note
-    You can also use [SRILM](http://www.speech.sri.com/projects/srilm/) to build an ARPA language model.
-
-To decode with a language model, you need:
-
-* [a language model](./index.md#build-the-language-model)
-* [a list of tokens](./index.md#list-of-tokens)
-* [a lexicon](./index.md#lexicon)
-
-## Build the language model
-
-### Install kenlm
-
-To build the language model, you first need to install and compile [kenlm](https://github.com/kpu/kenlm) by following the instructions detailed in the [README](https://github.com/kpu/kenlm#compiling).
-
-### Generate resources to train the language model
-
-To train a language model, you need to generate a corpus containing the training text tokenized at character, subword or word level.
-
-#### Characters
-
-Here is a sample of text tokenized at character-level (`corpus_characters.txt`).
-```text title="corpus_characters.txt"
-u d e <space> i <space> r e s t a u r a n t e r ,
-v æ r e t <space> u h y r e <space> m e g e t <space> s a m m e n , <space> o f t e <space> t i l <space> m a a l t i d e r <space> o g <space> t i l <space> t h e <space> h o s <space> O s s b a h r ,
-v i <space> s i d d e r <space> v e d <space> k a m i n e n <space> d e r <space> o g <space> s n a k k e r , <space> h v i l k e t <space> e r <space> m e g e t <space> m o r s o m t . <space> N u
-k o m m e r <space> d e r <space> m a n g e <space> r e i s e n d e <space> v e n n e r <space> e l l e r <space> s l æ g t <space> e l l e r <space> p r i n s e s s e r , <space> s o m
-O s s b a h r <space> m a a <space> v æ r e <space> s a m m e n <space> m e d <space> H e d b e r g <space> o f t e <space> o g s a a . <space> M e n <space> v i <space> k a n <space> l e v e
-```
-
-#### Subwords
-
-Here is a sample of text tokenized at subword-level (`corpus_subwords.txt`).
-```text title="corpus_subwords.txt"
-ud e <space> i <space> r e st au r ant er ,
-været <space> u h y r e <space> meget <space> sammen , <space> ofte <space> til <space> ma altid er <space> og <space> til <space> th e <space> hos <space> O s s ba h r ,
-vi <space> sidde r <space> ved <space> ka min en <space> der <space> og <space> snakke r , <space> hvilket <space> er <space> meget <space> morsomt . <space> Nu
-kommer <space> der <space> mange <space> r e i sende <space> venner <space> eller <space> s læg t <space> eller <space> pr in s e s ser , <space> som
-O s s ba h r <space> maa <space> være <space> sammen <space> med <space> H e d berg <space> ofte <space> ogsaa . <space> Men <space> vi <space> kan <space> lev e
-```
-
-#### Words
-Here is a sample of text tokenized at word-level (`corpus_words.txt`).
-```text title="corpus_words.txt"
-ude <space> i <space> restauranter <space> ,
-været <space> uhyre <space> meget <space> sammen <space> , <space> ofte <space> til <space> maaltider <space> og <space> til <space> the <space> hos <space> Ossbahr <space> ,
-vi <space> sidder <space> ved <space> kaminen <space> der <space> og <space> snakker <space> , <space> hvilket <space> er <space> meget <space> morsomt <space> . <space> Nu
-kommer <space> der <space> mange <space> reisende <space> venner <space> eller <space> slægt <space> eller <space> prinsesser <space> , <space> som
-Ossbahr <space> maa <space> være <space> sammen <space> med <space> Hedberg <space> ofte <space> ogsaa <space> . <space> Men <space> vi <space> kan <space> leve
-```
-
-### Train the language model
-
-Once your corpus is created, you can estimate the n-gram model.
-
-#### Characters
-
-At character-level, we recommend building a 6-gram model. Use the following command:
-
-```sh
-bin/lmplz --order 6 \
-    --text my_dataset/language_model/corpus_characters.txt \
-    --arpa my_dataset/language_model/model_characters.arpa \
-    --discount_fallback
-```
-
-!!! note
-    The `--discount_fallback` option can be removed if your corpus is very large.
-
-The following message should be displayed if the language model was built successfully:
-
-```sh
-=== 1/5 Counting and sorting n-grams ===
-Reading language_model/corpus.txt
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Unigram tokens 111629 types 109
-=== 2/5 Calculating and sorting adjusted counts ===
-Chain sizes: 1:1308 2:784852864 3:1471599104 4:2354558464 5:3433731328 6:4709116928
-Statistics:
-1 109 D1=0.586207 D2=0.534483 D3+=1.5931
-2 1734 D1=0.538462 D2=1.09853 D3+=1.381
-3 7957 D1=0.641102 D2=1.02894 D3+=1.37957
-4 17189 D1=0.747894 D2=1.20483 D3+=1.41084
-5 25640 D1=0.812458 D2=1.2726 D3+=1.57601
-6 32153 D1=0.727411 D2=1.13511 D3+=1.42722
-Memory estimate for binary LM:
-type      kB
-probing 1798 assuming -p 1.5
-probing 2107 assuming -r models -p 1.5
-trie     696 without quantization
-trie     313 assuming -q 8 -b 8 quantization
-trie     648 assuming -a 22 array pointer compression
-trie     266 assuming -a 22 -q 8 -b 8 array pointer compression and quantization
-=== 3/5 Calculating and sorting initial probabilities ===
-Chain sizes: 1:1308 2:27744 3:159140 4:412536 5:717920 6:1028896
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-####################################################################################################
-=== 4/5 Calculating and writing order-interpolated probabilities ===
-Chain sizes: 1:1308 2:27744 3:159140 4:412536 5:717920 6:1028896
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-####################################################################################################
-=== 5/5 Writing ARPA model ===
-----5---10---15---20---25---30---35---40---45---50---55---60---65---70---75---80---85---90---95--100
-****************************************************************************************************
-Name:lmplz	VmPeak:12643224 kB	VmRSS:6344 kB	RSSMax:1969316 kB	user:0.196445	sys:0.514686	CPU:0.711161	real:0.682693
-```
-
-#### Subwords
-
-At subword-level, we recommend building a 6-gram model. Use the following command:
-
-```sh
-bin/lmplz --order 6 \
-    --text my_dataset/language_model/corpus_subwords.txt \
-    --arpa my_dataset/language_model/model_subwords.arpa \
-    --discount_fallback
-```
-
-!!! note
-    The `--discount_fallback` option can be removed if your corpus is very large.
-
-#### Words
-
-At word-level, we recommend building a 3-gram model. Use the following command:
-
-```sh
-bin/lmplz --order 3 \
-    --text my_dataset/language_model/corpus_words.txt \
-    --arpa my_dataset/language_model/model_words.arpa \
-    --discount_fallback
-```
-
-!!! note
-    The `--discount_fallback` option can be removed if your corpus is very large.
-
-## Predict with a language model
-
-Once the language model is trained, you need to generate a list of tokens and a lexicon.
-
-### List of tokens
-
-The list of tokens `tokens.txt` lists all the tokens that can be predicted by PyLaia.
-It should be similar to `syms.txt`, but without any index, and can be generated with this command:
-```bash
-cut -d' ' -f 1 syms.txt > tokens.txt
-```
-
-!!! note
-    This file does not depend on the tokenization level.
-
-```text title="tokens.txt"
-<ctc>
-.
-,
-a
-b
-c
-...
-<space>
-```
-
-### Lexicon
-
-The lexicon lists all the words in the vocabulary and its decomposition in tokens.
-
-#### Characters
-
-At character-level, words are simply characters, so the `lexicon_characters.txt` file should map characters to characters:
-
-```text title="lexicon_characters.txt"
-<ctc> <ctc>
-. .
-, ,
-a a
-b b
-c c
-...
-<space> <space>
-```
-
-#### Subwords
-At subword-level, the `lexicon_subwords.txt` file should map subwords with their character decomposition:
-
-```text title="lexicon_subwords.txt"
-<ctc> <ctc>
-. .
-, ,
-altid a l t i d
-ant a n t
-au a u
-...
-<space> <space>
-```
-
-#### Words
-At word-level, the `lexicon_words.txt` file should map words with their character decomposition:
-
-```text title="lexicon_words.txt"
-<ctc> <ctc>
-. .
-, ,
-der d e r
-er e r
-eller e l l e r
-...
-<space> <space>
-```
-
-### Predict with PyLaia
-
-See the [dedicated example](../prediction/index.md#predict-with-a-language-model).
diff --git a/docs/usage/netout/index.md b/docs/usage/netout/index.md
deleted file mode 100644
index 039c3a90..00000000
--- a/docs/usage/netout/index.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# Netout
-
-The `pylaia-htr-netout` command can be used to dump the features extracted by PyLaia for a set of text-lines. To know more about the options of this command, use `pylaia-htr-netout --help`.
-
-!!! warning
-    This command was initially designed to combine PyLaia and Kaldi. Since December 2022, combining PyLaia with language models can be achieved more easily by [building a language model with KenLM](../language_models/index.md) and [predicting with `pylaia-htr-decode-ctc`](../prediction/index.md#predict-with-a-language-model).
-
-## Purpose
-
-This command outputs the feature matrix and lattice computed by PyLaia in Kaldi format for a given dataset.
-
-It requires:
-
-- a [list of image ids](../datasets/index.md#image-names),
-- the pickled `model` file created during [model initialization](../initialization/index.md),
-- the weights `*.ckpt` of the trained model created during [model training](../training/index.md).
-
-The files generated by this command are designed to combine PyLaia and Kaldi, but could also be used to predict with a custom decoder.
-
-## Parameters
-
-The full list of parameters is detailed in this section.
-
-### General parameters
-
-| Parameter  | Description                                                                                  | Type   | Default |
-| ---------- | -------------------------------------------------------------------------------------------- | ------ | ------- |
-| `img_list` | Positional argument. File containing the names of the images to decode (one image per line). | `str`  |         |
-| `img_dirs` | Directories containing line images.                                                          | `str`  |         |
-| `config`   | Path to a JSON configuration file                                                            | `json` |         |
-
-### Common parameters
-
-| Name                        | Description                                                                                                                                                                                                                                         | Type         | Default |
-| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | ------- |
-| `common.train_path`         | Directory where the model will be saved                                                                                                                                                                                                             | `str`        | `.`     |
-| `common.model_filename`     | Filename of the model.                                                                                                                                                                                                                              | `str`        | `model` |
-| `common.experiment_dirname` | Directory name of the experiment.                                                                                                                                                                                                                   | `experiment` | `74565` |
-| `common.checkpoint`         | Checkpoint to load. Must be a filepath, a filename, a glob pattern or `None` (in this case, the best checkpoint will be loaded). Note that the checkpoint will be searched in `common.experiment_dirname`, unless you provide an absolute filepath. | `int`        | `None`  |
-
-### Data arguments
-
-| Name              | Description                                      | Type        | Default       |
-| ----------------- | ------------------------------------------------ | ----------- | ------------- |
-| `data.batch_size` | Batch size.                                      | `int`       | `8`           |
-| `data.color_mode` | Color mode. Must be either `L`, `RGB` or `RGBA`. | `ColorMode` | `ColorMode.L` |
-
-### Netout arguments
-
-| Name                      | Description                                                                                                                                                                                 | Type            | Default |
-| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | ------- |
-| `netout.output_transform` | Transformation to apply at the end of the model. Should be `softmax` or `log_softmax`.                                                                                                      | `str`           | `None`  |
-| `netout.matrix`           | Path to the output file containing a list of keys (image ids) and values (output matrix where rows represents timesteps and columns CTC labels). This file can be directly used with Kaldi. | `Optional[str]` | `None`  |
-| `netout.lattice`          | Path to the output file containing containing a list of keys (image ids) and values (lattices representing the CTC output). This file can be directly used with Kaldi.                      | `Optional[str]` | `None`  |
-| `netout.digits`           | Number of digits to be used for formatting                                                                                                                                                  | `int`           | `10`    |
-
-### Logging arguments
-
-| Name                      | Description                                                                                                    | Type            | Default                                           |
-| ------------------------- | -------------------------------------------------------------------------------------------------------------- | --------------- | ------------------------------------------------- |
-| `logging.fmt`             | Logging format.                                                                                                | `str`           | `%(asctime)s %(levelname)s %(name)s] %(message)s` |
-| `logging.level`           | Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`                                       | `Level`         | `INFO`                                            |
-| `logging.filepath`        | Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname` | `Optional[str]` |                                                   |
-| `logging.overwrite`       | Whether to overwrite the logfile or to append.                                                                 | `bool`          | `False`                                           |
-| `logging.to_stderr_level` | If filename is set, use this to log also to stderr at the given level.                                         | `Level`         | `ERROR`                                           |
-
-### Trainer arguments
-
-Pytorch Lightning `Trainer` flags can also be set using the `--trainer` argument. See [the documentation](https://github.com/Lightning-AI/lightning/blob/1.7.0/docs/source-pytorch/common/trainer.rst#trainer-flags).
-
-This flag is mostly useful to define whether to predict on CPU or GPU.
-
-* `--trainer.gpus 0` to run on CPU,
-* `--trainer.gpus n` to run on `n` GPUs (use with `--training.auto_select True` for auto-selection),
-* `--trainer.gpus -1` to run on all GPUs.
-
-
-## Examples
-
-Dumping PyLaia's features can be done using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
-
-### Dumping features from a model from Hugging Face
-
-First, clone a trained model from Hugging Face:
-```bash
-git clone https://huggingface.co/Teklia/pylaia-huginmunin
-```
-
-List image names in `img_list.txt`:
-```text title="img_list.txt"
-docs/assets/219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f
-docs/assets/219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4
-```
-
-Dump PyLaia's output with:
-```bash
-pylaia-htr-netout --common.experiment_dirname pylaia-huginmunin/ \
-                  --common.model_filename pylaia-huginmunin/model \
-                  --netout.matrix matrix.txt \
-                  --netout.lattice lattice.txt \
-                  --img_dir [docs/assets] \
-                  img_list.txt
-```
-
-Output files will be written in `--common.experiment_dirname`:
-```
-├── pylaia-huginmunin/
-    ├── matrix.txt
-    └── lattice.txt
-```
-
-### Dumping features using a YAML configuration file
-
-Run the following command to dump PyLaia's output:
-```bash
-pylaia-htr-decode-ctc --config config_netout.yaml
-```
-
-With the following configuration file:
-```yaml title="config_netout.yaml"
-common:
-  experiment_dirname: pylaia-huginmunin
-  model_filename: pylaia-huginmunin/model
-img_list: img_list.txt
-img_dirs:
-  - docs/assets/
-netout:
-  matrix: matrix.txt
-  lattice: lattice.txt
-```
-
-Output files will be written in `--common.experiment_dirname`:
-```
-├── pylaia-huginmunin/
-    ├── matrix.txt
-    └── lattice.txt
-```
diff --git a/docs/usage/prediction/index.md b/docs/usage/prediction/index.md
deleted file mode 100644
index 134255a4..00000000
--- a/docs/usage/prediction/index.md
+++ /dev/null
@@ -1,317 +0,0 @@
-# Decoding
-
-The `pylaia-htr-decode-ctc` command can be used to predict using a trained PyLaia model. To know more about the options of this command, use `pylaia-htr-decode-ctc --help`.
-
-## Purpose
-
-This command uses a trained PyLaia model to predict on a dataset.
-
-It requires:
-
-- a [list of image ids](../datasets/index.md#image-names),
-- the pickled `model` file created during [model initialization](../initialization/index.md),
-- the weights `*.ckpt` of the trained model created during [model training](../training/index.md).
-
-## Parameters
-
-The full list of parameters is detailed in this section.
-
-### General parameters
-
-| Parameter  | Description                                                                                                         | Type   | Default |
-| ---------- | ------------------------------------------------------------------------------------------------------------------- | ------ | ------- |
-| `syms`     | Positional argument. Path to a file mapping characters to integers. The CTC symbol **must** be mapped to integer 0. | `str`  |         |
-| `img_list` | Positional argument. File containing the names of the images to decode (one image per line).                        | `str`  |         |
-| `img_dirs` | Directories containing line images.                                                                                 | `str`  |         |
-| `config`   | Path to a JSON configuration file                                                                                   | `json` |         |
-
-### Common parameters
-
-| Name                        | Description                                                                                                                                                                                                                                         | Type         | Default |
-| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | ------- |
-| `common.train_path`         | Directory where the model will be saved                                                                                                                                                                                                             | `str`        | `.`     |
-| `common.model_filename`     | Filename of the model.                                                                                                                                                                                                                              | `str`        | `model` |
-| `common.experiment_dirname` | Directory name of the experiment.                                                                                                                                                                                                                   | `str`        | `experiment` |
-| `common.checkpoint`         | Checkpoint to load. Must be a filepath, a filename, a glob pattern or `None` (in this case, the best checkpoint will be loaded). Note that the checkpoint will be searched in `common.experiment_dirname`, unless you provide an absolute filepath. | `str`        | `None`  |
-
-### Data arguments
-
-| Name               | Description                                       | Type        | Default       |
-| ------------------ | ------------------------------------------------- | ----------- | ------------- |
-| `data.batch_size`  | Batch size.                                       | `int`       | `8`           |
-| `data.color_mode`  | Color mode. Must be either `L`, `RGB` or `RGBA`.  | `ColorMode` | `ColorMode.L` |
-| `data.num_workers` | Number of worker processes created in dataloaders | `int`       | `None`        |
-| `data.reading_order` | Reading order on the input lines: LTR (Left-to-Right) or RTL (Right-to-Left). | `ReadingOrder`       | `LTR`        |
-
-### Decode arguments
-
-| Name                                  | Description                                                                                                                                                                         | Type            | Default   |
-| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | --------- |
-| `decode.include_img_ids`              | Include the associated image ids in the decoding/segmentation output                                                                                                                | `bool`          | `True`    |
-| `decode.separator`                    | String to use as a separator between the image ids and the decoding/segmentation output.                                                                                            | `str`           | ` `       |
-| `decode.join_string`                  | String to use to join the decoding output.                                                                                                                                          | `Optional[str]` | ` `       |
-| `decode.use_symbols`                  | Convert the decoding output to symbols instead of symbol index.                                                                                                                     | `bool`          | `True`    |
-| `decode.convert_spaces`               | Whether or not to convert spaces.                                                                                                                                                   | `bool`          | `False`   |
-| `decode.input_space`                  | Replace the space by this symbol if `convert_spaces` is set. Used for word segmentation and confidence score computation.                                                           | `str`           | `<space>` |
-| `decode.output_space`                 | Space symbol to display during decoding.                                                                                                                                            | `str`           | ` `       |
-| `decode.segmentation`                 | Use CTC alignment to estimate character or word segmentation. Should be `char` or `word`.                                                                                           | `Optional[str]` | `None `   |
-| `decode.temperature`                  | Temperature parameters used to scale the logits.                                                                                                                                    | `float`         | `1.0`     |
-| `decode.print_line_confidence_scores` | Whether to print line confidence scores.                                                                                                                                            | `bool`          | `False`   |
-| `decode.print_word_confidence_scores` | Whether to print word confidence scores.                                                                                                                                                            | `bool`          | `False`   |
-| `decode.use_language_model`           | Whether to decode with an external language model.                                                                                                                                  | `bool`          | `False`   |
-| `decode.language_model_path`          | Path to a KenLM or ARPA n-gram language model.                                                                                                                                      | `str`           | `None`    |
-| `decode.language_model_weight`        | Weight of the language model.                                                                                                                                                       | `float`         | `None`    |
-| `decode.tokens_path`                  | Path to a file containing valid tokens. If using a file, the expected format is for tokens mapping to the same index to be on the same line. The `ctc` symbol should be at index 0. | `str`           | `None`    |
-| `decode.lexicon_path`                 | Path to a lexicon file containing the possible words and corresponding spellings.                                                                                                   | `str`           | `None`    |
-| `decode.unk_token`                    | String representing unknown characters.                                                                                                                                             | `str`           | `<unk>`   |
-| `decode.blank_token`                  | String representing the blank/ctc symbol.                                                                                                                                           | `str`           | `<ctc>`   |
-
-
-### Logging arguments
-
-| Name                      | Description                                                                                                    | Type            | Default                                           |
-| ------------------------- | -------------------------------------------------------------------------------------------------------------- | --------------- | ------------------------------------------------- |
-| `logging.fmt`             | Logging format.                                                                                                | `str`           | `%(asctime)s %(levelname)s %(name)s] %(message)s` |
-| `logging.level`           | Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`                                       | `Level`         | `INFO`                                            |
-| `logging.filepath`        | Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname` | `Optional[str]` |                                                   |
-| `logging.overwrite`       | Whether to overwrite the logfile or to append.                                                                 | `bool`          | `False`                                           |
-| `logging.to_stderr_level` | If filename is set, use this to log also to stderr at the given level.                                         | `Level`         | `ERROR`                                           |
-
-### Trainer arguments
-
-Pytorch Lightning `Trainer` flags can also be set using the `--trainer` argument. See [the documentation](https://github.com/Lightning-AI/lightning/blob/1.7.0/docs/source-pytorch/common/trainer.rst#trainer-flags).
-
-This flag is mostly useful to define whether to predict on CPU or GPU.
-
-* `--trainer.gpus 0` to run on CPU,
-* `--trainer.gpus n` to run on `n` GPUs (use with `--training.auto_select True` for auto-selection),
-* `--trainer.gpus -1` to run on all GPUs.
-
-
-## Examples
-
-The prediction can be done using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
-
-We provide some images to try out our models. They can be found in `docs/assets`, on the [Gitlab repository](https://gitlab.teklia.com/atr/pylaia/-/tree/master/docs/assets?ref_type=heads). To test the prediction commands, make sure to download them on your end.
-
-```shell
-mkdir images
-wget https://user-images.githubusercontent.com/100838858/219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f.jpg -P images
-wget https://user-images.githubusercontent.com/100838858/219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4.jpg -P images
-```
-
-### Predict using a model from Hugging Face
-
-First, clone a trained model from Hugging Face:
-```bash
-git clone https://huggingface.co/Teklia/pylaia-huginmunin
-```
-
-!!! note
-    Some files are stored through [Git-LFS](https://git-lfs.com/). Make sure all files are correctly pulled using the following command, from the cloned folder.
-    ```bash
-    git lfs ls-files
-    ```
-
-    You should see three files:
-
-    - the language model (`language_model.arpa.gz`),
-    - the model architecture (`model`),
-    - the weights (`weights.ckpt`).
-
-List image names in `img_list.txt`:
-```text
-219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f
-219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4
-```
-
-Predict with:
-```bash
-pylaia-htr-decode-ctc --common.experiment_dirname pylaia-huginmunin/ \
-                      --common.model_filename pylaia-huginmunin/model \
-                      --img_dir [images] \
-                      pylaia-huginmunin/syms.txt \
-                      img_list.txt
-```
-
-Expected output:
-```text
-219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f o g <space> V a l s t a d <space> k a n <space> v i <space> v i s t
-219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 i k k e <space> g j ø r e <space> R e g n i n g <space> p a a ,
-```
-
-Note that by default, each token is separated by a space, and the space symbol is represented by `--decode.input_space` (default: `"<space>"`).
-
-### Predict with a YAML configuration file
-
-Run the following command to predict on CPU using a trained model:
-```bash
-pylaia-htr-decode-ctc --config config_decode_model.yaml
-```
-
-With the following configuration file:
-```yaml title="config_decode_model.yaml"
-syms: pylaia-huginmunin/syms.txt
-img_list: img_list.txt
-img_dirs:
-  - images/
-common:
-  experiment_dirname: pylaia-huginmunin
-  model_filename: pylaia-huginmunin/model
-decode:
-  join_string: ""
-  convert_spaces: true
-trainer:
-  gpus: 0
-```
-
-Expected output:
-```text
-219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f og Valstad kan vi vist
-219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 ikke gjøre Regning paa,
-```
-
-Note that setting `--decode.join_string ""` and `--decode.convert_spaces True` will display the text well formatted.
-
-### Predict with confidence scores
-
-PyLaia estimates character probabilities for each timestep. It is possible to print the probability at line or word level.
-
-#### Line confidence scores
-
-Run the following command to predict with line confidence scores:
-```bash
-pylaia-htr-decode-ctc --config config_decode_model.yaml \
-                      --decode.print_line_confidence_score True
-```
-
-Expected output:
-```text
-219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f 0.99 og Valstad kan vi vist
-219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 0.98 ikke gjøre Regning paa,
-```
-
-#### Word confidence scores
-
-Run the following command to predict with word confidence scores:
-```bash
-pylaia-htr-decode-ctc --config config_decode_model.yaml \
-                      --decode.print_word_confidence_score True
-```
-
-Expected output:
-```text
-219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f ['1.00', '1.00', '1.00', '1.00', '1.00'] og Valstad kan vi vist
-219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 ['1.00', '0.91', '1.00', '0.99'] ikke gjøre Regning paa,
-```
-
-#### Temperature scaling
-
-PyLaia tends to output overly confident probabilities. [Temperature scaling](https://arxiv.org/pdf/1706.04599.pdf) can be used to improve the reliability of confidence scores. The best temperature can be determined with a grid search algorithm by maximizing the correlation between 1-CER and confidence scores.
-
-Run the following command to predict calibrated word confidence scores with `temperature=3.0`:
-```bash
-pylaia-htr-decode-ctc --config config_decode_model.yaml \
-                      --decode.print_word_confidence_score True \
-                      --decode.temperature 3.0
-```
-
-Expected output:
-```text
-219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f ['0.93', '0.85', '0.87', '0.93', '0.85'] og Valstad kan vi vist
-219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 ['0.93', '0.84', '0.86', '0.83'] ikke gjøre Regning paa,
-```
-
-### Predict with a language model
-
-PyLaia supports KenLM and ARPA language models.
-
-Once the n-gram model is built, run the following command to combine it to your PyLaia model:
-```bash
-pylaia-htr-decode-ctc --config config_decode_model_lm.yaml
-```
-
-With the following configuration file:
-```yaml title="config_decode_model_lm.yaml"
-syms: pylaia-huginmunin/syms.txt
-img_list: img_list.txt
-img_dirs:
-  - images/
-common:
-  experiment_dirname: pylaia-huginmunin
-  model_filename: pylaia-huginmunin/model
-decode:
-  join_string: ""
-  convert_spaces: true
-  use_language_model: true
-  language_model_path: pylaia-huginmunin/language_model.arpa.gz
-  tokens_path: pylaia-huginmunin/tokens.txt
-  lexicon_path: pylaia-huginmunin/lexicon.txt
-  language_model_weight: 1.5
-  print_line_confidence_score: true
-trainer:
-  gpus: 0
-```
-
-Expected output:
-```text
-219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f 0.90 og Valstad kan vi vist
-219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 0.89 ikke gjøre Regning paa,
-```
-
-### Predict with CTC alignment
-
-It is possible to estimate text localization based on CTC alignments with the `--decode.segmentation` option. It returns a list of texts with their estimated coordinates: `(text, x1, y1, x2, y2)`.
-
-#### Character level
-
-To output character localization, use the `--decode.segmentation char` option:
-```bash
-pylaia-htr-decode-ctc --common.experiment_dirname pylaia-huginmunin/ \
-                      --common.model_filename pylaia-huginmunin/model \
-                      --decode.segmentation char \
-                      --img_dir [images] \
-                      pylaia-huginmunin/syms.txt \
-                      img_list.txt
-```
-
-Expected output:
-```text
-219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f [('o', 1, 1, 31, 128), ('g', 32, 1, 79, 128), ('<space>', 80, 1, 143, 128), ('V', 144, 1, 167, 128), ('a', 168, 1, 223, 128), ('l', 224, 1, 255, 128), ('s', 256, 1, 279, 128), ('t', 280, 1, 327, 128), ('a', 328, 1, 367, 128), ('d', 368, 1, 407, 128), ('<space>', 408, 1, 496, 128), ('k', 497, 1, 512, 128), ('a', 513, 1, 576, 128), ('n', 577, 1, 624, 128), ('<space>', 625, 1, 712, 128), ('v', 713, 1, 728, 128), ('i', 729, 1, 776, 128), ('<space>', 777, 1, 808, 128), ('v', 809, 1, 824, 128), ('i', 825, 1, 872, 128), ('s', 873, 1, 912, 128), ('t', 913, 1, 944, 128)]
-219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 [('i', 1, 1, 23, 128), ('k', 24, 1, 71, 128), ('k', 72, 1, 135, 128), ('e', 136, 1, 191, 128), ('<space>', 192, 1, 248, 128), ('g', 249, 1, 264, 128), ('j', 265, 1, 312, 128), ('ø', 313, 1, 336, 128), ('r', 337, 1, 376, 128), ('e', 377, 1, 408, 128), ('<space>', 409, 1, 481, 128), ('R', 482, 1, 497, 128), ('e', 498, 1, 545, 128), ('g', 546, 1, 569, 128), ('n', 570, 1, 601, 128), ('i', 602, 1, 665, 128), ('n', 666, 1, 706, 128), ('g', 707, 1, 762, 128), ('<space>', 763, 1, 794, 128), ('p', 795, 1, 802, 128), ('a', 803, 1, 850, 128), ('a', 851, 1, 890, 128), (',', 891, 1, 914, 128)]
-```
-
-#### Word level
-
-To output word localization, use the `--decode.segmentation word` option:
-```bash
-pylaia-htr-decode-ctc --common.experiment_dirname pylaia-huginmunin/ \
-                      --common.model_filename pylaia-huginmunin/model \
-                      --decode.segmentation word \
-                      --img_dir [images] \
-                      pylaia-huginmunin/syms.txt \
-                      img_list.txt
-```
-
-Expected output:
-```text
-219007024-f45433e7-99fd-43b0-bce6-93f63fa72a8f [('og', 1, 1, 79, 128), ('<space>', 80, 1, 143, 128), ('Valstad', 144, 1, 407, 128), ('<space>', 408, 1, 496, 128), ('kan', 497, 1, 624, 128), ('<space>', 625, 1, 712, 128), ('vi', 713, 1, 776, 128), ('<space>', 777, 1, 808, 128), ('vist', 809, 1, 944, 128)]
-219008758-c0097bb4-c55a-4652-ad2e-bba350bee0e4 [('ikke', 1, 1, 191, 128), ('<space>', 192, 1, 248, 128), ('gjøre', 249, 1, 408, 128), ('<space>', 409, 1, 481, 128), ('Regning', 482, 1, 762, 128), ('<space>', 763, 1, 794, 128), ('paa,', 795, 1, 914, 128)]
-```
-
-### Predict on Right-To-Left data
-
-To predict on Right-To-Left data, use the `--data.reading_order` option:
-```bash
-pylaia-htr-decode-ctc --common.experiment_dirname pylaia-khatt/ \
-                      --common.model_filename pylaia-khatt/model \
-                      --data.reading_order RTL \
-                      --img_dir [images] \
-                      pylaia-khatt/syms.txt \
-                      img_list.txt
-```
-
-Expected output:
-```text
-text_line_1302 العلماء على فهم هذه الكتابات بالدراسات اللغوية السامية مثل العبرانية، وباللغة العربية التي
-```
diff --git a/docs/usage/training/index.md b/docs/usage/training/index.md
deleted file mode 100644
index e6858ae1..00000000
--- a/docs/usage/training/index.md
+++ /dev/null
@@ -1,222 +0,0 @@
-# Training
-
-The `pylaia-htr-train-ctc` command can be used to train a PyLaia model. To know more about the options of this command, use `pylaia-htr-train-ctc --help`.
-
-## Purpose
-
-This command trains a PyLaia architecture on a dataset.
-
-It requires:
-
-- a [formatted dataset](../datasets/index.md),
-- the pickled `model` file created during [model initialization](../initialization/index.md).
-
-!!! note
-
-    The [`pylaia-htr-dataset-validate`](../datasets/index.md) command can help you analyze your dataset and point out issues.
-
-## Parameters
-
-The full list of parameters is detailed in this section.
-
-### General parameters
-
-| Parameter      | Description                                                                                                         | Type   | Default |
-| -------------- | ------------------------------------------------------------------------------------------------------------------- | ------ | ------- |
-| `syms`         | Positional argument. Path to a file mapping characters to integers. The CTC symbol **must** be mapped to integer 0. | `str`  |         |
-| `img_dirs`     | Positional argument. Directories containing line images.                                                            | `str`  |         |
-| `tr_txt_table` | Positional argument. Path to a file mapping training image ids and tokenized transcription.                         | `str`  |         |
-| `va_txt_table` | Positional argument. Path to a file mapping validation image ids and tokenized transcription.                       | `str`  |         |
-| `config`       | Path to a JSON configuration file                                                                                   | `json` |         |
-
-### Common parameters
-
-| Name                        | Description                                                                                                                                                                                                                                         | Type            | Default          |
-| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | ---------------- |
-| `common.seed`               | Seed for random number generators.                                                                                                                                                                                                                  | `int`           | `74565`          |
-| `common.train_path`         | Directory where the model will be saved                                                                                                                                                                                                             | `str`           | `.`              |
-| `common.model_filename`     | Filename of the model.                                                                                                                                                                                                                              | `str`           | `model`          |
-| `common.experiment_dirname` | Directory name of the experiment.                                                                                                                                                                                                                   | `str`           | `experiment`     |
-| `common.monitor`            | Metric to monitor for early stopping and checkpointing.                                                                                                                                                                                             | `Monitor`       | `Monitor.va_cer` |
-| `common.checkpoint`         | Checkpoint to load. Must be a filepath, a filename, a glob pattern or `None` (in this case, the best checkpoint will be loaded). Note that the checkpoint will be searched in `common.experiment_dirname`, unless you provide an absolute filepath. | `Optional[str]` | `None`           |
-
-### Data arguments
-
-| Name                 | Description                                                                   | Type           | Default       |
-| -------------------- | ----------------------------------------------------------------------------- | -------------- | ------------- |
-| `data.batch_size`    | Batch size.                                                                   | `int`          | `8`           |
-| `data.color_mode`    | Color mode. Must be either `L`, `RGB` or `RGBA`.                              | `ColorMode`    | `ColorMode.L` |
-| `data.num_workers`   | Number of worker processes created in dataloaders                             | `int`          | `None`        |
-| `data.reading_order` | Reading order on the input lines: LFT (Left-to-Right) or RTL (Right-to-Left). | `ReadingOrder` | `LFT`         |
-
-### Train arguments
-
-| Name                            | Description                                                                                                                                                  | Type        | Default       |
-| ------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------- | ------------- |
-| `train.delimiters`              | List of symbols representing the word delimiters.                                                                                                            | `List`      | `["<space>"]` |
-| `train.checkpoint_k`            | Model saving mode: `-1` all models will be saved, `0`: no models are saved, `k` the `k` best models are saved.                                               | `int`       | `3`           |
-| `train.resume`                  | Whether to resume training with a checkpoint. This option can be used to continue training on the same dataset.                                              | `bool`      | `False`       |
-| `train.pretrain`                | Whether to load pretrained weights from a checkpoint. This option can be used to load pretrained weights when fine-tuning a model on a new dataset.          | `bool`      | `False`       |
-| `train.freeze_layers`           | List of layers to freeze during training: `"conv"` to freeze convolutional layers, `"rnn"` to freeze recurrent layers, `"linear"` to freeze the linear layer | `List[str]` | `None`        |
-| `train.early_stopping_patience` | Number of validation epochs with no improvement after which training will be stopped.                                                                        | `int`       | `20`          |
-| `train.gpu_stats`               | Whether to include GPU stats in the training progress bar.                                                                                                   | `bool`      | `False`       |
-| `train.augment_training`        | Whether to use data augmentation.                                                                                                                            | `bool`      | `False`       |
-| `train.log_to_wandb`            | Whether to log training metrics and parameters to Weights & Biases.                                                                                          | `bool`      | `False`       |
-
-
-### Logging arguments
-
-| Name                      | Description                                                                                                    | Type            | Default                                           |
-| ------------------------- | -------------------------------------------------------------------------------------------------------------- | --------------- | ------------------------------------------------- |
-| `logging.fmt`             | Logging format.                                                                                                | `str`           | `%(asctime)s %(levelname)s %(name)s] %(message)s` |
-| `logging.level`           | Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`                                       | `Level`         | `INFO`                                            |
-| `logging.filepath`        | Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname` | `Optional[str]` |                                                   |
-| `logging.overwrite`       | Whether to overwrite the logfile or to append.                                                                 | `bool`          | `False`                                           |
-| `logging.to_stderr_level` | If filename is set, use this to log also to stderr at the given level.                                         | `Level`         | `ERROR`                                           |
-
-### Optimizer arguments
-
-| Name                           | Description                                               | Type    | Default   |
-| ------------------------------ | --------------------------------------------------------- | ------- | --------- |
-| `optimizers.name`              | Optimization algorithm. Must be `SGD`, `RMSProp`, `Adam`. | `List`  | `RMSProp` |
-| `optimizers.learning_rate`     | Learning rate.                                            | `float` | `0.0005`  |
-| `optimizers.momentum`          | Momentum.                                                 | `float` | `0.0`     |
-| `optimizers.weight_l2_penalty` | Apply this L2 weight penalty to the loss function.        | `float` | `0.0`     |
-| `optimizers.nesterov`          | Whether to use Nesterov momentum.                         | `bool`  | `False`   |
-
-### Scheduler arguments
-
-| Name                 | Description                                                                     | Type      | Default           |
-| -------------------- | ------------------------------------------------------------------------------- | --------- | ----------------- |
-| `scheduler.active`   | Whether to use an on-plateau learning rate scheduler.                           | `bool`    | `False`           |
-| `scheduler.monitor`  | Metric for the scheduler to monitor.                                            | `Monitor` | `Monitor.va_loss` |
-| `scheduler.patience` | Number of epochs with no improvement after which learning rate will be reduced. | `int`     | `5`               |
-| `scheduler.factor`   | Factor by which the learning rate will be reduced.                              | `float`   | `0.1`             |
-
-### Trainer arguments
-
-Pytorch Lighning `Trainer` flags can also be set using the `--trainer` argument. See [the documentation](https://github.com/Lightning-AI/lightning/blob/1.7.0/docs/source-pytorch/common/trainer.rst#trainer-flags).
-
-
-## Examples
-
-The model can be trained using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
-
-
-### Train from scratch with Command Line Arguments (CLI)
-
-Run the following command to train a model:
-```sh
-pylaia-htr-train-ctc /path/to/syms.txt \
-   `cat img_dirs_args.txt`\
-   /path/to/train.txt \
-   /path/to/val.txt \
-   --trainer.gpus 1 \
-   --data.batch_size 32
-```
-
-### Train from scratch with a YAML configuration file
-
-Run the following command to train a model:
-```sh
-pylaia-htr-train-ctc --config config_train_model.yaml
-```
-
-Where `config_train_model.yaml` is:
-
-```yaml
-syms: /path/to/syms.txt
-img_dirs:
-  - /path/to/images/
-tr_txt_table: /path/to/train.txt
-va_txt_table: /path/to/val.txt
-common:
-  experiment_dirname: experiment-dataset
-logging:
-  filepath: pylaia_training.log
-scheduler:
-  active: true
-train:
-  augment_training: true
-  early_stopping_patience: 80
-trainer:
-  auto_select_gpus: true
-  gpus: 1
-  max_epochs: 600
-```
-
-### Resume training from a checkpoint
-
-Run the following command to continue training from a checkpoint for 200 epochs.
-```sh
-pylaia-htr-train-ctc --config config_train_model.yaml --train.resume true --trainer.max_epochs 200
-```
-
-!!! note
-    If `common.checkpoint` is not set, PyLaia will select the best checkpoint from `common.experiment_dirname`
-
-### Fine-tune from a checkpoint
-
-Run the following command to load pretrained weights and fine-tune on a new dataset for 200 epochs.
-```sh
-pylaia-htr-train-ctc --config config_train_model.yaml --common.experiment_dirname experiment/ --common.checkpoint initial_checkpoint.ckpt --train.pretrain true --trainer.max_epochs 200
-```
-
-!!! warning
-    This option requires that your model architecture `model` matches the one used to train `initial_checkpoint.ckpt`.
-    The last linear layer will be reinitialized using the Xavier initialization to match the new vocabulary size.
-
-!!! note
-    The initial checkpoint is expected to be in the following directory: `{common.experiment_dirname}/pretrained/`.
-    If it is located in `common.experiment_dirname`, the subdirectory `pretrained` will be created and the checkpoint will be moved there automatically.
-
-### Train on Right-To-Left reading order
-
-By default, PyLaia expects images with Left-to-Right reading order.
-To train a model on Right-To-Left data, use the following command:
-```sh
-pylaia-htr-train-ctc --config config_train_model_rtl.yaml
-```
-
-Where `config_train_model_rtl.yaml` is:
-
-```yaml title="config_train_model_rtl.yaml"
-syms: /path/to/syms.txt
-img_dirs:
-  - /path/to/images/
-tr_txt_table: /path/to/train.txt
-va_txt_table: /path/to/val.txt
-common:
-  experiment_dirname: experiment-dataset
-logging:
-  filepath: pylaia_training.log
-scheduler:
-  active: true
-train:
-  augment_training: true
-  early_stopping_patience: 80
-trainer:
-  auto_select_gpus: true
-  gpus: 1
-  max_epochs: 600
-data:
-  reading_order: RTL
-```
-
-### Train and log to Weights & Biases
-
-By default, PyLaia logs metrics and losses to a local CSV file. You can chose to log into [Weights & Biases](https://wandb.ai/home) instead.
-
-To set up Weights & Biases:
-* Run `pip install pylaia[wandb]` to install the required dependencies
-* Sign in to Weights & Biases using `wandb login`
-
-Then, start training with `pylaia-htr-train-ctc --config config_train_model.yaml --train.log_to_wandb true`.
-
-This will create a project called `PyLaia` in W&B with one run for each training. The following are monitored for each run:
-* Training and validation metrics (losses, CER, WER)
-* Model gradients
-* System metrics (GPU and CPU utilisation, temperature, allocated memory)
-* Hyperparameters (training configuration)
-
-A public dashboard is available [here](https://wandb.ai/starride-teklia/PyLaia%20demo) as an example.
diff --git a/mkdocs.yml b/mkdocs.yml
deleted file mode 100644
index da08526c..00000000
--- a/mkdocs.yml
+++ /dev/null
@@ -1,89 +0,0 @@
-site_name: PyLaia
-site_dir: public
-
-theme:
-  name: material
-
-  font:
-    text: Roboto
-    code: Roboto Mono
-
-  features:
-  - navigation.top
-  - navigation.tracking
-  - navigation.indexes
-  - navigation.instant
-  - navigation.instant.progress
-  - content.code.copy
-
-  palette:
-    # Palette toggle for light mode
-    - media: "(prefers-color-scheme: light)"
-      scheme: default
-      toggle:
-        icon: material/brightness-7
-        name: Switch to dark mode
-
-    # Palette toggle for dark mode
-    - media: "(prefers-color-scheme: dark)"
-      scheme: slate
-      toggle:
-        icon: material/brightness-4
-        name: Switch to light mode
-
-plugins:
-- search
-- autorefs
-- literate-nav:
-    nav_file: SUMMARY.md
-- mkdocstrings:
-    handlers:
-      python:
-        options:
-          show_root_toc_entry: false
-          show_object_full_path: false
-          show_root_heading: yes
-          show_source: true
-          docstring_style: google
-          merge_init_into_class: yes
-          show_category_heading: yes
-          show_signature_annotations: yes
-          separate_signature: yes
-          members_order: source
-          unwrap_annotated: yes
-          show_if_no_docstring: yes
-          filters:
-            - "!^_"
-            - "!^__"
-
-nav:
-  - Home: index.md
-  - Original paper: original_paper.md
-  - Get started:
-    - get_started/index.md
-    - Development: get_started/development.md
-  - Usage:
-    - usage/index.md
-    - Dataset:
-      - usage/datasets/index.md
-      - Dataset formatting: usage/datasets/format.md
-    - Model initialization: usage/initialization/index.md
-    - Training: usage/training/index.md
-    - Prediction: usage/prediction/index.md
-    - Netout: usage/netout/index.md
-    - Explicit language modeling: usage/language_models/index.md
-  # defer to literate-nav
-  - Code Reference: reference/
-  - Releases: releases.md
-
-copyright:  Copyright &copy; Teklia
-
-extra:
-  social:
-    - icon: fontawesome/brands/gitlab
-      name: Git repository for this project
-      link: https://gitlab.teklia.com/atr/pylaia
-
-markdown_extensions:
-  - pymdownx.superfences
-  - admonition
diff --git a/pyproject.toml b/pyproject.toml
index 79672355..3d8dce8a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -56,15 +56,6 @@ dev = [
 test = [
     "tox==4.18.0",
 ]
-docs = [
-    "black==24.8.0",
-    "mkdocs-autorefs==1.1.0",
-    "mkdocs-gen-files==0.5.0",
-    "mkdocs-literate-nav==0.6.1",
-    "mkdocs-material==9.5.33",
-    "mkdocs-section-index==0.3.9",
-    "mkdocstrings-python==1.10.8",
-]
 wandb = ["wandb==0.18.5"]
 
 [project.scripts]
-- 
GitLab


From d4af0374bc670743bc32566b3508c2d241a55d30 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Wed, 5 Feb 2025 17:20:28 +0100
Subject: [PATCH 10/17] Fix ids

---
 docs/modules/ROOT/pages/releases.adoc         | 52 +++++++++----------
 .../pages/usage/language_models/index.adoc    | 10 ++--
 2 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/docs/modules/ROOT/pages/releases.adoc b/docs/modules/ROOT/pages/releases.adoc
index 1536cce3..7999f025 100644
--- a/docs/modules/ROOT/pages/releases.adoc
+++ b/docs/modules/ROOT/pages/releases.adoc
@@ -6,12 +6,12 @@
 
 Released on *12 August 2024* &bull; View on https://gitlab.teklia.com/atr/pylaia/-/releases/1.1.1[Gitlab]
 
-[#breaking-changes]
+[#breaking-changes-1-1-1]
 === Breaking changes
 
 * The https://gitlab.teklia.com/atr/nnutils/[nnutils] library is no longer maintained and is only compatible with Python 3.6, 3.7, 3.8. As such its dependency has been removed. The `crnn.use_masks` parameter has been removed. It is still supported to keep the compatibility with older training configuration but will be ignored.
 
-[#feature]
+[#features-1-1-1]
 === Feature
 
 * The number of worker processes created in dataloaders is now exposed through the `data.num_workers`  parameter.
@@ -23,7 +23,7 @@ Released on *12 August 2024* &bull; View on https://gitlab.teklia.com/atr/pylaia
  ** linear layers.
 * Proper support for right-to-left (RTL) languages is now available. Enable it using the `data.reading_order` argument both during https://atr.pages.teklia.com/pylaia/usage/training/#train-on-right-to-left-reading-order[training] and https://atr.pages.teklia.com/pylaia/usage/prediction/#predict-on-right-to-left-data[decoding].
 
-[#dependencies]
+[#dependencies-1-1-1]
 === Dependencies
 
 * Bumped https://pypi.org/project/pytorch-lightning/[pytorch-lightning] to version `1.4.2`.
@@ -41,18 +41,18 @@ Released on *12 August 2024* &bull; View on https://gitlab.teklia.com/atr/pylaia
 
 Released on *22 December 2023* &bull; View on https://gitlab.teklia.com/atr/pylaia/-/releases/1.1.0[Gitlab]
 
-[#breaking-changes-2]
+[#breaking-changes-1-1-0]
 === Breaking changes
 
 * Official support for Python3.8 has been dropped. This doesn't mean that the current code doesn't run on python3.8, we simply do not test that compatibility anymore. This decision was made since active support of python 3.8 has stopped for a while now and many libraries in the ML world have stopped supporting it as well.
 
-[#feature-2]
+[#feature-1-1-0]
 === Feature
 
 * A Docker image with the needed code to use this library is now built on every tag.
 * The coverage of our tests suite is displayed again as a GitLab badge on the repository as well as in the README.md file.
 
-[#documentation]
+[#documentation-1-1-0]
 === Documentation
 
 * Many sections were added to the documentation:
@@ -65,7 +65,7 @@ Released on *22 December 2023* &bull; View on https://gitlab.teklia.com/atr/pyla
  ** the full Python code reference.
 * A contribution guide and a code of conduct were added for new contributors.
 
-[#dependencies-2]
+[#dependencies-1-1-0]
 === Dependencies
 
 * Bumped https://pypi.org/project/pytorch-lightning/[pytorch-lightning] to version `1.3.0`
@@ -77,7 +77,7 @@ Released on *22 December 2023* &bull; View on https://gitlab.teklia.com/atr/pyla
  ** https://pypi.org/project/numpy/[numpy] direct dependency was removed since it's installed through `scipy` and `matplotlib`.
 * PyLaia dropped support for python 3.8 so the https://pypi.org/project/dataclasses/[dataclasses] dependency was dropped.
 
-[#misc-2]
+[#misc-1-1-0]
 === Misc
 
 * The `torch.testing.assert_allclose` has been replaced by `torch.testing.assert_close` since it became deprecated in https://github.com/pytorch/pytorch/issues/61844[PyTorch 1.12.0].
@@ -87,17 +87,17 @@ Released on *22 December 2023* &bull; View on https://gitlab.teklia.com/atr/pyla
 
 Released on *18 October 2023* &bull; View on https://gitlab.teklia.com/atr/pylaia/-/releases/1.0.7[Gitlab]
 
-[#feature-2]
+[#feature-1-0-7]
 === Feature
 
 * When using a language model, a confidence score is now returned based on the log-likelyhood of the hypothesis.
 
-[#documentation-2]
+[#documentation-1-0-7]
 === Documentation
 
 A public documentation is now available on https://atr.pages.teklia.com/pylaia/. It's still under construction but next releases will add more and more content.
 
-[#dependencies-2]
+[#dependencies-1-0-7]
 === Dependencies
 
 * Bumped https://pypi.org/project/pytorch-lightning/[pytorch-lightning] to version `1.1.7`
@@ -105,7 +105,7 @@ A public documentation is now available on https://atr.pages.teklia.com/pylaia/.
 * Bumped GitHub action https://github.com/actions/setup-python[actions/setup-python] to version `4`
 * Bumped GitHub action https://github.com/actions/checkout[actions/checkout] to version `4`
 
-[#development]
+[#development-1-0-7]
 === Development
 
 * Releases are now built more easily through a Makefile.
@@ -117,22 +117,22 @@ A public documentation is now available on https://atr.pages.teklia.com/pylaia/.
 
 Released on *12 September 2023* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.6[Github]
 
-[#feature-2]
+[#feature-1-0-6]
 === Feature
 
 * During training, too small images are now padded to be able to pass the multiple convolution layers.
 
-[#documentation-2]
+[#documentation-1-0-6]
 === Documentation
 
 * Fixed typos.
 
-[#dependencies-2]
+[#dependencies-1-0-6]
 === Dependencies
 
 * Replaced https://pillow.readthedocs.io/en/stable/releasenotes/2.7.0.html#antialias-renamed-to-lanczos[deprecated Pillow resampling method] `Image.ANTIALIAS` to `Image.Resample.Lanczos`.
 
-[#development-2]
+[#development-1-0-6]
 === Development
 
 * Pre-commit hooks were updated.
@@ -142,7 +142,7 @@ Released on *12 September 2023* &bull; View on https://github.com/jpuigcerver/Py
 
 Released on *29 March 2023* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.5[Github]
 
-[#dependencies-2]
+[#dependencies-1-0-5]
 === Dependencies
 
 * Requires `torch` version `1.13.0` or `1.13.1`.
@@ -153,7 +153,7 @@ Released on *29 March 2023* &bull; View on https://github.com/jpuigcerver/PyLaia
 
 Released on *4 January 2023* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.4[Github]
 
-[#dependencies-2]
+[#dependencies-1-0-4]
 === Dependencies
 
 * Requires `torch` version `1.13.0`.
@@ -163,18 +163,18 @@ Released on *4 January 2023* &bull; View on https://github.com/jpuigcerver/PyLai
 
 Released on *12 December 2022* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.3[Github]
 
-[#feature-2]
+[#feature-1-0-3]
 === Feature
 
 * Now able to decode using a trained Language model through beam search decoding.
 * Exposes https://pytorch.org/docs/stable/data.html#multi-process-data-loading[torch Dataloaders's num_workers] parameter on the Python training function to limit resource usage when needed.
 
-[#dependencies-2]
+[#dependencies-1-0-3]
 === Dependencies
 
 * Added dependency to `torchaudio` version `0.13.0`.
 
-[#development-2]
+[#development-1-0-3]
 === Development
 
 * Package version is now tracked through the `VERSION` file.
@@ -184,7 +184,7 @@ Released on *12 December 2022* &bull; View on https://github.com/jpuigcerver/PyL
 
 Released on *7 December 2022* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.2[Github]
 
-[#dependencies-2]
+[#dependencies-1-0-2]
 === Dependencies
 
 * Pinned dependency to `pytorch-lightning` to version `1.1.0`.
@@ -199,7 +199,7 @@ Released on *7 December 2022* &bull; View on https://github.com/jpuigcerver/PyLa
 
 Released on *2 December 2020* &bull; View on https://github.com/jpuigcerver/PyLaia/releases/tag/1.0.0[Github]
 
-[#added]
+[#added-1-0-0]
 === Added
 
 * Support distributed training
@@ -220,7 +220,7 @@ Released on *2 December 2020* &bull; View on https://github.com/jpuigcerver/PyLa
 * Use Codecov to produce test coverage reports
 * Code is now analyzed using CodeFactor
 
-[#changed]
+[#changed-1-0-0]
 === Changed
 
 * Make Python 3.6 the minimum supported version
@@ -232,7 +232,7 @@ Released on *2 December 2020* &bull; View on https://github.com/jpuigcerver/PyLa
 * The entire shell API has changed for the better (thanks to jsonargparse). Arguments are now separated into groups and help messages are clearer.
 * Drastically improve our test suite, we now have a 91% coverage
 
-[#removed]
+[#removed-1-0-0]
 === Removed
 
 * Remove egs directory. These live now at https://github.com/carmocca/PyLaia-examples
@@ -242,7 +242,7 @@ Released on *2 December 2020* &bull; View on https://github.com/jpuigcerver/PyLa
 * Remove CTCLatticeGenerator. Please open an issue if you were using it
 * We no longer support saving checkpoints for more than one metric. Will be added back in a future version
 
-[#fixed]
+[#fixed-1-0-0]
 === Fixed
 
 * Fix WER calculation when long delimiters are used
diff --git a/docs/modules/ROOT/pages/usage/language_models/index.adoc b/docs/modules/ROOT/pages/usage/language_models/index.adoc
index c87521a4..295abea7 100644
--- a/docs/modules/ROOT/pages/usage/language_models/index.adoc
+++ b/docs/modules/ROOT/pages/usage/language_models/index.adoc
@@ -72,7 +72,7 @@ Ossbahr <space> maa <space> være <space> sammen <space> med <space> Hedberg <sp
 
 Once your corpus is created, you can estimate the n-gram model.
 
-[#characters-2]
+[#training-characters]
 ==== Characters
 
 At character-level, we recommend building a 6-gram model. Use the following command:
@@ -127,7 +127,7 @@ Chain sizes: 1:1308 2:27744 3:159140 4:412536 5:717920 6:1028896
 Name:lmplz	VmPeak:12643224 kB	VmRSS:6344 kB	RSSMax:1969316 kB	user:0.196445	sys:0.514686	CPU:0.711161	real:0.682693
 ----
 
-[#subwords-2]
+[#training-subwords]
 ==== Subwords
 
 At subword-level, we recommend building a 6-gram model. Use the following command:
@@ -192,7 +192,7 @@ c
 
 The lexicon lists all the words in the vocabulary and its decomposition in tokens.
 
-[#characters-2]
+[#prediction-characters]
 ==== Characters
 
 At character-level, words are simply characters, so the `lexicon_characters.txt` file should map characters to characters:
@@ -209,7 +209,7 @@ c c
 <space> <space>
 ----
 
-[#subwords-2]
+[#prediction-subwords]
 ==== Subwords
 
 At subword-level, the `lexicon_subwords.txt` file should map subwords with their character decomposition:
@@ -226,7 +226,7 @@ au a u
 <space> <space>
 ----
 
-[#words-2]
+[#prediction-words]
 ==== Words
 
 At word-level, the `lexicon_words.txt` file should map words with their character decomposition:
-- 
GitLab


From cfff612b4c92d6e53b5b3cda264e5d8c487ae673 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <abadie@teklia.com>
Date: Wed, 5 Feb 2025 17:42:37 +0100
Subject: [PATCH 11/17] Generate index folders

---
 .gitlab-ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 434f3917..402ff46e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -121,7 +121,7 @@ bump-python-deps:
     - npm install
 
   script:
-    - npx antora antora-playbook.yml
+    - npx antora antora-playbook.yml --html-url-extension-style=indexify
 
 docs-build:
   extends: .docs
-- 
GitLab


From 7a8745ccb74907728ff00f891f04096c145551a1 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <bastien@nextcairn.com>
Date: Fri, 7 Feb 2025 11:57:43 +0100
Subject: [PATCH 12/17] Add missing datasets

---
 .gitignore                                    |   2 +-
 .../ROOT/pages/usage/datasets/format.adoc     | 213 +++++++++++++++
 .../ROOT/pages/usage/datasets/index.adoc      | 257 ++++++++++++++++++
 3 files changed, 471 insertions(+), 1 deletion(-)
 create mode 100644 docs/modules/ROOT/pages/usage/datasets/format.adoc
 create mode 100644 docs/modules/ROOT/pages/usage/datasets/index.adoc

diff --git a/.gitignore b/.gitignore
index 6bf199e2..098ada10 100644
--- a/.gitignore
+++ b/.gitignore
@@ -83,7 +83,7 @@ kk/
 laia/version.py
 
 # data
-datasets/
+/datasets/
 test-resources/
 
 # benchmarks
diff --git a/docs/modules/ROOT/pages/usage/datasets/format.adoc b/docs/modules/ROOT/pages/usage/datasets/format.adoc
new file mode 100644
index 00000000..700a97b2
--- /dev/null
+++ b/docs/modules/ROOT/pages/usage/datasets/format.adoc
@@ -0,0 +1,213 @@
+[#dataset-formatting]
+= Dataset formatting
+
+To train PyLaia, you need line images and their corresponding transcriptions. The dataset should be divided into three sets: training, validation and test sets.
+
+The dataset should be formatted as follows:
+
+[,bash]
+----
+# Images
+├── images
+    ├── train/
+    ├── val/
+    └── test/
+# Image ids (used for prediction)
+├── train_ids.txt
+├── val_ids.txt
+├── test_ids.txt
+# Tokenized transcriptions (used for training)
+├── train.txt
+├── val.txt
+├── test.txt
+# Transcriptions (used for evaluation)
+├── train_text.txt
+├── val_text.txt
+├── test_text.txt
+# Symbol list
+└── syms.txt
+----
+
+[#images]
+== Images
+
+By default, images should be resized to a fixed height (recommended value: 128 pixels). This can be done using https://imagemagick.org/script/mogrify.php[ImageMagick's `mogrify`] function:
+
+----
+mogrify -resize x128 images/*.jpg
+----
+
+Note that PyLaia can also support variable size images by setting `--fixed_input_height 0` during xref:usage/initialization/index.adoc[model initialization].
+
+[#ground-truth]
+== Ground truth
+
+[#tokenized-transcriptions]
+=== Tokenized transcriptions
+
+Two files `{train|val}.txt` are required to train the model. They should map image names and tokenized transcriptions for the training and validation sets.
+
+Example:
+
+[,text]
+----
+train/im01 f o r <space> d e t <space> t i l f æ l d e <space> d e t <space> s k u l d e <space> l y k k e s <space> D i g
+train/im02 a t <space> o p d r i v e <space> d e t <space> o m s k r e v n e <space> e x p l : <space> a f
+train/im03 « F r u <space> I n g e r » , <space> a t <space> s e n d e <space> m i g <space> s a m m e
+----
+
+[#transcriptions]
+=== Transcriptions
+
+Three files `{train|val|test}_text.txt` are required to evaluate your models. They should map image names and non-tokenized transcriptions.
+
+Example:
+
+[,text]
+----
+train/im01 for det tilfælde det skulde lykkes Dig
+train/im02 at opdrive det omskrevne expl: af
+train/im03 «Fru Inger», at sende mig samme
+----
+
+[#image-list]
+=== Image list
+
+Three files `{train|val|test}_ids.txt` are required to run predictions. They should list image names without transcriptions and can be obtained with:
+
+[,bash]
+----
+cut -d' ' -f1 train_text.txt > train_ids.txt
+----
+
+Example:
+
+[,text]
+----
+train/im01
+train/im02
+train/im03
+----
+
+[#symbol-list]
+=== Symbol list
+
+Finally, a file named `syms.txt` is required, mapping tokens from the training set and their index, starting with the `<ctc>` token.
+
+Example:
+
+[,text]
+----
+<ctc> 0
+! 1
+" 2
+& 3
+' 4
+( 5
+) 6
++ 7
+, 8
+- 9
+. 10
+/ 11
+0 12
+1 13
+2 14
+3 15
+4 16
+5 17
+6 18
+7 19
+8 20
+9 21
+: 22
+; 23
+< 24
+= 25
+> 26
+? 27
+A 28
+B 29
+C 30
+D 31
+E 32
+F 33
+G 34
+H 35
+I 36
+J 37
+K 38
+L 39
+M 40
+N 41
+O 42
+P 43
+Q 44
+R 45
+S 46
+T 47
+U 48
+V 49
+W 50
+X 51
+Y 52
+Z 53
+[ 54
+] 55
+a 56
+b 57
+c 58
+d 59
+e 60
+f 61
+g 62
+h 63
+i 64
+j 65
+k 66
+l 67
+m 68
+n 69
+o 70
+p 71
+q 72
+r 73
+s 74
+t 75
+u 76
+v 77
+w 78
+x 79
+y 80
+z 81
+« 82
+¬ 83
+» 84
+¼ 85
+½ 86
+Å 87
+Æ 88
+Ø 89
+à 90
+á 91
+â 92
+ä 93
+å 94
+æ 95
+ç 96
+è 97
+é 98
+ê 99
+ö 100
+ø 101
+ù 102
+û 103
+ü 104
+– 105
+— 106
+’ 107
+„ 108
+… 109
+<unk> 110
+<space> 111
+----
diff --git a/docs/modules/ROOT/pages/usage/datasets/index.adoc b/docs/modules/ROOT/pages/usage/datasets/index.adoc
new file mode 100644
index 00000000..a3d92996
--- /dev/null
+++ b/docs/modules/ROOT/pages/usage/datasets/index.adoc
@@ -0,0 +1,257 @@
+[#dataset]
+= Dataset
+
+PyLaia datasets must be formatted following a specific format. Learn how to build a dataset by following this xref:./format.adoc[page].
+
+Once the dataset is created, you may use the `pylaia-htr-dataset-validate` command to compute statistics and make sure your dataset is valid. To know more about the options of this command, use `pylaia-htr-dataset-validate --help`.
+
+[#purpose]
+== Purpose
+
+This command will:
+
+* issue a warning if some images are missing (they will be ignored during training)
+* issue a warning if some images have an invalid width (they will be padded during training)
+* fail if images have variable height when `fixed_input_height>0`
+* fail if a character is missing in the list of symbols `syms.txt`
+
+If the dataset is valid, the script will:
+
+* display `Dataset is valid` and
+* save a summary of the dataset statistics in a Markdown file named after the argument provided in `--statistics_output`.
+
+[#parameters]
+== Parameters
+
+The full list of parameters is detailed in this section.
+
+[#general-parameters]
+=== General parameters
+
+|===
+| Parameter | Description | Type | Default
+
+| `syms`
+| Positional argument. Path to a file mapping characters to integers. The CTC symbol must be mapped to integer 0.
+| `str`
+|
+
+| `img_dirs`
+| Positional argument. Directories containing line images.
+| `str`
+|
+
+| `tr_txt_table`
+| Positional argument. Path to a file mapping training image ids and tokenized transcription.
+| `str`
+|
+
+| `va_txt_table`
+| Positional argument. Path to a file mapping validation image ids and tokenized transcription.
+| `str`
+|
+
+| `te_txt_table`
+| Positional argument. Path to a file mapping test image ids and tokenized transcription.
+| `str`
+|
+
+| `fixed_input_height`
+| Height of the input images. If set to 0, a variable height model will be used (see `adaptive_pooling`). This will be used to compute the model output height at the end of the convolutional layers.
+| `int`
+| 0
+
+| `statistics_output`
+| Where the Markdown summary will be written.
+| `str`
+| `"statistics.md"`
+
+| `config`
+| Path to a JSON configuration file
+| `json`
+|
+|===
+
+[#common-parameters]
+=== Common parameters
+
+|===
+| Name | Description | Type | Default
+
+| `common.train_path`
+| Directory where the model will be saved
+| `str`
+| `.`
+
+| `common.model_filename`
+| Filename of the model.
+| `str`
+| `model`
+|===
+
+[#logging-arguments]
+=== Logging arguments
+
+|===
+| Name | Description | Type | Default
+
+| `logging.fmt`
+| Logging format.
+| `str`
+| `%(asctime)s %(levelname)s %(name)s] %(message)s`
+
+| `logging.level`
+| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+| `Level`
+| `INFO`
+
+| `logging.filepath`
+| Filepath for the logs file. Can be a filepath or a filename to be created in `train_path`/`experiment_dirname`
+| `Optional[str]`
+|
+
+| `logging.overwrite`
+| Whether to overwrite the logfile or to append.
+| `bool`
+| `False`
+
+| `logging.to_stderr_level`
+| If filename is set, use this to log also to stderr at the given level.
+| `Level`
+| `ERROR`
+|===
+
+[#train-arguments]
+=== Train arguments
+
+|===
+| Name | Description | Type | Default
+
+| `train.delimiters`
+| List of symbols representing the word delimiters.
+| `List`
+| `["<space>"]`
+|===
+
+[#examples]
+== Examples
+
+These arguments can be passed using command-line arguments or a YAML configuration file. Note that CLI arguments override the values from the configuration file.
+
+[#example-with-command-line-arguments-cli]
+=== Example with Command Line Arguments (CLI)
+
+Run the following command to validate a dataset:
+
+[,sh]
+----
+pylaia-htr-dataset-validate /data/Esposalles/dataset/syms.txt \
+                            /data/Esposalles/dataset/images/ \
+                            /data/Esposalles/dataset/train.txt \
+                            /data/Esposalles/dataset/val.txt \
+                            /data/Esposalles/dataset/test.txt \
+                            --common.experiment_dirname experiment-esposalles/ \
+                            --fixed_input_height 128 \
+                            --statistics_output statistics.md
+----
+
+Will output:
+
+[,bash]
+----
+[2024-04-23 12:58:31,399 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
+[2024-04-23 12:58:32,010 INFO laia] Installed:
+[2024-04-23 12:58:32,050 INFO laia.common.loader] Loaded model model
+[2024-04-23 12:58:32,094 INFO laia] Dataset is valid
+[2024-04-23 12:58:32,094 INFO laia] Statistics written to statistics.md
+----
+
+[#example-with-a-yaml-configuration-file]
+=== Example with a YAML configuration file
+
+Run the following command to validate a dataset:
+
+[,sh]
+----
+pylaia-htr-dataset-validate --config config_dataset.yaml
+----
+
+Where `config_dataset.yaml` is:
+
+[,yaml]
+----
+syms: /data/Esposalles/dataset/syms.txt
+img_dirs: [/data/Esposalles/dataset/images/]
+tr_txt_table: /data/Esposalles/dataset/train.txt
+va_txt_table: /data/Esposalles/dataset/val.txt
+te_txt_table: /data/Esposalles/dataset/test.txt
+fixed_input_height: 128
+statistics_output: statistics.md
+common:
+  experiment_dirname: experiment-esposalles
+----
+
+[#example-with-perfect-dataset]
+=== Example with perfect dataset
+
+[,bash]
+----
+[2024-04-23 12:58:31,399 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
+[2024-04-23 12:58:32,010 INFO laia] Installed:
+[2024-04-23 12:58:32,050 INFO laia.common.loader] Loaded model model
+[2024-04-23 12:58:32,094 INFO laia] Dataset is valid
+[2024-04-23 12:58:32,094 INFO laia] Statistics written to statistics.md
+----
+
+[#example-with-missing-images]
+=== Example with missing images
+
+[,bash]
+----
+[2024-04-23 13:01:34,646 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
+[2024-04-23 13:01:35,200 INFO laia] Installed:
+[2024-04-23 13:01:35,232 INFO laia.common.loader] Loaded model model
+[2024-04-23 13:01:35,782 WARNING laia.data.text_image_from_text_table_dataset] No image file found for image ID '0d7cf548-742b-4067-9084-52478806091d_Line0_30af78fd-e15d-4873-91d1-69ad7c0623c3.jpg', ignoring example...
+[2024-04-23 13:01:35,783 WARNING laia.data.text_image_from_text_table_dataset] No image file found for image ID '0d7cf548-742b-4067-9084-52478806091d_Line0_b1fb9275-5d49-4266-9de0-e6a93fc6dfaf.jpg', ignoring example...
+[2024-04-23 13:01:35,894 INFO laia] Dataset is valid
+[2024-04-23 13:01:35,894 INFO laia] Statistics written to statistics.md
+----
+
+[#example-with-small-images]
+=== Example with small images
+
+[,sh]
+----
+[2024-04-23 13:01:34,646 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
+[2024-04-23 13:01:35,200 INFO laia] Installed:
+[2024-04-23 13:01:35,232 INFO laia.common.loader] Loaded model model
+[2024-04-23 13:01:36,052 ERROR laia] Issues found in the dataset.
+[2024-04-23 13:01:36,052 ERROR laia] train - Found some images too small for convolutions (width<8). They will be padded during training.
+----
+
+[#example-with-variable-image-height]
+=== Example with variable image height
+
+[,sh]
+----
+[2024-04-23 13:01:34,646 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
+[2024-04-23 13:01:35,200 INFO laia] Installed:
+[2024-04-23 13:01:35,232 INFO laia.common.loader] Loaded model model
+[2024-04-23 13:01:36,052 ERROR laia] Issues found in the dataset.
+[2024-04-23 13:01:36,052 ERROR laia] train - Found images with variable heights: ['/data/Esposalles/dataset/images/f6d2b699-e910-4191-bc7d-f56e60fe979a_Line2_91b43b71-ea60-4f42-a896-880676aed723.jpg'].
+[2024-04-23 13:01:36,052 ERROR laia] test - Found images with variable heights: ['/data/Esposalles/dataset/images/fd1e6b3b-48cb-41c0-b1e9-2924b9562876_Line3_27e23ff1-f730-44ac-844f-479e5cc9e9aa.jpg'].
+----
+
+[#example-with-missing-symbol]
+=== Example with missing symbol
+
+[,sh]
+----
+[2024-04-23 13:01:34,646 INFO laia] Arguments: {'syms': '/data/Esposalles/dataset/syms.txt', 'img_dirs': ['/data/Esposalles/dataset/images/'], 'tr_txt_table': '/data/Esposalles/dataset/train.txt', 'va_txt_table': '/data/Esposalles/dataset/val.txt', 'te_txt_table': '/data/Esposalles/dataset/test.txt', 'fixed_input_height': 128, 'common': CommonArgs(seed=74565, train_path='', model_filename='model', experiment_dirname='experiment-esposalles', monitor=<Monitor.va_cer: 'va_cer'>, checkpoint=None), 'train': TrainArgs(delimiters=['<space>'], checkpoint_k=3, resume=False, early_stopping_patience=80, gpu_stats=False, augment_training=True)}
+[2024-04-23 13:01:35,200 INFO laia] Installed:
+[2024-04-23 13:01:35,232 INFO laia.common.loader] Loaded model model
+[2024-04-23 13:01:36,052 ERROR laia] Issues found in the dataset.
+[2024-04-23 13:01:36,052 ERROR laia] train - Found some unknown symbols: {'='}
+[2024-04-23 13:01:36,052 ERROR laia] val - Found some unknown symbols: {'='}
+[2024-04-23 13:01:36,052 ERROR laia] test - Found some unknown symbols: {'='}
+----
-- 
GitLab


From 8cf45b6e51f5a23ad43ea1c98441d9a820d342bc Mon Sep 17 00:00:00 2001
From: Bastien Abadie <bastien@nextcairn.com>
Date: Fri, 7 Feb 2025 12:01:29 +0100
Subject: [PATCH 13/17] Edit block

---
 docs/modules/ROOT/pages/usage/prediction/index.adoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/modules/ROOT/pages/usage/prediction/index.adoc b/docs/modules/ROOT/pages/usage/prediction/index.adoc
index 5bb889cc..7deddfa4 100644
--- a/docs/modules/ROOT/pages/usage/prediction/index.adoc
+++ b/docs/modules/ROOT/pages/usage/prediction/index.adoc
@@ -265,7 +265,7 @@ git clone https://huggingface.co/Teklia/pylaia-huginmunin
 ----
 
 [NOTE]
-// ====
+====
 Some files are stored through https://git-lfs.com/[Git-LFS]. Make sure all files are correctly pulled using the following command, from the cloned folder.
 
 [,bash]
@@ -278,7 +278,7 @@ You should see three files:
 * the language model (`language_model.arpa.gz`),
 * the model architecture (`model`),
 * the weights (`weights.ckpt`).
-// ====
+====
 
 List image names in `img_list.txt`:
 
-- 
GitLab


From 4359e1785bb3ca750154bdd1f6d3f802eacebf49 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <bastien@nextcairn.com>
Date: Fri, 7 Feb 2025 14:41:18 +0100
Subject: [PATCH 14/17] Edit list of options in table

---
 docs/modules/ROOT/pages/usage/training/index.adoc | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/docs/modules/ROOT/pages/usage/training/index.adoc b/docs/modules/ROOT/pages/usage/training/index.adoc
index 32d0523c..8997bb5b 100644
--- a/docs/modules/ROOT/pages/usage/training/index.adoc
+++ b/docs/modules/ROOT/pages/usage/training/index.adoc
@@ -180,7 +180,15 @@ The full list of parameters is detailed in this section.
 | `%(asctime)s %(levelname)s %(name)s] %(message)s`
 
 | `logging.level`
-| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+a| Logging level. Should be in 
+
+* `NOTSET`
+* `DEBUG`
+* `INFO`
+* `WARNING`
+* `ERROR`
+* `CRITICAL`
+
 | `Level`
 | `INFO`
 
-- 
GitLab


From 32a4ab09c26c35197528a924fc6b1bf779059be6 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <bastien@nextcairn.com>
Date: Fri, 7 Feb 2025 15:36:49 +0100
Subject: [PATCH 15/17] Convert link: to xref: with teklia-antora

---
 docs/modules/ROOT/pages/usage/language_models/index.adoc | 8 ++++----
 docs/modules/ROOT/pages/usage/netout/index.adoc          | 4 ++--
 docs/modules/ROOT/pages/usage/prediction/index.adoc      | 2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/modules/ROOT/pages/usage/language_models/index.adoc b/docs/modules/ROOT/pages/usage/language_models/index.adoc
index 295abea7..f9898c70 100644
--- a/docs/modules/ROOT/pages/usage/language_models/index.adoc
+++ b/docs/modules/ROOT/pages/usage/language_models/index.adoc
@@ -8,9 +8,9 @@ NOTE: You can also use http://www.speech.sri.com/projects/srilm/[SRILM] to build
 
 To decode with a language model, you need:
 
-* link:./index.md#build-the-language-model[a language model]
-* link:./index.md#list-of-tokens[a list of tokens]
-* link:./index.md#lexicon[a lexicon]
+* xref:./index.adoc#build-the-language-model[a language model]
+* xref:./index.adoc#list-of-tokens[a list of tokens]
+* xref:./index.adoc#lexicon[a lexicon]
 
 [#build-the-language-model]
 == Build the language model
@@ -246,4 +246,4 @@ eller e l l e r
 [#predict-with-pylaia]
 === Predict with PyLaia
 
-See the link:usage/prediction/index.md#predict-with-a-language-model[dedicated example].
+See the xref:usage/prediction/index.adoc#predict-with-a-language-model[dedicated example].
\ No newline at end of file
diff --git a/docs/modules/ROOT/pages/usage/netout/index.adoc b/docs/modules/ROOT/pages/usage/netout/index.adoc
index a5713d48..14299cc6 100644
--- a/docs/modules/ROOT/pages/usage/netout/index.adoc
+++ b/docs/modules/ROOT/pages/usage/netout/index.adoc
@@ -3,7 +3,7 @@
 
 The `pylaia-htr-netout` command can be used to dump the features extracted by PyLaia for a set of text-lines. To know more about the options of this command, use `pylaia-htr-netout --help`.
 
-WARNING: This command was initially designed to combine PyLaia and Kaldi. Since December 2022, combining PyLaia with language models can be achieved more easily by xref:usage/language_models/index.adoc[building a language model with KenLM] and link:usage/prediction/index.md#predict-with-a-language-model[predicting with `pylaia-htr-decode-ctc`].
+WARNING: This command was initially designed to combine PyLaia and Kaldi. Since December 2022, combining PyLaia with language models can be achieved more easily by xref:usage/language_models/index.adoc[building a language model with KenLM] and xref:usage/prediction/index.adoc#predict-with-a-language-model[predicting with `pylaia-htr-decode-ctc`].
 
 [#purpose]
 == Purpose
@@ -12,7 +12,7 @@ This command outputs the feature matrix and lattice computed by PyLaia in Kaldi
 
 It requires:
 
-* a link:usage/datasets/index.md#image-names[list of image ids],
+* a xref:usage/datasets/format.adoc#image-list[list of image ids],
 * the pickled `model` file created during xref:usage/initialization/index.adoc[model initialization],
 * the weights `*.ckpt` of the trained model created during xref:usage/training/index.adoc[model training].
 
diff --git a/docs/modules/ROOT/pages/usage/prediction/index.adoc b/docs/modules/ROOT/pages/usage/prediction/index.adoc
index 7deddfa4..c1bb575b 100644
--- a/docs/modules/ROOT/pages/usage/prediction/index.adoc
+++ b/docs/modules/ROOT/pages/usage/prediction/index.adoc
@@ -10,7 +10,7 @@ This command uses a trained PyLaia model to predict on a dataset.
 
 It requires:
 
-* a link:usage/datasets/index.md#image-names[list of image ids],
+* a xref:usage/datasets/index.adoc#image-names[list of image ids],
 * the pickled `model` file created during xref:usage/initialization/index.adoc[model initialization],
 * the weights `*.ckpt` of the trained model created during xref:usage/training/index.adoc[model training].
 
-- 
GitLab


From 73dfe1574220c30dbf1efbde0c7d6ba13a843340 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <bastien@nextcairn.com>
Date: Fri, 7 Feb 2025 15:40:15 +0100
Subject: [PATCH 16/17] Fix trailing whitespaces

---
 docs/modules/ROOT/pages/usage/language_models/index.adoc | 2 +-
 docs/modules/ROOT/pages/usage/training/index.adoc        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/modules/ROOT/pages/usage/language_models/index.adoc b/docs/modules/ROOT/pages/usage/language_models/index.adoc
index f9898c70..b1756bd1 100644
--- a/docs/modules/ROOT/pages/usage/language_models/index.adoc
+++ b/docs/modules/ROOT/pages/usage/language_models/index.adoc
@@ -246,4 +246,4 @@ eller e l l e r
 [#predict-with-pylaia]
 === Predict with PyLaia
 
-See the xref:usage/prediction/index.adoc#predict-with-a-language-model[dedicated example].
\ No newline at end of file
+See the xref:usage/prediction/index.adoc#predict-with-a-language-model[dedicated example].
diff --git a/docs/modules/ROOT/pages/usage/training/index.adoc b/docs/modules/ROOT/pages/usage/training/index.adoc
index 8997bb5b..3693cd3a 100644
--- a/docs/modules/ROOT/pages/usage/training/index.adoc
+++ b/docs/modules/ROOT/pages/usage/training/index.adoc
@@ -180,7 +180,7 @@ The full list of parameters is detailed in this section.
 | `%(asctime)s %(levelname)s %(name)s] %(message)s`
 
 | `logging.level`
-a| Logging level. Should be in 
+a| Logging level. Should be in
 
 * `NOTSET`
 * `DEBUG`
-- 
GitLab


From 19ed68072f4cced075a7f6301a05026eb7d16ab0 Mon Sep 17 00:00:00 2001
From: Bastien Abadie <bastien@nextcairn.com>
Date: Mon, 10 Feb 2025 16:57:27 +0100
Subject: [PATCH 17/17] Update log levels

---
 docs/modules/ROOT/pages/usage/datasets/index.adoc      | 10 +++++++++-
 .../modules/ROOT/pages/usage/initialization/index.adoc | 10 +++++++++-
 docs/modules/ROOT/pages/usage/netout/index.adoc        | 10 +++++++++-
 docs/modules/ROOT/pages/usage/prediction/index.adoc    | 10 +++++++++-
 4 files changed, 36 insertions(+), 4 deletions(-)

diff --git a/docs/modules/ROOT/pages/usage/datasets/index.adoc b/docs/modules/ROOT/pages/usage/datasets/index.adoc
index a3d92996..94db2caa 100644
--- a/docs/modules/ROOT/pages/usage/datasets/index.adoc
+++ b/docs/modules/ROOT/pages/usage/datasets/index.adoc
@@ -101,7 +101,15 @@ The full list of parameters is detailed in this section.
 | `%(asctime)s %(levelname)s %(name)s] %(message)s`
 
 | `logging.level`
-| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+a| Logging level. Should be in
+
+* `NOTSET`
+* `DEBUG`
+* `INFO`
+* `WARNING`
+* `ERROR`
+* `CRITICAL`
+
 | `Level`
 | `INFO`
 
diff --git a/docs/modules/ROOT/pages/usage/initialization/index.adoc b/docs/modules/ROOT/pages/usage/initialization/index.adoc
index ae2d11f1..60c5a3ac 100644
--- a/docs/modules/ROOT/pages/usage/initialization/index.adoc
+++ b/docs/modules/ROOT/pages/usage/initialization/index.adoc
@@ -83,7 +83,15 @@ The full list of parameters is detailed in this section.
 | `%(asctime)s %(levelname)s %(name)s] %(message)s`
 
 | `logging.level`
-| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+a| Logging level. Should be in
+
+* `NOTSET`
+* `DEBUG`
+* `INFO`
+* `WARNING`
+* `ERROR`
+* `CRITICAL`
+
 | `Level`
 | `INFO`
 
diff --git a/docs/modules/ROOT/pages/usage/netout/index.adoc b/docs/modules/ROOT/pages/usage/netout/index.adoc
index 14299cc6..fe7d857c 100644
--- a/docs/modules/ROOT/pages/usage/netout/index.adoc
+++ b/docs/modules/ROOT/pages/usage/netout/index.adoc
@@ -128,7 +128,15 @@ The full list of parameters is detailed in this section.
 | `%(asctime)s %(levelname)s %(name)s] %(message)s`
 
 | `logging.level`
-| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+a| Logging level. Should be in
+
+* `NOTSET`
+* `DEBUG`
+* `INFO`
+* `WARNING`
+* `ERROR`
+* `CRITICAL`
+
 | `Level`
 | `INFO`
 
diff --git a/docs/modules/ROOT/pages/usage/prediction/index.adoc b/docs/modules/ROOT/pages/usage/prediction/index.adoc
index c1bb575b..ee4ccd09 100644
--- a/docs/modules/ROOT/pages/usage/prediction/index.adoc
+++ b/docs/modules/ROOT/pages/usage/prediction/index.adoc
@@ -209,7 +209,15 @@ The full list of parameters is detailed in this section.
 | `%(asctime)s %(levelname)s %(name)s] %(message)s`
 
 | `logging.level`
-| Logging level. Should be in `{NOTSET,DEBUG,INFO,WARNING,ERROR,CRITICAL}`
+a| Logging level. Should be in
+
+* `NOTSET`
+* `DEBUG`
+* `INFO`
+* `WARNING`
+* `ERROR`
+* `CRITICAL`
+
 | `Level`
 | `INFO`
 
-- 
GitLab