From 48d4aa0d375fb87dcc0252e09bea5914c9436a96 Mon Sep 17 00:00:00 2001
From: Manon Blanco <blanco@teklia.com>
Date: Wed, 8 Nov 2023 11:43:51 +0000
Subject: [PATCH] Catch RuntimeError when formatting LM files

---
 dan/datasets/extract/arkindex.py |  3 +++
 dan/datasets/extract/utils.py    | 22 ++++++++++++++++------
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/dan/datasets/extract/arkindex.py b/dan/datasets/extract/arkindex.py
index 5fa595eb..e13578a7 100644
--- a/dan/datasets/extract/arkindex.py
+++ b/dan/datasets/extract/arkindex.py
@@ -384,6 +384,9 @@ class ArkindexExtractor:
             subword_vocab_size=self.subword_vocab_size,
         )
 
+        if not tokenizer.sentencepiece_model:
+            return
+
         for level, tokenize in (
             ("characters", tokenizer.char_tokenize),
             ("words", tokenizer.word_tokenize),
diff --git a/dan/datasets/extract/utils.py b/dan/datasets/extract/utils.py
index 8ee14af3..6bd3693c 100644
--- a/dan/datasets/extract/utils.py
+++ b/dan/datasets/extract/utils.py
@@ -186,12 +186,22 @@ class Tokenizer:
         with NamedTemporaryFile(dir=self.outdir, suffix=".txt", mode="w") as tmp:
             tmp.write("\n".join(self.training_corpus))
             tmp.flush()
-            spm.SentencePieceTrainer.train(
-                input=tmp.name,
-                vocab_size=self.subword_vocab_size,
-                model_prefix=self.prefix,
-                user_defined_symbols=self.special_tokens,
-            )
+
+            try:
+                spm.SentencePieceTrainer.train(
+                    input=tmp.name,
+                    vocab_size=self.subword_vocab_size,
+                    model_prefix=self.prefix,
+                    user_defined_symbols=self.special_tokens,
+                    minloglevel=1,
+                )
+            except Exception as e:
+                logger.warning(
+                    f"Failed to train a sentencepiece model for subword tokenization: {e} "
+                    "Try again by editing the `--subword-vocab-size` parameter."
+                )
+                self.sentencepiece_model = None
+                return
 
         # Load the model
         self.sentencepiece_model = spm.SentencePieceProcessor(
-- 
GitLab