diff --git a/dan/datasets/extract/arkindex.py b/dan/datasets/extract/arkindex.py
index 5fa595ebdafe8cabafe6eb1a9a745419419b151b..e13578a7c5052551e47a6a3f6e0aa6d7fb9cd4a4 100644
--- a/dan/datasets/extract/arkindex.py
+++ b/dan/datasets/extract/arkindex.py
@@ -384,6 +384,10 @@ class ArkindexExtractor:
             subword_vocab_size=self.subword_vocab_size,
         )
 
+        # Stop if the SentencePiece model could not be trained; a warning was already logged.
+        if not tokenizer.sentencepiece_model:
+            return
+
         for level, tokenize in (
             ("characters", tokenizer.char_tokenize),
             ("words", tokenizer.word_tokenize),
diff --git a/dan/datasets/extract/utils.py b/dan/datasets/extract/utils.py
index 8ee14af3685aaeb1842fce56704159eb2bacfa74..6bd3693c68c9422b8709c001f5166fc6a4d54b4c 100644
--- a/dan/datasets/extract/utils.py
+++ b/dan/datasets/extract/utils.py
@@ -186,12 +186,23 @@ class Tokenizer:
         with NamedTemporaryFile(dir=self.outdir, suffix=".txt", mode="w") as tmp:
             tmp.write("\n".join(self.training_corpus))
             tmp.flush()
-            spm.SentencePieceTrainer.train(
-                input=tmp.name,
-                vocab_size=self.subword_vocab_size,
-                model_prefix=self.prefix,
-                user_defined_symbols=self.special_tokens,
-            )
+
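+            # Training may fail, e.g. when the requested vocabulary size is too large for the corpus.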
+            try:
+                spm.SentencePieceTrainer.train(
+                    input=tmp.name,
+                    vocab_size=self.subword_vocab_size,
+                    model_prefix=self.prefix,
+                    user_defined_symbols=self.special_tokens,
+                    minloglevel=1,
+                )
+            except Exception as e:
+                logger.warning(
+                    f"Failed to train a SentencePiece model for subword tokenization: {e}. "
+                    "Try again after adjusting the `--subword-vocab-size` parameter."
+                )
+                self.sentencepiece_model = None
+                return
 
         # Load the model
         self.sentencepiece_model = spm.SentencePieceProcessor(