diff --git a/worker_generic_training_dataset/worker.py b/worker_generic_training_dataset/worker.py
index 783065786c52fcd8d0588d14992b0be180123a07..4066a6fbe1cd7797169e68a156fe5fc904258fe0 100644
--- a/worker_generic_training_dataset/worker.py
+++ b/worker_generic_training_dataset/worker.py
@@ -154,7 +154,7 @@ class DatasetExtractor(DatasetWorker):
                 state=classification.state,
                 worker_run_id=get_object_id(classification.worker_run),
             )
-            for classification in list_classifications(element.id)
+            for classification in list_classifications(element.id).iterator()
         ]
         if classifications:
             logger.info(f"Inserting {len(classifications)} classification(s)")
@@ -178,7 +178,7 @@ class DatasetExtractor(DatasetWorker):
                 worker_version_id=get_object_id(transcription.worker_version),
                 worker_run_id=get_object_id(transcription.worker_run),
             )
-            for transcription in list_transcriptions(element.id)
+            for transcription in list_transcriptions(element.id).iterator()
         ]
         if transcriptions:
             logger.info(f"Inserting {len(transcriptions)} transcription(s)")
@@ -194,7 +194,9 @@ class DatasetExtractor(DatasetWorker):
         entities: List[CachedEntity] = []
         transcription_entities: List[CachedTranscriptionEntity] = []
         for transcription in transcriptions:
-            for transcription_entity in list_transcription_entities(transcription.id):
+            for transcription_entity in list_transcription_entities(
+                transcription.id
+            ).iterator():
                 entity = CachedEntity(
                     id=transcription_entity.entity.id,
                     type=transcription_entity.entity.type.name,
@@ -329,7 +331,7 @@ class DatasetExtractor(DatasetWorker):
         # List children
         children = list_children(element.id)
         nb_children: int = children.count()
-        for child_idx, child in enumerate(children, start=1):
+        for child_idx, child in enumerate(children.iterator(), start=1):
             logger.info(f"Processing child ({child_idx}/{nb_children})")
             # Insert child
             self.insert_element(child, parent_id=element.id)
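
For reviewers unfamiliar with the pattern: assuming these list_* helpers return Peewee-style selects (which the cached models suggest), the change swaps direct iteration for `.iterator()`, which streams rows instead of caching every fetched row on the query object. A minimal standalone sketch of the idea; the `Page` model and sample data are hypothetical and not part of this worker:

from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")


class Page(Model):
    # Hypothetical model, a stand-in for the worker's cached tables
    name = CharField()

    class Meta:
        database = db


db.create_tables([Page])
Page.insert_many([{"name": f"page_{i}"} for i in range(3)]).execute()

query = Page.select()
nb_pages: int = query.count()  # separate COUNT(*) query, fetches no rows

# Plain iteration would populate the query's internal row cache;
# .iterator() yields each row once and discards it, keeping memory
# flat regardless of the size of the result set.
for idx, page in enumerate(query.iterator(), start=1):
    print(f"Processing page ({idx}/{nb_pages}): {page.name}")

Note the trade-off mirrored in the last hunk: `children.count()` still issues its own COUNT query before the streamed iteration begins, since `.iterator()` leaves no cached rows to count afterwards.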