From 46d5a373438c0be996b6274bccf5bb48b3dc4c28 Mon Sep 17 00:00:00 2001
From: revuk100 <renato.vukovic@uni-duesseldorf.de>
Date: Thu, 5 Sep 2024 17:17:45 +0200
Subject: [PATCH] inference script: log errors and continue instead of re-raising

On an exception during prediction, the checkpoint is still saved and the error
is still logged, but the loop now continues with the next dialogue instead of
stopping at e.with_traceback().

---
 experiments/TOD_ontology_inference.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/experiments/TOD_ontology_inference.py b/experiments/TOD_ontology_inference.py
index 5f0f48d..6e987e8 100644
--- a/experiments/TOD_ontology_inference.py
+++ b/experiments/TOD_ontology_inference.py
@@ -154,7 +154,7 @@ def main():
                     else:
                         LLM_input = prompt_generator.generate_prompt(step=i, dialogue=text, term_list=terms, relations_so_far=relations_so_far, additional_input=current_responses)
 
-                    try: #if it fails save the current dict and then throw the error
+                    try:  # if it fails, save the current dict, then log the error and continue
 
                         relationlist=["has slot", "has value", "has domain", "refers to same concept as"]
                         if config.only_hasslot:
@@ -179,7 +179,7 @@ def main():
                         logger.info(f"Checkpoint saved at {checkpoint_filename} after {counter} dialogues")
                         logger.error(f"Error at dialogue {dial_id} in split {split}")
                         logger.error(f"Error message: {e}")
-                        e.with_traceback()
+                        continue
 
                     if config.predict_for_cot_decoding:
                         output_string += "Step " + str(i) + " response:\n"
-- 
GitLab
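
For context, here is a minimal, self-contained sketch of the log-and-continue
pattern this patch switches to. It assumes a dict of dialogues, a JSON
checkpoint file, and a module-level logger roughly as in
TOD_ontology_inference.py; run_inference and extract_fn are illustrative
names, not the script's actual API.

    import json
    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    def run_inference(dialogues, extract_fn, checkpoint_filename="checkpoint.json"):
        # Per-dialogue loop: on failure, save a checkpoint, log, and continue.
        predictions = {}
        for counter, (dial_id, text) in enumerate(dialogues.items(), start=1):
            try:
                # Stand-in for the prompt generation + LLM call in the real script.
                predictions[dial_id] = extract_fn(text)
            except Exception as e:
                # Save the partial results and log the error, then move on to
                # the next dialogue instead of re-raising.
                with open(checkpoint_filename, "w") as f:
                    json.dump(predictions, f)
                logger.info(f"Checkpoint saved at {checkpoint_filename} after {counter} dialogues")
                logger.error(f"Error at dialogue {dial_id}")
                logger.error(f"Error message: {e}")
                continue
        return predictions

A call like run_inference({"dial_0": "i need a cheap hotel"}, some_llm_fn)
then returns whatever could be predicted, with partial results on disk if any
dialogue raised.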