Unverified commit 5ff93d57, authored by Carel van Niekerk, committed by GitHub

Bug fix (#103)


* Separate test and train domains

* Add progress bars in ontology embedder

* Update custom_util.py

* Fix custom_util things I broke

* GitHub master

* Save dialogue ids in prediction file

* Fix bug in ontology extraction

* Return dialogue ids in predictions file and fix bugs

* Add setsumbt starting config loader

* Add script to extract golden labels from dataset to match model predictions

* Add more setsumbt configs

* Add option to use local files only in transformers package

* Update starting configurations for setsumbt

* GitHub master

* Update README.md

* Update README.md

* Update convlab/dialog_agent/agent.py

* Revert custom_util.py

* Update custom_util.py

Co-authored-by: Carel van Niekerk <carel.niekerk@hhu.de>
Co-authored-by: Michael Heck <michael.heck@hhu.de>
Co-authored-by: Christian Geishauser <christian.geishauser@hhu.de>
parent d19e48a3
convlab/dialog_agent/agent.py
@@ -64,7 +64,7 @@ class PipelineAgent(Agent):
         ===== ===== ====== === == ===
     """

-    def __init__(self, nlu: NLU, dst: DST, policy: Policy, nlg: NLG, name: str):
+    def __init__(self, nlu: NLU, dst: DST, policy: Policy, nlg: NLG, name: str, return_semantic_acts: bool = False):
         """The constructor of PipelineAgent class.

         Here are some special combination cases:
@@ -95,6 +95,7 @@ class PipelineAgent(Agent):
         self.dst = dst
         self.policy = policy
         self.nlg = nlg
+        self.return_semantic_acts = return_semantic_acts
         self.init_session()
         self.agent_saves = []
@@ -199,6 +200,8 @@ class PipelineAgent(Agent):
         self.turn += 1
         self.agent_saves.append(self.save_info())
+        if self.return_semantic_acts:
+            return self.output_action
         return model_response

     def save_info(self):
...
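The effect of the new flag: with return_semantic_acts=True, response() short-circuits after the policy step and returns the semantic dialogue acts (self.output_action) instead of the NLG surface text. A minimal usage sketch, assuming compatible NLU/DST/policy/NLG component instances; the placeholder variables below are not part of this commit:

    # Minimal sketch: nlu, dst, policy, nlg are placeholders for any
    # compatible ConvLab components; only return_semantic_acts is new here.
    from convlab.dialog_agent.agent import PipelineAgent

    sys_agent = PipelineAgent(nlu, dst, policy, nlg, name='sys',
                              return_semantic_acts=True)

    # Returns the policy's semantic acts rather than the generated utterance.
    acts = sys_agent.response("I need a cheap hotel in the north.")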
@@ -6,7 +6,7 @@
     "batchsz": 1000,
     "seed": 0,
     "epoch": 10,
-    "eval_frequency": 1,
+    "eval_frequency": 5,
     "process_num": 4,
     "sys_semantic_to_usr": false,
     "num_eval_dialogues": 500
...
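eval_frequency controls how often the training loop pauses to run the evaluator; raising it from 1 to 5 means evaluation now runs every fifth epoch (or training iteration) instead of every one, trading feedback granularity for training speed. A rough sketch of the loop shape this setting implies; train_one_epoch is a hypothetical stand-in for the actual trainer:

    # Assumed loop shape; the real trainer may key off iterations, not epochs.
    eval_frequency = conf['model']['eval_frequency']  # now 5 instead of 1
    for epoch in range(conf['model']['epoch']):
        train_one_epoch(policy_sys)                # hypothetical helper
        if (epoch + 1) % eval_frequency == 0:      # every 5th pass only
            eval_policy(conf, policy_sys, env, sess, save_eval, log_save_path)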
custom_util.py
@@ -25,8 +25,7 @@ import signal
 slot_mapping = {"pricerange": "price range", "post": "postcode", "arriveBy": "arrive by", "leaveAt": "leave at",
-                "Id": "trainid", "ref": "reference"}
+                "Id": "train id", "ref": "reference", "trainID": "train id"}
 sys.path.append(os.path.dirname(os.path.dirname(
     os.path.dirname(os.path.abspath(__file__)))))
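The table fix maps both "Id" and the camel-case "trainID" to the canonical "train id"; previously "Id" mapped to the unspaced "trainid" and "trainID" was missing entirely. A small sketch of the normalization this table supports; normalize_slot is illustrative, not a function added by the commit:

    # Illustrative use of the corrected table.
    def normalize_slot(slot: str) -> str:
        """Map a raw slot name to its canonical form; unknown names pass through."""
        return slot_mapping.get(slot, slot)

    assert normalize_slot("trainID") == "train id"  # newly covered
    assert normalize_slot("Id") == "train id"       # was "trainid" before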
@@ -103,7 +102,8 @@ def load_config_file(filepath: str = None) -> dict:
 def save_config(terminal_args, config_file_args, config_save_path, policy_config=None):
     config_save_path = os.path.join(config_save_path, f'config_saved.json')
-    args_dict = {"args": terminal_args, "config": config_file_args, "policy_config": policy_config}
+    args_dict = {"args": terminal_args,
+                 "config": config_file_args, "policy_config": policy_config}
     json.dump(args_dict, open(config_save_path, 'w'))
@@ -165,7 +165,8 @@ def eval_policy(conf, policy_sys, env, sess, save_eval, log_save_path, single_do
     goals = []
     for seed in range(1000, 1000 + conf['model']['num_eval_dialogues']):
         set_seed(seed)
-        goal = create_goals(goal_generator, 1, single_domain_goals, allowed_domains)
+        goal = create_goals(goal_generator, 1,
+                            single_domain_goals, allowed_domains)
         goals.append(goal[0])

     if conf['model']['process_num'] == 1:
@@ -177,14 +178,16 @@ def eval_policy(conf, policy_sys, env, sess, save_eval, log_save_path, single_do
                 'sys_semantic_to_usr'],
             save_flag=save_eval, save_path=log_save_path, goals=goals)
-        total_acts = book_acts + inform_acts + request_acts + select_acts + offer_acts + recommend_acts
+        total_acts = book_acts + inform_acts + request_acts + \
+            select_acts + offer_acts + recommend_acts
     else:
         complete_rate, success_rate, success_rate_strict, avg_return, turns, \
             avg_actions, task_success, book_acts, inform_acts, request_acts, \
             select_acts, offer_acts, recommend_acts = \
             evaluate_distributed(sess, list(range(1000, 1000 + conf['model']['num_eval_dialogues'])),
                                  conf['model']['process_num'], goals)
-        total_acts = book_acts + inform_acts + request_acts + select_acts + offer_acts + recommend_acts
+        total_acts = book_acts + inform_acts + request_acts + \
+            select_acts + offer_acts + recommend_acts

     task_success_gathered = {}
     for task_dict in task_success:
@@ -196,12 +199,18 @@ def eval_policy(conf, policy_sys, env, sess, save_eval, log_save_path, single_do
     policy_sys.is_train = True

-    mean_complete, err_complete = np.average(complete_rate), np.std(complete_rate) / np.sqrt(len(complete_rate))
-    mean_success, err_success = np.average(success_rate), np.std(success_rate) / np.sqrt(len(success_rate))
-    mean_success_strict, err_success_strict = np.average(success_rate_strict), np.std(success_rate_strict) / np.sqrt(len(success_rate_strict))
-    mean_return, err_return = np.average(avg_return), np.std(avg_return) / np.sqrt(len(avg_return))
-    mean_turns, err_turns = np.average(turns), np.std(turns) / np.sqrt(len(turns))
-    mean_actions, err_actions = np.average(avg_actions), np.std(avg_actions) / np.sqrt(len(avg_actions))
+    mean_complete, err_complete = np.average(complete_rate), np.std(
+        complete_rate) / np.sqrt(len(complete_rate))
+    mean_success, err_success = np.average(success_rate), np.std(
+        success_rate) / np.sqrt(len(success_rate))
+    mean_success_strict, err_success_strict = np.average(success_rate_strict), np.std(
+        success_rate_strict) / np.sqrt(len(success_rate_strict))
+    mean_return, err_return = np.average(avg_return), np.std(
+        avg_return) / np.sqrt(len(avg_return))
+    mean_turns, err_turns = np.average(
+        turns), np.std(turns) / np.sqrt(len(turns))
+    mean_actions, err_actions = np.average(avg_actions), np.std(
+        avg_actions) / np.sqrt(len(avg_actions))

     logging.info(f"Complete: {mean_complete}+-{round(err_complete, 2)}, "
                  f"Success: {mean_success}+-{round(err_success, 2)}, "
@@ -380,7 +389,6 @@ def evaluate(sess, num_dialogues=400, sys_semantic_to_usr=False, save_flag=False
             complete = sess.evaluator.complete
             task_succ = sess.evaluator.success
             task_succ_strict = sess.evaluator.success_strict
-            break
         else:
             complete = 0
             task_succ = 0
@@ -423,12 +431,12 @@ def evaluate(sess, num_dialogues=400, sys_semantic_to_usr=False, save_flag=False
     save_file.close()
     # save dialogue_info and clear mem

-    return np.average(task_success['All_user_sim']), np.average(task_success['All_evaluator']), \
-        np.average(task_success['All_evaluator_strict']), np.average(task_success['total_return']), \
-        np.average(task_success['turns']), np.average(task_success['avg_actions']), task_success, \
+    return task_success['All_user_sim'], task_success['All_evaluator'], task_success['All_evaluator_strict'], \
+        task_success['total_return'], task_success['turns'], task_success['avg_actions'], task_success, \
         np.average(task_success['total_booking_acts']), np.average(task_success['total_inform_acts']), \
         np.average(task_success['total_request_acts']), np.average(task_success['total_select_acts']), \
-        np.average(task_success['total_offer_acts']), np.average(task_success['total_recommend_acts'])
+        np.average(task_success['total_offer_acts']), np.average(
+            task_success['total_recommend_acts'])

 def model_downloader(download_dir, model_path):
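Note the contract change: evaluate() now returns the raw per-dialogue sequences for the first six metrics instead of their averages, which is exactly what lets eval_policy above compute both a mean and a standard error. A sketch of the caller's side, with the argument list abbreviated relative to the real call:

    # The first six values are now per-dialogue sequences, not scalars.
    complete_rate, success_rate, success_rate_strict, avg_return, turns, \
        avg_actions, task_success, *act_counts = evaluate(
            sess, num_dialogues=conf['model']['num_eval_dialogues'])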
@@ -622,7 +630,8 @@ def get_config(filepath, args) -> dict:
             cls_path = infos.get('class_path', '')
             cls = map_class(cls_path)
             conf[unit + '_class'] = cls
-            conf[unit + '_activated'] = conf[unit + '_class'](**conf[unit][model]['ini_params'])
+            conf[unit + '_activated'] = conf[unit +
+                                             '_class'](**conf[unit][model]['ini_params'])
             print("Loaded " + model + " for " + unit)
     return conf
...
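For context, map_class resolves the dotted class_path string from the config into a class object, which get_config then instantiates with ini_params. A sketch of what such a resolver typically looks like; the real map_class in custom_util.py may differ in detail:

    import importlib

    def map_class(cls_path: str):
        """Resolve a dotted path such as 'package.module.ClassName' to a class.
        Sketch only; the actual map_class may handle errors differently."""
        module_path, _, cls_name = cls_path.rpartition('.')
        module = importlib.import_module(module_path)
        return getattr(module, cls_name)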