diff --git a/.gitignore b/.gitignore
index 26674a5fd9f614722dc094f742feb0f91bb0ed3e..241b431f02ef837d258e064b1d91a2605e6d59d3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,9 @@ data/schema/dstc8-schema-guided-dialogue-master
 data/mdbt/data
 data/mdbt/models
 data/mdbt/word-vectors
+convlab2/**/data
+convlab2/**/output
+convlab2/**/cache
 convlab2/nlg/sclstm/**/resource/*
 convlab2/nlg/sclstm/**/resource_usr/*
 convlab2/nlg/sclstm/**/sclstm.pt
diff --git a/convlab2/base_models/bert/create_data.py b/convlab2/base_models/bert/create_data.py
index 825c736d2259552017166c93db168069fe0a6976..fcaad6d711a1d20a9fd94c70b994696e001ba2c7 100644
--- a/convlab2/base_models/bert/create_data.py
+++ b/convlab2/base_models/bert/create_data.py
@@ -4,8 +4,10 @@ from tqdm import tqdm
 from convlab2.util import load_dataset, load_nlu_data, load_dst_data, load_policy_data, load_nlg_data, load_e2e_data, load_rg_data
 from nltk.tokenize import TreebankWordTokenizer, PunktSentenceTokenizer
 from collections import Counter
+import json_lines
+from convlab2.util.unified_datasets_util import create_delex_data
 
-def create_bio_data(dataset, data_dir):
+def create_bio_data(dataset, data_dir, args):
     data_by_split = load_nlu_data(dataset, speaker='all')
     os.makedirs(data_dir, exist_ok=True)
 
@@ -47,7 +49,7 @@ def create_bio_data(dataset, data_dir):
         f.writelines(data)
     print('num of spans in utterances', cnt)
 
-def create_dialogBIO_data(dataset, data_dir):
+def create_dialogBIO_data(dataset, data_dir, args):
     data_by_split = load_nlu_data(dataset, split_to_turn=False)
     os.makedirs(data_dir, exist_ok=True)
 
@@ -94,16 +96,83 @@ def create_dialogBIO_data(dataset, data_dir):
         f.writelines(data)
     print('num of spans in utterances', cnt)
 
+def create_revert_dialogBIO_data(dataset, data_dir, args):
+    def tag2da(tokens, tags):
+        assert len(tokens)==len(tags)
+        triples = []
+        i = 0
+        utt = ''
+        while i < len(tags):
+            tag = tags[i]
+            if tag == 'B':
+                value = tokens[i]
+                j = i + 1
+                while j < len(tags):
+                    next_tag = tags[j]
+                    if next_tag == 'I':
+                        value += ' ' + tokens[j]
+                        i += 1
+                        j += 1
+                    else:
+                        break
+                triples.append({'intent':'', 'domain':'', 'slot':'', 'value': value, 'start': len(utt), 'end': len(utt)+len(value)})
+                utt += value + ' '
+                assert utt[triples[-1]['start']:triples[-1]['end']] == value, print(utt[triples[-1]['start']:triples[-1]['end']],triples[-1])
+            else:
+                utt += tokens[i] + ' '
+            i += 1
+        utt = utt[:-1]
+        assert utt == ' '.join(tokens), print(utt, '\n', ' '.join(tokens))
+        return triples
+
+    def dialog2turn(tokens, labels):
+        turns = []
+        turn = {'tokens': [], 'tags': []}
+        i = 0
+        while i < len(tokens):
+            token = tokens[i]
+            if i < len(tokens) - 1 and token in ['user', 'system'] and tokens[i+1] == ':':
+                turns.append(turn)
+                turn = {'tokens': [], 'tags': []}
+                i += 2
+                continue
+            turn['tokens'].append(token)
+            turn['tags'].append(labels[i])
+            i += 1
+        turns.pop(0)
+        for turn in turns:
+            da = {'binary': [], 'categorical': [], 'non-categorical': []}
+            da['non-categorical'] = tag2da(turn['tokens'], turn['tags'])
+            turn['utterance'] = ' '.join(turn['tokens'])
+            turn['dialogue_acts'] = da
+        return turns
+
+    for data_split in dataset:
+        infer_output_data_path = os.path.join(args.infer_data_dir, f'{data_split}.json')
+        for original_dial, bio_dial in zip(dataset[data_split], json_lines.reader(open(infer_output_data_path))):
+            bio_turns = dialog2turn(bio_dial['tokens'], bio_dial['labels'])
+            original_dial['turns'] = original_dial['turns'][:len(bio_turns)]
+            assert len(bio_turns) == len(original_dial['turns']), print(len(bio_turns), len(original_dial['turns']))
+            for ori_turn, new_turn in zip(original_dial['turns'], bio_turns):
+                ori_turn['original_utterance'] = ori_turn['utterance']
+                ori_turn['utterance'] = new_turn['utterance']
+                ori_turn['original_dialogue_acts'] = ori_turn['dialogue_acts']
+                ori_turn['dialogue_acts'] = new_turn['dialogue_acts']
+    dataset, _ = create_delex_data(dataset, delex_func=lambda d,s,v: f'<v>{v}</v>')
+    os.makedirs(data_dir, exist_ok=True)
+    json.dump(dataset, open(os.path.join(data_dir, 'data.json'), 'w', encoding='utf-8'), ensure_ascii=False, indent=2)
+
 if __name__ == '__main__':
     from argparse import ArgumentParser
     parser = ArgumentParser(description="create data for seq2seq training")
-    parser.add_argument('--tasks', metavar='task_name', nargs='*', choices=['bio', 'dialogBIO'], help='names of tasks')
+    parser.add_argument('--tasks', metavar='task_name', nargs='*', choices=['bio', 'dialogBIO', 'revert_dialogBIO'], help='names of tasks')
     parser.add_argument('--datasets', metavar='dataset_name', nargs='*', help='names of unified datasets')
     parser.add_argument('--save_dir', metavar='save_directory', type=str, default='data', help='directory to save the data, default: data/$task_name/$dataset_name')
+    parser.add_argument('--infer_data_dir', metavar='infer_data_dir', type=str, default=None, help='directory of inference output data, default: None')
     args = parser.parse_args()
     print(args)
     for dataset_name in tqdm(args.datasets, desc='datasets'):
         dataset = load_dataset(dataset_name)
         for task_name in tqdm(args.tasks, desc='tasks', leave=False):
             data_dir = os.path.join(args.save_dir, task_name, dataset_name)
-            eval(f"create_{task_name}_data")(dataset, data_dir)
+            eval(f"create_{task_name}_data")(dataset, data_dir, args)
diff --git a/convlab2/base_models/bert/infer_bio.sh b/convlab2/base_models/bert/infer_bio.sh
index ed784c515c6703088313da0809b7c0442bcec333..c07f55375dac3f589845781a5cf91ba7b140557c 100644
--- a/convlab2/base_models/bert/infer_bio.sh
+++ b/convlab2/base_models/bert/infer_bio.sh
@@ -1,5 +1,5 @@
 set -e
-n_gpus=3
+n_gpus=2
 task_name="dialogBIO"
 dataset_name="multiwoz21"
 data_dir="data/${task_name}/${dataset_name}"
@@ -8,14 +8,14 @@ cache_dir="cache"
 logging_dir="${output_dir}/runs"
 source_column="tokens"
 target_column="labels"
-model_name_or_path="output/dialogBIO/sgd"
-per_device_eval_batch_size=16
+model_name_or_path="output/dialogBIO/sgd+tm1+tm2+tm3"
+per_device_eval_batch_size=32
 
 python create_data.py --tasks ${task_name} --datasets ${dataset_name} --save_dir "data"
 
 for split in test validation train
 do
-    python -m torch.distributed.launch \
+    CUDA_VISIBLE_DEVICES=1,2 python -m torch.distributed.launch \
         --nproc_per_node ${n_gpus} run_token_classification.py \
         --task_name ${task_name} \
         --train_file ${data_dir}/${split}.json \
diff --git a/convlab2/base_models/bert/run_token_classification.py b/convlab2/base_models/bert/run_token_classification.py
index c97fc60aa49a50d42a8470522d2dfaa09227b2ce..19c581efb3f423863dd46e51c39dc73dee9ce3a4 100644
--- a/convlab2/base_models/bert/run_token_classification.py
+++ b/convlab2/base_models/bert/run_token_classification.py
@@ -135,7 +135,7 @@ class DataTrainingArguments:
         metadata={"help": "The number of processes to use for the preprocessing."},
     )
     max_seq_length: int = field(
-        default=None,
+        default=512,
         metadata={
             "help": "The maximum total input sequence length after tokenization. If set, sequences longer "
             "than this will be truncated, sequences shorter will be padded."
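For reference, the span recovery in create_revert_dialogBIO_data above is standard BIO decoding: a 'B' tag opens a span and consecutive 'I' tags extend it. A minimal standalone sketch with toy inputs (not part of the patch):

# BIO decoding as in tag2da: 'B' opens a span, following 'I' tags extend it.
def decode_bio(tokens, tags):
    spans, i = [], 0
    while i < len(tags):
        if tags[i] == 'B':
            j = i + 1
            while j < len(tags) and tags[j] == 'I':
                j += 1
            spans.append(' '.join(tokens[i:j]))
            i = j
        else:
            i += 1
    return spans

print(decode_bio(['i', 'want', 'a', 'cheap', 'hotel'],
                 ['O', 'O', 'O', 'B', 'I']))  # ['cheap hotel']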
diff --git a/convlab2/base_models/bert/train_bio.sh b/convlab2/base_models/bert/train_bio.sh
index db2ee860d2464c57dfb20d57a54ea5b34cda85b1..59973634fe819c0c2d8b8c7fb6b2b79d420ecc2d 100644
--- a/convlab2/base_models/bert/train_bio.sh
+++ b/convlab2/base_models/bert/train_bio.sh
@@ -1,6 +1,7 @@
-n_gpus=3
+set -e
+n_gpus=2
 task_name="dialogBIO"
-dataset_name="sgd"
+dataset_name="sgd+tm1+tm2+tm3"
 data_dir="data/${task_name}/${dataset_name}"
 output_dir="output/${task_name}/${dataset_name}"
 cache_dir="cache"
@@ -18,9 +19,21 @@ lr=2e-5
 num_train_epochs=1
 metric_for_best_model="f1"
 
-python create_data.py --tasks ${task_name} --datasets ${dataset_name} --save_dir "data"
+names=$(echo ${dataset_name} | tr "+" "\n")
+mkdir -p ${data_dir}
+for name in ${names};
+do
+    echo "preprocessing ${name}"
+    python create_data.py --tasks ${task_name} --datasets ${name} --save_dir "data"
+    if [ "${name}" != "${dataset_name}" ]; then
+        cat "data/${task_name}/${name}/train.json" >> ${train_file}
+        cat "data/${task_name}/${name}/validation.json" >> ${validation_file}
+        cat "data/${task_name}/${name}/test.json" >> ${test_file}
+    fi
+done
 
-python -m torch.distributed.launch \
+
+CUDA_VISIBLE_DEVICES=1,2 python -m torch.distributed.launch \
     --nproc_per_node ${n_gpus} run_token_classification.py \
     --task_name ${task_name} \
     --train_file ${train_file} \
@@ -39,7 +52,6 @@ python -m torch.distributed.launch \
     --cache_dir ${cache_dir} \
     --output_dir ${output_dir} \
     --logging_dir ${logging_dir} \
-    --overwrite_output_dir \
     --preprocessing_num_workers 4 \
     --per_device_train_batch_size ${per_device_train_batch_size} \
     --per_device_eval_batch_size ${per_device_eval_batch_size} \
diff --git a/convlab2/base_models/gpt/keyword_extraction/gen_pretraining_data.py b/convlab2/base_models/gpt/keyword_extraction/gen_pretraining_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f9c841257387a293866b6d0727900d626c8047f
--- /dev/null
+++ b/convlab2/base_models/gpt/keyword_extraction/gen_pretraining_data.py
@@ -0,0 +1,49 @@
+import json
+import os
+import random
+from tqdm import tqdm
+
+def main(args):
+    random.seed(45)
+    os.makedirs(args.output_dir, exist_ok=True)
+    filenames = [f for (_, _, fs) in os.walk(args.input_dir) for f in fs if 'keywords' in f]
+    for filename in filenames:
+        data = json.load(open(os.path.join(args.input_dir, filename)))
+        fout = open(os.path.join(args.output_dir, f"{filename.split('/')[-1].split('_')[1]}.json"), 'w', encoding='utf-8')
+        turn_keywords = [turn['keywords'] for dial in data for turn in dial]
+        random.shuffle(turn_keywords)
+        cnt = 0
+        # keywords_set = {keyword for keywords in turn_keywords_set for keyword in keywords}
+        for dial in tqdm(data):
+            context = []
+            for i, turn in enumerate(dial):
+                speaker = 'user' if i%2 == 0 else 'system'
+                random.shuffle(turn['keywords'])
+                keywords = ' | '.join(turn['keywords'])
+                utt = turn['utterance']
+                context_seq = '\n'.join([f"{turn['speaker']}: {turn['utt']}" for turn in context]+[f'{speaker}: '])
+                input_seq = f'keywords: {keywords}\n\ncontext: {context_seq}'
+                context.append({'speaker': speaker, 'utt':utt})
+                fout.write(json.dumps({'keywords+context': input_seq, 'response': utt}, ensure_ascii=False)+'\n')
+
+                # min_neg = len(turn['keywords'])
+                # max_neg = 4 * min_neg
+                # negative_keywords = random.sample(keywords_set, random.randint(min_neg, max_neg))
+                # negative_keywords = random.sample(turn_keywords_set, 1)[0]
+                negative_keywords = turn_keywords[cnt]
+                cnt += 1
+                possible_keywords = turn['keywords'] + list(negative_keywords)
+                random.shuffle(possible_keywords)
+                possible_keywords = ' | '.join(possible_keywords)
+                input_seq = f'possible keywords: {possible_keywords}\n\ncontext: {context_seq}'
+                fout.write(json.dumps({'keywords+context': input_seq, 'response': utt}, ensure_ascii=False)+'\n')
+
+
+if __name__ == '__main__':
+    from argparse import ArgumentParser
+    parser = ArgumentParser(description="generate keywords-to-response pretraining data")
+    parser.add_argument('--input_dir', '-i', type=str, help='path to the input files')
+    parser.add_argument('--output_dir', '-o', type=str, help='path to the output files')
+    args = parser.parse_args()
+    print(args)
+    main(args)
diff --git a/convlab2/base_models/gpt/keyword_extraction/gen_pretraining_data.sh b/convlab2/base_models/gpt/keyword_extraction/gen_pretraining_data.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8a4290c3f2cd4fcae432d7fb94e4da25ec4ec727
--- /dev/null
+++ b/convlab2/base_models/gpt/keyword_extraction/gen_pretraining_data.sh
@@ -0,0 +1,19 @@
+dataset_name="metalwoz+sgd+tm1+tm2+tm3"
+names=$(echo ${dataset_name} | tr "+" "\n")
+model_type="gpt"
+data_dir=data/key2gen_shuffle_noisy/${model_type}/${dataset_name}
+mkdir -p ${data_dir}
+train_file="${data_dir}/train.json"
+validation_file="${data_dir}/validation.json"
+test_file="${data_dir}/test.json"
+for name in ${names}
+do
+    echo "preprocessing ${name}"
+    python gen_pretraining_data.py -i data/lm/${name}/${model_type} -o data/key2gen_shuffle_noisy/${model_type}/${name}
+    if [ "${name}" != "${dataset_name}" ]; then
+        cat "data/key2gen_shuffle_noisy/gpt/${name}/train.json" >> ${train_file}
+        cat "data/key2gen_shuffle_noisy/gpt/${name}/validation.json" >> ${validation_file}
+        cat "data/key2gen_shuffle_noisy/gpt/${name}/test.json" >> ${test_file}
+    fi
+done
+python gen_pretraining_data.py -i data/lm/multiwoz21/${model_type} -o data/key2gen_shuffle_noisy/${model_type}/multiwoz21
\ No newline at end of file
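For reference, gen_pretraining_data.py above emits two JSON-lines samples per turn: one keyed on the gold keywords and one "noisy" variant whose keyword list is padded with keywords borrowed from a random other turn. A sketch of both records with toy values (field names match the script's output; not part of the patch):

import json

keywords = ['leave', '17:15']                      # gold keywords for this turn
noise = ['blue', 'honda']                          # keywords from a random other turn
context_seq = 'user: I need a taxi.\nsystem: '
response = 'What time do you want to leave?'

clean = {'keywords+context': f"keywords: {' | '.join(keywords)}\n\ncontext: {context_seq}",
         'response': response}
noisy = {'keywords+context': f"possible keywords: {' | '.join(keywords + noise)}\n\ncontext: {context_seq}",
         'response': response}                     # the real script shuffles the keyword list
for sample in (clean, noisy):
    print(json.dumps(sample, ensure_ascii=False))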
diff --git a/convlab2/base_models/gpt/keyword_extraction/get_keywords.sh b/convlab2/base_models/gpt/keyword_extraction/get_keywords.sh
index 6dd2680bc3c4390cf2d85cff46d7000c5293ef70..cffa944b0374cff67abe223b4c2ea252ebd889f4 100644
--- a/convlab2/base_models/gpt/keyword_extraction/get_keywords.sh
+++ b/convlab2/base_models/gpt/keyword_extraction/get_keywords.sh
@@ -1,20 +1,23 @@
-model_type=dialogpt
-dataset_name=multiwoz21
-model_name=dialogpt-large
-data_dir="data/lm/${dataset_name}/${model_type}"
-word_loss_file="${data_dir}/${model_name}_${dataset_name}_word_loss.json"
-keywords_num=5
-keywords_ratio=1
-keywords_th=0
+task_name="lm"
+dataset_name=$1
+model_type="gpt"
+data_dir="data/${task_name}/${dataset_name}/${model_type}"
+model_name_or_path="gpt2-large"
+keywords_num=100
+keywords_ratio=0.3
+keywords_th_ratio=0
 stopwords=True
-output_file="${data_dir}/${dataset_name}_keywords_${model_name}_topk_${keywords_num}_ratio_${keywords_ratio}_th_${keywords_th}_stopwords_${stopwords}.json"
+for data_split in validation test train
+do
+    word_loss_file="${data_dir}/${model_name_or_path}_${dataset_name}_${data_split}_word_loss.json"
+    output_file="${data_dir}/${dataset_name}_${data_split}_keywords_${model_name_or_path}_topk_${keywords_num}_ratio_${keywords_ratio}_th_${keywords_th_ratio}_stopwords_${stopwords}.json"
 
-python lmloss2keywords.py \
-    --model_type ${model_type} \
-    --word_loss_file ${word_loss_file} \
-    --keywords_num ${keywords_num} \
-    --keywords_ratio ${keywords_ratio} \
-    --keywords_th ${keywords_th} \
-    --stopwords ${stopwords} \
-    --output_file ${output_file}
-    \
\ No newline at end of file
+    python lmloss2keywords.py \
+        --model_type ${model_type} \
+        --word_loss_file ${word_loss_file} \
+        --keywords_num ${keywords_num} \
+        --keywords_ratio ${keywords_ratio} \
+        --keywords_th_ratio ${keywords_th_ratio} \
+        --stopwords ${stopwords} \
+        --output_file ${output_file}
+done
\ No newline at end of file
diff --git a/convlab2/base_models/gpt/keyword_extraction/get_word_loss.sh b/convlab2/base_models/gpt/keyword_extraction/get_word_loss.sh
index 2aad467cf181c08532505a1523af746e52aacb4a..e0b8c1499ade1faa90ea26cde1aa988b06ed84d6 100644
--- a/convlab2/base_models/gpt/keyword_extraction/get_word_loss.sh
+++ b/convlab2/base_models/gpt/keyword_extraction/get_word_loss.sh
@@ -1,65 +1,33 @@
 set -e
 n_gpus=1
 task_name="lm"
-dataset_name="multiwoz21"
-model_type="dialogpt"
+dataset_name=$1
+model_type="gpt"
 data_dir="data/${task_name}/${dataset_name}/${model_type}"
 output_dir="output/${task_name}/${dataset_name}/${model_type}"
 cache_dir="../cache"
 validation_file="${data_dir}/validation.json"
 source_column="dialogue"
 max_length=512
-model_name_or_path="microsoft/DialoGPT-large"
-per_device_eval_batch_size=4
-
-dump_eval_loss_to="${data_dir}/dialogpt-large_${dataset_name}_token_loss.json"
-python ../create_data.py --tasks ${task_name} --datasets ${dataset_name} --model_type dialogpt
-python ../run_clm.py \
-    --dump_eval_loss_to ${dump_eval_loss_to}\
-    --model_name_or_path ${model_name_or_path} \
-    --output_dir ${data_dir} \
-    --validation_file ${validation_file} \
-    --source_column ${source_column} \
-    --max_length ${max_length} \
-    --do_eval \
-    --prediction_loss_only \
-    --cache_dir ${cache_dir} \
-    --preprocessing_num_workers 4 \
-    --per_device_eval_batch_size ${per_device_eval_batch_size}
-python lmloss2keywords.py --token_loss_file ${dump_eval_loss_to} --model_type ${model_type}
-
-dump_eval_loss_to="${data_dir}/dialogpt-large-mwoz_${dataset_name}_token_loss.json"
-python ../create_data.py --tasks ${task_name} --datasets ${dataset_name} --model_type dialogpt
-python ../run_clm.py \
-    --dump_eval_loss_to ${dump_eval_loss_to}\
-    --model_name_or_path ${output_dir} \
-    --output_dir ${data_dir} \
-    --validation_file ${validation_file} \
-    --source_column ${source_column} \
-    --max_length ${max_length} \
-    --do_eval \
-    --prediction_loss_only \
-    --cache_dir ${cache_dir} \
-    --preprocessing_num_workers 4 \
-    --per_device_eval_batch_size ${per_device_eval_batch_size}
-python lmloss2keywords.py --token_loss_file ${dump_eval_loss_to} --model_type ${model_type}
-
-model_type="gpt"
-data_dir="data/${task_name}/${dataset_name}/${model_type}"
-validation_file="${data_dir}/validation.json"
 model_name_or_path="gpt2-large"
-dump_eval_loss_to="${data_dir}/gpt2-large_${dataset_name}_token_loss.json"
-python ../create_data.py --tasks ${task_name} --datasets ${dataset_name} --model_type gpt
-python ../run_clm.py \
-    --dump_eval_loss_to ${dump_eval_loss_to}\
-    --model_name_or_path ${model_name_or_path} \
-    --output_dir ${data_dir} \
-    --validation_file ${validation_file} \
-    --source_column ${source_column} \
-    --max_length ${max_length} \
-    --do_eval \
-    --prediction_loss_only \
-    --cache_dir ${cache_dir} \
-    --preprocessing_num_workers 4 \
-    --per_device_eval_batch_size ${per_device_eval_batch_size}
-python lmloss2keywords.py --token_loss_file ${dump_eval_loss_to} --model_type ${model_type}
+per_device_eval_batch_size=16
+
+python ../create_data.py --tasks ${task_name} --datasets ${dataset_name} --model_type ${model_type}
+for data_split in validation test train
+do
+    validation_file="${data_dir}/${data_split}.json"
+    dump_eval_loss_to="${data_dir}/${model_name_or_path}_${dataset_name}_${data_split}_token_loss.json"
+    python ../run_clm.py \
+        --dump_eval_loss_to ${dump_eval_loss_to}\
+        --model_name_or_path ${model_name_or_path} \
+        --output_dir ${data_dir} \
+        --validation_file ${validation_file} \
+        --source_column ${source_column} \
+        --max_length ${max_length} \
+        --do_eval \
+        --prediction_loss_only \
+        --cache_dir ${cache_dir} \
+        --preprocessing_num_workers 4 \
+        --per_device_eval_batch_size ${per_device_eval_batch_size}
+    python lmloss2keywords.py --token_loss_file ${dump_eval_loss_to} --model_type ${model_type}
+done
diff --git a/convlab2/base_models/gpt/keyword_extraction/lmloss2keywords.py b/convlab2/base_models/gpt/keyword_extraction/lmloss2keywords.py
index 307d57edf4d09c8a72968f35051d451afe21bc64..b0e14c86f58baaca8af6e246cef4c58eddde6447 100644
--- a/convlab2/base_models/gpt/keyword_extraction/lmloss2keywords.py
+++ b/convlab2/base_models/gpt/keyword_extraction/lmloss2keywords.py
@@ -80,8 +80,36 @@ def main(args):
 
     stop_words = set(stopwords.words('english'))
 
+    if args.keywords_th_ratio > 0:
+        losses = [loss for x in word_loss_list for word, loss in zip(x['words'], x['losses']) if not any([w.lower() in stop_words for w in word_tokenize(word)])]
+        loss_th = sorted(losses, reverse=True)[round(args.keywords_th_ratio*len(losses))]
+        print(f'loss th for top {args.keywords_th_ratio*100}%: {loss_th}')
+    else:
+        loss_th = 0
+
+    def keywords_filter(word_loss_pairs):
+        candidate_indexes = []
+        for i, word_loss_pair in enumerate(word_loss_pairs):
+            if args.stopwords and any([w.lower() in stop_words for w in word_tokenize(word_loss_pair[0])]):
+                continue
+            if word_loss_pair[1] <= loss_th:
+                continue
+            candidate_indexes.append(i)
+
+        topk = min(round(args.keywords_ratio*len(word_loss_pairs)), args.keywords_num)
+        topk_indexes = sorted(candidate_indexes, key=lambda x: word_loss_pairs[x][1], reverse=True)[:topk]
+        topk_indexes = sorted(topk_indexes)
+        keywords = []
+        for i, index in enumerate(topk_indexes):
+            if i > 0 and index == topk_indexes[i-1] + 1:
+                keywords[-1] += ' '+word_loss_pairs[index][0]
+            else:
+                keywords.append(word_loss_pairs[index][0])
+
+        return keywords
+
     dialogs = []
-    for item in word_loss_list:
+    for item in tqdm(word_loss_list):
         words = item['words']
         losses = item['losses']
         turns = []
@@ -90,11 +118,9 @@ def main(args):
             if word == '<|endoftext|>':
                 # switch turn
                 turn['utterance'] = ' '.join(turn['words'])
-                turn['keywords'] = list(zip(turn['words'], turn['losses']))
-                if args.stopwords:
-                    turn['keywords'] = [x for x in turn['keywords'] if not any([w.lower() in stop_words for w in word_tokenize(x[0])])]
-                turn['keywords'] = sorted(turn['keywords'], key=lambda x: x[1], reverse=True)
-                turn['keywords'] = [x for x in turn['keywords'] if x[1] > args.keywords_th][:min(round(args.keywords_ratio*len(turn['keywords'])), args.keywords_num)]
+                keywords = keywords_filter(list(zip(turn['words'], turn['losses'])))
+                turn['keywords'] = keywords
+                # turn['keywords'] = ' | '.join([x[0] for x in keywords])
                 turn.pop('words')
                 turn.pop('losses')
                 turns.append(turn)
@@ -116,7 +142,7 @@ if __name__ == '__main__':
    parser.add_argument('--output_file', '-o', type=str, help='path to the output file')
    parser.add_argument('--keywords_num', '-n', type=int, default=100, help='how many words in an utterance serve as keywords')
    parser.add_argument('--keywords_ratio', '-r', type=float, default=1.0, help='how many words (in ratio) in an utterance serve as keywords')
-    parser.add_argument('--keywords_th', '-th', type=float, default=0., help='loss threshold for the keywords')
+    parser.add_argument('--keywords_th_ratio', '-th', type=float, default=0., help='loss threshold for the keywords, ratio of all word losses')
    parser.add_argument('--stopwords', '-s', type=lambda x: bool(eval(x)), default=True, help='filter out stopwords')
    args = parser.parse_args()
    print(args)
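For reference, keywords_filter above ranks words by language-model loss, drops stopwords and low-loss words, keeps the top-k, and merges picks that are adjacent in the utterance into multi-word keywords. A toy sketch of the ranking-and-merging core (stopword/threshold checks omitted; not part of the patch):

def filter_keywords(word_loss_pairs, k):
    # indexes of the k highest-loss words, restored to utterance order
    topk = sorted(sorted(range(len(word_loss_pairs)),
                         key=lambda i: word_loss_pairs[i][1], reverse=True)[:k])
    keywords = []
    for i, index in enumerate(topk):
        if i > 0 and index == topk[i - 1] + 1:     # adjacent picks merge into a phrase
            keywords[-1] += ' ' + word_loss_pairs[index][0]
        else:
            keywords.append(word_loss_pairs[index][0])
    return keywords

pairs = [('i', 0.1), ('want', 0.3), ('cheap', 2.1), ('thai', 2.5), ('food', 0.9)]
print(filter_keywords(pairs, 2))  # ['cheap thai']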
diff --git a/convlab2/base_models/gpt/keyword_extraction/test_t5_key2gen.sh b/convlab2/base_models/gpt/keyword_extraction/test_t5_key2gen.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9a274f8fe344efe3125920903bc688e8aeb7c38e
--- /dev/null
+++ b/convlab2/base_models/gpt/keyword_extraction/test_t5_key2gen.sh
@@ -0,0 +1,43 @@
+set -e
+n_gpus=1
+task_name="key2gen"
+dataset_name="multiwoz21"
+speaker="all"
+model_type="gpt"
+data_dir="data/${task_name}/${model_type}/${dataset_name}"
+output_dir="output/${task_name}/${model_type}/${dataset_name}"
+cache_dir="../../t5/cache"
+logging_dir="${output_dir}/runs"
+train_file="${data_dir}/train.json"
+validation_file="${data_dir}/validation.json"
+test_file="${data_dir}/test.json"
+source_column="keywords+context"
+target_column="response"
+truncation_side="left"
+max_source_length=512
+max_target_length=128
+model_name_or_path="output/key2gen/gpt/metalwoz+sgd+tm1+tm2+tm3"
+per_device_train_batch_size=128
+per_device_eval_batch_size=128
+gradient_accumulation_steps=4
+lr=1e-3
+num_train_epochs=1
+
+python -m torch.distributed.launch \
+    --nproc_per_node ${n_gpus} ../../t5/run_seq2seq.py \
+    --task_name ${task_name} \
+    --test_file ${test_file} \
+    --source_column ${source_column} \
+    --target_column ${target_column} \
+    --max_source_length ${max_source_length} \
+    --max_target_length ${max_target_length} \
+    --truncation_side ${truncation_side} \
+    --model_name_or_path ${model_name_or_path} \
+    --do_predict \
+    --predict_with_generate \
+    --cache_dir ${cache_dir} \
+    --output_dir ${output_dir} \
+    --logging_dir ${logging_dir} \
+    --overwrite_output_dir \
+    --preprocessing_num_workers 4 \
+    --per_device_eval_batch_size ${per_device_eval_batch_size}
diff --git a/convlab2/base_models/gpt/keyword_extraction/train_lm.sh b/convlab2/base_models/gpt/keyword_extraction/train_lm_dialogpt.sh
similarity index 97%
rename from convlab2/base_models/gpt/keyword_extraction/train_lm.sh
rename to convlab2/base_models/gpt/keyword_extraction/train_lm_dialogpt.sh
index 4ae47c3296e5ca7150cbbffcb7a7d247973613de..303ecb3e0c660a13e190b193c5b1769fbe70812d 100644
--- a/convlab2/base_models/gpt/keyword_extraction/train_lm.sh
+++ b/convlab2/base_models/gpt/keyword_extraction/train_lm_dialogpt.sh
@@ -19,7 +19,7 @@
 lr=5e-5
 num_train_epochs=3
 
-python ../create_data.py --tasks ${task_name} --datasets ${dataset_name} --model_type dialogpt
+python ../create_data.py --tasks ${task_name} --datasets ${dataset_name} --model_type ${model_type}
 
 python ../run_clm.py \
     --model_name_or_path ${model_name_or_path} \
diff --git a/convlab2/base_models/gpt/keyword_extraction/train_lm_gpt.sh b/convlab2/base_models/gpt/keyword_extraction/train_lm_gpt.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fb510c880b25505e83780eeab76760e30dbccf9d
--- /dev/null
+++ b/convlab2/base_models/gpt/keyword_extraction/train_lm_gpt.sh
@@ -0,0 +1,47 @@
+set -e
+n_gpus=1
+task_name="lm"
+dataset_name="multiwoz21"
+model_type="gpt"
+data_dir="data/${task_name}/${dataset_name}/${model_type}"
+output_dir="output/${task_name}/${dataset_name}/${model_type}"
+cache_dir="../cache"
+logging_dir="${output_dir}/runs"
+train_file="${data_dir}/train.json"
+validation_file="${data_dir}/validation.json"
+test_file="${data_dir}/test.json"
+source_column="dialogue"
+max_length=512
+model_name_or_path="gpt2-large"
+per_device_train_batch_size=16
+per_device_eval_batch_size=16
+gradient_accumulation_steps=4
+lr=5e-5
+num_train_epochs=3
+
+python ../create_data.py --tasks ${task_name} --datasets ${dataset_name} --model_type ${model_type}
+
+python ../run_clm.py \
+    --model_name_or_path ${model_name_or_path} \
+    --train_file ${train_file} \
+    --validation_file ${validation_file} \
+    --source_column ${source_column} \
+    --max_length ${max_length} \
+    --do_train \
+    --do_eval \
+    --save_strategy epoch \
+    --evaluation_strategy epoch \
+    --load_best_model_at_end \
+    --prediction_loss_only \
+    --cache_dir ${cache_dir} \
+    --output_dir ${output_dir} \
+    --logging_dir ${logging_dir} \
+    --overwrite_output_dir \
+    --preprocessing_num_workers 4 \
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --gradient_checkpointing
diff --git a/convlab2/base_models/gpt/keyword_extraction/train_t5_key2gen.sh b/convlab2/base_models/gpt/keyword_extraction/train_t5_key2gen.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c04d68fc374c38eb27b78f0ac288d04470e98d05
--- /dev/null
+++ b/convlab2/base_models/gpt/keyword_extraction/train_t5_key2gen.sh
@@ -0,0 +1,57 @@
+set -e
+n_gpus=1
+task_name="key2gen_shuffle_noisy"
+dataset_name="metalwoz+sgd+tm1+tm2+tm3"
+speaker="all"
+model_type="gpt"
+data_dir="data/${task_name}/${model_type}/${dataset_name}"
+output_dir="output/${task_name}/${model_type}/${dataset_name}"
+cache_dir="../../t5/cache"
+logging_dir="${output_dir}/runs"
+train_file="${data_dir}/train.json"
+validation_file="${data_dir}/validation.json"
+test_file="${data_dir}/test.json"
+source_column="keywords+context"
+target_column="response"
+truncation_side="left"
+max_source_length=512
+max_target_length=128
+model_name_or_path="t5-small"
+per_device_train_batch_size=128
+per_device_eval_batch_size=128
+gradient_accumulation_steps=8
+lr=1e-3
+num_train_epochs=1
+
+python -m torch.distributed.launch \
+    --nproc_per_node ${n_gpus} ../../t5/run_seq2seq.py \
+    --task_name ${task_name} \
+    --train_file ${train_file} \
+    --validation_file ${validation_file} \
+    --test_file ${test_file} \
+    --source_column ${source_column} \
+    --target_column ${target_column} \
+    --max_source_length ${max_source_length} \
+    --max_target_length ${max_target_length} \
+    --truncation_side ${truncation_side} \
+    --model_name_or_path ${model_name_or_path} \
+    --do_train \
+    --do_eval \
+    --do_predict \
+    --save_strategy epoch \
+    --evaluation_strategy epoch \
+    --load_best_model_at_end \
+    --prediction_loss_only \
+    --cache_dir ${cache_dir} \
+    --output_dir ${output_dir} \
+    --logging_dir ${logging_dir} \
+    --overwrite_output_dir \
+    --preprocessing_num_workers 4 \
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
diff --git a/convlab2/base_models/t5/create_data.py b/convlab2/base_models/t5/create_data.py
index c2f3da96523266936e4f15ad19682de62e5da3da..305538ed039e65ba3c186369f9481d3fc38be93d 100644
--- a/convlab2/base_models/t5/create_data.py
+++ b/convlab2/base_models/t5/create_data.py
@@ -87,9 +87,9 @@ def create_nlg_data(dataset, data_dir, args):
         dialogue_acts_seq = serialize_dialogue_acts(sample['dialogue_acts'])
         if args.context_window_size>0:
             context = '\n'.join([f"{turn['speaker']}: {turn['utterance']}" for turn in sample['context']]+[f'{sample["speaker"]}: '])
-            context = f'{dialogue_acts_seq}\n{context}'
+            context = f'{dialogue_acts_seq}\n\n{context}'
         else:
-            context = f'{dialogue_acts_seq}\n{sample["speaker"]}: '
+            context = f'{dialogue_acts_seq}\n\n{sample["speaker"]}: '
         assert equal_da_seq(sample['dialogue_acts'], dialogue_acts_seq), print(sample['dialogue_acts'], dialogue_acts_seq, deserialize_dialogue_acts(dialogue_acts_seq))
         data.append(json.dumps({'context+da': context, 'response': sample['utterance']}, ensure_ascii=False)+'\n')
 
@@ -138,14 +138,19 @@ if __name__ == '__main__':
     parser.add_argument('--speaker', '-s', type=str, choices=['user', 'system', 'all'], help='speaker(s)')
     parser.add_argument('--context_window_size', '-c', type=int, default=0, help='how many contextual utterances are considered')
     parser.add_argument('--len_tokenizer', '-l', type=str, default=None, help='name or path of tokenizer that used to get seq len')
+    parser.add_argument('--ratio', '-r', type=float, default=None, help='how much data is used for training and evaluation')
+    parser.add_argument('--dial_ids_order', '-o', type=int, default=None, help='which data order is used for experiments')
     args = parser.parse_args()
     print(args)
     if args.len_tokenizer:
         tokenizer = AutoTokenizer.from_pretrained(args.len_tokenizer)
     for dataset_name in tqdm(args.datasets, desc='datasets'):
-        dataset = load_dataset(dataset_name)
+        dataset = load_dataset(dataset_name, args.dial_ids_order)
+        if args.ratio:
+            dataset['train'] = dataset['train'][:round(len(dataset['train'])*args.ratio)]
+            dataset['validation'] = dataset['validation'][:round(len(dataset['validation'])*args.ratio)]
         for task_name in tqdm(args.tasks, desc='tasks', leave=False):
-            data_dir = os.path.join('data', task_name, dataset_name)
+            data_dir = os.path.join('data', task_name, (dataset_name if not args.ratio else f'{dataset_name}_{args.ratio}_order{args.dial_ids_order}'))
             data_by_split = eval(f"create_{task_name}_data")(dataset, data_dir, args)
             if args.len_tokenizer:
                 get_max_len(data_by_split, tokenizer)
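For reference, the --ratio/--dial_ids_order flags added to create_data.py above implement few-shot subsampling: the dataset is loaded under a given dialog-ID ordering, the train/validation splits are cut to the first ratio fraction, and the ordering is recorded in the data directory name. A toy sketch (not part of the patch; the real shuffling happens inside load_dataset via its dial_ids_order argument):

ratio, dial_ids_order = 0.1, 0
dataset = {'train': list(range(100)), 'validation': list(range(20))}
dataset['train'] = dataset['train'][:round(len(dataset['train']) * ratio)]
dataset['validation'] = dataset['validation'][:round(len(dataset['validation']) * ratio)]
data_dir = f"data/nlg/multiwoz21_{ratio}_order{dial_ids_order}"
print(len(dataset['train']), len(dataset['validation']), data_dir)
# 10 2 data/nlg/multiwoz21_0.1_order0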
diff --git a/convlab2/base_models/t5/dst/dst.py b/convlab2/base_models/t5/dst/dst.py
new file mode 100755
index 0000000000000000000000000000000000000000..ed761c6f747e2bc5b2dc6db50b681bb94f5b0b9c
--- /dev/null
+++ b/convlab2/base_models/t5/dst/dst.py
@@ -0,0 +1,69 @@
+import logging
+import os
+import torch
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoConfig
+from convlab2.dst.dst import DST
+from convlab2.base_models.t5.dst.serialization import deserialize_dialogue_state
+from convlab2.util.custom_util import model_downloader
+
+
+class T5DST(DST):
+    def __init__(self, speaker, context_window_size, model_name_or_path, model_file=None, device='cuda'):
+        assert speaker in ['user', 'system']
+        assert context_window_size > 0
+        self.speaker = speaker
+        self.opponent = 'system' if speaker == 'user' else 'user'
+        self.context_window_size = context_window_size
+
+        model_dir = os.path.dirname(os.path.abspath(__file__))
+        if not os.path.exists(model_name_or_path):
+            model_downloader(model_dir, model_file)
+
+        self.config = AutoConfig.from_pretrained(model_name_or_path)
+        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
+        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path, config=self.config)
+        self.model.eval()
+        self.device = device if torch.cuda.is_available() else "cpu"
+        self.model.to(self.device)
+
+        logging.info("T5DST loaded")
+
+    def update(self, context):
+        if len(context) > 0 and type(context[0]) is list and len(context[0]) > 1:
+            context = [item[1] for item in context]
+        context = context[-self.context_window_size:]
+        input_seq = '\n'.join([f"{self.opponent if (i % 2) == (len(context) % 2) else self.speaker}: {utt}" for i, utt in enumerate(context)])
+        # print(input_seq)
+        input_seq = self.tokenizer(input_seq, return_tensors="pt").to(self.device)
+        # print(input_seq)
+        output_seq = self.model.generate(**input_seq, max_length=256)
+        # print(output_seq)
+        output_seq = self.tokenizer.decode(output_seq[0], skip_special_tokens=True)
+        # print(output_seq)
+        state = deserialize_dialogue_state(output_seq.strip())
+        return state
+
+
+if __name__ == '__main__':
+    contexts = [
+        ["I would like a taxi from Saint John's college to Pizza Hut Fen Ditton."],
+        ["I would like a taxi from Saint John's college to Pizza Hut Fen Ditton.",
+         "What time do you want to leave and what time do you want to arrive by?",
+         "I want to leave after 17:15."],
+        ["I would like a taxi from Saint John's college to Pizza Hut Fen Ditton.",
+         "What time do you want to leave and what time do you want to arrive by?",
+         "I want to leave after 17:15.",
+         "Booking completed! your taxi will be blue honda Contact number is 07218068540",
+         "Thank you for all the help! I appreciate it."],
+        ["I would like a taxi from Saint John's college to Pizza Hut Fen Ditton.",
+         "What time do you want to leave and what time do you want to arrive by?",
+         "I want to leave after 17:15.",
+         "Booking completed! your taxi will be blue honda Contact number is 07218068540",
+         "Thank you for all the help! I appreciate it.",
+         "You are welcome. Is there anything else I can help you with today?",
+         "No, I am all set. Have a nice day. Bye."],
+    ]
+    dst = T5DST(speaker='user', context_window_size=100, model_name_or_path='output/dst/multiwoz21/user/context_100')
+    for context in contexts:
+        print(dst.update(context))
+        print()
diff --git a/convlab2/base_models/t5/dst/merge_predict_res.py b/convlab2/base_models/t5/dst/merge_predict_res.py
index 0a80ee80bfc2bb3a0f39b2e369fabc41d2b2b5d0..9b942260e1e79439bfca7c52787715076fbe5143 100755
--- a/convlab2/base_models/t5/dst/merge_predict_res.py
+++ b/convlab2/base_models/t5/dst/merge_predict_res.py
@@ -6,7 +6,7 @@ from convlab2.base_models.t5.dst.serialization import deserialize_dialogue_state
 
 def merge(dataset_name, speaker, save_dir, context_window_size, predict_result):
     assert os.path.exists(predict_result)
-    dataset = load_dataset(dataset_name)
+    dataset = load_dataset(dataset_name, args.dial_ids_order)
     data = load_dst_data(dataset, data_split='test', speaker=speaker, use_context=context_window_size>0, context_window_size=context_window_size)['test']
 
     if save_dir is None:
@@ -29,6 +29,7 @@ if __name__ == '__main__':
     parser.add_argument('--save_dir', type=str, help='merged data will be saved as $save_dir/predictions.json. default: on the same directory as predict_result')
     parser.add_argument('--context_window_size', '-c', type=int, default=0, help='how many contextual utterances are considered')
     parser.add_argument('--predict_result', '-p', type=str, required=True, help='path to the output file generated_predictions.json')
+    parser.add_argument('--dial_ids_order', '-o', type=int, default=None, help='which data order is used for experiments')
     args = parser.parse_args()
     print(args)
     merge(args.dataset, args.speaker, args.save_dir, args.context_window_size, args.predict_result)
diff --git a/convlab2/base_models/t5/dst/run_dst.sh b/convlab2/base_models/t5/dst/run_dst.sh
index 7ee6041efd37c8f3c53d4a10f34e279256cb0f45..c678005ef1284bcb40333ff47e9a1fbf06c90c16 100644
--- a/convlab2/base_models/t5/dst/run_dst.sh
+++ b/convlab2/base_models/t5/dst/run_dst.sh
@@ -24,14 +24,12 @@
 lr=1e-3
 num_train_epochs=10
 
-python ../create_data.py -t ${task_name} -d ${dataset_name} -s ${speaker} -c ${context_window_size} -l t5-small
+python ../create_data.py -t ${task_name} -d ${dataset_name} -s ${speaker} -c ${context_window_size}
 
-python -m torch.distributed.launch \
-    --nproc_per_node ${n_gpus} ../run_seq2seq.py \
+python ../run_seq2seq.py \
     --task_name ${task_name} \
     --train_file ${train_file} \
     --validation_file ${validation_file} \
-    --test_file ${test_file} \
     --source_column ${source_column} \
     --target_column ${target_column} \
     --max_source_length ${max_source_length} \
@@ -40,9 +38,9 @@
     --model_name_or_path ${model_name_or_path} \
     --do_train \
     --do_eval \
-    --do_predict \
     --save_strategy epoch \
     --evaluation_strategy epoch \
+    --save_total_limit 3 \
     --prediction_loss_only \
     --cache_dir ${cache_dir} \
     --output_dir ${output_dir} \
@@ -58,8 +56,7 @@
     --adafactor \
     --gradient_checkpointing
 
-python -m torch.distributed.launch \
-    --nproc_per_node ${n_gpus} ../run_seq2seq.py \
+python ../run_seq2seq.py \
     --task_name ${task_name} \
     --test_file ${test_file} \
     --source_column ${source_column} \
@@ -76,7 +73,14 @@
     --logging_dir ${logging_dir} \
     --overwrite_output_dir \
     --preprocessing_num_workers 4 \
-    --per_device_eval_batch_size ${per_device_eval_batch_size}
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
 
 python merge_predict_res.py -d ${dataset_name} -s ${speaker} -c ${context_window_size} -p ${output_dir}/generated_predictions.json
 
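For reference, T5DST.update above flattens the dialogue into "speaker: utterance" lines, using parity relative to the context length so that the final line is always attributed to the tracked speaker. A toy sketch of that serialization (not part of the patch):

speaker, opponent, window = 'user', 'system', 100
context = ["I would like a taxi to Pizza Hut Fen Ditton.",
           "What time do you want to leave?",
           "I want to leave after 17:15."]
context = context[-window:]  # keep at most context_window_size utterances
print('\n'.join(f"{opponent if i % 2 == len(context) % 2 else speaker}: {utt}"
                for i, utt in enumerate(context)))
# user: I would like a taxi to Pizza Hut Fen Ditton.
# system: What time do you want to leave?
# user: I want to leave after 17:15.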
diff --git a/convlab2/base_models/t5/dst/run_dst_fewshot.sh b/convlab2/base_models/t5/dst/run_dst_fewshot.sh
new file mode 100644
index 0000000000000000000000000000000000000000..298a37f17a1c0817ff257742b5aa6e61bb9cd5d0
--- /dev/null
+++ b/convlab2/base_models/t5/dst/run_dst_fewshot.sh
@@ -0,0 +1,91 @@
+n_gpus=1
+task_name="dst"
+dataset_name=$1
+speaker="user"
+context_window_size=100
+ratio=$2
+dial_ids_order=$3
+data_dir="data/${task_name}/${dataset_name}_${ratio}_order${dial_ids_order}/${speaker}/context_${context_window_size}"
+output_dir="output/${task_name}/${dataset_name}_${ratio}_order${dial_ids_order}/${speaker}/context_${context_window_size}"
+cache_dir="../cache"
+logging_dir="${output_dir}/runs"
+train_file="${data_dir}/train.json"
+validation_file="${data_dir}/validation.json"
+test_file="${data_dir}/test.json"
+metric_name_or_path="dst_metric.py"
+metric_for_best_model="accuracy"
+source_column="context"
+target_column="state_seq"
+truncation_side="left"
+max_source_length=1024
+max_target_length=512
+model_name_or_path="t5-small"
+per_device_train_batch_size=64
+per_device_eval_batch_size=64
+gradient_accumulation_steps=2
+lr=1e-3
+num_train_epochs=100
+
+python ../create_data.py -t ${task_name} -d ${dataset_name} -s ${speaker} -c ${context_window_size} -r ${ratio} -o ${dial_ids_order}
+
+python ../run_seq2seq.py \
+    --task_name ${task_name} \
+    --train_file ${train_file} \
+    --validation_file ${validation_file} \
+    --source_column ${source_column} \
+    --target_column ${target_column} \
+    --max_source_length ${max_source_length} \
+    --max_target_length ${max_target_length} \
+    --truncation_side ${truncation_side} \
+    --model_name_or_path ${model_name_or_path} \
+    --do_train \
+    --do_eval \
+    --save_strategy epoch \
+    --evaluation_strategy epoch \
+    --save_total_limit 3 \
+    --early_stopping_patience 10 \
+    --prediction_loss_only \
+    --load_best_model_at_end \
+    --cache_dir ${cache_dir} \
+    --output_dir ${output_dir} \
+    --logging_dir ${logging_dir} \
+    --overwrite_output_dir \
+    --preprocessing_num_workers 4 \
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
+
+python ../run_seq2seq.py \
+    --task_name ${task_name} \
+    --test_file ${test_file} \
+    --source_column ${source_column} \
+    --target_column ${target_column} \
+    --max_source_length ${max_source_length} \
+    --max_target_length ${max_target_length} \
+    --truncation_side ${truncation_side} \
+    --model_name_or_path ${output_dir} \
+    --do_predict \
+    --predict_with_generate \
+    --metric_name_or_path ${metric_name_or_path} \
+    --cache_dir ${cache_dir} \
+    --output_dir ${output_dir} \
+    --logging_dir ${logging_dir} \
+    --overwrite_output_dir \
+    --preprocessing_num_workers 4 \
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
+
+python merge_predict_res.py -d ${dataset_name} -s ${speaker} -c ${context_window_size} -p ${output_dir}/generated_predictions.json -o ${dial_ids_order}
+
+python ../../../dst/evaluate_unified_datasets.py -p ${output_dir}/predictions.json
diff --git a/convlab2/base_models/t5/dst/run_dst_pretrain.sh b/convlab2/base_models/t5/dst/run_dst_pretrain.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f1c5c3d48799a51b208bde12938e15a08d4632a4
--- /dev/null
+++ b/convlab2/base_models/t5/dst/run_dst_pretrain.sh
@@ -0,0 +1,67 @@
+n_gpus=1
+task_name="dst"
+dataset_name="sgd+tm1+tm2+tm3"
+speaker="user"
+context_window_size=100
+data_dir="data/${task_name}/${dataset_name}/${speaker}/context_${context_window_size}"
+output_dir="output/${task_name}/${dataset_name}/${speaker}/context_${context_window_size}"
+cache_dir="../cache"
+logging_dir="${output_dir}/runs"
+train_file="${data_dir}/train.json"
+validation_file="${data_dir}/validation.json"
+test_file="${data_dir}/test.json"
+metric_name_or_path="dst_metric.py"
+metric_for_best_model="accuracy"
+source_column="context"
+target_column="state_seq"
+truncation_side="left"
+max_source_length=1024
+max_target_length=512
+model_name_or_path="t5-small"
+per_device_train_batch_size=64
+per_device_eval_batch_size=64
+gradient_accumulation_steps=2
+lr=1e-3
+num_train_epochs=1
+
+names=$(echo ${dataset_name} | tr "+" "\n")
+mkdir -p ${data_dir}
+for name in ${names};
+do
+    echo "preprocessing ${name}"
+    python ../create_data.py -t ${task_name} -d ${name} -s ${speaker} -c ${context_window_size}
+    if [ "${name}" != "${dataset_name}" ]; then
+        cat "data/${task_name}/${name}/${speaker}/context_${context_window_size}/train.json" >> ${train_file}
+        cat "data/${task_name}/${name}/${speaker}/context_${context_window_size}/validation.json" >> ${validation_file}
+        cat "data/${task_name}/${name}/${speaker}/context_${context_window_size}/test.json" >> ${test_file}
+    fi
+done
+
+python ../run_seq2seq.py \
+    --task_name ${task_name} \
+    --train_file ${train_file} \
+    --validation_file ${validation_file} \
+    --source_column ${source_column} \
+    --target_column ${target_column} \
+    --max_source_length ${max_source_length} \
+    --max_target_length ${max_target_length} \
+    --truncation_side ${truncation_side} \
+    --model_name_or_path ${model_name_or_path} \
+    --do_train \
+    --do_eval \
+    --save_strategy epoch \
+    --evaluation_strategy epoch \
+    --prediction_loss_only \
+    --cache_dir ${cache_dir} \
+    --output_dir ${output_dir} \
+    --logging_dir ${logging_dir} \
+    --overwrite_output_dir \
+    --preprocessing_num_workers 4 \
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
diff --git a/convlab2/base_models/t5/nlg/merge_predict_res.py b/convlab2/base_models/t5/nlg/merge_predict_res.py
index 91e6055e13522caa5763c19b0d443b5244cc2496..205226fae5190505fb31e3ad949278ccd0aa5c6c 100755
--- a/convlab2/base_models/t5/nlg/merge_predict_res.py
+++ b/convlab2/base_models/t5/nlg/merge_predict_res.py
@@ -5,7 +5,7 @@ from convlab2.util import load_dataset, load_nlg_data
 
 def merge(dataset_name, speaker, save_dir, context_window_size, predict_result):
     assert os.path.exists(predict_result)
-    dataset = load_dataset(dataset_name)
+    dataset = load_dataset(dataset_name, args.dial_ids_order)
     data = load_nlg_data(dataset, data_split='test', speaker=speaker, use_context=context_window_size>0, context_window_size=context_window_size)['test']
 
     if save_dir is None:
@@ -28,6 +28,7 @@ if __name__ == '__main__':
     parser.add_argument('--save_dir', type=str, help='merged data will be saved as $save_dir/predictions.json. default: on the same directory as predict_result')
     parser.add_argument('--context_window_size', '-c', type=int, default=0, help='how many contextual utterances are considered')
     parser.add_argument('--predict_result', '-p', type=str, required=True, help='path to the output file generated_predictions.json')
+    parser.add_argument('--dial_ids_order', '-o', type=int, default=None, help='which data order is used for experiments')
     args = parser.parse_args()
     print(args)
     merge(args.dataset, args.speaker, args.save_dir, args.context_window_size, args.predict_result)
diff --git a/convlab2/base_models/t5/nlg/nlg.py b/convlab2/base_models/t5/nlg/nlg.py
index 378e38809645f66b157caf3e2ee4326b43ce5061..70fd279e3997aa08bb9c4388271bf7e4766ed60a 100755
--- a/convlab2/base_models/t5/nlg/nlg.py
+++ b/convlab2/base_models/t5/nlg/nlg.py
@@ -32,13 +32,14 @@ class T5NLG(NLG):
         if self.use_context:
             if len(context) > 0 and type(context[0]) is list and len(context[0]) > 1:
                 context = [item[1] for item in context]
+            context = context[-self.context_window_size:]
             utts = context + ['']
         else:
             utts = ['']
         input_seq = '\n'.join([f"{self.opponent if (i % 2) == (len(utts) % 2) else self.speaker}: {utt}" for i, utt in enumerate(utts)])
         dialogue_acts_seq = serialize_dialogue_acts(dialogue_acts)
         input_seq = dialogue_acts_seq + '\n' + input_seq
-        print(input_seq)
+        # print(input_seq)
         input_seq = self.tokenizer(input_seq, return_tensors="pt").to(self.device)
         # print(input_seq)
         output_seq = self.model.generate(**input_seq, max_length=256)
@@ -122,10 +123,16 @@ if __name__ == '__main__':
         ["I would like a taxi from Saint John's college to Pizza Hut Fen Ditton.",
          "What time do you want to leave and what time do you want to arrive by?",
          "I want to leave after 17:15."],
-        ["I want to leave after 17:15.",
+        ["I would like a taxi from Saint John's college to Pizza Hut Fen Ditton.",
+         "What time do you want to leave and what time do you want to arrive by?",
+         "I want to leave after 17:15.",
          "Booking completed! your taxi will be blue honda Contact number is 07218068540",
          "Thank you for all the help! I appreciate it."],
-        ["Thank you for all the help! I appreciate it.",
+        ["I would like a taxi from Saint John's college to Pizza Hut Fen Ditton.",
+         "What time do you want to leave and what time do you want to arrive by?",
+         "I want to leave after 17:15.",
+         "Booking completed! your taxi will be blue honda Contact number is 07218068540",
+         "Thank you for all the help! I appreciate it.",
          "You are welcome. Is there anything else I can help you with today?"
          "No, I am all set. Have a nice day. Bye."],
     ]
diff --git a/convlab2/base_models/t5/nlg/run_nlg.sh b/convlab2/base_models/t5/nlg/run_nlg.sh
index c9dc80842f38ed462e4b711d675f1445acefe1ca..3352e6c14f4a5c2f61690f0c32fc31b709c73a23 100644
--- a/convlab2/base_models/t5/nlg/run_nlg.sh
+++ b/convlab2/base_models/t5/nlg/run_nlg.sh
@@ -14,7 +14,7 @@ metric_name_or_path="nlg_metric.py"
 metric_for_best_model="bleu"
 source_column="context+da"
 target_column="response"
-truncation_side="right"
+truncation_side="left"
 max_source_length=512
 max_target_length=512
 model_name_or_path="t5-small"
@@ -40,6 +40,7 @@ python ../run_seq2seq.py \
     --do_eval \
     --save_strategy epoch \
     --evaluation_strategy epoch \
+    --save_total_limit 3 \
     --prediction_loss_only \
     --cache_dir ${cache_dir} \
     --output_dir ${output_dir} \
@@ -72,8 +73,15 @@ python ../run_seq2seq.py \
     --logging_dir ${logging_dir} \
     --overwrite_output_dir \
     --preprocessing_num_workers 4 \
-    --per_device_eval_batch_size ${per_device_eval_batch_size}
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
 
 python merge_predict_res.py -d ${dataset_name} -s ${speaker} -c ${context_window_size} -p ${output_dir}/generated_predictions.json
 
-python ../../../nlg/evaluate_unified_datasets.py -p ${output_dir}/predictions.json
+python ../../../nlg/evaluate_unified_datasets.py -p ${output_dir}/predictions.json --dataset_name ${dataset_name}
\ + --logging_dir ${logging_dir} \ + --overwrite_output_dir \ + --preprocessing_num_workers 4 \ + --per_device_train_batch_size ${per_device_train_batch_size} \ + --per_device_eval_batch_size ${per_device_eval_batch_size} \ + --gradient_accumulation_steps ${gradient_accumulation_steps} \ + --learning_rate ${lr} \ + --num_train_epochs ${num_train_epochs} \ + --debug underflow_overflow \ + --adafactor \ + --gradient_checkpointing + +python ../run_seq2seq.py \ + --task_name ${task_name} \ + --test_file ${test_file} \ + --source_column ${source_column} \ + --target_column ${target_column} \ + --max_source_length ${max_source_length} \ + --max_target_length ${max_target_length} \ + --truncation_side ${truncation_side} \ + --model_name_or_path ${output_dir} \ + --do_predict \ + --predict_with_generate \ + --metric_name_or_path ${metric_name_or_path} \ + --cache_dir ${cache_dir} \ + --output_dir ${output_dir} \ + --logging_dir ${logging_dir} \ + --overwrite_output_dir \ + --preprocessing_num_workers 4 \ + --per_device_train_batch_size ${per_device_train_batch_size} \ + --per_device_eval_batch_size ${per_device_eval_batch_size} \ + --gradient_accumulation_steps ${gradient_accumulation_steps} \ + --learning_rate ${lr} \ + --num_train_epochs ${num_train_epochs} \ + --debug underflow_overflow \ + --adafactor \ + --gradient_checkpointing + +python merge_predict_res.py -d ${dataset_name} -s ${speaker} -c ${context_window_size} -p ${output_dir}/generated_predictions.json -o ${dial_ids_order} + +python ../../../nlg/evaluate_unified_datasets.py -p ${output_dir}/predictions.json --dataset_name ${dataset_name} diff --git a/convlab2/base_models/t5/nlg/run_nlg_pretrain.sh b/convlab2/base_models/t5/nlg/run_nlg_pretrain.sh new file mode 100644 index 0000000000000000000000000000000000000000..4ff752b64b321955fad5564353002aeaa79b2f30 --- /dev/null +++ b/convlab2/base_models/t5/nlg/run_nlg_pretrain.sh @@ -0,0 +1,67 @@ +n_gpus=1 +task_name="nlg" +dataset_name="sgd+tm1+tm2+tm3" +speaker="system" +context_window_size=0 +data_dir="data/${task_name}/${dataset_name}/${speaker}/context_${context_window_size}" +output_dir="output/${task_name}/${dataset_name}/${speaker}/context_${context_window_size}" +cache_dir="../cache" +logging_dir="${output_dir}/runs" +train_file="${data_dir}/train.json" +validation_file="${data_dir}/validation.json" +test_file="${data_dir}/test.json" +metric_name_or_path="nlg_metric.py" +metric_for_best_model="bleu" +source_column="context+da" +target_column="response" +truncation_side="left" +max_source_length=512 +max_target_length=512 +model_name_or_path="t5-small" +per_device_train_batch_size=64 +per_device_eval_batch_size=64 +gradient_accumulation_steps=8 +lr=1e-3 +num_train_epochs=1 + +names=$(echo ${dataset_name} | tr "+" "\n") +mkdir -p ${data_dir} +for name in ${names}; +do + echo "preprocessing ${name}" + python ../create_data.py -t ${task_name} -d ${name} -s ${speaker} -c ${context_window_size} + if [ "${name}" != "${dataset_name}" ]; then + cat "data/${task_name}/${name}/${speaker}/context_${context_window_size}/train.json" >> ${train_file} + cat "data/${task_name}/${name}/${speaker}/context_${context_window_size}/validation.json" >> ${validation_file} + cat "data/${task_name}/${name}/${speaker}/context_${context_window_size}/test.json" >> ${test_file} + fi +done + +python ../run_seq2seq.py \ + --task_name ${task_name} \ + --train_file ${train_file} \ + --validation_file ${validation_file} \ + --source_column ${source_column} \ + --target_column ${target_column} \ + --max_source_length 
${max_source_length} \ + --max_target_length ${max_target_length} \ + --truncation_side ${truncation_side} \ + --model_name_or_path ${model_name_or_path} \ + --do_train \ + --do_eval \ + --save_strategy epoch \ + --evaluation_strategy epoch \ + --prediction_loss_only \ + --cache_dir ${cache_dir} \ + --output_dir ${output_dir} \ + --logging_dir ${logging_dir} \ + --overwrite_output_dir \ + --preprocessing_num_workers 4 \ + --per_device_train_batch_size ${per_device_train_batch_size} \ + --per_device_eval_batch_size ${per_device_eval_batch_size} \ + --gradient_accumulation_steps ${gradient_accumulation_steps} \ + --learning_rate ${lr} \ + --num_train_epochs ${num_train_epochs} \ + --debug underflow_overflow \ + --adafactor \ + --gradient_checkpointing diff --git a/convlab2/base_models/t5/nlu/merge_predict_res.py b/convlab2/base_models/t5/nlu/merge_predict_res.py index f3386b210817a6ae26c153776e47324793c70546..cc7c9913b3f954f2a21fa5408f39429fe589dc54 100755 --- a/convlab2/base_models/t5/nlu/merge_predict_res.py +++ b/convlab2/base_models/t5/nlu/merge_predict_res.py @@ -6,7 +6,7 @@ from convlab2.base_models.t5.nlu.serialization import deserialize_dialogue_acts def merge(dataset_name, speaker, save_dir, context_window_size, predict_result): assert os.path.exists(predict_result) - dataset = load_dataset(dataset_name) + dataset = load_dataset(dataset_name, args.dial_ids_order) data = load_nlu_data(dataset, data_split='test', speaker=speaker, use_context=context_window_size>0, context_window_size=context_window_size)['test'] if save_dir is None: @@ -29,6 +29,7 @@ if __name__ == '__main__': parser.add_argument('--save_dir', type=str, help='merged data will be saved as $save_dir/predictions.json. default: on the same directory as predict_result') parser.add_argument('--context_window_size', '-c', type=int, default=0, help='how many contextual utterances are considered') parser.add_argument('--predict_result', '-p', type=str, required=True, help='path to the output file generated_predictions.json') + parser.add_argument('--dial_ids_order', '-o', type=int, default=None, help='which data order is used for experiments') args = parser.parse_args() print(args) merge(args.dataset, args.speaker, args.save_dir, args.context_window_size, args.predict_result) diff --git a/convlab2/base_models/t5/nlu/nlu.py b/convlab2/base_models/t5/nlu/nlu.py index 3d9b77c4b83c70f5d0a58345eb0f6e8bd61c3c45..8cadc00f51e91bade063aa551c6a4dfdd9459923 100755 --- a/convlab2/base_models/t5/nlu/nlu.py +++ b/convlab2/base_models/t5/nlu/nlu.py @@ -32,6 +32,7 @@ class T5NLU(NLU): if self.use_context: if len(context) > 0 and type(context[0]) is list and len(context[0]) > 1: context = [item[1] for item in context] + context = context[-self.context_window_size:] utts = context + [utterance] else: utts = [utterance] @@ -60,13 +61,15 @@ if __name__ == '__main__': [], ["I would like a taxi from Saint John's college to Pizza Hut Fen Ditton.", "What time do you want to leave and what time do you want to arrive by?"], - ["What time do you want to leave and what time do you want to arrive by?", + ["I would like a taxi from Saint John's college to Pizza Hut Fen Ditton.", + "What time do you want to leave and what time do you want to arrive by?", "I want to leave after 17:15.", "Booking completed! your taxi will be blue honda Contact number is 07218068540"], [], ["Please find a restaurant called Nusha.", "I don't seem to be finding anything called Nusha. 
diff --git a/convlab2/base_models/t5/nlu/run_nlu.sh b/convlab2/base_models/t5/nlu/run_nlu.sh
index 71b5f5f8e488b2c44e76637cc0152b8ff9db8671..05671139cfd691422c924c9da880af317c916a19 100644
--- a/convlab2/base_models/t5/nlu/run_nlu.sh
+++ b/convlab2/base_models/t5/nlu/run_nlu.sh
@@ -40,6 +40,7 @@ python ../run_seq2seq.py \
     --do_eval \
     --save_strategy epoch \
     --evaluation_strategy epoch \
+    --save_total_limit 3 \
     --prediction_loss_only \
     --cache_dir ${cache_dir} \
     --output_dir ${output_dir} \
@@ -72,7 +73,14 @@ python ../run_seq2seq.py \
     --logging_dir ${logging_dir} \
     --overwrite_output_dir \
     --preprocessing_num_workers 4 \
-    --per_device_eval_batch_size ${per_device_eval_batch_size}
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
 
 python merge_predict_res.py -d ${dataset_name} -s ${speaker} -c ${context_window_size} -p ${output_dir}/generated_predictions.json
 
diff --git a/convlab2/base_models/t5/nlu/run_nlu_fewshot.sh b/convlab2/base_models/t5/nlu/run_nlu_fewshot.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2c783912d66281c8e44f01aafc232b4051e73f86
--- /dev/null
+++ b/convlab2/base_models/t5/nlu/run_nlu_fewshot.sh
@@ -0,0 +1,90 @@
+n_gpus=1
+task_name="nlu"
+dataset_name=$1
+speaker="user"
+context_window_size=$2
+ratio=$3
+dial_ids_order=$4
+data_dir="data/${task_name}/${dataset_name}_${ratio}_order${dial_ids_order}/${speaker}/context_${context_window_size}"
+output_dir="output/${task_name}/${dataset_name}_${ratio}_order${dial_ids_order}/${speaker}/context_${context_window_size}"
+cache_dir="../cache"
+logging_dir="${output_dir}/runs"
+train_file="${data_dir}/train.json"
+validation_file="${data_dir}/validation.json"
+test_file="${data_dir}/test.json"
+metric_name_or_path="nlu_metric.py"
+metric_for_best_model="overall_f1"
+source_column="context"
+target_column="dialogue_acts_seq"
+truncation_side="left"
+max_source_length=512
+max_target_length=512
+model_name_or_path="t5-small"
+per_device_train_batch_size=128
+per_device_eval_batch_size=64
+gradient_accumulation_steps=2
+lr=1e-3
+num_train_epochs=100
+
+python ../create_data.py -t ${task_name} -d ${dataset_name} -s ${speaker} -c ${context_window_size} -r ${ratio} -o ${dial_ids_order}
+
+python ../run_seq2seq.py \
+    --task_name ${task_name} \
+    --train_file ${train_file} \
+    --validation_file ${validation_file} \
+    --source_column ${source_column} \
+    --target_column ${target_column} \
+    --max_source_length ${max_source_length} \
+    --max_target_length ${max_target_length} \
+    --truncation_side ${truncation_side} \
+    --model_name_or_path ${model_name_or_path} \
+    --do_train \
+    --do_eval \
+    --save_strategy epoch \
+    --evaluation_strategy epoch \
+    --save_total_limit 3 \
+    --prediction_loss_only \
+    --load_best_model_at_end \
+    --cache_dir ${cache_dir} \
+    --output_dir ${output_dir} \
+    --logging_dir ${logging_dir} \
+    --overwrite_output_dir \
+    --preprocessing_num_workers 4 \
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
+
+python ../run_seq2seq.py \
+    --task_name ${task_name} \
+    --test_file ${test_file} \
+    --source_column ${source_column} \
+    --target_column ${target_column} \
+    --max_source_length ${max_source_length} \
+    --max_target_length ${max_target_length} \
+    --truncation_side ${truncation_side} \
+    --model_name_or_path ${output_dir} \
+    --do_predict \
+    --predict_with_generate \
+    --metric_name_or_path ${metric_name_or_path} \
+    --cache_dir ${cache_dir} \
+    --output_dir ${output_dir} \
+    --logging_dir ${logging_dir} \
+    --overwrite_output_dir \
+    --preprocessing_num_workers 4 \
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
+
+python merge_predict_res.py -d ${dataset_name} -s ${speaker} -c ${context_window_size} -p ${output_dir}/generated_predictions.json -o ${dial_ids_order}
+
+python ../../../nlu/evaluate_unified_datasets.py -p ${output_dir}/predictions.json
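run_nlu_fewshot.sh takes four positional arguments (dataset name, context window size, few-shot ratio, dialogue-ID order), e.g. `bash run_nlu_fewshot.sh multiwoz21 0 0.1 0`. How create_data.py's `-r`/`-o` flags actually select dialogues is not shown in this diff; the following is a purely illustrative sketch of one plausible scheme, ratio-based sampling that is reproducible for a given order index. The helper name and logic are assumptions, not the repo's implementation:

```python
import random

def sample_dialogue_ids(dial_ids, ratio, dial_ids_order):
    """Hypothetical helper: keep a `ratio` fraction of dialogue IDs,
    shuffled with a seed derived from `dial_ids_order` so that every run
    with the same order index yields the same few-shot split. The real
    create_data.py behavior is not part of this diff."""
    rng = random.Random(dial_ids_order)  # fixed order index -> reproducible split
    ids = sorted(dial_ids)
    rng.shuffle(ids)
    return ids[:max(1, int(len(ids) * ratio))]

# e.g. 10% of 100 dialogues under order 0 -> 10 reproducible IDs
print(sample_dialogue_ids([f"dial_{i}" for i in range(100)], 0.1, 0))
```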
diff --git a/convlab2/base_models/t5/nlu/run_nlu_pretrain.sh b/convlab2/base_models/t5/nlu/run_nlu_pretrain.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3a059c2d27c809a3a1cf26b3a49160cab96dd1d4
--- /dev/null
+++ b/convlab2/base_models/t5/nlu/run_nlu_pretrain.sh
@@ -0,0 +1,67 @@
+n_gpus=1
+task_name="nlu"
+dataset_name="sgd+tm1+tm2+tm3"
+speaker="user"
+context_window_size=0
+data_dir="data/${task_name}/${dataset_name}/${speaker}/context_${context_window_size}"
+output_dir="output/${task_name}/${dataset_name}/${speaker}/context_${context_window_size}"
+cache_dir="../cache"
+logging_dir="${output_dir}/runs"
+train_file="${data_dir}/train.json"
+validation_file="${data_dir}/validation.json"
+test_file="${data_dir}/test.json"
+metric_name_or_path="nlu_metric.py"
+metric_for_best_model="overall_f1"
+source_column="context"
+target_column="dialogue_acts_seq"
+truncation_side="left"
+max_source_length=512
+max_target_length=512
+model_name_or_path="t5-small"
+per_device_train_batch_size=128
+per_device_eval_batch_size=64
+gradient_accumulation_steps=2
+lr=1e-3
+num_train_epochs=1
+
+names=$(echo ${dataset_name} | tr "+" "\n")
+mkdir -p ${data_dir}
+for name in ${names};
+do
+    echo "preprocessing ${name}"
+    python ../create_data.py -t ${task_name} -d ${name} -s ${speaker} -c ${context_window_size}
+    if [ "${name}" != "${dataset_name}" ]; then
+        cat "data/${task_name}/${name}/${speaker}/context_${context_window_size}/train.json" >> ${train_file}
+        cat "data/${task_name}/${name}/${speaker}/context_${context_window_size}/validation.json" >> ${validation_file}
+        cat "data/${task_name}/${name}/${speaker}/context_${context_window_size}/test.json" >> ${test_file}
+    fi
+done
+
+python ../run_seq2seq.py \
+    --task_name ${task_name} \
+    --train_file ${train_file} \
+    --validation_file ${validation_file} \
+    --source_column ${source_column} \
+    --target_column ${target_column} \
+    --max_source_length ${max_source_length} \
+    --max_target_length ${max_target_length} \
+    --truncation_side ${truncation_side} \
+    --model_name_or_path ${model_name_or_path} \
+    --do_train \
+    --do_eval \
+    --save_strategy epoch \
+    --evaluation_strategy epoch \
+    --prediction_loss_only \
+    --cache_dir ${cache_dir} \
+    --output_dir ${output_dir} \
+    --logging_dir ${logging_dir} \
+    --overwrite_output_dir \
+    --preprocessing_num_workers 4 \
+    --per_device_train_batch_size ${per_device_train_batch_size} \
+    --per_device_eval_batch_size ${per_device_eval_batch_size} \
+    --gradient_accumulation_steps ${gradient_accumulation_steps} \
+    --learning_rate ${lr} \
+    --num_train_epochs ${num_train_epochs} \
+    --debug underflow_overflow \
+    --adafactor \
+    --gradient_checkpointing
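Both pretraining scripts build the sgd+tm1+tm2+tm3 splits by `cat`-appending per-dataset files, which only works because each split file holds one JSON example per line. Since `>>` appends, re-running the script without first deleting the merged files would duplicate examples. A sketch of an equivalent merge in Python that overwrites instead of appending; the paths follow the scripts' layout, and writing with mode 'w' is the assumed fix, not something the diff itself does:

```python
import os

# Illustrative re-implementation of the shell merge loop in
# run_nlu_pretrain.sh / run_nlg_pretrain.sh.
task_name, speaker, context_window_size = "nlu", "user", 0
names = "sgd+tm1+tm2+tm3".split("+")
data_dir = f"data/{task_name}/sgd+tm1+tm2+tm3/{speaker}/context_{context_window_size}"
os.makedirs(data_dir, exist_ok=True)

for split in ("train", "validation", "test"):
    # Mode 'w' truncates the merged file, avoiding the duplicate-on-rerun
    # pitfall of `cat ... >>`.
    with open(f"{data_dir}/{split}.json", "w", encoding="utf-8") as merged:
        for name in names:
            part = f"data/{task_name}/{name}/{speaker}/context_{context_window_size}/{split}.json"
            with open(part, encoding="utf-8") as f:
                merged.writelines(f)  # each line is one JSON example
```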
diff --git a/convlab2/base_models/t5/rg/run_rg.sh b/convlab2/base_models/t5/rg/run_rg.sh
index 6fcffca23995db20fab4d581fe350b5d47894882..ac065b81fefa2c406a469a6658263477f1fbe18d 100644
--- a/convlab2/base_models/t5/rg/run_rg.sh
+++ b/convlab2/base_models/t5/rg/run_rg.sh
@@ -20,7 +20,7 @@ per_device_train_batch_size=128
 per_device_eval_batch_size=128
 gradient_accumulation_steps=4
 lr=1e-3
-num_train_epochs=5
+num_train_epochs=1
 
 names=$(echo ${dataset_name} | tr "+" "\n")
 mkdir -p ${data_dir}
diff --git a/convlab2/base_models/t5/run_seq2seq.py b/convlab2/base_models/t5/run_seq2seq.py
index 2f0f5481243c2f78eac4d352786482508f70e617..c76bb5cd690001e550aef4a1ce287d007c5a066d 100644
--- a/convlab2/base_models/t5/run_seq2seq.py
+++ b/convlab2/base_models/t5/run_seq2seq.py
@@ -25,6 +25,8 @@ import sys
 import json
 from dataclasses import dataclass, field
 from typing import Optional
+from itertools import zip_longest
+from functools import reduce
 
 import datasets
 import numpy as np
@@ -39,6 +41,7 @@ from transformers import (
     HfArgumentParser,
     Seq2SeqTrainer,
     Seq2SeqTrainingArguments,
+    EarlyStoppingCallback,
     set_seed,
 )
 from transformers.trainer_utils import EvalPrediction, get_last_checkpoint
@@ -215,6 +218,9 @@ class DataTrainingArguments:
     source_prefix: Optional[str] = field(
         default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
     )
+    early_stopping_patience: Optional[int] = field(
+        default=10, metadata={"help": "early stopping patience, set to 0 if you do not want to use early stopping."},
+    )
 
     def __post_init__(self):
         if (
@@ -439,8 +445,14 @@ def main():
                 inputs.append(examples[source_column][i])
                 targets.append(examples[target_column][i])
 
-        inputs = [prefix + inp for inp in inputs]
-        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
+        inputs = [prefix + '\n\n' + inp for inp in inputs]
+        if padding:
+            model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
+        else:
+            # truncate each part separated by \n\n respectively
+            split_inputs = [inp.split('\n\n') for inp in inputs]
+            split_model_inputs = [tokenizer(x, max_length=data_args.max_source_length, padding=False, truncation=True) for x in split_inputs]
+            model_inputs = {k: [reduce(lambda x, y: x[:-1]+y, item[k]) for item in split_model_inputs] for k in split_model_inputs[0]}
 
         # Setup the tokenizer for targets
         with tokenizer.as_target_tokenizer():
@@ -553,6 +565,8 @@ def main():
         data_collator=data_collator,
         compute_metrics=compute_metrics if training_args.predict_with_generate else None,
     )
+    if data_args.early_stopping_patience > 0:
+        trainer.add_callback(EarlyStoppingCallback(early_stopping_patience=data_args.early_stopping_patience))
 
     # Training
     if training_args.do_train:
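The new preprocessing path in run_seq2seq.py truncates each '\n\n'-separated part of an input to max_source_length separately, then splices the per-part token lists back together; `x[:-1] + y` drops the trailing EOS of every part except the last. A small self-contained illustration of that splice, using toy token IDs rather than a real tokenizer:

```python
from functools import reduce

# Toy illustration of the `reduce(lambda x, y: x[:-1] + y, ...)` splice used
# in run_seq2seq.py above. Suppose the tokenizer returned these input_ids for
# two '\n\n'-separated parts, each ending in EOS (id 1 here):
part_ids = [[101, 7, 8, 1], [9, 10, 1]]

# Drop the EOS of every part except the last, then concatenate.
input_ids = reduce(lambda x, y: x[:-1] + y, part_ids)
assert input_ids == [101, 7, 8, 9, 10, 1]
```

Note that each part is capped at max_source_length before the splice, so the concatenated sequence can still exceed max_source_length when several long parts are joined; the code accepts that trade-off rather than re-truncating the joined sequence.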
diff --git a/convlab2/nlg/evaluate_unified_datasets.py b/convlab2/nlg/evaluate_unified_datasets.py
index 544c3c37cff10e444869e8579d7210fa5d256032..a44837f1008f7d72c562937d1c768bab2843d073 100644
--- a/convlab2/nlg/evaluate_unified_datasets.py
+++ b/convlab2/nlg/evaluate_unified_datasets.py
@@ -34,7 +34,7 @@ def evaluate(predict_result, ontology):
     candidates = []
     for i in range(len(predict_result)):
         references.append(predict_result[i]['utterance'])
-        candidates.append(predict_result[i]['prediction'])
+        candidates.append(predict_result[i]['predictions']['utterance'])
 
     # metrics['bleu'] = corpus_bleu(references, candidates)
     metrics['bleu'] = sacrebleu.corpus_bleu(candidates, [references], lowercase=True).score
@@ -54,7 +54,7 @@ def evaluate(predict_result, ontology):
     score_list = []
     for item in predict_result:
         da = item['dialogue_acts']
-        utterance = item['prediction']
+        utterance = item['predictions']['utterance']
         missing_count = 0
         redundant_count = 0
         all_count = 0
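For reference, the `sacrebleu.corpus_bleu` call kept in evaluate_unified_datasets.py takes the candidates as a flat list of strings and the references as a list of reference streams, each aligned one-to-one with the candidates. A minimal sketch of that call shape, with made-up utterances:

```python
import sacrebleu

# Shape expected by sacrebleu.corpus_bleu as called above: one candidate per
# example, and a list of reference streams (here a single stream).
candidates = ["there are 5 hotels in the north"]
references = ["there are five hotels in the north area"]

# lowercase=True matches the script and makes the comparison case-insensitive.
score = sacrebleu.corpus_bleu(candidates, [references], lowercase=True).score
print(f"BLEU: {score:.2f}")
```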