diff --git a/convlab/policy/emoTUS/analysis.py b/convlab/policy/emoTUS/analysis.py
index 75b55eb2e0217786867649f1822533b74eaffd07..829ed134371167cc4d616fa8363ee4aa4415fa78 100644
--- a/convlab/policy/emoTUS/analysis.py
+++ b/convlab/policy/emoTUS/analysis.py
@@ -97,7 +97,7 @@ def get_turn_emotion(conversation):
     for x in data:
         data[x] = np.array(data[x])
 
-    fig, ax = plt.subplots()
+    fig, ax = plt.subplots(figsize=(6.0, 2.5))
     p = {"Complete": {"color": "C0", "label": "Success"},
          "Not Complete": {"color": "C1", "label": "Fail"},
          "all": {"color": "C2", "label": "all"}}
@@ -120,6 +120,8 @@ def get_turn_emotion(conversation):
     plt.grid(axis='x', color='0.95')
     plt.grid(axis='y', color='0.95')
     # plt.show()
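+    # tight_layout keeps axis and tick labels from being clipped in the saved figure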
+    plt.tight_layout()
     plt.savefig(os.path.join(result_dir, "turn2emotion.png"))
 
 
@@ -284,15 +286,15 @@ def main():
     if not os.path.exists(result_dir):
         os.makedirs(result_dir)
     conversation = json.load(open(args.file))["conversation"]
-    basic_info = basic_analysis(conversation)
-    result["basic_info"] = basic_info
-    print(basic_info)
-    advance_info = advance(conversation)
-    print(advance_info)
-    result["advance_info"] = advance_info
-    json.dump(result, open(
-        os.path.join("conversation_result.json"), 'w'), indent=2)
-    dict2csv(advance_info)
+    # basic_info = basic_analysis(conversation)
+    # result["basic_info"] = basic_info
+    # print(basic_info)
+    # advance_info = advance(conversation)
+    # print(advance_info)
+    # result["advance_info"] = advance_info
+    # json.dump(result, open(
+    #     os.path.join("conversation_result.json"), 'w'), indent=2)
+    # dict2csv(advance_info)
     get_turn_emotion(conversation)
 
 
diff --git a/convlab/policy/emoTUS/evaluate.py b/convlab/policy/emoTUS/evaluate.py
index 2ce57dc8a27bd43679de3d3c64a0ab9ec7dad09e..900c9e80308efe8905d06662b2fc56be020c66d5 100644
--- a/convlab/policy/emoTUS/evaluate.py
+++ b/convlab/policy/emoTUS/evaluate.py
@@ -204,6 +204,15 @@ class Evaluator:
                   indent=2)
         return os.path.join(dir_name, f"{self.time}-nlg_eval.json")
 
+    @staticmethod
+    def _intent_domain(action):
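+        # collect the unique [intent, domain] pairs of an action, dropping slot and value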
+        acts = []
+        for intent, domain, slot, value in action:
+            if [intent, domain] not in acts:
+                acts.append([intent, domain])
+        return acts
+
     def evaluation(self, generated_file, golden_emotion=False, golden_action=False):
         # TODO add emotion
         gen_file = json.load(open(generated_file))
@@ -231,18 +240,29 @@
                 golden_emotions.append(dialog["golden_emotion"])
             dialog_result = gen_file['dialog']
 
-        scores = {"precision": [], "recall": [], "f1": [], "turn_acc": []}
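+        # score at two granularities: the full action and its intent-domain abstraction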
+        scores = {"complete": {"precision": [], "recall": [], "f1": [], "turn_acc": []},
+                  "intent_domain": {"precision": [], "recall": [], "f1": [], "turn_acc": []}}
 
+        # full action: exact [intent, domain, slot, value] match
         for gen_act, golden_act in zip(gen_acts, golden_acts):
             s = f1_measure(preds=gen_act, labels=golden_act)
-            for metric in scores:
-                scores[metric].append(s[metric])
+            for metric in scores["complete"]:
+                scores["complete"][metric].append(s[metric])
+            s = f1_measure(preds=self._intent_domain(gen_act),
+                           labels=self._intent_domain(golden_act))
+            for metric in scores["intent_domain"]:
+                scores["intent_domain"][metric].append(s[metric])
 
         result = {}
         result["emotion_weight"] = self.emotion_weight
-        for metric in scores:
-            result[metric] = sum(scores[metric])/len(scores[metric])
-            print(f"{metric}: {result[metric]}")
+        for metric_type, score in scores.items():
+            result[metric_type] = {}
+            for m, s in score.items():
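+                # s is the list of per-turn scores for metric m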
+                result[metric_type][m] = sum(s)/len(s)
+                print(f"{metric_type}-{m}: {result[metric_type][m]}")
 
         if not golden_emotion:
             emo_score = emotion_score(