# text_analyzer.py — sample analysis framework
import json
from analysis.utils.hanlp_txt import Fenci
from analysis.utils.cp02 import Range, grammar_stats, semantic, suggest


class TextAnalyzer:
    """Analyze a Chinese text: vocabulary level, grammar stats, semantics, suggestions.

    Each ``get_*`` method returns a JSON string (``ensure_ascii=False`` so the
    Chinese keys stay readable) except ``get_suggest``, which returns ``str(...)``
    of the helper's result.
    """

    def __init__(self, text):
        # Raw text under analysis; every method reads this attribute.
        self.text = text

    def get_vocabulary_level(self):
        """Return a JSON string mapping vocabulary-level labels to word counts.

        Tokenizes with ``Fenci``, classifies tokens with ``Range``, then
        aggregates ``num`` per ``level``. Levels outside the known label set
        are skipped (previously an unexpected label raised ``KeyError``).
        """
        tokens = Fenci(self.text)
        leveled = Range(tokens)
        # Known level labels (runtime dict keys — must stay in Chinese).
        counts = {"一级": 0, "二级": 0, "三级": 0, "四级": 0, "五级": 0, "六级": 0, "高级": 0}
        for item in leveled:
            level = item['level']
            if level in counts:  # guard: ignore labels we don't track instead of crashing
                counts[level] += item['num']
        return json.dumps(counts, ensure_ascii=False)

    def get_grammar_stats(self):
        """Return grammar statistics for the text as a JSON string."""
        # NOTE: removed a stray debug print of the intermediate result.
        ans = grammar_stats(self.text)
        return json.dumps(ans, ensure_ascii=False)

    def get_semantic(self):
        """Return semantic-analysis results for the text as a JSON string."""
        ans = semantic(self.text)
        return json.dumps(ans, ensure_ascii=False)

    def get_suggest(self, vocabulary_level, grammar_stats):
        """Return improvement suggestions built from the vocabulary and grammar results.

        Both arguments are the JSON strings produced by the sibling methods;
        the helper's return value is stringified as-is.
        """
        ans = suggest(vocabulary_level, grammar_stats)
        return str(ans)


def analyze_text(text):
    """Run the full analysis pipeline on *text*.

    Returns a dict with keys ``vocabulary_level``, ``grammar``,
    ``main_semantic`` and ``suggest`` — all string values produced by
    :class:`TextAnalyzer`.
    """
    # NOTE: removed a stray debug print of the raw input text.
    analyzer = TextAnalyzer(text)
    # Locals renamed: the old names `grammar_stats` / `suggest` shadowed the
    # helpers imported at module top.
    vocab_json = analyzer.get_vocabulary_level()
    grammar_json = analyzer.get_grammar_stats()
    suggestion = analyzer.get_suggest(vocab_json, grammar_json)
    return {
        "vocabulary_level": vocab_json,
        "grammar": grammar_json,
        "main_semantic": analyzer.get_semantic(),
        "suggest": suggestion,
    }

# with open('./analysis/shangwu.txt','r',encoding='utf-8') as fp:
#     text=fp.read()
# analyzer = TextAnalyzer(text)
# test_ans1=analyzer.get_vocabulary_level()
# test_ans2=analyzer.get_grammar_stats()
# test_ans3=analyzer.get_semantic()
# with open('./ans1.json','w',encoding='utf-8') as fp:
#     fp.write(json.dumps(test_ans1,ensure_ascii=False))
# with open('./ans2.json','w',encoding='utf-8') as fp:
#     fp.write(json.dumps(test_ans2,ensure_ascii=False))
# with open('./ans3.json','w',encoding='utf-8') as fp:
#     fp.write(json.dumps(test_ans3,ensure_ascii=False))
