# encoding:utf-8
# -*- coding: UTF-8 -*-     
#当前的项目名：imgPoetry
#当前编辑文件名：imgPoetry3
#当前用户的登录名：TsinkPC
#当前系统日期：2021-09-01
#当前系统时间：18:25
#用于创建文件的IDE的名称: PyCharm

# Logging setup: one file handler (log.txt) plus a console handler.
import logging

logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)

# Guard against re-adding handlers when this same setup code runs again
# later in the file — the original registered the handlers multiple times,
# so every message was written to log.txt more than once.
if not logger.handlers:
    # encoding='utf-8' keeps the Chinese log text readable regardless of
    # the platform's default locale encoding.
    handler = logging.FileHandler('log.txt', 'a', encoding='utf-8')
    handler.setLevel(logging.DEBUG)

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # add formatter to handler
    handler.setFormatter(formatter)

    # create console handler and set level to debug
    stream_hander = logging.StreamHandler()
    stream_hander.setLevel(logging.DEBUG)

    # add file handler and stream handler to logger
    logger.addHandler(handler)
    logger.addHandler(stream_hander)

from datetime import datetime
import time
import numpy as np

# Live view of the module namespace, used by get_variable_name()/prt()/out().
# At module level locals() IS the module's globals dict.
loc = locals()


def get_variable_name(variable):
    """Best-effort reverse lookup: return the module-level name bound to a
    value equal to *variable*, or None when no such global exists.

    numpy arrays are compared with np.array_equal, which safely handles
    arrays of different shapes and empty arrays — the original
    ``all(loc[key] == variable)`` raised on shape mismatches and, because
    ``all([])`` is True, wrongly matched unrelated empty arrays.
    """
    # At module level locals() is the live globals() dict, so iterating
    # globals() here is equivalent to the original module-level `loc` snapshot.
    for key, value in globals().items():
        if type(value) != type(variable):  # cheap filter before any comparison
            continue
        if isinstance(value, np.ndarray):
            if np.array_equal(value, variable):
                return key
        elif value == variable:
            return key
    return None  # explicit: no global holds an equal value


def prt(vary):
    """Print *vary* as '<name>=<value>', resolving the name via get_variable_name."""
    label = get_variable_name(vary)
    return print(f'{label}={vary}')


def out(vary):
    """Return '<name>=<value>' for *vary* (handy for logger.info interpolation)."""
    return '{}={}'.format(get_variable_name(vary), vary)


# Script start marker: timestamp and wall-clock start time.
print(datetime.now())
start = time.time()

# Logging setup (duplicated from the top of the file).  Handlers are now
# added only when the logger has none yet: the original called
# logger.addHandler(handler) twice in a row here AND re-added handlers
# already registered above, so every message was written to log.txt
# several times.
import logging

# create logger
logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)

if not logger.handlers:
    # create file handler; encoding='utf-8' so Chinese text is not mangled
    # by the locale's default encoding.
    handler = logging.FileHandler('log.txt', 'a', encoding='utf-8')
    handler.setLevel(logging.DEBUG)

    # create formatter and attach it
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)

    # console handler mirrors everything to the terminal
    stream_hander = logging.StreamHandler()
    stream_hander.setLevel(logging.DEBUG)

    logger.addHandler(handler)
    logger.addHandler(stream_hander)

import json
import base64
import requests
import paddlehub as hub
from jieba.analyse import *  # keyword extraction (extract_tags)

from datetime import datetime
import time
import numpy as np

# Live view of the module namespace, used by get_variable_name()/prt()/out().
loc = locals()

def get_variable_name(variable):
    """Return the module-level name whose value equals *variable*, else None.

    Do NOT call prt() from inside this function — prt() calls back into it
    and could loop forever.

    numpy arrays are compared with np.array_equal instead of the original
    ``all(loc[key] == variable)``: the latter raised on shape-mismatched
    arrays (broadcast error) and matched any pair of empty arrays because
    ``all([])`` is True.  Non-array values keep plain ``==`` comparison;
    the type check up front prevents the ndarray "truth value is ambiguous"
    ValueError the original comments mention.
    """
    # locals() taken inside this function would only see the function scope;
    # globals() is the live module namespace (same dict as the module-level
    # `loc = locals()` snapshot).
    for key, value in globals().items():
        if type(value) != type(variable):  # necessary pre-filter
            continue
        if isinstance(value, np.ndarray):
            if np.array_equal(value, variable):
                return key
        elif value == variable:
            return key
    return None


def prt(vary):
    """Echo *vary* to stdout in 'name=value' form.

    The name is recovered from the module globals by get_variable_name;
    literals passed directly print 'None=<value>'.
    """
    name = get_variable_name(vary)
    value = vary
    return print(f'{name}={value}')


def out(vary):
    """Format *vary* as 'name=value' and return the string (no printing)."""
    name = get_variable_name(vary)
    return f'{name}={vary}'


def get_img_base64str(image):
    """Read the file at path *image* and return its bytes base64-encoded as str."""
    with open(image, 'rb') as fp:
        raw = fp.read()
    return base64.b64encode(raw).decode()


def get_access_token(APP_ID, API_KEY, SECRET_KEY):
    """Fetch a Baidu AIP OAuth2 access token.

    Args:
        APP_ID: unused; kept for backward compatibility with existing callers.
        API_KEY: Baidu API key (sent as client_id).
        SECRET_KEY: Baidu secret key (sent as client_secret).

    Returns:
        str: the access token, or '' when the response is not JSON or
        carries no 'access_token' field (e.g. bad credentials).
    """
    params = {
        "grant_type": "client_credentials",
        'client_id': API_KEY,
        'client_secret': SECRET_KEY, }
    token_url = 'https://aip.baidubce.com/oauth/2.0/token'
    # timeout keeps a dead endpoint from hanging the whole script forever.
    res = requests.get(token_url, params=params, timeout=10)
    try:
        data = res.json()
        return data['access_token']
    except (ValueError, KeyError):
        # ValueError: body is not JSON; KeyError: error response without an
        # 'access_token' field.  The original bare `except:` hid real bugs.
        return ''


def get_allchars(dataset):
    """Concatenate the text of every poem (row column 4), newlines removed."""
    # Equivalent to accumulating each cleaned poem in a list and joining.
    return ''.join(row[4].replace('\n', '') for row in dataset)


def get_dict(allchars):
    """Build char<->id vocabulary mappings ordered by descending frequency.

    Args:
        allchars (str): concatenation of every character in the corpus.

    Returns:
        tuple[dict, dict]: (char2id_dict, id2char_dict) where ids are
        assigned in order of decreasing character frequency; ties keep
        first-appearance order (sorted() is stable over insertion order).
    """
    # Count occurrences of each character.
    char_freq_dict = dict()
    for char in allchars:
        if char not in char_freq_dict:
            char_freq_dict[char] = 0
        char_freq_dict[char] += 1
    # Most frequent characters get the smallest ids.
    char_freq = sorted(char_freq_dict.items(), key=lambda x: x[1], reverse=True)
    char2id_dict = dict()
    id2char_dict = dict()
    # enumerate replaces the original index loop; the dead `n = 0` local
    # has been removed.
    for i, (char, _freq) in enumerate(char_freq):
        char2id_dict[char] = i
        id2char_dict[i] = char
    return char2id_dict, id2char_dict


def is_chinese(string):
    """
    Check whether the whole string consists of Chinese characters.
    Args:
        string (str): text to check; a space (or any non-CJK character)
            makes the result False, as does a length of 0 or 1.
    Return
        bool
    """
    # Length 0 or 1 is rejected outright, matching the original behaviour
    # (single characters are not useful keywords here).
    if len(string) <= 1:
        return False
    # Every character must fall inside the CJK Unified Ideographs block.
    return all(u'\u4e00' <= ch <= u'\u9fff' for ch in string)


def get_data(chars, char2id_dict, dataset, length=None):
    """Encode every poem into a fixed-length sequence of token ids.

    Each poem (column 4 of a dataset row, newlines stripped) is mapped to
    ids, terminated with '<END>', truncated to the last length-1 tokens,
    prefixed with '<START>' and left-padded with '</s>'.

    Args:
        chars: unused; kept for backward compatibility with existing callers.
        char2id_dict (dict): char -> id mapping; must contain the special
            tokens '<START>', '<END>' and '</s>'.
        dataset (list): rows whose index 4 holds the poem text.
        length (int | None): target sequence length.  Defaults to the
            module-level constant ``L`` (previously hard-coded), so existing
            calls behave exactly as before.

    Returns:
        np.ndarray: array of shape (len(dataset), length) of token ids.
    """
    if length is None:
        length = L  # fall back to the module-level sequence length
    data_list = [None] * len(dataset)
    for i in range(len(dataset)):
        adata = dataset[i][4].replace('\n', '')
        row = [char2id_dict[char] for char in adata]
        row += [char2id_dict['<END>']]
        # Keep at most length-1 trailing tokens so '<START>' still fits.
        row = row[-(length - 1):]
        row = [char2id_dict['<START>']] + row
        # Left-pad with '</s>' up to the fixed length.
        row = [char2id_dict['</s>']] * (length - len(row)) + row
        data_list[i] = row
    return np.array(data_list)


#### End of the function-definition area; main script starts here ####
# Record the script start time; total runtime is reported at the end.
# logger.info("开始start："+out(datetime.now()))
# logger.info(f"开始start：{datetime.now()}")
print(datetime.now())
# logger.info(f"开始start：")
start = time.time()

#
# '''
# 通用物体和场景识别
# '''
# # request_url = "https://aip.baidubce.com/rest/2.0/image-classify/v2/advanced_general"
# # image = './test/yh.jpg'
# # img=get_img_base64str(image)
# # params = {"image":img}
# # #需要申请百度api，获取相关以下信息，目前个人也可免费申请。
# # # APP_ID = '******'
# # # # API_KEY = '*******'
# # # # SECRET_KEY = '*******'
# # APP_ID = '24700006'
# # API_KEY = 'Pb5STk9WbPzDsybIOmyLwn9f'
# # SECRET_KEY = 'dlUcBOEH9oq4zTttPtFyaStLM69G1vHv'
# # access_token = get_access_token(APP_ID, API_KEY, SECRET_KEY)
# # request_url = request_url + "?access_token=" + access_token
# # headers = {'content-type': 'application/x-www-form-urlencoded'}
# # response = requests.post(request_url, data=params, headers=headers)
# # # module_image = hub.Module(name="xception71_imagenet")
# # # test_img_path = image
# # # input_dict = {"image": [test_img_path]}
# # # results_image = module_image.classification(data=input_dict)
# # # PictureClassification = list(results_image[0][0].keys())[0]
# # # from translate import Translator
# # # translator = Translator(to_lang="chinese")
# # # PictureClassification_ch = translator.translate("{}".format(PictureClassification))
# #
# # #分类词如何锁定关键字？ 找出关键词汇？
# # #是不是不如全部汇总一起拍平了比较数量、占比？不使用联系。
# #
# # if response:
# #     result_num=response.json()['result_num']
# #     work_num=int(result_num)+1
# # else:
# #     work_num=1
# # #work_num=1 #若不使用百度api，可以将百度api相关代码注释掉，启用此句，应该也是可以跑通的，相当于只使用xception71_imagenet模型分类。
# # # total_max_icount=[0]
# # # total_result=[(0,0)]    # j记录诗歌id。#icount记录字匹配量。
# # #到作用域外
# # words = []
# # for  j in range(work_num-1):
# #     # if j==0:
# #     #     PictureClassification_ch=PictureClassification_ch
# #     # else:
# #     #     PictureClassification_ch=response.json()['result'][j-1]['keyword']
# #     print("j=", j)
# #     PictureClassification_ch = response.json()['result'][j]['keyword']
# #     print("PictureClassification_ch=", PictureClassification_ch)
# #     #弃用联想词：
# #     # module_similar = hub.Module(name="ernie_gen_couplet")
# #     # texts = ["{}".format(PictureClassification_ch)]
# #     # results = module_similar.generate(texts=texts, use_gpu=True, beam_width=20)
# #
# #
# #     #重新处理words：
# #     if (is_chinese(PictureClassification_ch)):
# #         words.append(PictureClassification_ch)
# #     # words = []
# #     # for item in range(20):
# #     #     if (is_chinese(results[0][item])):
# #     #         words.append(results[0][item])
# # print("words=",words) #words= ['日本樱花', '樱花树', '樱花盛开', '晚樱', '樱花']
# # strwords="".join(words)
# # print("strwords=",strwords)
# # words_dict=get_dict(strwords)
# # setwords=set(strwords)
# # words_set=get_dict(setwords)
# words= ['日本樱花', '樱花树', '樱花盛开', '晚樱', '樱花']
# strwords= '日本樱花樱花树樱花盛开晚樱樱花'
# setwords= {'樱', '盛', '日', '花', '本', '开', '树', '晚'}
# #测试: 效果不好，还是需要词的匹配，关键字，还是需要的
# # 4万多首（可能简繁重复）诗中就一首含樱花的：何处哀筝随急管，樱花永巷垂杨岸。
# Character set distilled from the (commented-out) Baidu image-recognition
# keywords above; poems are scored by how many of these characters they contain.
setwords = set('日本开樱树花盛晚')
# print("setwords=",setwords)
#
# #分析词：
# data=strwords
# weight_words=[(0,0)]
# for keyword, weight in extract_tags(data, withWeight=True):
#     print('%s %s' % (weight,keyword))
#     weight_words.append((weight, keyword))
# print("weight_words=",weight_words)
# # weight_words.sort()      #  AttributeError: 'tuple' object has no attribute 'sort'
# # weight_words=sorted(weight_words, key=lambda x: (x[0], x[1]),reverse=True)  #反序。 #TypeError: 'float' object is not subscriptable
# weight_words=sorted(weight_words, key=lambda x: (x,),reverse=True)  #反序。
# print("weight_words=",weight_words)
# first_keyword=weight_words[0][1]

# first_keyword='樱'  # alternative: check how many poems contain just '樱'
# Keyword used below to select candidate poems from the corpus.
first_keyword = '樱花'
print("first_keyword=", first_keyword)

### Poem corpus preparation ###
# Load the Tang-poetry corpus and build the character vocabulary.
dataset = json.load(open('唐诗.json', encoding="utf-8"))
allchars = get_allchars(dataset)
char2id_dict, id2char_dict = get_dict(allchars)
# Register the three special tokens just past the regular vocabulary ids.
for _offset, _token in enumerate(('</s>', '<START>', '<END>')):
    _token_id = 7394 + _offset
    char2id_dict[_token] = _token_id
    id2char_dict[_token_id] = _token
L = 125
data = get_data(allchars, char2id_dict, dataset)

# Accumulators for the keyword-match scan below.
result = [(0, 0)]        # (poem index, match count) candidates
max_icount = [0]         # running best match counts
total_max_icount = [0]
total_result = [(0, 0)]
sent_keys = []           # poem sentences found to contain the keyword
Poetrys = []             # found sentences and their generated continuations
emotion_results2 = []    # sentiment-analysis results placeholder
continue_flag = False    # set when an iteration bails out via continue
# for j in range(len(dataset)):   #j取每首诗
# Main loop over every poem: poems containing the keyword get a generated
# "second half" from the ernie_gen_poetry model; every poem is also scored
# by how many keyword characters (setwords) its encoded text contains.
for m in range(1, len(dataset)):  # m is the 1-based poem id; j below is its 0-based index
    ### Keyword search inside the poem text ###
    # print("j=", j)
    if continue_flag:
        prt(f'循环首 m={m}')  # debug: the previous iteration bailed out via continue
        continue_flag = False
    j = m - 1  # so that m matches each poem's id
    if dataset[j][4].find(first_keyword) == -1:  # skip poems that do not contain the keyword
        # break
        # print("j=",j)
        continue  # proceed with the next poem
        # print("j=", j)
    else:
        # Extract the sentence that contains the keyword, using '。' as the
        # delimiter.  Containing it once is already rare; a loop could find
        # multiple occurrences, but only the first is used here.
        poetxt = dataset[j][4]
        loc_key = poetxt.find(first_keyword)  # keyword position
        loc_start = poetxt[:loc_key].rfind("。")  # rightmost '。' before the keyword, i.e. just before the sentence start (a newline may also be present)
        loc_end = poetxt[loc_key:].find("。")  # first '。' after the keyword
        sent_key = poetxt[loc_start + 1:loc_key + loc_end + 1]  # +1 keeps the trailing '。'
        sent_key = sent_key.replace('\n', '')  # strip newlines (usually at the sentence start)
        # print("sent_key=", sent_key)
        sent_keys.append(sent_key)
        # 5. Classical-poetry generation
        module = hub.Module(name="ernie_gen_poetry")  # model that continues a classical poem
        FirstPoetry = []
        FirstPoetry.append(sent_key)
        # if  FirstPoetry==['二月草菲菲，山樱花未稀。']:
        #     # FirstPoetry=['菲菲二月草，未稀山樱花。']
        #     FirstPoetry = ['山樱花未稀，二月草菲菲。']
        # if FirstPoetry == ['听时坐部音中有，唱后樱花叶里无。']:
        #     # FirstPoetry = ['音中有听时坐部，叶里无唱后樱花。'] #也不行。['音中有听时坐部，叶里无唱后樱花'], ['（见《吟窗杂录》卷十四《历代吟谱']
        #     # FirstPoetry = ['坐部音中有听时，樱花叶里无唱后。']
        #     FirstPoetry = ['唱后樱花叶里无，听时坐部音中有。']
        # test_texts = FirstPoetry  # 使用上阕生成下阕
        # print("test_texts=", test_texts)
        # logger.info(out(test_texts))
        # logger.info(prt(test_texts)) #无日志输出。
        # If the generated half contains '《' the input gets reworked: swap
        # the first half's sub-sentences once and regenerate; if '《' is
        # still present, give up on this poem.
        # Poetrys = [['二月草菲菲，山樱花未稀'], ['（见《吟窗杂录》卷十四正']]
        # Poetrys = [['\n何处哀筝随急管，樱花永巷垂杨岸'], ['一声肠断，万里春归人不见']]
        # Poetrys = [['听时坐部音中有，唱后樱花叶里无'], ['（见《吟窗杂录》卷十四正']]  #此上阙无论怎么更换词顺序、前后半句，都生成下阙仍然含有“《”不变。这类情况需要放弃。
        # Poetrys = [['春风急，樱花杨柳雨凄凄'], ['无限好花狼藉，一时分付与']]
        # flag=True
        deal_count = 0  # times '《' was found in the output: 0/1 keep retrying, 2 give up, -1 success
        SecondPoetry = []
        # SecondPoetry2=[]
        while deal_count == 0 or deal_count == 1:
            prt(deal_count)
            test_texts = FirstPoetry  # generate the second half from the first half
            # prt(FirstPoetry)
            print(f"326:FirstPoetry={FirstPoetry}")
            # poet_results = module.generate(texts=test_texts, use_gpu=True,beam_width=5)  # 只有逗号句号会报错：ValueError: too many values to unpack (expected 2)
            # poet_results = module.generate(texts=test_texts, use_gpu=True, beam_width=7)   #use_gpu has been set False as you didn't set the environment variable CUDA_VISIBLE_DEVICES while using use_gpu=True
            poet_results = module.generate(texts=test_texts, use_gpu=False,
                                           beam_width=5)  # Process finished with exit code -1073741795 (0xC000001D)
            #             2021-08-31 11:00:51,607] [ WARNING] - The input text: 春风急，樱花杨柳雨凄凄。, is no antithetical parallelism, which may result in magic output
            # [2021-08-31 11:00:55,386] [ WARNING] - The input text: 樱花杨柳雨凄凄，春风急。, is no antithetical parallelism, which may result in magic output
            # [2021-08-31 11:01:04,857] [ WARNING] - The input text: 春风急，樱花杨柳雨凄凄。, is no antithetical parallelism, which may result in magic output
            # for poet_result in poet_results:  #输出多个结果
            #     print(poet_results)
            # prt(poet_results)
            # logger.info(f"poet_results={poet_results}")
            # SecondPoetry = ["{:.12}".format(poet_results[0][0])]   #只取第一个结果。
            # SecondPoetry2 = ["{:.16}".format(poet_results[0][0])] #对beam_width=5好像没什么意义。
            # Dynamic second-half length:
            SecondPoetry = [f"{poet_results[0][0][:len(FirstPoetry[0])]}"]  # truncated to the first half's length
            # prt(SecondPoetry)
            # print(f"342:SecondPoetry={SecondPoetry}")
            # prt(SecondPoetry2)
            # 要排除无文字的符号输出：此处好像没有起到解决的效果：引起问题：ValueError: too many values to unpack (expected 2)
            # 只是在词组组合中，跳不出循环吧？
            # if SecondPoetry=="，。，。，。，。，。，。":  #应该是没起作用，是list，不是str。
            # if SecondPoetry == ['，。，。，。，。，。，。']:
            #     break
            # Poetrys = []

            if SecondPoetry[0].find('《') != -1:  # '《' in the output: swap the first half's sub-sentences and retry; give up after the second failure
                # Swap the order of the two sub-sentences.
                # print(f"353应该含有《的下阙SecondPoetry[0].find('《')={SecondPoetry[0].find('《')}")
                # print(f"354:SecondPoetry[0]={SecondPoetry[0]}")
                # prt(SecondPoetry[0])
                # prt(SecondPoetry)
                # print(f"357:FirstPoetry[0]={FirstPoetry[0]}")
                position1 = FirstPoetry[0].find('，')
                position2 = FirstPoetry[0].find('。')
                # Not handled when either separator is missing.
                FirstPoetry[0] = FirstPoetry[0][position1 + 1:position2] + '，' + FirstPoetry[0][:position1] + '。'
                # print(f"362:FirstPoetry[0]={FirstPoetry[0]}")
                deal_count += 1  # distinguishes first retry from give-up
                # prt(deal_count)
                # print(f"365:deal_count={deal_count}")
                # prt(FirstPoetry)
            else:
                deal_count = -1
                prt(deal_count)
        # Results that still contain '《' must be dropped:
        if deal_count == 2:  # still '《' after the 2nd attempt: skip this poem
            prt(f'continue 前 m={m}')
            # The m value is only reported at the top of the next iteration; flag it.
            continue_flag = True
            continue
            prt(f'continue 紧后 m={m}')  # NOTE(review): unreachable — directly follows the continue above
        if continue_flag:
            prt(f'continue 后 m={m}')
            continue_flag = False

        Poetrys.append(FirstPoetry)
        if SecondPoetry[0].find('。') == -1:  # append a closing '。' when the second half lacks one
            SecondPoetry[0] = SecondPoetry[0] + '。'
        Poetrys.append(SecondPoetry)

        # #长度不同作为另一篇
        # Poetrys.append(FirstPoetry)
        # Poetrys.append(SecondPoetry2)
        # prt(Poetrys)    #Poetrys=[['听时坐部音中有，唱后樱花叶里无'], ['（见《吟窗杂录》卷十四正']]
        # logger.info(f"FirstPoetry={FirstPoetry}")
        # logger.info(f"SecondPoetry={SecondPoetry}")
        # logger.info(f"SecondPoetry2={SecondPoetry2}")

    # print("j=", j)
    if continue_flag:
        prt(f'循环中部 m={m}')
        continue_flag = False
    # print("m=", m)
    # Score the poem: decode its fixed-length id row back into characters
    # and count every keyword character.
    txt = []
    for i in range(L):
        txt += [id2char_dict[data[j, i]]]
    icount = 0
    for item in setwords:
        icount = icount + txt.count(item)
    # Best-score bookkeeping (kept exactly as written; note the
    # unconditional append at the bottom also records every poem's score).
    if max_icount[len(max_icount) - 1] < icount:
        max_icount[0] = icount
        max_icount.sort()
        # result[0] = (j, icount)
        result.append((j, icount))  # append instead of replacing
        # result=sorted(result, key=lambda x: (x[1], x[0]))
        total_max_icount[0] = icount
        # total_result[0]  = (j, icount) #只加不替换
        # total_result.append((j, icount))#只加不替换
        total_max_icount.sort()
        total_result = sorted(total_result, key=lambda x: (x[1], x[0]))
    else:
        if max_icount[len(max_icount) - 1] == icount and icount != 0:
            if max_icount[0] < icount:
                max_icount[0] = icount
                max_icount.sort()
                # result[0] = (j, icount)
                # result = sorted(result, key=lambda x: (x[1], x[0]))
                total_max_icount[0] = icount
                # total_result[0] = (j, icount)
                total_max_icount.sort()
                # total_result = sorted(total_result, key=lambda x: (x[1], x[0]))
            else:
                max_icount.append(icount)
                total_max_icount.append(icount)
                if result[0][0] == 0 and result[0][1] == 0:
                    result[0] = (j, icount)
                    total_result[0] = (j, icount)
                else:
                    result.append((j, icount))
                    total_result.append((j, icount))
    # print("max_icount=", max_icount)
    # result[0] = (j, icount)
    result.append((j, icount))  # every poem's (index, score) pair is recorded
    result = sorted(result, key=lambda x: (x[1], x[0]))
    total_max_icount[0] = icount
    # total_result[0]  = (j, icount) #只加不替换
    total_result.append((j, icount))  # appended unconditionally as well
# Bookkeeping for the (currently disabled) sentiment-based ranking below.
max_positive = 0
max_total_result = [0, 0, "", 0, 0]  # [poem index, match count, sentiment key, positive prob, negative prob]
print("len(total_result)=", len(total_result))
# prt(sent_keys)   # first real use of the prt helper
# prt(len(total_result)) #ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
num_total_result = len(total_result)
# prt(num_total_result)
# for k in range(len(total_result)):
#     # print("诗全部信息dataset[total_result[k]][0]]=", dataset[total_result[k][0]])
#     # print("匹配数量total_result[k]][1]=", total_result[k][1])
#     # print("诗内容dataset[total_result[k]][0]][4]=", dataset[total_result[k][0]][4])
#     #加上情感分析：
#     senta = hub.Module(name="senta_bow")
#     test_text =["{}".format(dataset[total_result[k][0]][4])]       #新手处理方式，估计还有更好的，期待能被指正吧。
#     emotion_results = senta.sentiment_classify(texts=test_text, use_gpu=False, batch_size=1)
#     for emotion in emotion_results:
#         if max_positive<=emotion['positive_probs']:
#               max_positive=emotion['positive_probs']
#               print("total_result[k][0]=",total_result[k][0])  #诗的ID与索引差1.
#               max_total_result=[total_result[k][0],total_result[k][1],emotion['sentiment_key'],emotion['positive_probs'],emotion['negative_probs']]
#         print("该诗的情感倾向：",emotion['sentiment_key'])
#         print("积极的情感倾向概率：",emotion['positive_probs'])
#         print("消极的情感倾向概率：",emotion['negative_probs'])
#     # #增加类原配诗情感分析：暂不排除含有“《”。
# # test_text = ["{}".format(Poetrys[k][0] + Poetrys[k + 1][0] )]  #跨诗了。
# if len(Poetrys)>2*k:  #防止越界
#     test_text2 = ["{}".format(Poetrys[2*k][0]+Poetrys[2*k+1][0] ) ]    #差2倍关系。
#     prt(test_text2)
#     logger.info(f"out(test_text2)={out(test_text2)}")
#     emotion_results2 = senta.sentiment_classify(texts=test_text2, use_gpu=False, batch_size=1)
#     for emotion2 in emotion_results2:
#         emotion2_sentiment_key=emotion2['sentiment_key']      #变量才能使用out，变量的key或value一部分可能不行了：None。
#         emotion2_positive_probs =emotion2['positive_probs']
#         emotion2_negative_probs = emotion2['negative_probs']
#         # print("该诗的情感倾向：",emotion2['sentiment_key'])
#         # print("积极的情感倾向概率：",emotion2['positive_probs'])
#         # print("消极的情感倾向概率：",emotion2['negative_probs'])
#         # logger.info("该诗的情感倾向："+out(emotion2['sentiment_key'])) #报错

#         # logger.info(f"该诗的情感倾向：{out(emotion2_sentiment_key)}")
# logger.info(f"积极的情感倾向概率：{out(emotion2_positive_probs)}")
# logger.info(f"消极的情感倾向概率：{out(emotion2_negative_probs)}")

# ##最后输出最佳匹配的诗：
# # print("诗全部信息dataset[max_total_result[0]]=", dataset[max_total_result[0]])
# # print("匹配数量total_result[k]][1]=", max_total_result[1])
# # print("诗内容dataset[max_total_result[0]][4]=", dataset[max_total_result[0]][4])
# # print("该诗的情感倾向：", max_total_result[2])
# # print("积极的情感倾向概率：", max_total_result[3])
# # print("消极的情感倾向概率：", max_total_result[4])
# logger.info(f"诗全部信息dataset[max_total_result[0]]={dataset[max_total_result[0]]}")
# logger.info(f"匹配数量total_result[k]][1]={max_total_result[1]}")
# logger.info(f"诗内容dataset[max_total_result[0]][4]={dataset[max_total_result[0]][4]}")
# logger.info(f"该诗的情感倾向：{max_total_result[2]}")
# logger.info(f"积极的情感倾向概率：{max_total_result[3]}")
# logger.info(f"消极的情感倾向概率：{max_total_result[4]}")

# weight_words=[(0,0)]
# data=dataset[max_total_result[0]][4]  #对诗内容进行关键词分析。 暂不处理
# for keyword, weight in extract_tags(data, withWeight=True):
#     print('%s %s' % (weight,keyword))
#     weight_words=(weight, keyword)
# print("weight_words=",weight_words)
# # weight_words.sort() #AttributeError: 'tuple' object has no attribute 'sort'
# # print("weight_words=",weight_words)

# # print ("response.json()=",response.json())
# # print("setwords=",setwords)
# # print("words=",words)
# prt(Poetrys)
# logger.info(f"Poetrys={Poetrys}")
print(f"Poetrys={Poetrys}")
# Sentiment analysis over each (found sentence, generated reply) pair.
# The senta model is loaded lazily and ONCE, instead of being re-created on
# every iteration (loading a PaddleHub module is expensive); as before, it
# is not loaded at all when Poetrys is empty.
senta = None
for k in range(int(len(Poetrys) / 2)):
    if senta is None:
        senta = hub.Module(name="senta_bow")  # sentiment-classification model
    # Poetrys stores first/second halves at indices 2k and 2k+1.
    test_text2 = ["{}".format(Poetrys[2 * k][0] + Poetrys[2 * k + 1][0])]
    print(f"test_text2={test_text2}")
    emotion_results2 = senta.sentiment_classify(texts=test_text2, use_gpu=False, batch_size=1)
    for emotion2 in emotion_results2:
        emotion2_sentiment_key = emotion2['sentiment_key']
        emotion2_positive_probs = emotion2['positive_probs']
        emotion2_negative_probs = emotion2['negative_probs']
        print(f"该诗的情感倾向：emotion2_sentiment_key={emotion2_sentiment_key}")
        print(f"积极的情感倾向概率：emotion2_positive_probs={emotion2_positive_probs}")
        print(f"消极的情感倾向概率：emotion2_negative_probs={emotion2_negative_probs}")

# Script end: report total wall-clock runtime.  The original read
# time.time() twice and printed two near-identical timing lines with
# slightly different values; a single timestamp now feeds both messages
# so they agree.
end = time.time()
print(f"The end Running time: {(end - start):.2f} Seconds,{(end - start) / 60:.2f} Minutes")
print(f"Running time: {(end - start):.2f} Seconds,{(end - start) / 60:.2f} Minutes")