import asyncio
import time

from aiochclient import ChClient
from aiohttp import ClientSession
import lcs
import pandas as pd

# DDL for the per-run working tables. All three are Memory-engine scratch
# tables that main() drops and recreates at startup (and drops again when the
# extraction loop finishes), so a crashed previous run cannot leave stale rows.

# Scratch copy of the raw author rows currently being processed.
tempTableDelete = "DROP TABLE IF EXISTS datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp"
tempTableCreate = ("CREATE TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp ("
                   " `uuid` String,"
                   " `guid` String,"
                   " `name` Nullable(String),"
                   " `organization` Nullable(String),"
                   " `Second_organization` Nullable(String),"
                   " `email` Nullable(String),"
                   " `reprintauthor` Nullable(String),"
                   " `altname` Nullable(String),"
                   " `country` Nullable(String),"
                   " `firstauthor` Nullable(String),"
                   " `organizationdept` Nullable(String)"
                   " )"
                   " ENGINE = Memory"
                   )
# Scratch table collecting the author rows that will be merged into a single
# expert record (same schema as the table above).
ExpertNewTmpDelete = "DROP TABLE IF EXISTS datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew_Tmp"
ExpertNewTmpCreate = ("CREATE TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew_Tmp ("
                      " `uuid` String,"
                      " `guid` String,"
                      " `name` Nullable(String),"
                      " `organization` Nullable(String),"
                      " `Second_organization` Nullable(String),"
                      " `email` Nullable(String),"
                      " `reprintauthor` Nullable(String),"
                      " `altname` Nullable(String),"
                      " `country` Nullable(String),"
                      " `firstauthor` Nullable(String),"
                      " `organizationdept` Nullable(String)"
                      " )"
                      " ENGINE = Memory"
                      )
# Scratch table for (expert uuid, paper guid) relationship pairs.
ExpertsRelationshipTmpDelete = "DROP TABLE IF EXISTS datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship_Tmp"
ExpertsRelationshipTmpCreate = ("CREATE TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship_Tmp ("
                                " `uuid` String,"
                                " `guid` String"
                                " )"
                                " ENGINE = Memory"
                                )


async def main():
    """Entry point: rebuild the expert tables starting from one seed e-mail.

    Steps (all against the `datahouse` ClickHouse database):
      1. wipe the expert, relationship and already-processed tracking tables;
      2. make sure the optional expert columns exist;
      3. recreate the three Memory scratch tables;
      4. run executeSourceEmail for the seed address, then once more for every
         distinct e-mail that first pass discovered;
      5. drop the scratch tables and run the three merge passes.
    """
    async with ClientSession() as s:
        # client = ChClient(s, url='http://115.29.55.141:8123/', user='zxcs', password='123123', database='datahouse')
        # NOTE(review): credentials are hard-coded in source — move them to
        # configuration / environment variables before sharing this file.
        client = ChClient(s, url='http://192.168.1.33:8123/', user='default', password=':Clg2rkb', database='datahouse')
        alive = await client.is_alive()  # returns True if connection is Ok

        # For testing: empty the expert table and the expert-relationship table
        await client.execute("ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew DELETE WHERE 1=1")
        await client.execute(
            "ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship DELETE WHERE 1=1")
        # Clear the record of rows that were already processed in earlier runs
        await client.execute("ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Been_Dealt DELETE WHERE 1=1")

        # Add the optional expert columns if they do not exist yet
        await client.execute(
            "alter table datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew add column if not exists keywords Nullable(String)"
        )
        await client.execute(
            "alter table datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew add column if not exists subjects Nullable(String)"
        )
        await client.execute(
            "alter table datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew add column if not exists journals Nullable(String)"
        )
        await client.execute(
            "alter table datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew add column if not exists source_email Nullable(String)"
        )

        # Create the raw-data scratch table
        await client.execute(tempTableDelete)
        await client.execute(tempTableCreate)
        # Create the expert-relationship scratch table
        await client.execute(ExpertsRelationshipTmpDelete)
        await client.execute(ExpertsRelationshipTmpCreate)
        # Create the expert scratch table
        await client.execute(ExpertNewTmpDelete)
        await client.execute(ExpertNewTmpCreate)

        sourceEmail = 'shibojiang@fudan.edu.cn'
        await executeSourceEmail(client, sourceEmail)
        # time.sleep(1)
        # Collect every other e-mail produced by the seed pass and process each
        # one in turn (longest addresses first).
        # NOTE(review): sourceEmail/email are spliced into SQL by concatenation
        # with no escaping — confirm upstream values cannot contain quotes.
        sql0 = (
                "SELECT email from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                "where email is not null and email != '' " +
                "and email != '" + sourceEmail + "' " +
                "and source_email = '" + sourceEmail + "' " +
                "group by email order by LENGTH(email) desc "
        )
        sql0_rows = await client.fetch(sql0)
        for item in sql0_rows:
            itemSourceEmail = item['email']
            await executeSourceEmail(client, itemSourceEmail)
        # Drop the scratch tables
        await client.execute(ExpertNewTmpDelete)
        await client.execute(tempTableDelete)
        await client.execute(ExpertsRelationshipTmpDelete)

        # Deduplicate and merge name/organization substrings
        await mergeSubstring(client)

        # Merge organization / second-organization substrings
        await mergeSecondSubstring(client)

        # Merge rows whose linked papers are duplicated
        await mergeSameRelationship(client)


async def mergeSubstring(client):
    """Merge expert rows that duplicate each other by name or organization.

    Two symmetric passes:
      * Pass 1 — for each organization that occurs more than once, merge rows
        whose names are identical or substrings (lcs.isLCS) of one another.
      * Pass 2 — for each name that occurs more than once, merge rows whose
        organizations are identical or substrings of one another.

    For every merge group, the kept uuid absorbs the relationship rows of the
    merged uuids, then the merged uuids are deleted from both the relationship
    table and the expert table.

    NOTE(review): the positional `values[i][n]` indexing below assumes the
    DataFrame columns are ordered alphabetically (length, name, uuid in pass 1;
    length, organization, uuid in pass 2). pandas < 0.23 sorted dict-row
    columns that way; newer pandas preserves insertion order — confirm the
    pinned pandas version, otherwise the wrong column is read as the uuid.
    NOTE(review): SQL is built by string concatenation without escaping the
    fetched name/organization values — verify they cannot contain quotes.
    """
    # Pass 1: organizations that occur more than once
    sql1 = (
            " SELECT `organization` from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew  " +
            " group by organization " +
            " HAVING COUNT() > 1 " +
            " order by LENGTH(organization) desc "
    )
    sql1_rows = await client.fetch(sql1)
    if sql1_rows is not None and len(sql1_rows) > 0:
        for sql1_rows_item in sql1_rows:
            print(sql1_rows_item['organization'])
            # All rows sharing this organization, with the name length for sorting
            sql2 = (
                    " select uuid,`name`,LENGTH(name) as `length` from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                    " where organization = '" + sql1_rows_item['organization'] + "' "
            )
            sql2_rows = await client.fetch(sql2)
            df = pd.DataFrame(sql2_rows)

            # 1. Drop duplicate uuids, sort by name length descending, reset the index
            df1 = df.drop_duplicates(['uuid']).sort_values(by='length', ascending=False).reset_index(drop=True)

            # 2. Handle rows whose names are exactly identical.
            # duplicated(keep='last') flags every occurrence of a name except
            # the last one; the first un-flagged row after a run of flagged
            # rows is treated as the survivor and its uuid keys the group.
            # NOTE(review): this run-grouping assumes equal names end up
            # adjacent after the length sort; different names of equal length
            # can interleave — confirm this cannot mix distinct names.
            df2 = df1['name'].duplicated(keep='last')
            sameBegin = False
            sameDict = {}
            sameArr = []
            for i in range(len(df1)):
                df2_item = df2[i]
                df1_item = df1.values[i]
                if df2_item:
                    sameBegin = True
                    sameArr.append(df1_item)
                if sameBegin and not df2_item:
                    sameBegin = False
                    sameDict[df1_item[2]] = sameArr
                    sameArr = []
            if len(sameDict.keys()) > 0:
                # print(sameDict)
                for uuid in sameDict.keys():
                    itemArr = sameDict[uuid]
                    updateUuidArr = []
                    for i in range(len(itemArr)):
                        itemData = itemArr[i]
                        updateUuidArr.append("'" + itemData[2] + "'")
                    # Re-point the merged uuids' relationship rows at the survivor
                    sql2_1 = (
                            " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " SELECT '" + uuid + "' ,guid from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            "where uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql2_1)
                    # Remove the old relationship rows of the merged uuids
                    sql2_2 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql2_2)
                    # Remove the merged uuids from the expert table
                    sql2_3 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql2_3)

            # Handle names that are substrings of each other
            df3 = df1.drop_duplicates(['name'], keep='last').sort_values(by='length', ascending=False).reset_index(
                drop=True)
            lcsArr = []
            for i in range(len(df3)):
                itemI = df3.values[i]
                nameI = itemI[1]
                for j in range(len(df3)):
                    itemJ = df3.values[j]
                    nameJ = itemJ[1]
                    if i != j and lcs.isLCS(nameI, nameJ):
                        lcsArr.append({'uuid': itemI[2], 'name': itemI[1], 'length': itemI[0]})
                        lcsArr.append({'uuid': itemJ[2], 'name': itemJ[1], 'length': itemJ[0]})
            df4 = pd.DataFrame(lcsArr)

            # 1. Drop duplicate uuids and reset the index; the first row is kept
            df5 = df4.drop_duplicates(['uuid']).reset_index(drop=True)
            if len(df5) > 0:
                sameDict = {}
                sameArr = []
                keepUuid = df5.values[0][0]
                for i in range(len(df5)):
                    if i > 0:
                        sameArr.append(df5.values[i])
                sameDict[keepUuid] = sameArr
                # print(df5)
                # print(sameDict)
                for uuid in sameDict.keys():
                    itemArr = sameDict[uuid]
                    updateUuidArr = []
                    for i in range(len(itemArr)):
                        itemData = itemArr[i]
                        updateUuidArr.append("'" + itemData[0] + "'")
                    # Re-point the merged uuids' relationship rows at the survivor
                    sql3_1 = (
                            " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " SELECT '" + uuid + "' ,guid from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " where uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql3_1)
                    # print(sql3_1)
                    sql3_2 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql3_2)
                    # print(sql3_2)
                    sql3_3 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql3_3)
                    # print(sql3_3)
            # print("--------------------------------------------------------")

    # Pass 2: names that occur more than once (mirror of pass 1 with the roles
    # of name and organization swapped)
    sql11 = (
            " SELECT `name` from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew  " +
            " group by name " +
            " HAVING COUNT() > 1 " +
            " order by LENGTH(name) desc "
    )
    sql11_rows = await client.fetch(sql11)
    if sql11_rows is not None and len(sql11_rows) > 0:
        for sql11_rows_item in sql11_rows:
            print(sql11_rows_item['name'])
            sql12 = (
                    " select uuid,`organization`,LENGTH(organization) as `length` from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                    " where `name` = '" + sql11_rows_item['name'] + "' "
            )
            sql12_rows = await client.fetch(sql12)
            df = pd.DataFrame(sql12_rows)

            # 1. Drop duplicate uuids, sort by organization length descending, reset the index
            df1 = df.drop_duplicates(['uuid']).sort_values(by='length', ascending=False).reset_index(drop=True)

            # 2. Handle rows whose organizations are exactly identical
            # (same run-grouping caveats as in pass 1)
            df2 = df1['organization'].duplicated(keep='last')
            sameBegin = False
            sameDict = {}
            sameArr = []
            for i in range(len(df1)):
                df2_item = df2[i]
                df1_item = df1.values[i]
                if df2_item:
                    sameBegin = True
                    sameArr.append(df1_item)
                if sameBegin and not df2_item:
                    sameBegin = False
                    sameDict[df1_item[2]] = sameArr
                    sameArr = []
            if len(sameDict.keys()) > 0:
                # print(sameDict)
                for uuid in sameDict.keys():
                    itemArr = sameDict[uuid]
                    updateUuidArr = []
                    for i in range(len(itemArr)):
                        itemData = itemArr[i]
                        updateUuidArr.append("'" + itemData[2] + "'")
                    # Re-point the merged uuids' relationship rows at the survivor
                    sql12_1 = (
                            " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " SELECT '" + uuid + "' ,guid from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " where uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql12_1)
                    # print(sql12_1)
                    sql12_2 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql12_2)
                    # print(sql12_2)
                    sql12_3 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql12_3)
                    # print(sql12_3)

            # Handle organizations that are substrings of each other
            df3 = df1.drop_duplicates(['organization'], keep='last').sort_values(by='length',
                                                                                 ascending=False).reset_index(
                drop=True)
            lcsArr = []
            for i in range(len(df3)):
                itemI = df3.values[i]
                organizationI = itemI[1]
                for j in range(len(df3)):
                    itemJ = df3.values[j]
                    organizationJ = itemJ[1]
                    if i != j and lcs.isLCS(organizationI, organizationJ):
                        lcsArr.append({'uuid': itemI[2], 'organization': itemI[1], 'length': itemI[0]})
                        lcsArr.append({'uuid': itemJ[2], 'organization': itemJ[1], 'length': itemJ[0]})
            df4 = pd.DataFrame(lcsArr)

            # 1. Drop duplicate uuids and reset the index; the first row is kept
            # df5 = df4.drop_duplicates(['uuid']).sort_values(by='length', ascending=False).reset_index(drop=True)
            df5 = df4.drop_duplicates(['uuid']).reset_index(drop=True)
            if len(df5) > 0:
                sameDict = {}
                sameArr = []
                keepUuid = df5.values[0][0]
                for i in range(len(df5)):
                    if i > 0:
                        sameArr.append(df5.values[i])
                sameDict[keepUuid] = sameArr
                # print(df5)
                # print(sameDict)
                for uuid in sameDict.keys():
                    itemArr = sameDict[uuid]
                    updateUuidArr = []
                    for i in range(len(itemArr)):
                        itemData = itemArr[i]
                        updateUuidArr.append("'" + itemData[0] + "'")
                    # Re-point the merged uuids' relationship rows at the survivor
                    sql13_1 = (
                            " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " SELECT '" + uuid + "' ,guid from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " where uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql13_1)
                    # print(sql13_1)
                    sql13_2 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql13_2)
                    # print(sql13_2)
                    sql13_3 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql13_3)
                    # print(sql13_3)
            # print("--------------------------------------------------------")
    print("mergeSubstring finish")


async def mergeSecondSubstring(client):
    """Merge experts sharing a name when one row's second-level organization
    is a substring (lcs.isLCS) of another row's organization.

    For each duplicated name, all candidate rows are compared pairwise; the
    longest-organization row survives, absorbs the others' relationship rows,
    and the merged uuids are deleted from both tables.

    NOTE(review): the positional `values[i][n]` indexing below assumes the
    DataFrame columns are ordered alphabetically
    (Second_organization, length, organization, uuid) — true only for
    pandas < 0.23 when building from dict rows; confirm the pinned version.
    """
    # Names that occur more than once
    sql1 = (
        " SELECT name from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew group by name HAVING count() > 1 "
    )
    sql1_rows = await client.fetch(sql1)
    if sql1_rows is not None and len(sql1_rows) > 0:
        for sql1_rows_item in sql1_rows:
            itemName = escapeCharacter(sql1_rows_item['name'])
            sql2 = (
                    " SELECT uuid,organization ,Second_organization,length(organization) as `length` from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                    " where name = '" + itemName + "' " +
                    " order by length(organization) desc "
            )
            sql2_rows = await client.fetch(sql2)
            # Convert to a DataFrame
            df = pd.DataFrame(sql2_rows)
            # Dedup by uuid and reset the index
            df1 = df.drop_duplicates(['uuid']).reset_index(drop=True)
            lcsArr = []
            # Pairwise compare: row i's organization vs row j's Second_organization
            for i in range(len(df1)):
                itemI = df1.values[i]
                organizationI = itemI[2]
                for j in range(len(df1)):
                    itemJ = df1.values[j]
                    secondOrganizationJ = itemJ[0]
                    if i != j and lcs.isLCS(organizationI, secondOrganizationJ):
                        lcsArr.append({'Second_organization': itemI[0], 'length': itemI[1], 'organization': itemI[2],
                                       'uuid': itemI[3]})
                        lcsArr.append({'Second_organization': itemJ[0], 'length': itemJ[1], 'organization': itemJ[2],
                                       'uuid': itemJ[3]})
            if len(lcsArr) > 0:
                # Keep the row with the longest organization; merge the rest into it
                df4 = pd.DataFrame(lcsArr).drop_duplicates(['uuid']).sort_values(by='length',
                                                                                 ascending=False).reset_index(drop=True)
                sameDict = {}
                sameArr = []
                keepUuid = df4.values[0][3]
                for i in range(len(df4)):
                    if i > 0:
                        sameArr.append(df4.values[i])
                sameDict[keepUuid] = sameArr
                # print(df5)
                # print(sameDict)
                for uuid in sameDict.keys():
                    itemArr = sameDict[uuid]
                    updateUuidArr = []
                    for i in range(len(itemArr)):
                        itemData = itemArr[i]
                        updateUuidArr.append("'" + itemData[3] + "'")
                    # Re-point the merged uuids' relationship rows at the survivor
                    sql3_1 = (
                            " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " SELECT '" + uuid + "' ,guid from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " where uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql3_1)
                    # print(sql3_1)
                    sql3_2 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql3_2)
                    # print(sql3_2)
                    sql3_3 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                            " DELETE WHERE uuid in (" + ','.join(updateUuidArr) + ") "
                    )
                    await client.execute(sql3_3)
                    # print(sql3_3)
    print("mergeSecondSubstring finish")


async def executeSourceEmail(client, sourceEmail):
    """
        根据目标 email 提取作者和作者关系

    :param client:
    :param sourceEmail:
    :return:
    """

    # 每次循环清空临时表
    await client.execute("ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp DELETE WHERE 1=1")
    await client.execute(
        "ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship_Tmp DELETE WHERE 1=1")

    # step 1
    sql1 = (
            " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp " +
            " select " +
            " generateUUIDv4() as randomUUID,o.`guid`,o.`name`,o.`organization`,o.`Second_organization`,o.`email`,o.`reprintauthor`,o.`altname`,o.`country`,o.`firstauthor`,o.`organizationdept` " +
            " from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_1 o " +
            " left join datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Been_Dealt bd on(o.guid = bd.guid and o.name = bd.name and o.organization = bd.organization) " +
            " where o.guid in (select o1.guid from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_1 o1 where o1.email='" + sourceEmail + "')" +
            " and (bd.guid is NULL or bd.guid = '') "
    )
    await client.execute(sql1)
    sql1_1 = (
        "select count() from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp"
    )
    tmpCount = await client.fetchval(sql1_1)
    if tmpCount <= 0:
        print(sourceEmail, "  tmpCount = 0 , continue")
    else:
        # print(f"sql1 -> {sql1}")
        sql1_2 = (
                " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Been_Dealt " +
                " select `guid`,`name`,`organization` from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp "
        )
        await client.execute(sql1_2)

        # step 2 分组
        sql2 = (
                " select `name`,organization from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp " +
                " WHERE `name` is not null and organization is not null " +
                " GROUP by `name` ,organization " +
                " ORDER BY LENGTH(`name`) DESC "
        )

        sql2_rows = await client.fetch(sql2)
        # print(f"sql2 -> {sql2}")
        sql2_name_list = []
        sql2_organization_list = []
        for row in sql2_rows:
            # 数据库sql字符转义 ' 转义为 ''
            name = row["name"]
            newName = escapeCharacter(name)
            sql2_name_list.append(newName)
            organization = row["organization"]
            newOrganization = escapeCharacter(organization)
            sql2_organization_list.append(newOrganization)

        # step3
        #  遍历 分组数据，插入临时表
        for i in range(len(sql2_name_list)):
            # 每次循环清空临时专家表
            await client.execute(
                "ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew_Tmp DELETE WHERE 1=1")

            name = sql2_name_list[i]
            organization = sql2_organization_list[i]

            # name and organization 相同insert 临时表
            sql3 = (
                    " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew_Tmp " +
                    " select " +
                    " `uuid`,`guid`,`name`,`organization`,`Second_organization`,`email`,`reprintauthor`,`altname`,`country`,`firstauthor`,`organizationdept` " +
                    " from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp " +
                    " where name = '" + name + "' and organization = '" + organization + "';"
            )
            # print(f"sql3 -> {sql3}")
            await client.execute(sql3)
            # 移除插入临时表数据
            # time.sleep(2)  # 141测试环境数据库性能差，休眠2秒
            sql4 = "ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp DELETE where `name` = '" + name + "' and organization = '" + organization + "';"
            # print(f"sql4 -> {sql4}")
            await client.execute(sql4)

            # 根据机构相同遍历
            sql5 = "select uuid,`name` from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp where organization = '" + organization + "'"
            sql5_uuid_list = []
            # print(f"sql5 -> {sql5}")
            sql5_rows = await client.fetch(sql5)
            # 姓名是子串加入临时表
            for row in sql5_rows:
                _uuid = row["uuid"]
                _name = row["name"]
                # 如果姓名是子串,插入临时表，并且删除当前数据
                if lcs.isLCS(name, _name):
                    sql5_1 = (
                            " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew_Tmp " +
                            " select "
                            " `uuid`,`guid`,`name`,`organization`,`Second_organization`,`email`,`reprintauthor`,`altname`,`country`,`firstauthor`,`organizationdept` " +
                            " from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp " +
                            " where uuid = '" + _uuid + "'"
                    )
                    sql5_uuid_list.append(_uuid)
                    await client.execute(sql5_1)

            # 移除移入name子串数据
            for uid in sql5_uuid_list:
                sql5_2 = "ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp DELETE where uuid = '" + uid + "'"
                await client.execute(sql5_2)

            # 根据姓名相同遍历
            sql6 = " select uuid,organization from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp where name ='" + name + "'"
            sql6_uuid_list = []
            sql6_rows = await client.fetch(sql6)
            # 机构如是子串加入临时表
            for row in sql6_rows:
                _uuid = row["uuid"]
                _organization = row["organization"]
                # 如果机构是子串,插入临时表，并且删除当前数据
                if lcs.isLCS(organization, _organization):
                    sql6_1 = (
                            " insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew_Tmp " +
                            " select " +
                            " `uuid`,`guid`,`name`,`organization`,`Second_organization`,`email`,`reprintauthor`,`altname`,`country`,`firstauthor`,`organizationdept` " +
                            " from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp " +
                            " where uuid = '" + _uuid + "'"
                    )
                    sql6_uuid_list.append(_uuid)
                    await client.execute(sql6_1)

            # 移除移入机构子串数据
            for uid in sql6_uuid_list:
                sql6_2 = "ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Tmp DELETE where uuid ='" + uid + "'"
                await client.execute(sql6_2)

            # 操作专家临时表 ，数据合并
            # 遍历临时专家表
            expertId = await client.fetchval("SELECT generateUUIDv4() as randomUUID")
            sql7_rows_sorganization = {}
            sql7_rows_email = {}
            sql7_rows_reprintauthor = {}
            sql7_rows_altname = {}
            sql7_rows_country = {}
            sql7_rows_firstauthor = {}
            sql7_rows_organizationdept = {}
            sql7 = (
                    " select " +
                    " `uuid`,`guid`,`name`,`organization`,`Second_organization`,`email`,`reprintauthor`,`altname`,`country`,`firstauthor`,`organizationdept` " +
                    " from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew_Tmp "
            )
            sql7_rows = await client.fetch(sql7)
            print(sourceEmail, '  ', name, '  ', organization, '  ', len(sql7_rows))

            # 插入关系表
            values = []
            for row in sql7_rows:
                _guid = row["guid"]
                _uuid = str(expertId)
                value = "('" + _uuid + "','" + _guid + "')"
                values.append(value)

                second_organization = row["Second_organization"]
                if second_organization is not None:
                    if sql7_rows_sorganization.get(second_organization) is None:
                        sql7_rows_sorganization[second_organization] = 1
                    else:
                        sql7_rows_sorganization[second_organization] = sql7_rows_sorganization[
                                                                           second_organization] + 1

                email = row["email"]
                if email is not None:
                    if sql7_rows_email.get(email) is None:
                        sql7_rows_email[email] = 1
                    else:
                        sql7_rows_email[email] = sql7_rows_email[email] + 1

                reprintauthor = row["reprintauthor"]
                if reprintauthor is not None:
                    if sql7_rows_reprintauthor.get(reprintauthor) is None:
                        sql7_rows_reprintauthor[reprintauthor] = 1
                    else:
                        sql7_rows_reprintauthor[reprintauthor] = sql7_rows_reprintauthor[reprintauthor] + 1

                altname = row["altname"]
                if altname is not None:
                    if sql7_rows_altname.get(altname) is None:
                        sql7_rows_altname[altname] = 1
                    else:
                        sql7_rows_altname[altname] = sql7_rows_altname[altname] + 1
                country = row["country"]
                if country is not None:
                    if sql7_rows_country.get(country) is None:
                        sql7_rows_country[country] = 1
                    else:
                        sql7_rows_country[country] = sql7_rows_country[country] + 1
                firstauthor = row["firstauthor"]
                if firstauthor is not None:
                    if sql7_rows_firstauthor.get(firstauthor) is None:
                        sql7_rows_firstauthor[firstauthor] = 1
                    else:
                        sql7_rows_firstauthor[firstauthor] = sql7_rows_firstauthor[firstauthor] + 1
                organizationdept = row["organizationdept"]
                if organizationdept is not None:
                    if sql7_rows_organizationdept.get(organizationdept) is None:
                        sql7_rows_organizationdept[organizationdept] = 1
                    else:
                        sql7_rows_organizationdept[organizationdept] = sql7_rows_organizationdept[
                                                                           organizationdept] + 1

            await client.execute(
                "INSERT INTO datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship_Tmp VALUES" + ','.join(
                    values)
            )

            # 合并专家表
            # email 排序处理
            if len(sql7_rows) > 0:
                sorted(sql7_rows_email.items(), key=lambda item: item[1], reverse=True)
                emailnew = ""
                for e in sql7_rows_email:
                    if emailnew != "":
                        # 拆邮箱
                        if sql7_rows_email[emailnew] == sql7_rows_email[e]:
                            emailHead = "" if e is None else e[0:e.find('@')]
                            emailOrg = "" if e is None else e[e.find('@') + 1:e.find('.')]
                            if lcs.isLCS(name, emailHead) and lcs.isLCS(organization, emailOrg):
                                emailnew = e
                    else:
                        emailnew = e

                await client.execute("INSERT INTO datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew VALUES",
                                     (expertId, name, organization, ",".join(sql7_rows_sorganization.keys()),
                                      emailnew,
                                      ",".join(sql7_rows_reprintauthor.keys()), ",".join(sql7_rows_altname.keys()),
                                      ",".join(sql7_rows_country.keys()), ",".join(sql7_rows_firstauthor.keys()),
                                      ",".join(sql7_rows_organizationdept.keys()), '', '', '', sourceEmail)
                                     )

        print('step 1~4 finish')

        # step5
        sql8 = (
                " SELECT email from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                " where email is not null and email !='' " +
                " and source_email = '" + sourceEmail + "' " +
                " group by email " +
                " HAVING count(*) > 1 "
        )
        sql8_rows = await client.fetch(sql8)

        if sql8_rows is not None and len(sql8_rows) > 0:
            for sql8_rows_item in sql8_rows:
                if sql8_rows_item != '':
                    sql8_1 = (
                            " SELECT " +
                            " `uuid`,`name`,`organization`,`Second_organization`,`email`,`reprintauthor`,`altname`,`country`,`firstauthor`,`organizationdept`,`keywords`,`subjects`,`journals` " +
                            " from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                            " where email ='" + str(sql8_rows_item['email']) + "' " +
                            " and source_email = '" + sourceEmail + "' " +
                            " ORDER BY LENGTH(name) DESC"
                    )
                    sql8_1_rows = await client.fetch(sql8_1)
                    if sql8_1_rows is not None and len(sql8_1_rows) > 0:
                        sameArr = []
                        for j in range(len(sql8_1_rows)):
                            itemj = sql8_1_rows[j]
                            namej = itemj['name']
                            for k in range(len(sql8_1_rows)):
                                if k > j:
                                    itemk = sql8_1_rows[k]
                                    namek = itemk['name']
                                    if namej == namek or lcs.isLCS(namej, namek):
                                        if itemj not in sameArr:
                                            sameArr.append(itemj)
                                        if itemk not in sameArr:
                                            sameArr.append(itemk)

                        if len(sameArr) > 0:
                            randomUUID = await client.fetchval("SELECT generateUUIDv4() as randomUUID")
                            uuid = str(randomUUID)
                            uuidArr = []
                            newOrganization = ''
                            for j in range(len(sameArr)):
                                sameItem = sameArr[j]
                                newOrganization += sameItem['organization']
                                if j < len(sameArr) - 1:
                                    newOrganization += ','
                                itemUuid = str(sameItem['uuid'])
                                uuidArr.append("'" + itemUuid + "'")

                            # 删除专家
                            sql8_2 = (
                                    " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                                    " DELETE WHERE uuid in (" + ','.join(uuidArr) + ")"
                            )
                            # print(f"sql8_2 -> {sql8_2}")
                            await client.execute(sql8_2)

                            # 新增新专家
                            sql8_3 = (
                                    "insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew values(" +
                                    "'" + uuid + "'," +
                                    "'" + str(sameArr[0]['name'] or '') + "'," +
                                    "'" + str(newOrganization) + "'," +
                                    "'" + str(sameArr[0]['Second_organization'] or '') + "'," +
                                    "'" + str(sameArr[0]['email'] or '') + "'," +
                                    "'" + str(sameArr[0]['reprintauthor'] or '') + "'," +
                                    "'" + str(sameArr[0]['altname'] or '') + "'," +
                                    "'" + str(sameArr[0]['country'] or '') + "'," +
                                    "'" + str(sameArr[0]['firstauthor'] or '') + "'," +
                                    "'" + str(sameArr[0]['organizationdept'] or '') + "'," +
                                    "''," +
                                    "''," +
                                    "''," +
                                    "'" + sourceEmail + "'" +
                                    ")"
                            )
                            # print(f"sql8_3 -> {sql8_3}")
                            await client.execute(sql8_3)

                            # 查询专家关系
                            sql8_4 = (
                                    " SELECT " +
                                    " `uuid`,`guid`" +
                                    " FROM datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship_Tmp " +
                                    " WHERE uuid in (" + ','.join(uuidArr) + ")"
                            )
                            sql8_4_rows = await client.fetch(sql8_4)
                            if sql8_4_rows is not None:
                                # 删除旧专家关系
                                sql8_5 = (
                                        " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship_Tmp " +
                                        " DELETE WHERE uuid in (" + ','.join(uuidArr) + ")"
                                )
                                await client.execute(sql8_5)

                                sql8_6 = (
                                    "insert into datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship_Tmp values"
                                )
                                for j in range(len(sql8_4_rows)):
                                    item = sql8_4_rows[j]
                                    guid = str(item['guid'])
                                    sql8_6_item = "('" + uuid + "','" + guid + "')"
                                    if j < len(sql8_4_rows) - 1:
                                        sql8_6_item += ','
                                    sql8_6 += sql8_6_item
                                await client.execute(sql8_6)
        print('step 5 finish')

        # step6
        sql9_1 = (
                " SELECT uuid FROM datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew" +
                " where source_email = '" + sourceEmail + "' "
        )
        # print(f"sql9_1 -> {sql9_1}")
        sql9_1_rows = await client.fetch(sql9_1)
        for sql9_1_row in sql9_1_rows:
            _uuid = sql9_1_row["uuid"]
            sql9_2 = ("SELECT guid,DE,SC,SO FROM datahouse.T_SCI_WOS_2020_ISSN_China WHERE guid in ("
                      "SELECT guid FROM datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship_Tmp "
                      "WHERE uuid = '" + _uuid + "')")

            sql9_2_rows = await client.fetch(sql9_2)
            keywords = {}
            subjects = {}
            journals = {}
            for sql9_2_row in sql9_2_rows:
                de = sql9_2_row["DE"]
                if de is not None and de != '':
                    newDe = escapeCharacter(de)
                    if keywords.get(newDe) is None:
                        keywords[newDe] = 1
                    else:
                        keywords[newDe] = keywords[newDe] + 1

                sc = sql9_2_row["SC"]
                if sc is not None and sc != '':
                    newSc = escapeCharacter(sc)
                    if subjects.get(newSc) is None:
                        subjects[newSc] = 1
                    else:
                        subjects[newSc] = subjects[newSc] + 1

                so = sql9_2_row["SO"]
                if so is not None and so != '':
                    newSo = escapeCharacter(so)
                    if journals.get(newSo) is None:
                        journals[newSo] = 1
                    else:
                        journals[newSo] = journals[newSo] + 1

            sql9_2 = (
                    " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                    " UPDATE " +
                    " keywords='" + ",".join(keywords.keys()) + "' ," +
                    " subjects = '" + ",".join(subjects.keys()) + "' ," +
                    " journals='" + ",".join(journals.keys()) + "' " +
                    " where uuid = '" + _uuid + "' "
            )
            # print(f"sql9_2 -> {sql9_2}")
            await client.execute(sql9_2)

        # 临时专家关系去重，插入专家关系表
        sql10 = (
            " INSERT INTO datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship "
            " SELECT uuid,guid from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship_Tmp group by uuid,guid "
        )
        await client.execute(sql10)
        # time.sleep(2)
        print('step 6 finish')


async def mergeSameRelationship(client):
    """
    Merge experts that share the same name and de-duplicate their
    paper-relationship rows.

    Flow:
      1. Find names occurring more than once in ExpertNew (sql1).
      2. Load those duplicated expert rows (sql2) and all of their
         relationship rows (sql3) into pandas frames.
      3. For each duplicated name:
         - "simple" case (exactly 2 experts and 2 relationship rows
           pointing at the same guid): delete the second expert, merge
           its organization / Second_organization / email into the kept
           one via mergeString, and drop its relationship row;
         - "complex" case: for each guid linked to more than one of the
           duplicated uuids, keep the uuid with the fewest relationship
           rows and delete the duplicate links of the others.

    :param client: aiochclient ChClient connected to the datahouse DB
    :return: None
    """
    sql1 = (
        "SELECT name from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew group by name HAVING count() > 1 "
    )
    sq1_rows = await client.fetch(sql1)
    d1 = pd.DataFrame(sq1_rows, columns=['name'])
    sql2 = (
            " SELECT uuid,name ,organization ,Second_organization ,email from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
            " where name in(SELECT name from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew group by name HAVING count()>1) " +
            " order by name "
    )
    sq2_rows = await client.fetch(sql2)
    d2 = pd.DataFrame(sq2_rows, columns=['uuid', 'name', 'organization', 'Second_organization', 'email'])
    sql3 = (
            " SELECT uuid,guid from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
            " where uuid in( " +
            " SELECT uuid from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
            " where name in(SELECT name from datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew group by name HAVING count()>1) " +
            " ) "
    )
    sq3_rows = await client.fetch(sql3)
    d3 = pd.DataFrame(sq3_rows, columns=['uuid', 'guid'])
    if d1 is not None and len(d1) > 0:
        for name in d1.get('name'):
            # Filter the expert rows by this name to obtain their uuids
            d4 = d2[d2['name'].isin([name])].get('uuid').reset_index(drop=True)
            # Filter the relationship rows down to those uuids
            d5 = d3[d3['uuid'].isin(list(d4))].reset_index(drop=True)
            if len(d4) == len(d5) and len(d4) == 2:
                # "simple" relationship: exactly 2 experts, 2 relationship rows
                print(name, ',单一关系')
                guidArr = d5.get('guid').unique()
                if len(guidArr) == 1:
                    d6 = d2[d2['uuid'].isin(list(d4))].reset_index(drop=True)
                    # duplicated expert row to delete (second row)
                    itemDelExperts = d6.loc[[1], :]
                    # expert row to keep (first row)
                    itemKeepExperts = d6.loc[[0], :]
                    # uuid of the kept expert
                    keepUuid = ''.join(itemKeepExperts['uuid'])
                    # uuid of the deleted expert
                    delUuid = ''.join(itemDelExperts['uuid'])
                    sql4_1 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                            " DELETE WHERE uuid = '" + delUuid + "' "
                    )
                    # print(sql4_1)
                    await client.execute(sql4_1)
                    # merge the deleted expert's fields into the kept one
                    sql4_2 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_ExpertNew " +
                            " UPDATE organization = '" + mergeString(''.join(itemDelExperts['organization']),
                                                                     ''.join(itemKeepExperts['organization'])) + "', " +
                            " Second_organization = '" + mergeString(''.join(itemDelExperts['Second_organization']),
                                                                     ''.join(itemKeepExperts[
                                                                                 'Second_organization'])) + "', " +
                            " email = '" + mergeString(''.join(itemDelExperts['email']),
                                                       ''.join(itemKeepExperts['email'])) + "' " +
                            " WHERE uuid = '" + keepUuid + "' "
                    )
                    # print(sql4_2)
                    await client.execute(sql4_2)
                    sql4_3 = (
                            " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                            " DELETE WHERE uuid = '" + delUuid + "' "
                    )
                    # print(sql4_3)
                    await client.execute(sql4_3)
                else:
                    # relationships are not duplicated: nothing to do
                    print('关系不重复，不处理')
            else:
                # "complex" relationship: more experts/relationships than the simple case
                # print(d5)
                print(name, ',复杂关系')
                guidArr = d5.get('guid').unique()
                if len(guidArr) == len(d5):
                    # no duplicated relationship rows: nothing to do
                    print('没有重复关系数据，不处理')
                else:
                    # collect the guids that are linked to more than one uuid
                    repeatedGuidArr = d5[d5['guid'].duplicated()].get('guid').unique()
                    for guid_item in repeatedGuidArr:
                        # print(guid_item)
                        uuidArr = list(d5[d5['guid'].isin([guid_item])].get('uuid').unique())
                        # print(uuidArr)
                        # keep the uuid with the fewest relationship rows; delete the rest
                        minCountUuid = ''
                        minCount = -1
                        for uuid_item in uuidArr:
                            itemDf = d5[d5['uuid'].isin([uuid_item])]
                            # NOTE(review): ''.join over itemDf['uuid'] concatenates the uuid
                            # once per matching row, so if the winner has >1 relationship rows
                            # minCountUuid repeats the uuid and remove() below would raise
                            # ValueError — confirm minCount can only be 1 here.
                            if minCount == -1:
                                minCount = len(itemDf)
                                minCountUuid = ''.join(itemDf.get('uuid'))
                            else:
                                if minCount > len(itemDf):
                                    minCount = len(itemDf)
                                    minCountUuid = ''.join(itemDf.get('uuid'))
                        # print(minCountUuid)
                        uuidArr.remove(minCountUuid)
                        delUuidArr = []
                        for uuid_item in uuidArr:
                            delUuidArr.append("'" + uuid_item + "'")
                        # print(delUuidArr)
                        sql5_1 = (
                                " ALTER TABLE datahouse.T_SCI_WOS_2020_ISSN_China_OutPut_Experts_Relationship " +
                                " DELETE WHERE uuid in(" + ','.join(delUuidArr) + ") " +
                                " and guid = '" + guid_item + "'"
                        )
                        # print(sql5_1)
                        await client.execute(sql5_1)


def mergeString(str1, str2):
    """
    Merge two strings into one.

    Rules:
      - identical strings: return the string unchanged;
      - one is a (longest-common-subsequence) match of the other per
        lcs.isLCS: return the longer of the two (str2 wins on a tie);
      - otherwise: join both with ';'.

    :param str1: first string
    :param str2: second string
    :return: the merged string
    """
    if str1 == str2:
        return str1
    if lcs.isLCS(str1, str2):
        # keep the longer variant; on equal lengths the second wins
        return str2 if len(str2) >= len(str1) else str1
    return ';'.join([str1, str2])


def escapeCharacter(str):
    """
    Escape a value for embedding in a single-quoted SQL literal:
    every single quote (') is doubled ('').

    Bug fix: the previous implementation checked find("'") > 0, which
    skipped strings whose FIRST character is a quote (find returns 0),
    leaving a leading apostrophe unescaped. str.replace handles every
    position, including index 0, and is a no-op when no quote exists.

    :param str: raw string value (parameter name kept for caller
        compatibility, although it shadows the builtin)
    :return: string safe to place inside single-quoted SQL
    """
    return str.replace("'", "''")


if __name__ == '__main__':
    # asyncio.run() creates, runs, and cleanly closes a fresh event loop;
    # it replaces the get_event_loop()/run_until_complete pattern, which
    # is deprecated since Python 3.10 and leaves the loop open afterwards.
    asyncio.run(main())
