# -*- coding: utf-8 -*-
from pipe.readExcel import readExcel, exportExcel


def MarkExlDuplications(path, duplicateFields, dupField, keep=False):
    """Mark duplicate rows in an Excel workbook.

    Reads the workbook at *path*, adds a boolean column *dupField* that
    flags rows considered duplicates over the *duplicateFields* columns,
    and writes the result back out via exportExcel.

    Parameters
    ----------
    path : str
        Path of the Excel file to process.
    duplicateFields : list[str]
        Column labels used to detect duplicates (DataFrame.duplicated subset).
    dupField : str
        Name of the new column that receives the duplicate flag.
    keep : {'first', 'last', False}, default False
        Forwarded to DataFrame.duplicated:
          * 'first' — mark every occurrence except the first as True
          * 'last'  — mark every occurrence except the last as True
          * False   — mark ALL duplicated rows as True
    """
    df = readExcel(path)
    # keep must be the boolean False (not the string 'False') to flag every
    # occurrence; pass it by keyword so the intent is unambiguous.
    df[dupField] = df.duplicated(duplicateFields, keep=keep)
    exportExcel(path, df, "标记重复")


def dropExlDuplications(path, duplicateFields, keep='last'):
    """Drop duplicate rows from an Excel workbook and export the result.

    Reads the workbook at *path*, removes rows that are duplicated over the
    *duplicateFields* columns, and writes the deduplicated frame back out
    via exportExcel.

    Parameters
    ----------
    path : str
        Path of the Excel file to process.
    duplicateFields : list[str]
        Column labels used to detect duplicates (drop_duplicates subset).
    keep : {'first', 'last', False}, default 'last'
        Forwarded to DataFrame.drop_duplicates; the default 'last' retains
        the last occurrence of each duplicate group (original behavior).
    """
    df = readExcel(path)
    deduped = df.drop_duplicates(duplicateFields, keep=keep)
    exportExcel(path, deduped, "去重结果")



if __name__ == '__main__':
    # Ad-hoc driver: choose the column(s) to deduplicate on and the target
    # workbook below, then enable either the mark routine (adds a flag
    # column) or the drop routine (removes duplicate rows).
    # Name of the flag column written by MarkExlDuplications.
    dupField='重复标记'
    # Previously used field choices, kept commented for reference:
    # duplicateFields=["EXP_NO"]
    # duplicateFields=["PLDEID"]
    # duplicateFields=["name"]
    duplicateFields=["项目编码"]
    # duplicateFields=["项目编号"]
    # duplicateFields=["项目名称"]
    # duplicateFields=["EXP_NO","Y","X","SUR_H"]
    # duplicateFields = ["S_POINT", "E_POINT"]
    # duplicateFields = ["S_POINT", "E_POINT","S_DEEP","E_DEEP","S_H","E_H"]
    # # duplicateFields=["PLID"]
    # # duplicateFields=["PLID"]
    # Previously processed workbooks, kept commented for reference:
    # path=r"E:\data\qingyang\qingyangmdb\result\4.2023.02.15管点数据处理\管点调查表-去重空白-计算xy.xlsx"
    # path=r"E:\data\qingyang\qingyangmdb\result\6.2023.02.19处理\管点数据处理\6.姚工-管点调查表-重复数据-核对20230216标记重复2023-02-19-15-44-26.xlsx"
    # MarkExlDuplications(path,duplicateFields,dupField)

    # duplicateFields = ["S_POINT","E_POINT"]
    # # duplicateFields=["PLID"]
    # # duplicateFields=["PLID"]
    # path = r"E:\项目相关\13.可再生能源补贴\doc\doc\01.统一认证平台\2023.06.09监管平台账号-卢总\2023.06.13监管平台账号\汇总表-有手机号.xlsx"
    # Current target workbook (hard-coded absolute path; edit before running).
    path = r"E:\项目相关\13.可再生能源补贴\殷甲伟\2.可再生能源系统测试\2023.11.12报告测试情况\2024.01.25第五批绿证数据\未推送过去的98条记录.xlsx"
    # Active operation: remove duplicates, keeping the last occurrence.
    dropExlDuplications(path, duplicateFields)
    # MarkExlDuplications(path,duplicateFields,dupField)