#!/usr/bin/python
# -*- coding: utf-8 -*-


import os
import re
import csv
import dedupe
from unidecode import unidecode
from openpyxl import load_workbook

# Column headers of the source spreadsheet (Chinese labels; these strings are
# also used as dict keys throughout, so they must not be changed):
ID_COL_NAME = '序号'  # "serial number" — the record id column
NAME_COL_NAME = '题名'  # "title"
AUTHOR_COL_NAME = '作者'  # "author"
PUBLISHER_COL_NAME = '出版者'  # "publisher"

# Keys for features derived from the title and fed to dedupe:
KEY_COL_GEO = '地区'  # region/locality extracted from the title
KEY_COL_D1 = '日期1'  # first 4-digit year found in the title
KEY_COL_D2 = '日期2'  # second (last) 4-digit year found in the title
KEY_COL_PHASE = '卷别'  # volume/part marker (e.g. "上"/"下"/numbered fascicle)
PROJECT = '党史资料'  # project name ("Party history materials"); prefixes output file names


def extract_geo(s):
    """Best-effort extraction of a region/locality name from a title.

    Tries three patterns in order of specificity and returns the first
    captured group, stripped; returns None when nothing matches.
    """
    # Collapse '地区党'/'地方党' to '党' so the lookaheads below can anchor
    # on a single character.
    normalized = s.replace('地区党', '党').replace('地方党', '党')
    patterns = (
        r'^(.{1,3})(?=地|市|县|党)',      # short prefix right before a marker char
        r'(?:中共|党)(.+?)(?=地|市|县)',  # name sandwiched between markers
        r'中共(.+?)党',                   # name between '中共' and '党'
    )
    for pattern in patterns:
        match = re.search(pattern, normalized)
        if match:
            return match.group(1).strip()
    return None


def chi_to_num(s):
    """Convert a string of Chinese numeral characters to ASCII digits.

    e.g. '一九八五' -> '1985'.  Returns '' for an empty input.
    Raises KeyError if *s* contains a character that is not a known
    Chinese digit.

    Fixes vs. the original:
    - '零' was mapped to int 0, which made ``'' + 0`` raise TypeError;
      it now maps to the string '0'.
    - The Latin letter 'O' is accepted by the year regexes in
      extract_d1/extract_d2 but was missing here, causing a KeyError
      for inputs like '一九O五'; it now maps to '0' as well.
    """
    digit_map = {
        '〇': '0', '零': '0', 'O': '0',
        '一': '1', '二': '2', '三': '3', '四': '4', '五': '5',
        '六': '6', '七': '7', '八': '8', '九': '9',
        # Formal ("banker's") numerals and common variants:
        '壹': '1', '贰': '2', '叁': '3', '肆': '4', '伍': '5',
        '陆': '6', '柒': '7', '捌': '8', '玖': '9',
        '貮': '2', '两': '2',
    }
    # join() avoids the quadratic string += of the original loop.
    return ''.join(digit_map[c] for c in s)


def extract_d1(s):
    """Extract the first 4-digit year in *s* as an ASCII digit string.

    Tries Arabic digits first (e.g. '1985'), then Chinese numerals
    (e.g. '一九八五', converted via chi_to_num).  Returns None when no
    year is found.
    """
    m = re.search(r'[12]\d{3}', s)
    if m:
        return m.group(0).strip()
    # Fix: '〇' added to the character class — chi_to_num understands it,
    # but the original pattern could never match a year written with it.
    m = re.search(r'[一二][〇零O一二三四五六七八九]{3}', s)
    if m:
        return chi_to_num(m.group(0).strip())
    return None


def extract_d2(s):
    """Extract the LAST 4-digit year in *s* when it contains at least two.

    Mirrors extract_d1 but captures the second (final) year of a range
    such as '1921-1949'.  Returns None when fewer than two years appear.
    """
    # Greedy '.+' pushes the capture group onto the last year in the string.
    m = re.search(r'[12]\d{3}.+([12]\d{3})', s)
    if m:
        return m.group(1).strip()
    # Fix: '〇' added to both character classes, matching the digits that
    # chi_to_num can actually convert.
    m = re.search(
        r'[一二][〇零O一二三四五六七八九]{3}.+([一二][〇零O一二三四五六七八九]{3})', s)
    if m:
        return chi_to_num(m.group(1).strip())
    return None


def extract_phase(s):
    """Pull a volume/part marker out of a title string.

    Recognizes, in priority order: a trailing '上'/'下', '上'/'下'
    followed by '编'/'册', and '第...辑/卷/稿' fascicle numbering.
    Returns the captured marker stripped, or None if none match.
    """
    patterns = (
        r'(上|下)\W*$',       # bare 上/下 at end of title
        r'(上|下)(?:编|册)',  # 上/下 qualifying a volume word
        r'第(.+?)(?:辑|卷|稿)',  # numbered fascicle, e.g. 第三辑
    )
    for pattern in patterns:
        match = re.search(pattern, s)
        if match:
            return match.group(1).strip()
    return None


def read_excel(file_name):
    """Read the first worksheet of *file_name* into record dicts.

    Assumes row 1 is the header and columns 1-4 are id, title, author,
    publisher.  Empty/None cells become the literal string 'None'.

    Returns a tuple ``(records, index_by_id)`` where *records* is a list
    of row dicts and *index_by_id* maps each record's id to its dict.
    """
    workbook = load_workbook(file_name)
    sheet = workbook.worksheets[0]
    index_by_id = {}
    records = []
    # Data starts at row 2 (row 1 holds the column headers).
    for row_num in range(2, sheet.max_row + 1):
        record_id = sheet.cell(row=row_num, column=1).value
        title = sheet.cell(row=row_num, column=2).value
        author = sheet.cell(row=row_num, column=3).value
        publisher = sheet.cell(row=row_num, column=4).value
        record = {
            ID_COL_NAME: record_id,
            NAME_COL_NAME: title or 'None',
            AUTHOR_COL_NAME: author or 'None',
            PUBLISHER_COL_NAME: publisher or 'None',
        }
        records.append(record)
        index_by_id[record_id] = record
    return records, index_by_id


def prepare_data(rows):
    """Build dedupe's input mapping: record id -> feature dict.

    Each entry keeps the id and title and adds the derived fields
    (region, first/last year, volume marker) extracted from the title.
    """
    data = {}
    for record in rows:
        record_id = record[ID_COL_NAME]
        title = record[NAME_COL_NAME]
        data[record_id] = {
            ID_COL_NAME: record_id,
            NAME_COL_NAME: title,
            KEY_COL_GEO: extract_geo(title),
            KEY_COL_D1: extract_d1(title),
            KEY_COL_D2: extract_d2(title),
            KEY_COL_PHASE: extract_phase(title),
        }
    return data


if __name__ == '__main__':

    # Input spreadsheet plus the artifacts dedupe produces/consumes.
    input_file = '党史资料.xlsx'
    output_file = f'{PROJECT}_duplicates.txt'
    settings_file = f'{PROJECT}_learned_settings'
    training_file = f'{PROJECT}_training.json'

    print('importing data ...')
    excel_rows, excel_rows_index_d = read_excel(input_file)
    data_d = prepare_data(excel_rows)

    # If a settings file already exists, we'll just load that and skip training
    if os.path.exists(settings_file):
        print('reading from', settings_file)
        with open(settings_file, 'rb') as f:
            deduper = dedupe.StaticDedupe(f)
    else:
        # ## Training

        # Define the fields dedupe will pay attention to.  Author/publisher
        # are currently disabled; the derived title features carry
        # 'has missing' because the extract_* helpers may return None.
        fields = [
            {'field': NAME_COL_NAME, 'type': 'String'},
            # {'field': AUTHOR_COL_NAME, 'type': 'String'},
            # {'field': PUBLISHER_COL_NAME, 'type': 'String'},
            {'field': KEY_COL_GEO, 'type': 'String', 'has missing': True},
            {'field': KEY_COL_D1, 'type': 'DateTime', 'has missing': True},
            {'field': KEY_COL_D2, 'type': 'DateTime', 'has missing': True},
            {'field': KEY_COL_PHASE, 'type': 'ShortString', 'has missing': True},
        ]

        # Create a new deduper object and pass our data model to it.
        deduper = dedupe.Dedupe(fields)

        # If we have training data saved from a previous run of dedupe,
        # look for it and load it in.
        # __Note:__ if you want to train from scratch, delete the training_file
        if os.path.exists(training_file):
            print('reading labeled examples from ', training_file)
            with open(training_file, 'rb') as f:
                deduper.prepare_training(data_d, f)
        else:
            deduper.prepare_training(data_d)

        # ## Active learning
        # Dedupe will find the next pair of records
        # it is least certain about and ask you to label them as duplicates
        # or not.
        # use 'y', 'n' and 'u' keys to flag duplicates
        # press 'f' when you are finished
        print('starting active labeling...')

        dedupe.console_label(deduper)

        # Using the examples we just labeled, train the deduper and learn
        # blocking predicates
        deduper.train()

        # When finished, save our training to disk
        with open(training_file, 'w') as tf:
            deduper.write_training(tf)

        # Save our weights and predicates to disk.  If the settings file
        # exists, we will skip all the training and learning next time we run
        # this file.
        with open(settings_file, 'wb') as sf:
            deduper.write_settings(sf)

    # ## Clustering

    # `partition` will return sets of records that dedupe
    # believes are all referring to the same entity.
    # 0.5 is the match-probability threshold for clustering.

    print('clustering...')
    clustered_dupes = deduper.partition(data_d, 0.5)

    print('# duplicate sets', len(clustered_dupes))

    # ## Writing Results

    # Write our original data back out to a CSV with a new column called
    # 'Cluster ID' which indicates which records refer to each other.

    # Map each record id to its cluster id and dedupe's confidence score.
    cluster_membership = {}
    for cluster_id, (records, scores) in enumerate(clustered_dupes):
        for record_id, score in zip(records, scores):
            cluster_membership[record_id] = {
                "Cluster ID": cluster_id,
                "confidence_score": score
            }

    # Attach cluster info to every record.
    # NOTE(review): the int() cast assumes the spreadsheet ids load as (or
    # convert to) the same int keys dedupe returned in cluster_membership;
    # if openpyxl yields string ids this lookup would KeyError — confirm
    # against the input file.
    rows = []
    for row in data_d.values():
        row_id = int(row[ID_COL_NAME])
        row.update(cluster_membership[row_id])
        rows.append(row)
    from itertools import groupby

    # groupby requires its input sorted by the grouping key.
    rows.sort(key=lambda x: x['Cluster ID'])
    with open(output_file, "w", encoding='utf-8') as fw:

        for k, v in groupby(rows, key=lambda x: x['Cluster ID']):
            items = list(v)
            # Singleton clusters are not duplicates — skip them.
            if len(items) <= 1:
                continue
            print('=' * 30)
            fw.write('=' * 30 + '\r\n')

            # Echo each duplicate's original row (title/author/publisher)
            # with its confidence score, to stdout and to the report file.
            for r in items:
                id = r[ID_COL_NAME]
                row = excel_rows_index_d[id]
                name = row[NAME_COL_NAME]
                aut = row[AUTHOR_COL_NAME]
                pub = row[PUBLISHER_COL_NAME]
                simi = "{:.2f}".format(r['confidence_score'])

                print(id, name, aut, pub, simi)
                fw.write(f'{id} {name} {aut} {pub} {simi}' + '\r\n')
    # Final message: "duplicate results written to <output_file>".
    print(f'重复结果输出到了{output_file}')
