#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File    :   reading_writing_file_sample.py
@Contact :   291622538@qq.com

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2021/3/31 14:53   fan        1.0         None
"""
import ast
import csv
import pandas as pd
import pymysql


# CSV demo: read all rows from a sample file (echoing each one), then
# write the same rows back out to a second file.
def csv_sample():
    """Round-trip 'data/csv_sample.csv' into 'data/csv_out_sample.csv'."""
    collected = []
    # Read phase: print every row with its index and keep a copy.
    with open(file='data/csv_sample.csv', mode='r', encoding='utf-8') as src:
        for idx, row in enumerate(csv.reader(src)):
            print(idx, row)
            collected.append(row)
    # Write phase: newline='' stops csv from doubling line endings on Windows.
    with open(file='data/csv_out_sample.csv', mode='w', encoding='utf-8', newline='') as dst:
        csv.writer(dst).writerows(collected)


# Parse a Python-literal "JSON-like" string into a dict.
def json_to_dict():
    """Demo: safely parse a dict-literal string and print the result.

    The sample string uses single quotes and Python booleans, so it is not
    valid JSON; ``ast.literal_eval`` parses it safely, without the arbitrary
    code-execution risk of ``eval`` on untrusted input.
    """
    string = """{'Ip': '61.160.6.2', 'IP_info': {'as': 'AS4134 CHINANET-BACKBONE', 'asname': 'CHINANET-BACKBONE', 
    'city': "Huai'an", 'continent': '亚洲', 'continentCode': 'AS', 'country': '中国', 'countryCode': 'CN', 'currency': 'CNY', 
    'district': '', 'hosting': False, 'isp': 'Chinanet', 'lat': 33.5039, 'lon': 119.1442, 'mobile': False, 
    'offset': 28800, 'org': 'Chinanet JS', 'proxy': False, 'query': '61.160.6.2', 'region': 'JS', 'regionName': '江苏省', 
    'reverse': '', 'status': 'success', 'timezone': 'Asia/Shanghai', 'zip': ''}, 'addDate': '2020-11-08'} """
    # literal_eval only evaluates literal structures (dict/list/str/num/bool),
    # never arbitrary expressions — unlike the eval() it replaces.
    string_dict = ast.literal_eval(string)
    print(string_dict)
    print(type(string_dict))


# Merge several CSV files into a single CSV file.

def joint_csv_file(file_list, result_name, data_dir='./data'):
    """Concatenate several CSV files into one and write it to disk.

    :param file_list: file-name stems (without the ".csv" suffix) of the
        input files, all located in *data_dir*
    :param result_name: stem of the output file, written to *data_dir*
    :param data_dir: directory holding input and output files; defaults to
        './data' for backward compatibility with the previous hard-coded path
    :return: None — the merged file is written to '<data_dir>/<result_name>.csv'
    """
    frames = [pd.read_csv('%s/%s.csv' % (data_dir, file_name)) for file_name in file_list]
    pd.concat(frames).to_csv('%s/%s.csv' % (data_dir, result_name), index=False, sep=',')


# Read a file of Python-literal dicts, one per line.
def json_sample(file_name='cn.json'):
    """Parse each non-blank line of *file_name* as a Python literal.

    The previous version parsed every line and discarded the result; this
    version collects and returns the parsed objects (callers that ignored
    the old ``None`` return are unaffected). It also iterates the file
    lazily instead of loading it all with ``readlines()``.

    :param file_name: path of the input file; defaults to the original
        hard-coded 'cn.json'
    :return: list of parsed objects, one per non-blank input line
    """
    records = []
    with open(file_name, 'r', encoding='utf-8') as f:
        for line in f:
            stripped = line.strip()
            if not stripped:
                continue  # blank lines would make literal_eval raise
            # literal_eval, not eval: the file content is data, not code.
            records.append(ast.literal_eval(stripped))
    return records


# Deduplicate CSV records on a chosen column.
def distinct_by_field(file_name, field, out_file_name):
    """Drop duplicate rows keyed on *field* and write the result to a new CSV.

    :param file_name: input CSV path (first line is the header row)
    :param field: column name used as the uniqueness key (first occurrence wins)
    :param out_file_name: path of the deduplicated output CSV
    """
    # Alexa_rank is forced to str so the rank is not reformatted as a number
    # — presumably to preserve values like leading zeros; TODO confirm.
    frame = pd.read_csv(file_name, header=0, converters={'Alexa_rank': str})
    deduped = frame.drop_duplicates(subset=field)
    deduped.to_csv(out_file_name, index=None)


# Establish the MySQL database connection at import time.
# NOTE(review): credentials are hard-coded here — consider loading them from
# configuration or environment variables instead of committing them.
conn = pymysql.connect(
    # Database server address
    host='192.168.37.2',
    port=3306,
    user='root',
    password='nlp123456',
    # Database (schema) name
    db='domain_and_website',
    charset='utf8'
)

# Cursor used for executing SQL statements against `conn`.
cursor = conn.cursor()

if __name__ == '__main__':
    # Enrich the deduplicated site list with a "Title" column looked up in
    # the database by domain name: try table Site_ZH first, then fall back
    # to Site_Profile; rows with no match get an empty title.
    rows = []

    # Read the deduplicated input file.
    with open(file='data/distinct_by_field/分好类的网站去重.csv', mode='r', encoding='utf-8') as f:
        for num, line in enumerate(csv.reader(f)):
            if num == 0:
                # Header row: only append the new column name.
                line.append("Title")
                rows.append(line)
                continue

            # Progress indicator every 1000 records.
            if num % 1000 == 0:
                print("进度:  <----   %s    ---->" % num)

            # Column index 2 holds the domain name — TODO confirm against
            # the CSV layout.
            domainName = line[2]

            # Query each table until a title is found. The domain name comes
            # from the CSV, so it is passed as a bound parameter rather than
            # interpolated into the SQL string (prevents SQL injection).
            title = ""
            for table in ('Site_ZH', 'Site_Profile'):
                cursor.execute(
                    'select Title from %s where domainName=%%s' % table,
                    (domainName,)
                )
                row = cursor.fetchone()  # one matching row, or None
                if row:
                    title = row[0]
                    break
            line.append(title)
            rows.append(line)

    # Write the enriched rows to the output file.
    with open(file='data/distinct_by_field/分好类的网站+Title.csv', mode='w', encoding='utf-8', newline='') as f:
        csv.writer(f).writerows(rows)
