import logging
import os
from pydoc import pathdirs
import re
import time
from multiprocessing import Pool
from os import path
from pathlib import Path

import md5sum
import list_files

# Timestamped log format that includes the source file and line number.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Emit a progress log entry every `batch` processed files (see multi_md5).
batch = 1000

# Cache mapping pathlib.Path -> md5 hex string, populated by initDict()
# and consulted by multi_md5() / compareDirectory().
pathDict = {}

def same(value):
    """Identity helper: lets a cached md5 flow through the worker pool
    with the same async-result interface as a real computation."""
    return value

def multi_md5(file_list, dest_file):
    """Compute the md5 of every file in file_list using a worker pool and
    append one 'path;name;dir;ext;md5;size_kb' line per file to dest_file.

    md5 sums already cached in pathDict are reused instead of recomputed.
    Progress is logged every `batch` files.
    """
    results = []
    pool = Pool(2)
    try:
        for file in file_list:
            cached = pathDict.get(Path(file))
            # Cached sum: pass it through the pool via the identity helper
            # so every entry yields the same AsyncResult interface.
            if cached:
                results.append(pool.apply_async(same, args=(cached,)))
            else:
                results.append(pool.apply_async(md5sum.md5_file, args=(file,)))
        with open(dest_file, 'a+', encoding='utf-8') as f:
            count = 0
            # zip keeps each result paired with the file it was queued for.
            for file, res in zip(file_list, results):
                dir_name, file_name = path.split(file)
                _, ext = path.splitext(file_name)
                digest = res.get()
                size_kb = os.stat(file).st_size / 1024
                # One formatted write per file instead of six tiny writes;
                # '%d' truncates the KiB size like the original did.
                f.write('%s;%s;%s;%s;%s;%d\n'
                        % (file, file_name, dir_name, ext, digest, size_kb))
                count += 1
                if count % batch == 0:
                    logging.info('processed %s', count)
            logging.info("processed %s", count)
    finally:
        # BUG FIX: the original never closed the pool, leaking the two
        # worker processes on every call.
        pool.close()
        pool.join()

def checkFile(file, clean_file):
    """Copy lines from a listing txt to clean_file, keeping only lines
    whose path (the first ';'-separated field) still exists on disk.

    Output is appended, so repeated runs accumulate into clean_file.
    """
    total = 0
    # Combined with-statement; stream the input line by line instead of
    # readlines() so arbitrarily large listings need not fit in memory.
    with open(clean_file, 'a+', encoding='utf-8') as nf, \
            open(file, 'r', encoding='utf-8') as f:
        for line in f:
            total += 1
            abs_path = line.split(';')[0]
            # Only surviving files are copied to the cleaned listing.
            if Path(abs_path).exists():
                nf.write(line)
    print(total)

def initDict(file):
    """Load a listing txt into the global pathDict cache.

    Each line is ';'-separated as path;name;dir;ext;md5;size.  Field 0
    (the absolute path, as a pathlib.Path) becomes the key and field 4
    (the md5 sum) the value.
    """
    with open(file, 'r', encoding='utf-8') as f:
        # Stream the file instead of loading it whole with readlines().
        for line in f:
            fields = line.split(';')
            # BUG FIX: blank or malformed lines made the original raise
            # IndexError on fields[4]; skip anything too short to carry
            # an md5 field.
            if len(fields) < 5:
                continue
            pathDict[Path(fields[0])] = fields[4]

def compareDirectory(src_dir, dest_dir):
    """Compare files in src_dir against dest_dir by md5 (via pathDict).

    A src file whose md5 is known and also present under dest_dir is a
    verified duplicate and is deleted from src.  Files with an unknown
    md5 are kept — nothing verified that they exist in dest.
    """
    src_files = list_files.get_all_files(src_dir)
    dest_files = list_files.get_all_files(dest_dir)

    # Collect the known md5 sums of everything under dest_dir.
    dest_sums = set()
    for file in dest_files:
        digest = pathDict.get(Path(file))
        if digest:
            dest_sums.add(digest)

    not_exist = []
    exist = []
    for file in src_files:
        p = Path(file)
        digest = pathDict.get(p)
        if digest and digest in dest_sums:
            # Verified duplicate: safe to remove from src.
            exist.append(p)
            os.remove(p)
            print(p)
        else:
            # BUG FIX: the original's else branch also deleted files whose
            # md5 was never computed (digest is None) — data loss for any
            # file missing from pathDict.  Keep unverified files.
            not_exist.append(p)
    print('exist length: %d, not exist length: %d' % (len(exist), len(not_exist)))

def cleanDuplicateFiles(file):
    """
    Read a listing txt (path;name;dir;ext;md5;size per line), group the
    entries by directory and then by md5.  For every md5 that maps to
    more than one path, keep the lexicographically first path and delete
    the rest from disk.
    """
    # Group (path, md5) pairs by containing directory (field 2).
    dir_entries = {}
    with open(file, 'r', encoding='utf-8') as f:
        # Stream instead of readlines(); also avoid shadowing the builtin
        # `list` and the imported `os.path` as the original did.
        for line in f:
            fields = line.split(';')
            dir_entries.setdefault(fields[2], []).append((fields[0], fields[4]))
    print(len(dir_entries))

    # Flatten into md5 -> [paths] across all directories.
    sum_to_paths = {}
    for entries in dir_entries.values():
        for file_path, digest in entries:
            sum_to_paths.setdefault(digest, []).append(file_path)
    print(len(sum_to_paths))

    for paths in sum_to_paths.values():
        if len(paths) > 1:
            # Keep the first path in sorted order; remove the duplicates.
            for dup in sorted(paths)[1:]:
                p = Path(dup)
                print(p)
                os.remove(p)


if __name__ == '__main__':
    # Timing checkpoints: listing phase, then md5/dict phase.
    list_start = time.time()
    # files = list_files.get_all_files(r'F:\迅雷下载')
    # print(len(files))
    md5_start = time.time()
    # initDict("./files/d.txt")
    # initDict("./files/f.txt")
    # initDict("./files/e.txt")
    print('dict size: %d' % len(pathDict))
    # multi_md5(files, r'files/f1.txt')
    # checkFile("./files/d.txt", "./files/d1.txt")
    # checkFile("./files/e.txt", "./files/e1.txt")
    # checkFile("./files/f.txt", "./files/f1.txt")
    # cleanDuplicateFiles("./files/d.txt")
    md5_end = time.time()
    print('list time: %0.2f, md5 time: %0.2f'
          % ((md5_start - list_start), (md5_end - md5_start)))
