# -*- coding:utf-8 -*-
#! /bin/python
# python 3.2
# 删除重复的文件
# 张力 2009年11月19日23:21:07
# 遍历 文件 记录文件名
# 计算每个文件的 crc32
# 按crc32 为关键字,归类文件路径
# crc32列表中按第一个文件路径为其他文件建立deletefiles.list文件
# 删除其他文件
# ver 1.0 基本完成功能,只是
#   1.输入要处理的文件名的方式不太好,
#   2. 0字节文件 应该只在recovery 脚本中创建文件,
#   3.文件删除异常弄的不太好
#   4.recovery应该显示,恢复的结果
#   5.recovery复制文件,应该使用相对路径
#   6.应该提示用户,要处理的文件夹中是否还有未解压的文件,或者自动解压文件

# zhangli 2014年10月29日11:21:26
# Add multi-path search
# zhangli 2015/11/15
# check md5 only same size of files
# ignore file which size smaller than min_file_size_to_check

import os,sys
import tempfile
import my_file_op
import ProgressStatus
import my_log
import FileTree


# --------------------------------
# 处理函数
# --------------------------------
def writebat(f1, f2):
    """Record a restore command for duplicate file *f2*, then delete it.

    Appends a ``shutil.copyfile`` line to a ``recovery.py`` script created
    in *f2*'s directory, so the deletion can be undone by copying the kept
    file *f1* back.  Errors are logged via my_log, never raised.

    f1 -- path of the copy that is kept
    f2 -- path of the duplicate to delete
    """
    try:
        # The recovery script lives next to the deleted file.
        os.chdir(os.path.dirname(f2))
        new_script = not os.path.exists('recovery.py')
        # 'with' guarantees the handle is closed even if a write fails
        # (the original leaked it on exception).
        with open('recovery.py', 'a') as f:
            if new_script:
                # Header is written only once, when the script is created.
                f.writelines('#! \\bin\\python\r\n')
                f.writelines('import shutil\r\n')
            f.writelines(('shutil.copyfile( \"', f1.replace('\\', '\\\\'), '\",\"', f2.replace('\\', '\\\\'), '\")\r\n'))
        os.remove(f2)
    except OSError:
        # OSError subsumes IOError (alias since Python 3.3) and
        # WindowsError, which is undefined off Windows and would have
        # raised NameError in the original except clause.
        my_log.debug(f2)


# --------------------------------
# 创建还原处理文件,并删除重复文件
# --------------------------------
def deal_file_by_md5(md5_groups=None):
    """Keep the first file of every duplicate group, delete the rest.

    For each group of identical files, the first path is kept and every
    other path is handed to writebat(), which logs a recovery command and
    removes the file.

    md5_groups -- mapping {md5: [paths]}; when omitted, falls back to the
        module-level ``filelistbymd5`` for backward compatibility.
        NOTE(review): that global is not defined in this file — presumably
        created elsewhere; confirm before relying on the no-arg call.
    """
    groups = filelistbymd5 if md5_groups is None else md5_groups
    for paths in groups.values():
        savefile = paths[0]
        # In-place removal preserved from the original: callers see the
        # kept path stripped from each group's list.
        paths.remove(savefile)
        for duplicate in paths:
            writebat(savefile, duplicate)
    my_log.debug('\ndone.')
#
# ##################file name which have duplicated file size in filelist#######################
# for f in filelist:
#    print(f[0],f[1])
# --------------------------------
# sort by md5
# --------------------------------
# filelist.sort(key=lambda x:x[0])
# ##################key=md5 value=duplicated files name#######################


# --------------------------------
# remove no duplicated file in map
# --------------------------------
def remove_no_duplicated_file(md5_filenames):
    """Drop entries whose value holds fewer than two file paths.

    Mutates *md5_filenames* in place (keys with a single file are not
    duplicates) and returns the same dict for call-chaining convenience.
    """
    # Collect doomed keys first: deleting while iterating a dict is illegal.
    singles = [key for key, paths in md5_filenames.items() if len(paths) < 2]
    for key in singles:
        del md5_filenames[key]
    return md5_filenames


# filelist = {0:""}
# ilelist = os.listdir('c:/')
# for i in filelist:
#    print i

# for k,v in os.environ.items():
#    print "%s=%s" % (k,v)

# if __name__   ==   '__main__':
#    if len(argv)   <   2:
#        print '请输入要清理的文件夹作为第一个参数'
#        sys.exit()
class DuplicateScan:
    """Scan one or more directory trees and group duplicate files.

    Two strategies are offered: scan_by_md5 (content hash, exact) and
    scan_by_filesize (size only, fast pre-filter).  Both return a dict
    mapping the grouping key to the list of file paths sharing it, with
    single-member groups already removed.
    """

    def __init__(self):
        pass

    def classify_by_col2(self, filelist):
        """Group (path, key) pairs by their second column.

        filelist -- iterable of (filepath, key) tuples; pairs whose key is
            None (e.g. unreadable files) are skipped.
        Returns {key: [filepath, ...]}.  Also records len(filelist) in
        self.files_count as a side effect (preserved from the original).
        """
        self.files_count = len(filelist)
        groups = {}
        for path, key in filelist:
            if key is None:  # 'is None', not '== None'
                continue
            groups.setdefault(key, []).append(path)
        return groups

    def scan_by_md5(self, basepaths=None, exculdefiles=None,
                    min_file_size_to_check=0):
        """Find duplicate files under *basepaths* by MD5 of their content.

        basepaths -- list of directories to walk (default: empty list)
        exculdefiles -- file names to skip
            (default: [".", "..", "md5_info_cache.csv"])
        min_file_size_to_check -- accepted for API compatibility;
            currently unused (the size filter is commented out upstream).
        Returns {md5: [paths]} for groups with two or more files.
        """
        # None-sentinel instead of mutable default arguments.
        if basepaths is None:
            basepaths = []
        if exculdefiles is None:
            exculdefiles = [".", "..", "md5_info_cache.csv"]

        ProgressStatus.start()
        ProgressStatus.set_workload(1)

        # Remove duplicated base paths so no tree is walked twice.
        basepaths = list(set(basepaths))
        my_log.debug(str(len(basepaths)) + " dirs" + "\n")

        # Walk every base directory and collect candidate paths.
        filelist = []
        for _dir in basepaths:
            filelist.extend(my_file_op.get_files_from_dir(_dir, exculdefiles))

        filelist = my_file_op.remove_same_path_of_file_in_list(filelist)
        my_log.debug(str(len(filelist)) + " files" + "\n")

        # Only files sharing a size with another file can be duplicates.
        filelist = my_file_op.remove_unique_size_of_file_in_list(filelist)

        # Pair each path with its size, skipping unreadable files instead
        # of crashing (matches scan_by_filesize's error handling; also
        # keeps path/size pairs aligned, unlike the former parallel lists).
        sized_files = []
        for f in filelist:
            try:
                fsize = os.path.getsize(f)
            except OSError:
                my_log.debug("Get file info failed:" + f + "\n")
                continue
            sized_files.append((f, fsize))
            ProgressStatus.add_workload(fsize)

        my_log.debug(str(len(sized_files)) + " files need to check md5\n")

        # Compute the MD5 for each candidate file.
        file_with_md5 = []
        for f, fsize in sized_files:
            if ProgressStatus.is_running() is False:
                break  # cancelled externally
            try:
                ftime = os.path.getmtime(f)
            except OSError:
                my_log.debug("Get file info failed:" + f + "\n")
                continue
            fmd5 = my_file_op.get_md5_of_file(f, fsize, ftime)
            ProgressStatus.process(fsize)
            file_with_md5.append((f, fmd5))

        # Persist the (path, size, mtime) -> md5 cache for the next run.
        my_file_op.dir_info_cache.save()

        md5_filenames = self.classify_by_col2(file_with_md5)
        md5_filenames = remove_no_duplicated_file(md5_filenames)

        # Workload 0 signals that checking has finished.
        ProgressStatus.set_workload(0)
        ProgressStatus.stop()
        return md5_filenames

    def scan_by_filesize(self, basepaths=None, exculdefiles=None,
                         min_file_size_to_check=0):
        """Find potential duplicates under *basepaths* by file size only.

        Faster than scan_by_md5 but inexact: equal size does not imply
        equal content.  Parameters mirror scan_by_md5 (exculdefiles
        default here is [".", ".."]).
        Returns {str(size): [paths]} for groups with two or more files.
        """
        if basepaths is None:
            basepaths = []
        if exculdefiles is None:
            exculdefiles = [".", ".."]

        ProgressStatus.start()
        ProgressStatus.set_workload(1)

        basepaths = list(set(basepaths))
        my_log.debug(str(len(basepaths)) + " dirs" + "\n")

        filelist = []
        for _dir in basepaths:
            filelist.extend(my_file_op.get_files_from_dir(_dir, exculdefiles))

        filelist = my_file_op.remove_same_path_of_file_in_list(filelist)
        my_log.debug(str(len(filelist)) + " files" + "\n")

        filelist = my_file_op.remove_unique_size_of_file_in_list(filelist)

        # Sum total bytes to process for the progress display.
        for f in filelist:
            try:
                ProgressStatus.add_workload(os.path.getsize(f))
            except OSError:
                my_log.debug("Get file info failed:" + f + "\n")
                continue
        my_log.debug(str(len(filelist)) + " files need to check md5\n")

        file_with_filesize = []
        for f in filelist:
            if ProgressStatus.is_running() is False:
                break  # cancelled externally
            try:
                fsize = os.path.getsize(f)
            except OSError:
                my_log.debug("Get file info failed:" + f + "\n")
                continue
            ProgressStatus.process(fsize)
            # Key must be a string to match classify_by_col2's md5 usage.
            file_with_filesize.append((f, str(fsize)))

        file_with_filesize = self.classify_by_col2(file_with_filesize)
        file_with_filesize = remove_no_duplicated_file(file_with_filesize)

        ProgressStatus.set_workload(0)
        ProgressStatus.stop()
        return file_with_filesize
           
#s = DuplicateScan(["/tmp/New"])
#
