#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import os.path
import json
import queue
from concurrent.futures import ThreadPoolExecutor, as_completed
import time
from tqdm import tqdm

from s2c.engine.db_helper import DbHelper
from s2c.engine.file_info import FileInfo, field_names
from s2c.engine.system_info import SystemInfo
from s2c.rules.conf import SCAN_FILTER_DIR, FIELD_DELIMITER, FILE_TAB,SYS_CURRENT
from s2c.tools.utils import get_system_date_time
from s2c.tools.utils import write_file


# FileScanner — filesystem scanner; dict_type: current | base
class FileScanner:
    """Walk a directory tree, compute per-file details (MD5 etc.) in a
    thread pool, and dump the collected rows to a JSON dictionary file."""

    def __init__(self, root='/', dict_type=SYS_CURRENT):
        """Prepare a scanner rooted at *root* tagged with *dict_type*."""
        self.path_list = None            # reset by scan()
        self.start_time = None           # pool start timestamp
        self.last_time = None            # last progress-refresh timestamp
        self.root_path = root            # default '/': scan the whole disk
        self.path_queue = queue.Queue()  # file paths awaiting processing
        self.total = 0                   # queued (post-filter) file count
        self.file_list = []              # collected FileInfo field rows
        self.pool = None                 # ThreadPoolExecutor, created lazily
        self.dict_type = dict_type       # dictionary type: current | base
        self.sys = SystemInfo()
        self.file_name = self.sys.file_name(dict_type=dict_type)  # output JSON path
        self.current = 0                 # files completed so far
        self.progress = 0                # progress percentage, 0..100
        self.bar = None                  # tqdm progress bar

    # Full-disk scan: walk the tree and queue every non-filtered file path.
    def scan(self):
        dir_count = 0
        file_count = 0
        self.path_list = []
        print('统计磁盘文件信息：')
        for dir_path, dir_names, file_names in os.walk(self.root_path):
            # Directories are only counted; files are filtered and queued.
            dir_count += len(dir_names)
            for name in file_names:
                file_count += 1
                full_path = os.path.join(dir_path, name)
                if not is_filter_path(full_path):
                    self.path_queue.put(full_path)
        self.total = self.path_queue.qsize()
        print('\n   路径数目:   ' + str(dir_count) +
              '\n   文件数目:   ' + str(file_count) +
              '\n   过滤后:     ' + str(self.total) + '\n'
              )

    # Drain the queue through a thread pool (per-file MD5), recording
    # progress in the DB and finally saving the JSON dictionary.
    def start_task_pool(self):
        db = DbHelper()
        db_index = db.insert_db_scan_file_table({'create_time': get_system_date_time()})
        # Persist the scan row id to a temp file so other steps can find it.
        write_file('dbtmp', str(db_index))

        worker_task_max = 10   # thread-pool size
        page = 1000            # tasks submitted per batch
        self.start_time = time.time()
        self.last_time = time.time()
        self.pool = ThreadPoolExecutor(worker_task_max)
        self.progress = 0
        self.bar = tqdm(total=self.total, desc='正在扫描:', unit='', unit_scale=False)
        self.current = 0
        while not self.path_queue.empty():
            # Submit at most one page of tasks, then wait for the batch so
            # the queue and the progress figures stay in sync.
            batch = min(self.path_queue.qsize(), page)
            db.update_db_scan_file_status(db_index, {
                'status': self.progress  # current scan progress (0..100)
            })
            tasks = [self.pool.submit(file_info_task, self.path_queue.get())
                     for _ in range(batch)]
            for task in as_completed(tasks):
                self.task_result(task.result())

        # task_result throttles its updates, so progress/bar may still be
        # short of 100% here — force the final state before reporting done.
        self.progress = 100
        self.bar.update(self.total - self.current)
        self.current = self.total
        self.bar.close()
        db.update_db_scan_file_status(db_index, {
            'status': self.progress  # scan finished
        })
        self.pool.shutdown(wait=True)
        self.save()
        print("扫描完成！ 总数：" + str(len(self.file_list)))

    # Collect one worker result and refresh progress (throttled).
    def task_result(self, result):
        result.dict_type = self.dict_type
        self.file_list.append(result.fields())
        curr_time = time.time()
        # Refresh at most every 0.2 s to keep tqdm/bookkeeping overhead low.
        if curr_time - self.last_time > 0.2:
            pre_done = self.current
            self.current = self.total - self.path_queue.qsize()
            self.progress = round(self.current / self.total * 100)
            self.last_time = curr_time
            self.bar.update(self.current - pre_done)

    # Save the collected file list (plus table metadata) as JSON.
    def save(self):
        obj = self.sys.obj()
        # Record the table layout and row count alongside the data.
        obj['table_name'] = FILE_TAB
        obj['lines'] = len(self.file_list)
        obj['fields'] = field_names()
        obj['delimiter'] = FIELD_DELIMITER
        obj['dict_type'] = self.dict_type

        data = {
            'info': obj,
            'list': self.file_list
        }
        # ensure_ascii=False emits raw UTF-8 characters, so pin the file
        # encoding instead of relying on the platform default.
        with open(self.file_name, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)


# end class

# Decide whether a path is excluded by the configured filter rules.
def is_filter_path(filename):
    """Return True when *filename* matches any regex in SCAN_FILTER_DIR."""
    return any(re.search(pattern, filename) for pattern in SCAN_FILTER_DIR)


# Task body executed by each worker thread.
def file_info_task(filepath):
    """Build a FileInfo for *filepath*, run its calculation, return it."""
    file_info = FileInfo(filepath)
    file_info.calc()
    return file_info


if __name__ == "__main__":
    # Manual smoke run: scan a fixed project directory as the current dict.
    scanner = FileScanner("/Users/liuguixue/works/project", SYS_CURRENT)
    scanner.scan()
    scanner.start_task_pool()