import fnmatch
import os
import shutil
import subprocess
import tarfile
import time
import zipfile
from random import randint
from urllib.parse import urljoin

import loguru
import requests
from bs4 import BeautifulSoup

def create_directory(path):
    """Create *path* (and any missing parent directories).

    Uses ``exist_ok=True`` instead of an exists-check so a concurrent
    creation between check and call cannot raise ``FileExistsError``.
    """
    os.makedirs(path, exist_ok=True)

def download_file(url, dest_path):
    """Stream *url* into the file at *dest_path*.

    A browser-like User-Agent is picked at random to reduce the chance
    of the mirror rejecting the request.

    Returns:
        True on HTTP 200, False otherwise (the failure is logged).
    """
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.7113.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0'
    ]
    headers = {'User-Agent': user_agents[randint(0, 1)]}
    # Context manager closes the streamed connection on every path;
    # the original leaked it when the status was not 200.
    with requests.get(url, stream=True, headers=headers, timeout=60) as response:
        if response.status_code != 200:
            loguru.logger.error(f"Failed to download file from {url}")
            return False
        with open(dest_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
    return True

def extract_file(file_path, extract_to):
    """Unpack the archive at *file_path* into directory *extract_to*.

    Supports tar archives (any compression tarfile understands), zip
    files, and a system-``tar`` fallback for ``.tar.xz`` archives that
    the tarfile module cannot read. Unsupported formats are logged.
    """
    if tarfile.is_tarfile(file_path):
        with tarfile.open(file_path, 'r:*') as archive:
            archive.extractall(path=extract_to)
        return
    if zipfile.is_zipfile(file_path):
        with zipfile.ZipFile(file_path, 'r') as archive:
            archive.extractall(path=extract_to)
        return
    if file_path.endswith('.tar.xz'):
        try:
            with tarfile.open(file_path, 'r:xz') as archive:
                archive.extractall(path=extract_to)
        except tarfile.ReadError:
            # tarfile could not decompress it; fall back to the system tar
            subprocess.run(['tar', '-xf', file_path, '-C', extract_to], check=True)
        return
    loguru.logger.error(f"{file_path} is not a supported archive format")


def extract_spec_file(url, dest_path):
    """Scrape spec-file text out of a rendered code-view HTML page.

    The page is expected to mark each source line with a ``div.line``
    element; their text contents are joined with newlines and written
    to *dest_path*. Failures are logged, not raised.
    """
    # timeout added: the original could hang forever on a stalled server
    response = requests.get(url, timeout=60)
    if response.status_code != 200:
        # error level for consistency with download_file's failure path
        loguru.logger.error(f"Failed to download .spec file from {url}")
        return
    soup = BeautifulSoup(response.content, 'html.parser')
    spec_code_lines = soup.find_all('div', class_='line')
    spec_code = "\n".join(line.get_text() for line in spec_code_lines)
    with open(dest_path, 'w', encoding='utf-8') as f:
        f.write(spec_code)

def find_working_directory(dir_path):
    """Descend through wrapper directories until a real working dir is found.

    Repeatedly asks should_open_subdir whether the current directory is
    just a wrapper around a single subdirectory; if so, steps into it.
    Returns the first directory that holds real content, or None when a
    wrapper contains nothing usable.
    """
    current = dir_path
    while True:
        should_open, subdir_path = should_open_subdir(current)
        if not should_open:
            print(f"处理目录: {current}")
            return current
        if not subdir_path:
            return None
        print(f"打开文件夹: {subdir_path}")
        current = subdir_path

def should_open_subdir(dir_path):
    """Decide whether *dir_path* is only a wrapper that should be opened.

    Returns a ``(should_open, subdir_path)`` pair:
    - ``(True, <path>)``  — the directory holds exactly one subdirectory
      and nothing but archive files, so the caller should descend into it;
    - ``(True, None)``    — nothing but archives (and not exactly one
      subdirectory), so there is nothing usable here;
    - ``(False, None)``   — regular files exist; this is a real working dir.
    """
    archive_suffixes = ('.zip', '.tar.gz', '.rar', '.7z', '.tar.xz')

    regular_files = []
    subdirs = []
    for entry in os.listdir(dir_path):
        full = os.path.join(dir_path, entry)
        if os.path.isfile(full):
            regular_files.append(entry)
        elif os.path.isdir(full):
            subdirs.append(entry)

    only_archives = all(name.endswith(archive_suffixes) for name in regular_files)

    if only_archives and len(subdirs) == 1:
        # a lone subdirectory next to archives: just a wrapper, descend
        return True, os.path.join(dir_path, subdirs[0])
    if only_archives:
        return True, None
    return False, None

def process_txt_file(txt_file, base_dir):
    """Download, unpack and spec-annotate every project listed in *txt_file*.

    Each line of *txt_file* must hold three whitespace-separated fields:
    ``project_name spec_url src_url``. For every project this
    - creates ``base_dir/project_name``,
    - downloads the source archive (resolving relative paths against the
      project's gitee raw tree),
    - extracts it and locates the real working directory,
    - downloads the project's ``.spec`` file into that working directory.

    Projects whose download fails or that yield no usable working
    directory are removed again; malformed lines are logged and skipped.
    """
    with open(txt_file, 'r', encoding='utf-8') as file:
        for line in file:
            # split() tolerates tabs and repeated spaces, unlike split(' ')
            parts = line.split()
            if len(parts) != 3:
                loguru.logger.error(f"Invalid line format: {line}")
                continue
            project_name, spec_url, src_url = parts
            project_dir = os.path.join(base_dir, project_name)
            create_directory(project_dir)

            src_file_name = src_url.split('/')[-1]
            src_file_path = os.path.join(project_dir, src_file_name)
            loguru.logger.info(f"Downloading {project_name} ......")
            # gitee 'blob' URLs serve an HTML page; 'raw' serves the file itself
            spec_url = spec_url.replace('blob', 'raw')
            if 'http' not in src_url:
                # relative source path: resolve against the project's raw tree
                src_url = urljoin(f"https://gitee.com/src-openeuler/{project_name}/raw/master/", src_url)
            if not download_file(src_url, src_file_path):
                # the directory was just created and is still empty
                os.rmdir(project_dir)
                continue

            extract_file(src_file_path, project_dir)
            absolute_path = os.path.abspath(project_dir)
            working_dir = find_working_directory(absolute_path)
            if not working_dir:
                shutil.rmtree(absolute_path)
                loguru.logger.info(f"Delete {project_name}")
                continue
            spec_file_name = f"{project_name}.spec"
            spec_file_path = os.path.join(working_dir, spec_file_name)
            download_file(spec_url, spec_file_path)
            # be polite to the mirror between projects
            time.sleep(3)


def extract_and_copy_files(src, dst, file_list):
    """Mirror files matching any pattern in *file_list* from *src* to *dst*.

    Walks *src* recursively; every file whose basename matches one of the
    fnmatch patterns in *file_list* is copied (with metadata, via copy2)
    to the same relative location under *dst*, creating destination
    directories as needed.
    """
    src = os.path.abspath(src)
    dst = os.path.abspath(dst)

    for root, dirs, files in os.walk(src):
        # path of the current folder relative to src, mirrored under dst
        rel_path = os.path.relpath(root, src)
        dst_dir = os.path.join(dst, rel_path)

        for file in files:
            if any(fnmatch.fnmatch(file, pattern) for pattern in file_list):
                src_file_path = os.path.join(root, file)
                dst_file_path = os.path.join(dst_dir, file)

                # exist_ok avoids the check-then-create race of the original
                os.makedirs(dst_dir, exist_ok=True)

                shutil.copy2(src_file_path, dst_file_path)
                print(f"Copied: {src_file_path} to {dst_file_path}")

if __name__ == '__main__':
    # Stage 1 (disabled below): read txt_file and download/unpack each project
    # into base_dir via process_txt_file.
    # NOTE(review): per the original author, running ~500 packages in one go
    # was flaky; splitting the list into thirds worked — root cause unknown.
    # print(os.getcwd())
    current_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(current_dir)

    txt_file = 'demo.txt'
    base_dir = '../../autotools-Test'

    
    # process_txt_file(txt_file, base_dir)

    # Stage 2: after package-mapping.py has run, pull the result/dependency
    # YAML files (and .spec files) out of src into dst.

    src = '../../meson-Test'
    dst = '../../meson-Results'
    file_list = ['*.spec',
                 'package-mapping-result.yaml',
                 'cmake-mapping-depends.yaml','commands.yaml','depends.yaml','final_options.yaml','option_set.yaml','options.yaml','target.yaml',
                 'autotool-mapping-depends.yaml','conditions.yaml','depends.yaml','final_depends.yaml','macros.yaml']

    # src = 'D:\Iron\Work\spec-test\Test'
    # dst = 'D:\Iron\Work\submit\dependency-analysis\package-mapping-results'
    # file_list = ['package-mapping-result.yaml', 'mapping-depends.yaml']
    # extract_and_copy_files(src, dst, file_list)
    # src = '../../Test'
    # dst = '../../Results'
    # file_list = ['package-mapping-result.yaml', 'cmake-mapping-depends.yaml']
    # file_list = ['*.spec',
    #          'package-mapping-result.yaml',
    #          'cmake-mapping-depends.yaml','commands.yaml','depends.yaml','final_options.yaml','option_set.yaml','options.yaml','target.yaml',
    #          'autotool-mapping-depends.yaml','conditions.yaml','depends.yaml','final_depends.yaml','macros.yaml']

    extract_and_copy_files(src, dst, file_list)
