import json
import os
import shutil
from urllib.parse import urljoin
import subprocess
import loguru
import requests
import tarfile
import zipfile
from bs4 import BeautifulSoup
proxies = {"http":"http://127.0.0.1:20809", "https":"http://127.0.0.1:20809"}
progress_file = 'progress.json'
from random import randint


def create_directory(path):
    """Create *path* (including missing parent directories) if absent.

    Uses ``exist_ok=True`` instead of an exists-check-then-create sequence,
    which avoids the race where another process creates the directory
    between the check and the makedirs call.
    """
    os.makedirs(path, exist_ok=True)

def download_file(url, dest_path):
    """Stream-download *url* into the file at *dest_path*.

    Sends a randomly chosen User-Agent to reduce the chance of being
    blocked by the server. Returns True on success, False on any network
    failure or non-200 status (errors are logged, not raised).
    """
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.7113.93 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 Safari/537.36',
        'Mozilla/5.0 (iPhone; CPU iPhone OS 14_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1',
        'Mozilla/5.0 (Linux; Android 10; SM-G981B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Mobile Safari/537.36'
    ]
    headers = {'User-Agent': user_agents[randint(0, len(user_agents) - 1)]}
    try:
        # Stream so large archives never sit fully in memory; the context
        # manager guarantees the connection is released afterwards (the
        # original never closed the response).
        with requests.get(url, stream=True, headers=headers, timeout=60) as response:
            if response.status_code != 200:
                loguru.logger.error(f"Failed to download file from {url}")
                return False
            with open(dest_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
    except requests.exceptions.RequestException as e:
        # RequestException covers ConnectTimeout/ConnectionError/ReadTimeout
        # and also errors raised mid-transfer by iter_content, which the
        # original except clause did not catch.
        loguru.logger.error(f'Failed to download. Caused by {e}')
        return False
    return True

def extract_file(file_path, extract_to):
    """Extract the archive at *file_path* into the directory *extract_to*.

    Supports every tar variant the ``tarfile`` module understands plus zip
    archives, and falls back to the system ``tar`` binary when the Python
    module cannot read the file. Unsupported formats are logged and skipped.

    NOTE(security): ``extractall`` trusts member paths inside the archive,
    so a malicious archive can write outside *extract_to* (tar-/zip-slip).
    Only extract archives from sources you trust.
    """
    if tarfile.is_tarfile(file_path):
        try:
            with tarfile.open(file_path, 'r:*') as tar:
                tar.extractall(path=extract_to)
        except tarfile.ReadError:
            # The generic tar branch previously had no fallback (unlike the
            # .tar.xz branch below); a damaged header no longer propagates.
            # '-xf' lets system tar auto-detect the compression.
            _extract_with_system_tar(file_path, extract_to, '-xf')
    elif zipfile.is_zipfile(file_path):
        with zipfile.ZipFile(file_path, 'r') as zip_ref:
            zip_ref.extractall(path=extract_to)
    elif file_path.endswith('.tar.xz'):
        try:
            with tarfile.open(file_path, 'r:xz') as tar:
                tar.extractall(path=extract_to)
        except tarfile.ReadError:
            # If tarfile cannot unpack it, try the system tar command.
            _extract_with_system_tar(file_path, extract_to, '-xJf')
    else:
        loguru.logger.error(f"{file_path} is not a supported archive format")


def _extract_with_system_tar(file_path, extract_to, flags):
    """Fallback: shell out to the system ``tar``; log (never raise) on failure."""
    try:
        subprocess.run(['tar', flags, file_path, '-C', extract_to], check=True)
    except subprocess.CalledProcessError as e:
        loguru.logger.error(f"Failed to extract {file_path} using system tar command: {e}")


def extract_spec_file(url, dest_path):
    """Scrape a .spec file rendered as HTML and save its text to *dest_path*.

    The page is expected to render each spec line in a ``<div class="line">``
    element; the line texts are re-joined with newlines before writing.
    """
    # Timeout so a hung server cannot stall the whole batch run
    # (the original call could block forever).
    response = requests.get(url, timeout=60)
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, 'html.parser')
        spec_code_lines = soup.find_all('div', class_='line')
        spec_code = "\n".join(line.get_text() for line in spec_code_lines)
        # Explicit UTF-8 so scraped content does not depend on the
        # platform's default encoding.
        with open(dest_path, 'w', encoding='utf-8') as f:
            f.write(spec_code)
    else:
        # error level, not info: this is a failure, and it matches the
        # logging level download_file uses for the same situation.
        loguru.logger.error(f"Failed to download .spec file from {url}")

def find_working_directory(dir_path):
    """Descend through single-child "wrapper" directories to the real one.

    Repeatedly asks should_open_subdir whether the current directory is just
    a wrapper; returns the first directory that should actually be processed,
    or None when the chain dead-ends with nothing usable.
    """
    current = dir_path
    while True:
        descend, child = should_open_subdir(current)
        if not descend:
            print(f"处理目录: {current}")
            # This is the directory the business logic should operate on.
            return current
        if not child:
            return None
        print(f"打开文件夹: {child}")
        current = child

def should_open_subdir(dir_path):
    """Decide whether *dir_path* is merely a wrapper directory.

    Returns a ``(descend, subdir)`` pair:
    - ``(True, path)``  — exactly one subdirectory and nothing but archive
      files: not a legitimate working directory, keep descending into it;
    - ``(True, None)``  — only archive files but not exactly one subdir:
      descend requested but there is nowhere to go;
    - ``(False, None)`` — at least one non-archive file: process here.
    """
    archive_suffixes = ('.zip', '.tar.gz', '.rar', '.7z', '.tar.xz')
    entries = os.listdir(dir_path)
    subdirs = [e for e in entries if os.path.isdir(os.path.join(dir_path, e))]
    regular = [e for e in entries if os.path.isfile(os.path.join(dir_path, e))]

    # Any file that is not an archive makes this a real working directory.
    if any(not name.endswith(archive_suffixes) for name in regular):
        return False, None
    if len(subdirs) == 1:
        return True, os.path.join(dir_path, subdirs[0])
    return True, None

def load_progress():
    """Return the set of already-processed project directories.

    Reads the module-level JSON progress file; an absent file simply means
    nothing has been processed yet, so an empty set is returned.
    """
    if not os.path.exists(progress_file):
        return set()
    with open(progress_file, 'r') as fh:
        return set(json.load(fh))

# Persist the list of already-scraped projects.
def save_progress(progress):
    """Write *progress* (an iterable of project dirs) to the JSON progress file."""
    serializable = list(progress)
    with open(progress_file, 'w') as fh:
        json.dump(serializable, fh)

def process_txt_file(txt_file, base_dir):
    """Process each ``name spec_url src_url`` line of *txt_file*.

    For every project: download its source archive into ``base_dir/name``,
    extract it, locate the real working directory inside, and drop the
    project's .spec file there. Progress is persisted after every project
    so a rerun skips already-completed entries.
    """
    progress = load_progress()

    with open(txt_file, 'r') as file:
        lines = file.readlines()
        for line in lines:
            parts = line.strip().split(' ')
            if len(parts) != 3:
                loguru.logger.error(f"Invalid line format: {line}")
                continue
            project_name, spec_url, src_url = parts
            project_dir = os.path.join(base_dir, project_name)
            if project_dir in progress:
                print(f'Skipping already scraped item: {project_dir}')
                continue
            create_directory(project_dir)

            src_file_name = src_url.split('/')[-1]
            src_file_path = os.path.join(project_dir, src_file_name)
            loguru.logger.info(f"Downloading {project_name} ......")
            # Gitee serves raw file content under /raw/ rather than /blob/.
            spec_url = spec_url.replace('blob', 'raw')
            # A relative source path is resolved against the project's repo.
            if 'http' not in src_url:
                src_url = urljoin(f"https://gitee.com/src-openeuler/{project_name}/raw/master/", src_url)
            if not download_file(src_url, src_file_path):
                # rmtree, not rmdir: the directory may hold a partially
                # written download or pre-existing files, and os.rmdir
                # raises OSError on any non-empty directory.
                shutil.rmtree(project_dir, ignore_errors=True)
                continue

            extract_file(src_file_path, project_dir)
            absolute_path = os.path.abspath(project_dir)
            working_dir = find_working_directory(absolute_path)
            if not working_dir:
                shutil.rmtree(absolute_path)
                loguru.logger.info(f"Delete {project_name}")
                continue
            spec_file_name = f"{project_name}.spec"
            spec_file_path = os.path.join(working_dir, spec_file_name)
            download_file(spec_url, spec_file_path)
            progress.add(project_dir)
            save_progress(progress)


def extract_and_copy_files(src, dst, file_list):
    """Mirror selected files from *src* into *dst*.

    Walks the whole *src* tree and copies every file whose basename appears
    in *file_list* to the corresponding relative location under *dst*,
    creating target directories on demand and preserving file metadata.
    """
    src = os.path.abspath(src)
    dst = os.path.abspath(dst)

    wanted = set(file_list)
    for root, _dirs, names in os.walk(src):
        # Destination directory mirroring this level of the source tree.
        target_dir = os.path.join(dst, os.path.relpath(root, src))
        for name in names:
            if name not in wanted:
                continue
            os.makedirs(target_dir, exist_ok=True)
            src_file_path = os.path.join(root, name)
            dst_file_path = os.path.join(target_dir, name)
            shutil.copy2(src_file_path, dst_file_path)
            print(f"Copied: {src_file_path} to {dst_file_path}")

if __name__ == '__main__':
    # Anchor the working directory to this script's own location so the
    # relative paths below resolve no matter where the script is launched from.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(current_dir)
    # Step 1: download and extract the projects listed in txt_file into base_dir.
    # NOTE: running ~500 entries in one go was flaky for unknown reasons;
    # manually running the list a third at a time worked around it.
    # txt_file = 'repos_with_mesonbuild.txt'
    # base_dir = './/'
    txt_file = 'demo.txt'
    base_dir = '../../maven-Test'
    process_txt_file(txt_file, base_dir)

    # Step 2: after running package-mapping.py, extract mapping-depends.yaml and
    # package-mapping-result.yaml from src into dst (see extract_and_copy_files).
    # src = 'D:\Iron\Work\spec-test\Test'
    # dst = 'D:\Iron\Work\submit\dependency-analysis\package-mapping-results'


    # src = '../../meson-Test'
    # dst = '../../meson-Results'
    # file_list = ['*.spec',
    #              'package-mapping-result.yaml',
    #              'cmake-mapping-depends.yaml','commands.yaml','depends.yaml','final_options.yaml','option_set.yaml','options.yaml','target.yaml',
    #              'autotool-mapping-depends.yaml','conditions.yaml','depends.yaml','final_depends.yaml','macros.yaml', 'meson.build', 'meson_options.txt']
    # file_list = ['package-mapping-result.yaml', 'mapping-depends.yaml']

    # extract_and_copy_files(src, dst, file_list)
