import asyncio
import csv
import json
import os
import sys
from urllib.parse import urlparse

import aiofiles
import aiohttp


class ImageDownloader:
    """Resumable, batched, concurrent image downloader driven by a CSV file.

    Each run processes exactly one batch of rows, records progress in a JSON
    status file so the next run resumes where it left off, and appends
    unrecoverable URLs to a pipe-delimited failure log for later retry.
    """

    def __init__(self, csv_file, batch_size=10000, max_retries=3):
        self.csv_file = csv_file
        self.batch_size = batch_size
        self.max_retries = max_retries
        # status_file must be assigned before load_status() reads it.
        self.status_file = "download_status.json"
        self.failed_urls_file = "failed_urls.txt"
        self.current_batch = self.load_status()

    def load_status(self):
        """Return the index of the last completed batch (0 if no status file)."""
        if os.path.exists(self.status_file):
            with open(self.status_file, 'r') as f:
                status = json.load(f)
                return status.get("last_batch", 0)
        return 0

    def save_status(self):
        """Persist the current batch index so the next run can resume."""
        status = {"last_batch": self.current_batch}
        with open(self.status_file, 'w') as f:
            json.dump(status, f)

    def record_failed_url(self, url, doctor_province, group_title, error_msg):
        """Append one pipe-delimited failure record: url|province|title|error."""
        with open(self.failed_urls_file, 'a', encoding='utf-8') as f:
            f.write(f"{url}|{doctor_province}|{group_title}|{error_msg}\n")

    async def download_image(self, session, url, doctor_province, group_title, save_path):
        """Download a single image with retries and exponential backoff.

        Returns True on success. On a permanent failure (payload error, 404)
        or after exhausting retries, records the URL via record_failed_url
        and returns False.
        """
        # Sanitize path separators so the failure log stores safe values.
        doctor_province = doctor_province.replace('/', '_').replace('\\', '_')
        group_title = group_title.replace('/', '_').replace('\\', '_')

        for attempt in range(self.max_retries):
            try:
                timeout = aiohttp.ClientTimeout(total=60)
                async with session.get(url, timeout=timeout) as response:
                    if response.status == 200:
                        # Make sure the target directory exists before writing.
                        os.makedirs(os.path.dirname(save_path), exist_ok=True)

                        # Stream the body to disk in 16 KB chunks.
                        async with aiofiles.open(save_path, 'wb') as f:
                            async for chunk in response.content.iter_chunked(16384):
                                await f.write(chunk)
                        return True
                    else:
                        print(f"下载失败 {url}, 状态码: {response.status}")
                        if attempt < self.max_retries - 1:
                            print(f"  重试 {attempt + 1}/{self.max_retries - 1}")
                            await asyncio.sleep(2 ** attempt)  # exponential backoff
            except asyncio.TimeoutError:
                print(f"下载超时 {url}")
                if attempt < self.max_retries - 1:
                    print(f"  重试 {attempt + 1}/{self.max_retries - 1}")
                    await asyncio.sleep(2 ** attempt)
            except aiohttp.ClientPayloadError as e:
                # Truncated/corrupt payloads are treated as permanent: no retry.
                error_msg = f"Response payload is not completed: {str(e)}"
                print(f"{url} - {error_msg}")
                self.record_failed_url(url, doctor_province, group_title, error_msg)
                return False
            except aiohttp.ClientResponseError as e:
                error_msg = f"Response error {e.status}: {e.message}"
                print(f"{url} - {error_msg}")
                if e.status == 404:
                    # 404 is permanent: record and stop retrying.
                    self.record_failed_url(url, doctor_province, group_title, error_msg)
                    return False
                elif attempt < self.max_retries - 1:
                    print(f"  重试 {attempt + 1}/{self.max_retries - 1}")
                    await asyncio.sleep(2 ** attempt)
            except Exception as e:
                error_msg = str(e)
                print(f"下载异常 {url}: {error_msg}")
                if attempt < self.max_retries - 1:
                    print(f"  重试 {attempt + 1}/{self.max_retries - 1}")
                    await asyncio.sleep(2 ** attempt)
        # All retries exhausted: log the URL for a later --retry-failed pass.
        self.record_failed_url(url, doctor_province, group_title, "Max retries exceeded")
        return False

    def get_filename_from_url(self, url):
        """Extract the file name from the URL's path component."""
        path = urlparse(url).path
        return os.path.basename(path)

    async def process_batch(self, rows):
        """Download one batch of CSV rows concurrently.

        Skips header rows and files that already exist on disk. BUGFIX: the
        previous version awaited each download coroutine one at a time, which
        made the batch fully sequential despite the 100-connection pool;
        downloads now actually run concurrently via asyncio.gather, with
        progress reported every 50 completions.
        """
        print(f"开始下载第 {self.current_batch + 1} 批，共 {len(rows)} 条记录")

        # HTTP session shared by the whole batch: 100 connections, 60 s timeout.
        connector = aiohttp.TCPConnector(limit=100)
        timeout = aiohttp.ClientTimeout(total=60)
        async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
            tasks = []
            for row in rows:
                # Skip a header row if one slipped into the batch.
                if row[0] == "id":
                    continue

                # Column layout: URL in col 4, group title in col 8, province in col 10.
                url = row[3]
                doctor_province = row[9]
                group_title = row[7]

                # Sanitize path separators before building the folder structure.
                doctor_province_clean = doctor_province.replace('/', '_').replace('\\', '_')
                group_title_clean = group_title.replace('/', '_').replace('\\', '_')

                # Target path: <province>/<group_title>/<filename>.
                folder_path = os.path.join(doctor_province_clean, group_title_clean)
                filename = self.get_filename_from_url(url)
                save_path = os.path.join(folder_path, filename)

                # Skip files already downloaded by a previous run.
                if os.path.exists(save_path):
                    print(f"文件已存在，跳过: {save_path}")
                    continue

                tasks.append(self.download_image(session, url, doctor_province, group_title, save_path))

            print(f"开始下载 {len(tasks)} 个文件...")
            results = []

            async def _tracked(coro):
                # Wrap one download so progress is reported every 50 completions.
                outcome = await coro
                results.append(outcome)
                if len(results) % 50 == 0:
                    success_count = sum(1 for r in results if r is True)
                    print(f"进度: {len(results)}/{len(tasks)}, 成功: {success_count}")
                return outcome

            # Run all downloads concurrently (bounded by the connector limit).
            await asyncio.gather(*(_tracked(t) for t in tasks))

            success_count = sum(1 for r in results if r is True)
            fail_count = len(results) - success_count

            print(f"第 {self.current_batch + 1} 批下载完成: 成功 {success_count}, 失败 {fail_count}")

    def read_csv_batch(self):
        """Return the rows of the current batch, or None when the CSV is exhausted."""
        with open(self.csv_file, 'r', encoding='utf-8') as file:
            reader = csv.reader(file)

            # Skip the header row.
            next(reader, None)

            # Skip rows belonging to batches completed by previous runs.
            start_line = self.current_batch * self.batch_size
            for _ in range(start_line):
                try:
                    next(reader)
                except StopIteration:
                    return None

            # Collect up to batch_size rows for this batch.
            batch_rows = []
            for _ in range(self.batch_size):
                try:
                    batch_rows.append(next(reader))
                except StopIteration:
                    break

            return batch_rows if batch_rows else None

    async def run(self):
        """Process exactly one batch, then persist the updated batch index."""
        batch_rows = self.read_csv_batch()

        # No more rows: all batches are done.
        if not batch_rows:
            print("所有批次下载完成")
            return

        await self.process_batch(batch_rows)

        # Advance and persist so the next invocation picks up the next batch.
        self.current_batch += 1
        self.save_status()

        print(f"第 {self.current_batch} 批下载完成，程序将退出。下次运行时将从第 {self.current_batch + 1} 批开始下载。")

async def download_failed_urls(downloader):
    """Re-attempt every URL previously recorded in the downloader's failure log.

    Reads the pipe-delimited failure log, backs it up, starts a fresh empty
    log (so this pass records its own failures cleanly), then retries all
    recorded URLs concurrently.
    """
    if not os.path.exists(downloader.failed_urls_file):
        print("没有失败的URL记录文件")
        return

    print("开始重新下载失败的URL...")

    # Parse well-formed records (url|province|title|error); keep the first three fields.
    retry_items = []
    with open(downloader.failed_urls_file, 'r', encoding='utf-8') as log:
        for raw_line in log:
            fields = raw_line.strip().split('|')
            if len(fields) == 4:
                retry_items.append((fields[0], fields[1], fields[2]))

    if not retry_items:
        print("没有需要重新下载的URL")
        return

    # Preserve the old log under .backup, then truncate to a fresh empty one.
    os.rename(downloader.failed_urls_file, downloader.failed_urls_file + ".backup")
    open(downloader.failed_urls_file, 'w').close()

    # Same session configuration as a normal batch: 100 connections, 60 s timeout.
    connector = aiohttp.TCPConnector(limit=100)
    timeout = aiohttp.ClientTimeout(total=60)
    async with aiohttp.ClientSession(connector=connector, timeout=timeout) as session:
        pending = []
        for item_url, province, title in retry_items:
            # Sanitize path separators before building the folder structure.
            safe_province = province.replace('/', '_').replace('\\', '_')
            safe_title = title.replace('/', '_').replace('\\', '_')

            # Target path: <province>/<group_title>/<filename>.
            target_dir = os.path.join(safe_province, safe_title)
            target_path = os.path.join(target_dir, downloader.get_filename_from_url(item_url))

            os.makedirs(target_dir, exist_ok=True)

            pending.append(downloader.download_image(session, item_url, province, title, target_path))

        if pending:
            outcomes = await asyncio.gather(*pending, return_exceptions=True)
            ok = sum(1 for r in outcomes if r is True)
            print(f"重新下载完成: 成功 {ok}, 总计 {len(pending)}")
        else:
            print("没有有效的URL需要重新下载")

def main():
    """Script entry point.

    Default mode downloads the next pending batch; pass --retry-failed to
    re-download URLs recorded in the failure log instead.
    """
    # CSV file driving the downloads.
    csv_file = "hehuang.csv"

    downloader = ImageDownloader(csv_file, batch_size=10000, max_retries=3)

    # Retry mode: re-download previously failed URLs instead of the next batch.
    # BUGFIX: was os.sys.argv, which relies on the undocumented internal `os.sys`.
    if len(sys.argv) > 1 and sys.argv[1] == "--retry-failed":
        asyncio.run(download_failed_urls(downloader))
        return

    print(f"开始下载图片...将从第 {downloader.current_batch + 1} 批开始下载")
    asyncio.run(downloader.run())

# Run the downloader only when executed as a script (not on import).
if __name__ == "__main__":
    main()