import asyncio
import sys
import time
import aiofiles
from colorama import init, Fore
from playwright.async_api import async_playwright
from config import config
from reporter import report


async def read_lines(file_path, batch_size):
    """Async generator yielding batches of unique, normalized URLs from a text file.

    Each yielded batch contains at most ``batch_size`` URLs (minimum batch
    size is clamped to 20). Lines are stripped, blank lines are skipped,
    URLs whose last dot-segment starts with any suffix in
    ``config.exclude_s`` are skipped, a missing scheme is defaulted to
    ``https://``, and duplicates across the whole file are dropped.

    :param file_path: path to a UTF-8 text file with one URL per line
    :param batch_size: requested number of URLs per yielded batch (>= 20)
    """
    try:
        if batch_size < 20:
            batch_size = 20
        async with aiofiles.open(file_path, 'r', errors='ignore', encoding='utf-8') as file:
            seen_urls = set()  # URLs already yielded; dedups across the whole file
            eof = False
            while not eof:
                urls = []
                for _ in range(batch_size):
                    line = await file.readline()
                    if not line:
                        # readline() returns '' only at EOF. The original code
                        # tested this inside `if url != ''` (unreachable), which
                        # also caused a full batch of blank/excluded lines mid-file
                        # to terminate the generator early and drop the rest.
                        eof = True
                        break
                    url = line.strip()
                    if not url:
                        continue
                    # Skip URLs whose last dot-segment starts with an excluded suffix.
                    if any(url.split('.')[-1].startswith(exclude_item) for exclude_item in config.exclude_s):
                        continue
                    if not url.startswith("https://") and not url.startswith("http://"):
                        url = "https://" + url
                    if url not in seen_urls:
                        urls.append(url)
                        seen_urls.add(url)
                if urls:
                    yield urls
    except Exception as e:
        print(e)
        print('文件路径或类型错误,当前仅支持传递txt文本')

headers=config.headers

async def screenshot_url(context, url, semaphore, nowtime, i):
    """Open *url* in a fresh page and screenshot it to ./img/<nowtime>/<i>.png.

    On success returns ``(url, status, title, '<i>.png')``; on any failure
    returns ``(url, 404, '该url无法访问', '无法访问')`` after printing an
    error line. Never raises, so it is safe under ``asyncio.gather``.

    :param context: playwright BrowserContext to open the page in
    :param url: fully-qualified URL to visit
    :param semaphore: asyncio.Semaphore bounding concurrent pages
    :param nowtime: timestamp string used as the output sub-directory
    :param i: sequential index used as the screenshot filename
    """
    init()  # colorama: enable ANSI colors (safe to call repeatedly)
    # Failure defaults; overwritten on the success path.
    status = 404
    title = '该url无法访问'
    imgpath = '无法访问'
    async with semaphore:
        # Create the page only AFTER acquiring the semaphore, so at most
        # config.screensemaphore pages exist at once (the original opened
        # the page before the semaphore, defeating the concurrency limit).
        page = None
        try:
            page = await context.new_page()
            await page.set_extra_http_headers(headers)
            screenout = config.screnn_timeout
            response = await page.goto(url, timeout=screenout, wait_until='commit')
            status = response.status
            try:
                # Wait for the page to settle before capturing.
                await page.wait_for_load_state('networkidle', timeout=screenout + 500)
                await page.wait_for_load_state('domcontentloaded', timeout=screenout)
                imgpath = f"{i}.png"
                await page.screenshot(path=f"./img/{nowtime}/" + imgpath, timeout=screenout)
            except Exception as e:
                if status == 200:
                    # Status 200 but some resources load too slowly and trigger a
                    # timeout: block font/image requests and retry the screenshot.
                    imgpath = f"{i}.png"
                    await page.route('**/*', lambda route: route.abort() if route.request.resource_type in ['font', 'image'] else route.continue_())
                    await page.goto(url, timeout=screenout, wait_until='commit')
                    await page.screenshot(path=f"./img/{nowtime}/" + imgpath, timeout=screenout)
                else:
                    raise e
            try:
                title = await page.title()
                if title == '':
                    title = '无标题'
            except Exception:
                title = '未获取到标题'
            if str(status).startswith('4'):
                print(
                    Fore.GREEN + '[+]' + Fore.RESET + f"{url} 状态码:" + Fore.RED + f"{status}" + Fore.RESET + f" 标题:{title}")
            else:
                print(Fore.GREEN + '[+]' + Fore.RESET + f'{url} 已截图 标题:{title} 状态码:' + Fore.GREEN + f'{status}' + Fore.RESET)
        except Exception:
            print(Fore.RED + '[-]' + Fore.RESET + f'{url} 无法访问或访问超时')
            status = 404
            title = '该url无法访问'
            imgpath = '无法访问'
        finally:
            # Single close point instead of duplicated close calls in both
            # branches; guards against leaking the page on any exit path.
            if page is not None:
                await page.close()
    return url, status, title, imgpath

async def main(filename, nowtime):
    """Drive the whole screenshot sweep for every URL listed in *filename*.

    Launches Chromium, injects the stealth script into every page, reads
    URLs in batches via :func:`read_lines`, screenshots each batch
    concurrently (bounded by ``config.screensemaphore``), then writes an
    HTML report via ``report.writehtml``.

    :param filename: path to the URL list (txt, one URL per line)
    :param nowtime: timestamp string used for the image output directory
    :return: total number of URLs processed
    """
    full_result = []
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=config.headmode)
        picturewidth = config.width
        pictureheight = config.height
        context = await browser.new_context(ignore_https_errors=True, viewport={"width": picturewidth, "height": pictureheight})
        # Inject stealth JS into every page to mask automation fingerprints.
        with open('./FastSeeTools/stealth.min.js', 'r') as f:
            js_code = f.read()
        await context.add_init_script(js_code)
        speed = config.screensemaphore
        semaphore = asyncio.Semaphore(speed)
        print('截屏探测任务开始运行...')
        print(f"并发数量为{config.screensemaphore},可在config.py中设置")
        # `count` doubles as both the processed-URL total and the sequential
        # image index (the original kept two always-equal counters).
        count = 0
        # Read the file as an async generator, one batch at a time.
        async for urllist in read_lines(filename, speed):
            tasks = []
            for url in urllist:
                count += 1
                tasks.append(screenshot_url(context, url, semaphore, nowtime, count))
            result = await asyncio.gather(*tasks)
            # extend() instead of `full_result = full_result + result`,
            # which re-copied the accumulated list on every batch (O(n^2)).
            full_result.extend(result)
        await context.close()
        await browser.close()
    # Split the (url, status, title, imgpath) tuples into the four parallel
    # lists the report writer expects.
    urllist = []
    codelist = []
    titlelist = []
    imgpathlist = []
    for url, code, title, imgpath in full_result:
        urllist.append(url)
        codelist.append(code)
        titlelist.append(title)
        imgpathlist.append(imgpath)
    try:
        report.writehtml(urllist, codelist, titlelist, imgpathlist, nowtime, config.htmlname)
        del urllist, codelist, titlelist, imgpathlist
    except Exception as e:
        print('截屏探测结果保存出错了', e)
    return count

# Main entry point (synchronous)
def Screen(filename, nowtime):
    """Synchronous entry point: run the screenshot sweep and print timing.

    Catches and prints any exception rather than propagating it.

    :param filename: path to the URL list file
    :param nowtime: timestamp string used for the output directory
    """
    try:
        starttime = time.time()
        # asyncio.run() replaces the deprecated get_event_loop() /
        # run_until_complete() / close() sequence (deprecated since 3.10).
        count = asyncio.run(main(filename, nowtime))
        time_diff = time.time() - starttime
        print(f"所花的时间：{time_diff:.2f}秒,共截屏探测了{count}个url")
    except Exception as e:
        print("出错了:")
        print(e)
    # The coroutine entry point below is the fastest way to invoke the sweep
async def screen(filename, nowtime):
    """Async entry point: run the screenshot sweep, print timing, return the URL count.

    :param filename: path to the URL list file
    :param nowtime: timestamp string used for the output directory
    :return: number of URLs processed
    """
    started_at = time.time()
    count = await main(filename, nowtime)
    time_diff = time.time() - started_at
    summary = f"所花的时间：{time_diff:.2f}秒,共截屏探测了{count}个url"
    print(summary)
    return count