
import json
import os

import time  

import re
import queue
import random  

import logging as log

import threading
from concurrent.futures import ThreadPoolExecutor

from colorama import init, Fore, Style  

import requests
import urllib3
from urllib.parse import urlparse, parse_qs, urljoin, unquote_plus
import warnings  
from bs4 import BeautifulSoup

from sqlalchemy import create_engine, Column, Integer, String, JSON  , text 
from sqlalchemy.orm import declarative_base  
from sqlalchemy.orm import scoped_session, sessionmaker

import argparse

# 请求类
# HTTP request dispatcher: tasks are queued as [method_name, args] and drained
# onto a thread pool by run().
class LF_Request:
    num = 0                 # count of requests attempted (class-level, shared)
    urls = queue.Queue()    # task queue: each item is [method_name, [args...]]
    tasks = []
    max_tasks = 10          # worker threads in the pool
    check_time = 0.2        # scheduler polling interval, seconds
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        'Content-Type': 'text/html;charset=utf-8'
        }

    # GET request; on success invokes callback(url, body_text, callback_data).
    # Returns False (after logging) on any failure.
    def get(self, url, callback, callback_data, headers=None, cookie=None):
        try:
            self.num += 1
            if headers is None:
                headers = self.headers
            if cookie:
                headers['Cookie'] = cookie

            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            session = requests.Session()
            # BUG FIX: pass the prepared `headers` (which may carry the Cookie);
            # the original passed self.headers, discarding caller headers/cookie.
            r = session.get(url, timeout=60, verify=False, headers=headers)
            r.raise_for_status()  # raise HTTPError for non-2xx status
            r.encoding = 'utf-8'
            callback(url, r.text, callback_data)
        except Exception as e:
            log.error(f'get {url} err => {e}')
            return False

    # Streamed file download; on success invokes
    # callback(url, True, save_path, callback_data). Returns False on failure.
    def download(self, url, save_path, callback, callback_data, headers=None):
        try:
            self.num += 1
            # stream=True lets us read the response body in chunks
            if headers is None:
                response = requests.get(url, stream=True)
            else:
                # BUG FIX: honour the caller-supplied headers (original passed self.headers)
                response = requests.get(url, stream=True, headers=headers)

            response.raise_for_status()

            folder = os.path.dirname(save_path)
            if not os.path.exists(folder):
                os.makedirs(folder, exist_ok=True)

            with open(save_path, 'wb') as handle:
                for block in response.iter_content(1024):  # 1024-byte chunks
                    if not block:
                        break
                    handle.write(block)
            callback(url, True, save_path, callback_data)

        except Exception as e:
            log.error(f'get {url} err => {e}')
            return False

    # Scheduler loop: drains the task queue onto the pool until both the queue
    # and the in-flight futures are empty. Returns the number of requests attempted.
    def run(self):
        executor = ThreadPoolExecutor(max_workers=self.max_tasks)
        pending = []  # renamed from `all` to avoid shadowing the builtin
        try:
            while True:
                if self.urls.empty() and pending == []:
                    break

                still_running = []
                for fut in pending:
                    if not fut.done():
                        still_running.append(fut)
                    else:
                        res = fut.result()
                        # BUG FIX: the original condition mixed `and`/`or`
                        # (`res == True and res == "True" or ...`) so both True
                        # and False results were logged; log only real failures.
                        if res not in (True, "True", '', None):
                            log.error(res)
                pending = still_running

                # submit new tasks while we are below the backlog limit
                while len(pending) < self.max_tasks * 2 and not self.urls.empty():
                    task_info = self.urls.get()
                    fut = executor.submit(getattr(self, task_info[0]), *task_info[1])
                    pending.append(fut)

                time.sleep(self.check_time)

            return self.num
        except KeyboardInterrupt:
            print("用户按下了Ctrl+C，程序将退出。请耐心等待")
            while not self.urls.empty():
                self.urls.get_nowait()

            for fut in pending:
                fut.cancel()

            return self.num



# Shared declarative base; ORM models (UrlModel below) register on its metadata.
Models = declarative_base()  

class STATUS:
    """Status codes for UrlModel rows, each paired with its display string."""

    error = 0
    error_msg = '失败'

    success = 1
    success_msg = '成功'

    in_request = 1002
    in_request_msg = '请求中'

    failed_request = -1
    failed_request_msg = '请求失败'


# 创建一个基础模型类  
class UrlModel(Models):  
    """ORM row tracking one mirrored URL: its local file path, request status and metadata."""
    __tablename__ = 'urls'  
      
    # surrogate primary key
    id = Column(Integer, primary_key=True)  
    # the normalized remote URL; unique so each URL is recorded only once
    url = Column(String, unique=True, nullable=False)  
    # local file path the content is saved to
    filepath = Column(String, nullable=True)  
    # one of the STATUS.* codes
    status = Column(Integer, nullable=False)  
    # extra info dict: {'fun': 'get'|'download', 'type': filetype, 'level': crawl depth}
    data = Column(JSON, nullable=True)  

# 模型
# Thin wrapper around a SQLAlchemy session bound to the on-disk SQLite cache DB.
class Model:
    _session = threading.local()  # NOTE(review): appears unused — kept for compatibility

    def __init__(self, filename) -> None:
        # BUG FIX: the `filename` argument was ignored and a literal placeholder
        # path was used; build the SQLite URL from the caller-supplied path.
        self.engine = create_engine(f'sqlite:///{filename}')

        # Create all tables declared on the shared base (no-op when they exist).
        Models.metadata.create_all(self.engine)

        # Session factory and a default session bound to this engine.
        self.Session = sessionmaker(bind=self.engine)
        self.session = self.Session()
        # Wait (rather than fail) when another thread holds the database lock.
        self.session.execute(text("PRAGMA busy_timeout=100000000;"))

    def close(self):
        """Release the session and the engine's connection pool."""
        self.session.close()
        self.engine.dispose()

    def get_session(self):
        """Return a fresh, independent session bound to the same engine."""
        DbSession = sessionmaker(bind=self.engine)
        session = DbSession()
        return session


# 网站镜像器
# Website mirrorer: crawls pages via LF_Request, saves them under
# `<output>/<domain>/...` and rewrites href/src/css url() links to relative local paths.
class DownloadWeb:
    output = 'output'                    # root folder for the mirror
    level = None                         # maximum crawl depth; None means unlimited
    cookie = None                        # cookie string attached to page requests
    is_allow_extraterritorial = False    # whether links may leave the configured domains
    Anti_crawler = [['??', '']]          # [pattern, replacement] pairs applied to URLs before path mapping
    cmd_show = False                     # echo url.log lines to the terminal

    def __init__(self) -> None:
        # initialise colorama (terminal colours)
        init()
        self.request_obj = LF_Request()

    # Append a line to <output>/url.log, optionally echoing it to the terminal.
    def urllog(self, txt):
        if self.cmd_show:
            print(txt)
        with open(os.path.join(self.output, 'url.log'), 'a', encoding='utf-8') as f:
            f.write(f'{txt}\n')

    # Unique-ish string: millisecond timestamp + 6-digit random suffix.
    def generate_unique_str(self):
        timestamp = int(time.time() * 1000)
        random_part = random.randint(100000, 999999)
        return f"{timestamp}{random_part}"

    # Resolve `url` against `baseurl` into an absolute, percent-decoded URL.
    # Returns the input unchanged when it is already absolute.
    def get_full_link(self, url, baseurl):
        if url.startswith(('http://', 'https://')):
            return url
        # make sure the base ends with a slash so urljoin treats it as a directory
        if not baseurl.endswith('/'):
            baseurl += '/'
        joined_url = urljoin(baseurl, url)
        parsed_url = urlparse(joined_url)
        query_params = parse_qs(parsed_url.query)
        # rebuild the query string, preserving parameter order
        sorted_query_string = '&'.join([f"{key}={','.join(value)}" for key, value in query_params.items()])
        final_url = f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
        # BUG FIX: only append '?' when there actually is a query string
        # (the original produced e.g. "http://a/p.html?" for query-less URLs).
        if sorted_query_string:
            final_url += f"?{sorted_query_string}"
        try:
            return unquote_plus(final_url)
        except Exception:
            return final_url

    # Queue one page for download (depth-limited, domain-limited, deduplicated via DB).
    def page_download(self, url, level=1):
        if self.level != None and level > self.level:
            log.info("已到最大层")
            return True
        urlobj = urlparse(url)
        if urlobj.netloc not in self.domain:
            return False
        url = url.split('#')[0]  # drop the fragment

        # map the URL to a local file path
        filetype, filepath, urlpath = self.url_to_path(url)

        # anything that does not map to an .html file is also fetched as a plain file
        if not filepath.endswith('.html'):
            self.file_download(url)

        model = Model(os.path.join(self.output, "sql.db"))
        modeldata = model.session.query(UrlModel).filter(UrlModel.url == url, UrlModel.status == STATUS.in_request).first()
        # already being requested by another worker -> skip
        if modeldata:
            return modeldata
        # record the request; a UNIQUE violation means another worker got there first
        try:
            new_data = UrlModel(url=url, filepath=filepath, status=STATUS.in_request, data={'type': filetype, 'fun': 'get', 'level': level})
            model.session.add(new_data)
            model.session.commit()
        except Exception:
            return modeldata
        finally:
            model.close()

        # enqueue the actual HTTP request
        self.request_obj.urls.put(['get', [url, self.page_download_callback, {'type': filetype, 'fun': 'get', 'level': level}, None, self.cookie]])

    # Parse a downloaded page, rewrite its links, queue the linked resources, save it.
    def page_download_callback(self, url, html, callback_data):
        model = None
        try:
            self.urllog(f'page back -> {url}')
            model = Model(os.path.join(self.output, "sql.db"))
            warnings.simplefilter("ignore", category=UserWarning)
            bs = BeautifulSoup(html, "html.parser")

            # rewrite href attributes (recursing into in-domain pages)
            tags_with_href = [tag for tag in bs.find_all(True) if 'href' in tag.attrs]
            for tag in tags_with_href:
                this_href = self.get_full_link(tag['href'], url)
                if this_href == False:
                    continue
                res = self.url_to_path_and_get_relative_path(url, this_href)
                if res['target']['filetype'] == 'html':
                    parsed_url = urlparse(this_href)
                    # BUG FIX: was `obj.domain` (a module-level global), which breaks
                    # when this class is used outside the __main__ script.
                    if parsed_url.netloc in self.domain:
                        tag['href'] = res['relpath']
                        self.page_download(this_href, level=callback_data['level'] + 1)
                    else:
                        if not self.is_allow_extraterritorial:
                            tag['href'] = res['relpath']
                else:
                    tag['href'] = res['relpath']
                    self.file_download(this_href, level=callback_data['level'] + 1)

            # rewrite src attributes
            tags_with_src = [tag for tag in bs.find_all(True) if 'src' in tag.attrs]
            for tag in tags_with_src:
                this_href = self.get_full_link(tag['src'], url)
                if this_href == False:
                    continue
                if tag.name == 'img' or tag.name == 'image':
                    res = self.url_to_path_and_get_relative_path(url, this_href, 'png')
                else:
                    res = self.url_to_path_and_get_relative_path(url, this_href)
                tag['src'] = res['relpath']
                if res['target']['filetype'] == 'html':
                    self.page_download(this_href, level=callback_data['level'] + 1)
                else:
                    self.file_download(this_href, level=callback_data['level'] + 1, filetype=res['target']['filetype'])

            # save the rewritten page to its mapped local path
            modeldata = model.session.query(UrlModel).filter(UrlModel.url == url).first()
            filepath = modeldata.filepath
            folder = os.path.dirname(filepath)
            if not os.path.exists(folder):
                os.makedirs(folder, exist_ok=True)

            modified_html = bs.prettify()
            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(modified_html)

            # mark the row as done
            model.session.query(UrlModel).filter(UrlModel.url == url).update({"status": STATUS.success})
            model.session.commit()
            log.info(f"{url} => success")
        except Exception:
            log.error('An error occurred', exc_info=True)
        finally:
            # BUG FIX: `model` was unbound here when Model() itself raised,
            # turning the original error into a NameError.
            if model is not None:
                model.close()

    # Queue a static file for download (deduplicated via DB).
    # `filetype` is only a fallback; it is recomputed from the URL extension.
    def file_download(self, url, filetype='png', level=1):
        log.info('download ' + url)

        # map the URL to a local file path
        filetype, filepath, urlpath = self.url_to_path(url, filetype)

        model = Model(os.path.join(self.output, "sql.db"))
        modeldata = model.session.query(UrlModel).filter(UrlModel.url == url, UrlModel.status == STATUS.in_request).first()
        # already in flight -> skip
        if modeldata:
            return modeldata
        try:
            new_data = UrlModel(url=url, filepath=filepath, status=STATUS.in_request, data={'fun': 'download', 'type': filetype, 'level': level})
            model.session.add(new_data)
            model.session.commit()
        except Exception:
            return modeldata
        finally:
            model.close()

        self.request_obj.urls.put(['download', [url, filepath, self.file_download_callback, {'fun': 'download', 'type': filetype, 'level': level}]])

    # After a file download finishes: for CSS files, rewrite url(...) references
    # to relative paths and queue those resources too; then mark the row done.
    def file_download_callback(self, url, res, save_path, callback_data):
        model = None
        try:
            if callback_data['type'] == 'css':
                with open(save_path, 'r', encoding='utf-8') as f:
                    css_code = f.read()
                # find all url(...) references in the stylesheet
                urls = re.findall(r'url\(([^)]*)\)', css_code)

                for this_url in urls:
                    # strip surrounding single or double quotes
                    if this_url.startswith("'") and this_url.endswith("'"):
                        this_url = this_url[1:-1]
                    if this_url.startswith("\"") and this_url.endswith("\""):
                        this_url = this_url[1:-1]
                    if this_url.startswith("data:image"):
                        continue  # inline data URIs need no download
                    this_href = self.get_full_link(this_url, url)
                    relinfo = self.url_to_path_and_get_relative_path(url, this_href, default='jpg')
                    css_code = css_code.replace(this_url, relinfo['relpath'])
                    self.file_download(this_href, level=callback_data['level'] + 1)

                with open(save_path, 'w', encoding='utf-8') as f:
                    f.write(css_code)

            self.urllog(f'download back -> {url}')
            model = Model(os.path.join(self.output, "sql.db"))
            model.session.query(UrlModel).filter(UrlModel.url == url).update({"status": STATUS.success})
            model.session.commit()
            log.info(f"{url} => success")
        except Exception:
            log.error('An error occurred', exc_info=True)
        finally:
            # BUG FIX: `model` was unbound here when the CSS rewriting above raised.
            if model is not None:
                model.close()

    # Strip characters that are unsafe in file names.
    def delete_other_str(self, txt):
        remove_chars = '"\'/*<>:| '
        res = str.maketrans('', '', remove_chars)
        return txt.translate(res)

    # Map a URL to (filetype, local file path, browser-style path incl. fragment).
    def url_to_path(self, url, default='html'):
        # apply anti-crawler substitutions
        for i in self.Anti_crawler:
            url = url.replace(i[0], i[1])

        parsed_url = urlparse(url)
        domain = parsed_url.netloc
        # GET parameters as a dict (folded into the file name below)
        get_params = parse_qs(parsed_url.query)

        # normalise the path
        path = parsed_url.path
        path = path.replace(' ', '')
        path = path.replace('//', '/')

        if path.startswith('/'):
            path = path[1:]

        path = path.split('/')
        filename = path[-1]
        filepath = path[0:-1]

        if filename == '':
            filename = 'index.html'

        # no extension near the end of the name -> append the default one.
        # BUG FIX: the original replaced the whole name with a fixed placeholder,
        # making every extension-less URL collide on the same local file.
        if '.' not in filename[-6:-1]:
            filename = f'{filename}.{default}'

        # fold GET parameters into the name so distinct queries get distinct files
        file_get_path = ''
        for k, v in get_params.items():
            if type(v) == list:
                v = v[0]
            file_get_path += f'_{k}_{v}'
        file_get_path = self.delete_other_str(file_get_path)

        filetype = filename.split('.')[-1]
        # dynamic page extensions are saved as plain html
        if filetype == "htm" or filetype == "jsp" or filetype == "php":
            filetype = "html"
            filename = f"{filename.split('.')[0]}.{filetype}"

        if file_get_path != '':
            v = filename.split('.')
            filename = f"{v[0]}{file_get_path}.{v[-1]}"
        domain_dir = domain.replace(".", '_')
        filepath = os.path.join(self.output, domain_dir, *filepath, filename)

        filepath = filepath.replace('//', '/')
        filepath = filepath.replace('%', '_')
        filepath = filepath.replace('\\\\', '\\')
        url = filepath
        if parsed_url.fragment:
            url = url + "#" + parsed_url.fragment
        url = url.replace('\\', '/')
        return filetype, filepath, url

    # Map both URLs to local paths and compute the relative path from base to target.
    def url_to_path_and_get_relative_path(self, base_url, target_url, default='html'):
        """
        Use os.path.relpath to compute the path from the base file to the target file.

        :param base_url: string, URL of the referring page
        :param target_url: string, URL being linked to
        :return: dict with 'base'/'target' path info and 'relpath' (fragment preserved)
        """

        base_filetype, base_filepath, base_url = self.url_to_path(base_url, default=default)
        target_filetype, target_filepath, target_url = self.url_to_path(target_url, default=default)

        target_url_obj = urlparse(target_url)
        relpath = os.path.relpath(target_filepath, os.path.dirname(base_filepath))

        if target_url_obj.fragment:
            relpath = relpath + "#" + target_url_obj.fragment
        relpath = relpath.replace('\\', '/')

        return {
                'base': {'filetype': base_filetype, 'filepath': base_filepath, 'url': base_url},
                'target': {'filetype': target_filetype, 'filepath': target_filepath, 'url': target_url},
                'relpath': relpath
            }

    # Reset rows stuck in `in_request` (e.g. after a crash) to `error`.
    def clear_cache(self):
        model = None
        try:
            model = Model(os.path.join(self.output, "sql.db"))
            model.session.query(UrlModel).filter(UrlModel.status == STATUS.in_request).update({"status": STATUS.error})
            model.session.commit()
        except Exception:
            log.error('clear cache error occurred => ', exc_info=True)
        finally:
            # BUG FIX: guard against `model` being unbound when Model() raised
            if model is not None:
                model.close()

    # Re-queue previously recorded URLs so an interrupted run can continue.
    def resume_execution(self):
        self.clear_cache()
        model = None
        try:
            model = Model(os.path.join(self.output, "sql.db"))
            # NOTE(review): this re-queues successful rows too (status != in_request);
            # presumably intentional to refresh the link graph — confirm before narrowing.
            lists = model.session.query(UrlModel).filter(UrlModel.status != STATUS.in_request).all()
            for i in lists:
                if i.data['fun'] == 'get':
                    self.request_obj.urls.put(['get', [i.url, self.page_download_callback, {'type': i.data['type'], 'fun': 'get', 'level': i.data['level']}]])
                else:
                    self.request_obj.urls.put(['download', [i.url, i.filepath, self.file_download_callback, {'fun': 'download', 'type': i.data['type'], 'level': i.data['level']}]])

        except Exception:
            log.error('clear cache error occurred => ', exc_info=True)
        finally:
            if model is not None:
                model.close()

    def create_map(self):
        pass

    # Entry point: create redirect stubs for each start URL, queue the start
    # pages, then run the request pool until everything is downloaded.
    def run(self):
        model = Model(os.path.join(self.output, "sql.db"))

        for k, v in enumerate(self.urls):
            # quick-entry redirect page: <output>/index<N>.html -> mirrored start page
            this_filepath = os.path.join(self.output, f"index{k+1}.html")
            # map the start URL to its local path
            filetype, filepath, urlpath = self.url_to_path(v)

            folder = os.path.dirname(this_filepath)
            if not os.path.exists(folder):
                os.makedirs(folder, exist_ok=True)

            with open(this_filepath, 'w') as f:
                f.write(f"<head><meta http-equiv='Refresh' content='0; URL=\"{urlpath.replace(self.output+'/','')}\"' /></head>")

            # only queue start pages not already mirrored successfully
            if not model.session.query(UrlModel).filter(UrlModel.url == v, UrlModel.status == STATUS.success).first():
                self.page_download(v)

        model.close()
        # drain the queue until all requests are done
        res = self.request_obj.run()

        self.urllog(f'数据已处理完成：{res}')


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='LF工具箱-网站镜像器')
    parser.add_argument("-u","--url", dest='url', type=str, default="https://www.baidu.com/", metavar="", help="网址")
    parser.add_argument("-d","--domain", type=str, default="www.baidu.com", metavar="", help="域名 逗号隔开")
    parser.add_argument("-o","--output", type=str, default="download", metavar="", help="保存位置")
    parser.add_argument("-l","--level", type=int, default=1, metavar="", help="镜像层数")
    parser.add_argument("-m","--max_tasks", type=int, default=10, metavar="", help="多进程数量")
    # NOTE(review): store_true with default=True means this flag can never turn
    # output off; kept as-is for compatibility — confirm intent before changing.
    parser.add_argument("-c", "--cmd_show", action='store_true', default=True, help="终端输出(默认显示)")
    parser.add_argument("-lo","--link_other", action='store_true', default=False, help="是否允许访问域外")
    parser.add_argument("-k","--cookie", type=str, default='', metavar="", help="携带cookie字符串")
    parser.add_argument("-j","--json", type=str, default='', metavar="", help="json文件路径")
    args = parser.parse_args()

    # Optional JSON config file overrides the CLI values.
    try:
        if args.json:
            with open(args.json, 'r', encoding='utf-8') as f:
                data = json.load(f)
                # ROBUSTNESS FIX: use .get so a partial config keeps the CLI/default
                # values instead of aborting all overrides on the first missing key.
                args.url = data.get('url', args.url)
                args.domain = data.get('domain', args.domain)
                args.output = data.get('output', args.output)
                args.level = data.get('level', args.level)
                args.max_tasks = data.get('max_tasks', args.max_tasks)
                args.cmd_show = data.get('cmd_show', args.cmd_show)
                args.link_other = data.get('link_other', args.link_other)
                args.cookie = data.get('cookie', args.cookie)

    except Exception:
        log.error('读取配置文件错误', exc_info=True)

    # Ensure the output folder exists (the original created it twice).
    os.makedirs(args.output, exist_ok=True)

    # Error log goes to <output>/err.log.
    log.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s',
                level=log.ERROR,
                filename=os.path.join(args.output, 'err.log'),
                filemode='a',
                encoding='utf-8'
                )

    obj = DownloadWeb()
    # scheduler polling interval
    obj.request_obj.check_time = 0.2
    # worker thread count
    obj.request_obj.max_tasks = args.max_tasks
    # crawl depth (static files are not depth-limited)
    obj.level = args.level
    # echo url log to the terminal
    obj.cmd_show = args.cmd_show
    # output directory
    obj.output = args.output
    # allowed domains (files are not restricted)
    obj.domain = args.domain.split(',')
    # whether links may leave the allowed domains
    obj.is_allow_extraterritorial = args.link_other
    # cookie string sent with page requests
    obj.cookie = args.cookie
    # resume an interrupted run
    obj.resume_execution()

    # start URLs
    obj.urls = args.url.split(',')

    obj.run()