import json
import time
import traceback

from langchain_community.document_loaders import UnstructuredURLLoader
from lxml import etree
import requests
from urllib.parse import urlparse

from file.services import to_chunks
from tables.vector import Vectors
from tables.web import Webs
from utils.main import merge_doc


def remove_trailing_slash(s):
    """Return *s* with a single trailing '/' or '#' removed, if present."""
    return s[:-1] if s.endswith(('/', '#')) else s


class Spider:
    """Breadth-first crawler that collects every same-domain link reachable
    from a start page."""

    def __init__(self):
        # Site root; also used as the prefix filter for discovered links.
        self.domain = "https://www.calsp.cn"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.62"
        }

    def get_link(self, home_url=None):
        """Crawl breadth-first starting at *home_url* (defaults to self.domain).

        :param home_url: start URL; ``None`` means the configured domain root.
        :return: list of unique in-domain URLs in discovery order, including
            the start URL itself.
        """
        if home_url is None:
            home_url = self.domain
        res_list = [home_url]
        # Set mirror of res_list for O(1) membership tests (the original
        # `href in res_list` scan made the crawl accidentally O(n^2)).
        seen = set(res_list)
        # One session for the whole crawl instead of one per request.
        session = requests.Session()
        session.trust_env = False  # ignore proxy settings from the environment
        idx = 0
        while idx < len(res_list):
            get_url = res_list[idx]
            idx += 1
            try:
                # timeout keeps one unresponsive host from hanging the crawl
                response = session.get(get_url, headers=self.headers, timeout=30)
                # raise on HTTP error status
                response.raise_for_status()

                element = etree.HTML(response.text)
                for a in element.xpath("//a"):
                    hrefs = a.xpath("./@href")
                    if not hrefs:
                        continue
                    href = hrefs[0].strip()
                    if not href:
                        continue
                    href = remove_trailing_slash(href)
                    # Resolve fragment-only and root-relative links against
                    # the site domain.
                    if href.startswith("#") or href.startswith("/"):
                        href = self.domain + href
                    if not href.startswith(self.domain):
                        continue  # external link — out of scope
                    if href not in seen:
                        seen.add(href)
                        res_list.append(href)
            except Exception:
                print("ERROR:" + get_url)
                traceback.print_exc()
            time.sleep(2)  # be polite: throttle requests to the server
        return res_list


def real_url(url: str):
    """Follow redirects and return the final URL without a trailing slash.

    :param url: URL to resolve.
    :return: the final (post-redirect) URL, minus any single trailing ``/``.
    """
    # timeout prevents an unresponsive host from blocking the caller forever
    response = requests.get(url, timeout=30)
    final = response.url
    return final[:-1] if final.endswith("/") else final

def web_crawl(base_url: str, dis_suffix: str, dis_url: str):
    """Crawl *base_url* and return its in-domain page URLs, filtered.

    :param base_url: site root to crawl.
    :param dis_suffix: space-separated file suffixes to exclude (e.g. ".zip .rar");
        common document/media suffixes are always excluded in addition.
    :param dis_url: space-separated exact URLs to exclude.
    :return: list of page URLs with redirects resolved.
    """
    public_suffix = ".pdf .docx .doc .pptx .xlsx .xls .txt .mp3 .mp4 .avi .webm .ogg .mov .flv"
    # Join with an explicit space: the original `dis_suffix += public_suffix`
    # fused the caller's last suffix with ".pdf" (".zip" became ".zip.pdf"),
    # breaking both filters. split() without an argument also drops empty
    # tokens, so endswith("") can never match every URL.
    dis_suffix_tuple = tuple((dis_suffix + " " + public_suffix).split())
    dis_url_list = dis_url.split()

    res_list = []
    # Walk every link discovered under the site root.
    for url in Spider().get_link(base_url):
        # Skip excluded URLs, fragment-only anchors, and file downloads.
        if url in dis_url_list or urlparse(url).fragment or url.endswith(dis_suffix_tuple):
            continue
        # Resolve redirects, then re-check the exclusion list against the
        # final URL.
        url = real_url(url)
        if url in dis_url_list:
            continue
        res_list.append(url)

    return res_list



async def web_chunk(
        userid: int,
        rag_id: str,
        collection_name: str,
        base_url: str,
        url_list: list,
        chunk_type: int,
        embed: str,
        llm: str
):
    """Load each URL, split it into vector chunks, and record progress.

    On any failure the stored progress is reset to 0 and every chunk already
    written for this batch is deleted, then the original exception is
    re-raised unchanged.

    :param userid: owner of the RAG collection.
    :param rag_id: RAG identifier the chunks belong to.
    :param collection_name: vector-store collection receiving the chunks.
    :param base_url: crawled site root, used as the progress-record key.
    :param url_list: page URLs to chunk.
    :param chunk_type: chunking strategy, passed through to to_chunks.
    :param embed: embedding model name.
    :param llm: LLM name, passed through to to_chunks.
    """
    chunked_url = []
    try:
        for i, url in enumerate(url_list):
            loader = UnstructuredURLLoader([url])
            doc = merge_doc(loader.load())
            await to_chunks(
                rag_id=rag_id,
                userid=userid,
                collection_name=collection_name,
                filename=url,
                chunk_type=chunk_type,
                embed=embed,
                llm=llm,
                document=doc
            )
            # Track successfully chunked URLs so we can roll them back.
            chunked_url.append(url)
            _progress = ((i + 1) / len(url_list)) * 100
            await Webs.update_webs_chunk_progress(userid=userid, base_url=base_url, rag_id=rag_id, progress=_progress)
        # Mark the whole site as chunked.
        # NOTE(review): not awaited, unlike the other Webs calls above —
        # confirm update_chunk_status is synchronous.
        Webs.update_chunk_status(
            userid=userid,
            rag_id=rag_id,
            base_url=base_url,
            is_chunk=True
        )
    except Exception:
        # Roll back: reset progress and delete chunks already written.
        await Webs.update_webs_chunk_progress(userid=userid, base_url=base_url, rag_id=rag_id, progress=0.0)
        for url in chunked_url:
            await Vectors.delete_chunks(collection_name=collection_name, source=url)
        # Re-raise the original exception with its type and traceback intact
        # (the old `raise Exception(str(e))` discarded both).
        raise


