import re
from datetime import datetime
import cgi
from urllib.parse import unquote
import asyncio
import lxml.etree

#
import httpx

#
from common import *
from mydb import CachedbConn

_semaphore = asyncio.Semaphore(4)
_semaphore_git = asyncio.Semaphore(1)


# ========
CONTENT_LENGTH_THRESHOLD = 1024
# ========
WEB_LOGGER = logging.getLogger("web")


def _http_header_last_modified(rsp: httpx.Response):
    """Parse the RFC 1123 Last-Modified response header; fall back to now() when absent."""
    RFC1123_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
    raw = rsp.headers.get("last-modified")
    if raw is None:
        return datetime.now()
    return datetime.strptime(raw, RFC1123_FORMAT)


def _http_header_attachment_name(rsp: httpx.Response):
    """Derive a download filename from the response headers.

    Returns the Content-Disposition filename when the response looks like a
    file download (disposition "attachment", or an application/* content-type);
    otherwise None. Falls back to the last URL path segment when no filename
    parameter is present.
    """
    # Fix: cgi.parse_header() was deprecated by PEP 594 and removed in Python
    # 3.13; email.message.Message provides equivalent RFC 2045 parameter
    # parsing (and additionally understands RFC 2231 "filename*" encoding).
    from email.message import Message
    from email.utils import collapse_rfc2231_value

    headers = rsp.headers
    msg = Message()
    msg["content-disposition"] = headers.get("content-disposition", "")
    disposition = msg.get_content_disposition() or ""
    # get_param() transparently merges "filename" and "filename*" parameters.
    filename = msg.get_param("filename", header="content-disposition")
    if filename is not None:
        # RFC 2231-encoded values arrive as a (charset, language, value) triple.
        filename = collapse_rfc2231_value(filename)
    attachment_name = unquote(filename or "")
    if not (disposition == "attachment" or headers.get("content-type", "").lower().startswith("application")):
        return None
    return attachment_name if attachment_name else unquote(str(rsp.url).split("/")[-1])


def _http_header_content_length_206(rsp: httpx.Response):
    """Extract the total size from a 206 response's Content-Range header; 0 if unavailable."""
    if rsp.status_code != 206:
        return 0
    content_range = rsp.headers.get("content-range", "")
    matched = re.match(r"bytes \d+-\d+/(\d+)", content_range)
    return int(matched.group(1)) if matched else 0


def _cond_pull_DL(dbrecord: dict):
    """True when the cached direct-link record is at least CONFIG["pull_interval"] days old."""
    elapsed = datetime.now() - dbrecord["lastPullTime"]
    return elapsed.days >= CONFIG["pull_interval"]


class DirectLinkConn:
    """Resolves size/name/mtime metadata for direct download URLs, cached via CachedbConn."""

    def __init__(self, cache_conn: CachedbConn):
        self.cache_conn = cache_conn
        self._client = httpx.AsyncClient(timeout=httpx.Timeout(CONFIG["pull_timeout"]), follow_redirects=True)

    async def get_header_info(self, url: str):
        """Return {"url", "name", "time", "size"} for ``url``, refreshing the cache when stale.

        Raises httpx.HTTPStatusError if the initial HEAD request fails.
        """
        dbrecord = self.cache_conn.get_DLheader(url)
        if dbrecord is None or _cond_pull_DL(dbrecord):
            WEB_LOGGER.info("获取 {} 的header".format(url))
            # ========
            rsp = await self._client.head(url)
            rsp.raise_for_status()
            headers = dict(rsp.headers.items())
            content_length = int(headers.get("content-length", "0"))
            # This ranged GET exists only to recover the real content-length
            # when HEAD did not yield a usable size.
            if content_length <= CONTENT_LENGTH_THRESHOLD:
                try:
                    # rsp.url is the URL after any 3xx redirects.
                    # Fix: max(..., 0) keeps the Range header well-formed when
                    # content-length was 0 (it used to emit "Bytes=0--1").
                    rsp2 = await self._client.get(
                        rsp.url,
                        headers={"Range": "Bytes=0-{}".format(max(content_length - 1, 0))},
                        follow_redirects=False,
                    )
                    rsp2.raise_for_status()
                    content_length = _http_header_content_length_206(rsp2)
                except httpx.HTTPStatusError:
                    WEB_LOGGER.warning("无法获取 {} 的文件大小".format(url))
                    content_length = 0
            # ========
            attachment_name = _http_header_attachment_name(rsp)
            last_modified = _http_header_last_modified(rsp)
            self.cache_conn.put_DLheader(
                url,
                content_length,
                headers.get("etag"),
                last_modified,
                # store the final URL only when a redirect actually happened
                str(rsp.url) if len(rsp.history) > 0 else None,
                attachment_name,
            )
            result = {"url": url, "name": attachment_name, "time": last_modified, "size": content_length}
        else:
            result = {
                "url": url,
                "name": dbrecord["attachmentName"],
                "time": dbrecord["lastestReleaseTime"],
                "size": dbrecord["contentLength"],
            }
        return result

    async def aclose(self):
        """Close the underlying AsyncClient (idempotent)."""
        if self._client is not None:
            # Fix: AsyncClient.aclose() is a coroutine; it was never awaited,
            # so the connection pool was leaked.
            await self._client.aclose()
            self._client = None

    async def __aenter__(self):
        # Fix: a sync __enter__ paired with an async __exit__ satisfied neither
        # `with` nor `async with`; use the async context-manager protocol.
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.aclose()


# ================git================
# def _overview_purified(text: str):
#     text = re.sub(r"\r+", "", text)
#     text = re.sub(r"[\t 　]+", " ", text)
#     text = re.sub(r"\s{2,}", "\n", text)
#     return text.strip()


def _cond_pull_release(dbrecord: dict):
    """True when the cached release record is at least CONFIG["pull_interval"] days old."""
    elapsed = datetime.now() - dbrecord["lastPullTime"]
    return elapsed.days >= CONFIG["pull_interval"]


class GitConn:
    """Interface for git-hosting release clients (implemented by GithubConn / GiteeConn).

    Subclasses own their httpx.AsyncClient and CachedbConn cache and do not
    call super().__init__().
    """

    def __init__(self, base_url, token, cache_conn: CachedbConn):
        pass

    async def get_releases(self, project: str):
        """Return cached-or-fresh non-prerelease releases for ``project`` ("owner/repo").

        Fix: the base implementation used to silently return None; raising
        makes a missing override an explicit error.
        """
        raise NotImplementedError


class GithubConn(GitConn):
    """GitHub releases API client with a CachedbConn-backed cache."""

    def __init__(self, base_url, token, cache_conn: CachedbConn):
        self.cache_conn = cache_conn
        # Fix: GitHub authenticates via "Authorization: token <PAT>"; a bare
        # "token" header is not recognized, so every request ran
        # unauthenticated (60 req/h rate limit).
        self._client = httpx.AsyncClient(base_url=base_url, headers={"Authorization": "token {}".format(token)})

    async def get_releases(self, project: str):
        """project: "owner/project_name". Returns a list of release dicts (non-prerelease only)."""
        dbrecord = self.cache_conn.get_git("github", project)
        if dbrecord is None or _cond_pull_release(dbrecord):
            # Fix: log message typo "gitub" -> "github"
            WEB_LOGGER.info("github获取 {} 的releases".format(project))
            rsp = await self._client.get("/repos/{}/releases".format(project))
            rsp.raise_for_status()
            releases = [i for i in rsp.json() if not i["prerelease"]]
            rsp = [
                {
                    "tag_name": release["tag_name"],
                    "name": release["name"],
                    "time": release["published_at"],
                    "assets": [
                        {
                            "name": asset["name"],
                            "browser_download_url": asset["browser_download_url"],
                            "size": asset["size"],
                        }
                        for asset in release["assets"]
                    ],
                }
                for release in releases
            ]
            # Fix: projects with no stable release used to IndexError on
            # rsp[0]; skip caching instead so the next call retries.
            if rsp:
                self.cache_conn.put_git("github", project, rsp, datetime_from_isoz(rsp[0]["time"]))
        else:
            rsp = dbrecord["response"]
        return rsp

    async def aclose(self):
        """Close the underlying AsyncClient (idempotent)."""
        if self._client is not None:
            # Fix: aclose() is a coroutine and was never awaited, leaking the pool.
            await self._client.aclose()
            self._client = None

    async def __aenter__(self):
        # Fix: sync __enter__ + async __exit__ satisfied neither `with` nor
        # `async with`; use the async context-manager protocol.
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.aclose()


class GiteeConn(GitConn):
    """Gitee releases API client with a CachedbConn-backed cache (token goes in the query string)."""

    def __init__(self, base_url, token, cache_conn: CachedbConn):
        self.cache_conn = cache_conn
        self._client = httpx.AsyncClient(base_url=base_url)
        self._token = token

    async def get_releases(self, project: str):
        """project: "owner/project_name". Returns a list of release dicts (non-prerelease only)."""
        dbrecord = self.cache_conn.get_git("gitee", project)
        if dbrecord is None or _cond_pull_release(dbrecord):
            # https://gitee.com/api/v5/swagger#/getV5ReposOwnerRepoReleases
            WEB_LOGGER.info("gitee获取 {} 的releases".format(project))
            rsp = await self._client.get(
                "/repos/{}/releases".format(project), params={"access_token": self._token, "direction": "desc"}
            )
            rsp.raise_for_status()
            releases = [i for i in rsp.json() if not i["prerelease"]]
            rsp = [
                {
                    "tag_name": release["tag_name"],
                    "name": release["name"],
                    "time": release["created_at"],
                    "assets": release["assets"],
                }
                for release in releases
            ]
            # Fix: projects with no stable release used to IndexError on
            # rsp[0]; skip caching instead so the next call retries.
            if rsp:
                self.cache_conn.put_git("gitee", project, rsp, datetime_from_isoz(rsp[0]["time"]))
        else:
            rsp = dbrecord["response"]
        return rsp

    async def aclose(self):
        """Close the underlying AsyncClient (idempotent)."""
        if self._client is not None:
            # Fix: aclose() is a coroutine and was never awaited, leaking the pool.
            await self._client.aclose()
            self._client = None

    async def __aenter__(self):
        # Fix: sync __enter__ + async __exit__ satisfied neither `with` nor
        # `async with`; use the async context-manager protocol.
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.aclose()


# ========
def _new_html_document(text, encoding="utf-8", parser=None):
    """Build an lxml HTML document from str or bytes.

    ``encoding`` only applies when ``text`` is bytes; str input ignores it.
    Carriage returns are stripped before parsing.
    """
    if isinstance(text, bytes):
        text = text.decode(encoding, "replace")
    normalized = text.replace("\r", "")
    return lxml.etree.HTML(normalized.encode("utf-8"), parser)


def _get_response_from_html_table(url: str, text: str):
    """Parse an autoindex-style HTML table (element id "list") into release rows.

    Returns a list of {"name", "url", "size", "time"} dicts; the first table
    row (parent-directory link) is skipped, and a non-numeric size becomes -1.
    """
    page = _new_html_document(text)
    names = page.xpath('//*[@id="list"]/tbody/tr/td[1]/a/text()')[1:]
    url_stems = page.xpath('//*[@id="list"]/tbody/tr/td[1]/a/@href')[1:]
    sizes = page.xpath('//*[@id="list"]/tbody/tr/td[2]/text()')[1:]
    times = page.xpath('//*[@id="list"]/tbody/tr/td[3]/text()')[1:]

    if not names:
        WEB_LOGGER.warning("{} 没有releases，请手动检查".format(url))
        return []

    rows = []
    for name, url_stem, size, mtime in zip(names, url_stems, sizes, times):
        size_text = size.strip()
        rows.append(
            {
                "name": name,
                "url": url + url_stem,
                "size": int(size_text) if size_text.isdigit() else -1,
                "time": str(mtime),
            }
        )
    return rows


def _fill_version_in_response(response: list[dict], pattern: str):
    """Attach a "version" key to each row whose name matches ``pattern``'s matcher.

    Non-matching rows are dropped; the result is sorted newest-first by
    (numeric version tuple, time).
    """
    version_reobj = get_version_matcher(pattern)
    # Fix: the regex used to be evaluated twice per row (once in the filter,
    # once for group(1)); bind the match object with a walrus instead.
    response = [
        line | {"version": matched.group(1)}
        for line in response
        if (matched := version_reobj.match(line["name"].rstrip("/"))) is not None
    ]
    response.sort(
        key=lambda line: (
            # "v1.2.3" / "V1.2.3" / "1.2.3" -> (1, 2, 3)
            tuple(int(part) for part in line["version"].lstrip("vV").split(".")),
            line["time"],
        ),
        reverse=True,
    )
    return response


class MirrorFTPConn:
    """Scrapes autoindex-style mirror/FTP listing pages for release files, with caching."""

    def __init__(self, cache_conn: CachedbConn):
        self.cache_conn = cache_conn
        self._client = httpx.AsyncClient(timeout=httpx.Timeout(CONFIG["pull_timeout"]), follow_redirects=True)

    async def get_info(self, entry_url: str, release_pattern: str):
        """Return release rows under ``entry_url`` matching ``release_pattern``.

        ``release_pattern`` is "<dir-pattern>/<file-pattern>"; the first
        segment selects the version directory, the last matches files in it.
        Returns [] when the pattern is missing or nothing matches.
        """
        entry_url = entry_url if entry_url.endswith("/") else entry_url + "/"
        if not release_pattern:
            WEB_LOGGER.error("{} 存在忘记填写的pattern，请手动检查".format(entry_url))
            return []
        # pattern_path (everything before the last "/") may not contain angle brackets
        pattern_path = "/".join(release_pattern.split("/")[:-1])
        # pattern_stem (the last segment) may contain angle brackets
        pattern_stem = release_pattern.split("/")[-1]
        url_with_pattern = (entry_url + pattern_path + "/") if pattern_path != "" else entry_url
        # ========
        dbrecord = self.cache_conn.get_MirrorFTP_info(url_with_pattern)
        filling_pattern = get_filling_pattern(url_with_pattern)
        # Fix: define the empty-result default up front instead of relying on
        # the loop below always binding the name.
        response_with_version = []
        if dbrecord is None or _cond_pull_release(dbrecord):
            # ========first fetch the entry (version directory) listing========
            WEB_LOGGER.info("获取 {} 的信息".format(entry_url))
            # The first segment of release_pattern (entry_pattern) must contain
            # {ver} or another brace placeholder, otherwise it is meaningless.
            entry_pattern = release_pattern.split("/")[0]
            entry_rsp = await self._client.get(entry_url)
            entry_rsp.raise_for_status()
            entry_response = _fill_version_in_response(
                _get_response_from_html_table(entry_url, entry_rsp.text), entry_pattern
            )
            if len(entry_response) == 0:
                WEB_LOGGER.warning("{} 无法匹配到pattern【{}】，请手动检查".format(entry_url, entry_pattern))
                return []
            # ========then locate the target releases========
            WEB_LOGGER.info("获取 {} 的信息".format(url_with_pattern))
            # Walk candidate versions newest-first until one page matches.
            for version_index in range(len(entry_response)):
                cur_entry_response = entry_response[version_index]
                page_url = filling_pattern.format(cur_entry_response["version"])
                rsp = await self._client.get(page_url)
                rsp.raise_for_status()
                response = _get_response_from_html_table(page_url, rsp.text)
                response_with_version = _fill_version_in_response(response, pattern_stem)
                if len(response_with_version) == 0:
                    WEB_LOGGER.warning("{} 无法匹配到pattern【{}】，请手动检查".format(page_url, pattern_stem))
                    continue
                self.cache_conn.put_MirrorFTP_info(
                    entry_url,
                    url_with_pattern,
                    response,
                    datetime.strptime(cur_entry_response["time"], STD_TIME_FORMAT),
                )
                break
        else:
            response_with_version = _fill_version_in_response(dbrecord["response"], pattern_stem)
        return response_with_version

    async def aclose(self):
        """Close the underlying AsyncClient (idempotent)."""
        if self._client is not None:
            # Fix: aclose() is a coroutine and was never awaited, leaking the pool.
            await self._client.aclose()
            self._client = None

    async def __aenter__(self):
        # Fix: sync __enter__ + async __exit__ satisfied neither `with` nor
        # `async with`; use the async context-manager protocol.
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.aclose()


# Module-level singletons shared by all request handlers; every connection
# reuses the same sqlite-backed cache.
CACHE_CONN = CachedbConn("cachedb.sqlite3")
GITHUB_CONN = GithubConn(cache_conn=CACHE_CONN, **CONFIG["github"])  # CONFIG["github"] supplies base_url/token
GITHUB_URL = "https://github.com"
GITEE_CONN = GiteeConn(cache_conn=CACHE_CONN, **CONFIG["gitee"])  # CONFIG["gitee"] supplies base_url/token
GITEE_URL = "https://gitee.com"
DL_CONN = DirectLinkConn(cache_conn=CACHE_CONN)
MIRROR_CONN = MirrorFTPConn(cache_conn=CACHE_CONN)


async def get_network_result(record: dict):
    """Fetch release/download info for a watch record, dispatching on its releaseType.

    Returns the fetched payload, or the branch's fallback value (None / [])
    when the lookup fails; failures are logged, never raised.
    """
    logging_url = ""
    result = None
    try:
        release_type = record["releaseType"]
        if release_type == "download":
            logging_url = record["releaseUrl"]
            async with _semaphore:
                result = await DL_CONN.get_header_info(logging_url)
        elif release_type in {"github", "gitee"}:
            project_url = record["releaseUrl"]
            project = "/".join(project_url.split("/")[-2:])
            host = project_url.replace("/{}".format(project), "")
            host_short = {GITHUB_URL: "github", GITEE_URL: "gitee"}.get(host)
            logging_url = f"{host_short}的 {project} "
            if not host_short:
                # unsupported host: bail out quietly (returns None)
                return
            git_conn: GitConn = {"github": GITHUB_CONN, "gitee": GITEE_CONN}[host_short]
            result = []
            async with _semaphore_git:
                result = await git_conn.get_releases(project)
        elif release_type == "mirrorFTP":
            entry_url = record["releaseUrl"]
            release_pattern = record["releasePattern"]
            logging_url = entry_url + release_pattern
            result = []
            async with _semaphore:
                result = await MIRROR_CONN.get_info(entry_url, release_pattern)
        return result
    except Exception as e:
        WEB_LOGGER.error("获取 {} 的信息时失败: {}".format(logging_url, str(e)))
        return result