import requests


class YuqueBaseRequest:
    """Fetch raw markdown content for documents hosted in a Yuque knowledge base.

    Documents are addressed by a friendly name mapped to one or more Yuque
    slug paths; each slug is downloaded via Yuque's ``/markdown`` export URL.
    """

    def __init__(self, author: str, knowledge_base: str, doc_urls_dict: dict[str, list[str]]):
        """Build the base URL and normalize the document-slug mapping.

        :param author: Yuque account name (first path segment of the URL).
        :param knowledge_base: knowledge-base slug (second path segment).
        :param doc_urls_dict: mapping of friendly file name -> list of document
            slugs; slugs may be given with or without a leading ``/``.
        :raises Exception: if ``doc_urls_dict`` is empty.
        """
        self.base_url = f"https://www.yuque.com/{author}/{knowledge_base}"
        # Query flags ask Yuque for plain markdown without anchors/linebreak rewriting.
        self.extra_url = "/markdown?plain=true&linebreak=false&anchor=false"
        if not doc_urls_dict:
            raise Exception("doc_urls_dict cannot be empty")
        # Normalize into a private copy so the caller's dict is never mutated
        # (the original modified doc_urls_dict in place while iterating it).
        self.doc_urls_dict: dict[str, list[str]] = {
            doc: [url if url.startswith("/") else f'/{url}' for url in urls]
            for doc, urls in doc_urls_dict.items()
        }
        self.headers = {
            'accept': '*/*',
            'cache-control': 'no-cache',
            'referer': 'https://www.yuque.com/',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/135.0.0.0 Safari/537.36'
        }

    def fetch_content_by_file_name(self, file_name: str) -> str:
        """Download and concatenate (newline-joined) all documents mapped to *file_name*.

        :raises Exception: if *file_name* is not a known document name.
        """
        url_list = self.doc_urls_dict.get(file_name)
        if not url_list:
            raise Exception(f"file [{file_name}] not found")
        complete_urls = self._merge_complete_url(url_list)
        return "\n".join(self._send_request(url=url) for url in complete_urls)

    def fetch_content_and_split_by_header(self, file_name: str, header: str, strip_header: bool = True) -> str:
        """Return the section of *file_name* that starts at the exact line *header*.

        The section runs until the next markdown header line (``#`` prefix) that
        is preceded by a blank line, or to the end of the document.

        :param strip_header: when True (default) the header line itself is
            omitted from the returned text.
        :raises Exception: if *header* is empty, the file is empty, or the
            header occurs zero or more than one time.
        """
        if not header:
            raise Exception("header cannot be empty")
        lines = self.fetch_content_by_file_name(file_name).splitlines(keepends=False)
        if not lines:
            raise Exception(f"file [{file_name}] is empty")
        occurrences = lines.count(header)
        if occurrences == 0:
            raise Exception(f"file [{file_name}] does not contain header [{header}]")
        if occurrences > 1:
            raise Exception(f"file [{file_name}] contains more than one header [{header}]")
        start_index = lines.index(header)
        # Scan with ABSOLUTE indices. The original used the slice-relative
        # index as end_index (wrong section), wrapped to the slice's last line
        # via lines[...][index - 1] when index == 0, and defaulted end_index to
        # len(lines) - 1, which silently dropped the file's final line.
        end_index = len(lines)
        for i in range(start_index + 1, len(lines)):
            if lines[i].startswith('#') and lines[i - 1] == '':
                end_index = i
                break
        # 去除header (drop the header line itself when requested)
        if strip_header:
            start_index += 1
        return '\n'.join(lines[start_index:end_index])

    def _merge_complete_url(self, url_list: list[str]) -> list[str]:
        """Expand each slug path into a full Yuque markdown-export URL."""
        res = []
        for url in url_list:
            if not url.startswith("/"):
                raise Exception(f"url [{url}] is not start with /")
            res.append(self.base_url + url + self.extra_url)
        return res

    def _send_request(self, url: str, method: str = "GET", timeout: float = 30) -> str:
        """Issue an HTTP request and return the response body text.

        :param timeout: seconds before the request aborts (the original had no
            timeout, so a stalled server would hang the process forever).
        :raises Exception: on any non-200 status code.
        """
        method = method.lower()
        print(f'send request path: [{method}]{url}')
        response = requests.request(method=method, url=url, headers=self.headers, timeout=timeout)
        if response.status_code != 200:
            raise Exception(f"request failed, status code: {response.status_code}")
        return response.text


if __name__ == '__main__':
    # Demo: pull one section from a published knowledge base.
    docs = {
        'introduction': ['axr72hit2947cigh'],
        'entity.go': ['/pn6fx4i5az9vkp43', '/cc8tt7phtrnf63uw'],
        'value.go': ['/wyxtoxfvrbsnykoz'],
        'parser.go': ['/xag7bdrndfrbqlge'],
        'validate.go': ['/tg4qqkgayznzecry'],
        'activity_type_data': ['/ybg9p32a89859gts'],
    }
    client = YuqueBaseRequest('shuofeng09', 'pub4o1', docs)
    print(client.fetch_content_and_split_by_header('activity_type_data', '# 活动类型数据'))