import csv
import requests
from lxml import etree
from faker import Faker
import time
import requests



class Fanqie():
    """Scraper for the Fanqie novel library.

    Pages through the book-list API, fetches each book's detail page,
    parses title/author/abstract/word-count/status, and writes the
    results to ``shujia.csv``.
    """

    def __init__(self):
        # Default endpoint: the paged book-list API (params built in main()).
        self.url = "https://fanqienovel.com/api/author/library/book_list/v0/"
        self.headers = {
            "Cookie": "s_v_web_id=verify_mbrsomej_fGB287Un_NfdT_49PY_9jHw_bhUlOnINTApA; novel_web_id=7514633557248263690; serial_uuid=7514633557248263690; serial_webid=7514633557248263690; gfkadpd=2503,36144; passport_csrf_token=cef89731891c01232d61cda0e556c91e; passport_csrf_token_default=cef89731891c01232d61cda0e556c91e; passport_mfa_token=CjArneijszR4OUL2AX1tdexZ4FdIAvyexZyAmBEEHjG7glxZ98O08R0ewEVA1ct2HSMaSgo8AAAAAAAAAAAAAE8aBOsEvQO2PnZFFUwjVwPUAw7tgtiCGixuiFBTJE1Q0TjpaWHWRqJE30zguTaA2KBtEMzo8w0Y9rHRbCACIgEDkyHtpw%3D%3D; d_ticket=68d30f34b9bdbaa31776a1ca2687dd8bb77f6; odin_tt=b414c9da43cfb3e94c2b2dd8d53738f053a06c58e332c7df794401d14e1c68962ac82a74b4edcab5b9b23e0a614fa1fd4f9830eb54dd7422d8ff0f9f1d64c891; n_mh=4eWgK3rML1cNjrlOe5KpSNTMyvRCCnab_DZW6Hp75kg; passport_auth_status=a421998cdf27c6747034fa3ec4c1f472%2C; passport_auth_status_ss=a421998cdf27c6747034fa3ec4c1f472%2C; sid_guard=de4a16d4f053106b33511a18b6d7d071%7C1749638302%7C5184000%7CSun%2C+10-Aug-2025+10%3A38%3A22+GMT; uid_tt=45e1146c25c5f71496fd0bdf7660ac8a; uid_tt_ss=45e1146c25c5f71496fd0bdf7660ac8a; sid_tt=de4a16d4f053106b33511a18b6d7d071; sessionid=de4a16d4f053106b33511a18b6d7d071; sessionid_ss=de4a16d4f053106b33511a18b6d7d071; is_staff_user=false; sid_ucp_v1=1.0.0-KGRhMWRmODMxOTE0ZjdiM2Y1YjA2NTQ0NzljM2MzODBjYTJmMGQ5YzYKHgjc2LCuyax4EJ65pcIGGMcTIAwwnbmlwgY4AkDsBxoCbHEiIGRlNGExNmQ0ZjA1MzEwNmIzMzUxMWExOGI2ZDdkMDcx; ssid_ucp_v1=1.0.0-KGRhMWRmODMxOTE0ZjdiM2Y1YjA2NTQ0NzljM2MzODBjYTJmMGQ5YzYKHgjc2LCuyax4EJ65pcIGGMcTIAwwnbmlwgY4AkDsBxoCbHEiIGRlNGExNmQ0ZjA1MzEwNmIzMzUxMWExOGI2ZDdkMDcx; _ga=GA1.1.217305312.1749639468; _ga_S37NWVC3ZR=GS2.1.s1749639467$o1$g0$t1749639472$j55$l0$h0; Hm_lvt_2667d29c8e792e6fa9182c20a3013175=1749636980,1749696149; HMACCOUNT=B205854844C7EBDD; csrf_session_id=766f6e7ebf4b05f3c93515ca9f76ea5e; ttwid=1%7CSq_HYPijjHKUFfXVyTDGAk44iz04QAMEVBQmNWQapp8%7C1749699264%7C05ac80b36a9bcf3e7858728e270383f9237bb2b875224cd81c0fac9c1ff7e915; Hm_lpvt_2667d29c8e792e6fa9182c20a3013175=1749700116",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
            "authority": "fanqienovel.com"
        }

    def get_html(self, params=None, url=None):
        """GET ``url`` (or the default book-list API) and return the body.

        Returns the parsed JSON when the body decodes as JSON (the API),
        otherwise the raw text (the HTML detail pages).
        """
        # Rotate the User-Agent per request so traffic looks less uniform.
        self.headers["User-Agent"] = Faker().user_agent()
        # fix: the two branches only differed in the target URL — collapse them.
        target = url if url else self.url
        # fix: add a timeout so a stalled connection can't hang the crawl forever.
        response = requests.get(
            url=target, headers=self.headers, params=params, timeout=30,
        )
        response.encoding = response.apparent_encoding
        print("----", response.status_code)
        try:
            return response.json()
        except ValueError:
            # fix: narrowed from a bare except — Response.json() raises
            # ValueError (JSONDecodeError) on a non-JSON body; anything else
            # (network errors already raised above) should propagate.
            return response.text

    def parse_fath_data(self, response):
        """Fetch and parse the detail page of every book in ``response``.

        ``response`` is the JSON dict from the book-list API. Each book is
        retried up to 10 times (10 s apart) until every parsed field is
        non-empty; books that never parse cleanly are dropped.

        Returns the list of complete item dicts.
        """
        items = []
        for book in response["data"]["book_list"]:
            book_id = book["book_id"]
            for _ in range(10):
                time.sleep(10)  # throttle: avoid tripping anti-scraping limits
                # fix: f-string instead of "+" — also tolerates a numeric
                # book_id, where concatenation would raise TypeError.
                son_url = f"https://fanqienovel.com/page/{book_id}?enter_from=stack-room"
                son_html = self.get_html(url=son_url)
                item = self.parse_son_data(son_html)
                # Accept only fully-populated items (all values are strings,
                # so truthiness == non-empty); otherwise retry the fetch.
                if all(item.values()):
                    items.append(item)
                    break

        return items

    def parse_son_data(self, son_html):
        """Parse one book detail page (HTML string) into a field dict.

        Missing nodes yield empty strings, which parse_fath_data uses as
        its retry signal.
        """
        html_data = etree.HTML(son_html)

        title = "".join(html_data.xpath('//*[@class="info-name"]/h1/text()'))
        author = "".join(html_data.xpath('//*[@class="author-name-text"]/text()'))
        abstract = "".join(html_data.xpath('//*[@class="page-abstract-content"]/p/text()')).replace('\n', '')

        # Word count is split across two spans: the number and its unit.
        count = "".join(html_data.xpath('//*[@class="info-count-word"]/span[1]/text()'))
        unit = "".join(html_data.xpath('//*[@class="info-count-word"]/span[2]/text()'))
        word_count = count + unit

        status = "".join(html_data.xpath('//*[@class="info-label-yellow"]/text()'))

        return {
            "书名": title,
            "作者": author,
            "简介": abstract,
            "字数": word_count,
            "状态": status,
        }

    def main(self):
        """Crawl 99 pages of the library and write the rows to shujia.csv."""
        # fix: "w" instead of "w+" — the file is only written, never read back.
        # utf-8-sig keeps Excel happy with the Chinese headers; newline="" is
        # required by the csv module.
        with open("shujia.csv", "w", encoding="utf-8-sig", newline="") as f:
            # NOTE(review): "在读人数" is never produced by parse_son_data, so
            # DictWriter fills that column with "" on every row — kept so the
            # CSV schema stays unchanged; confirm whether it should be scraped.
            field_names = ["书名", "作者", "简介", "在读人数", "字数", "状态"]
            writer = csv.DictWriter(f, fieldnames=field_names)
            writer.writeheader()
            for page in range(99):
                print(page)
                params = {
                    "page_count": 18,
                    "page_index": page,
                    "gender": 0,
                    "category_id": -1,
                    "creation_status": -1,
                    "word_count": -1,
                    "book_type": -1,
                    "sort": 0,
                    "msToken": "LrciSmSVNIX2Tb8hxOXibQqLjspKh3jwKROlUCuDqeRkdNZ8XVo7VSLj5mUEOQqV51NbsfY2glQ6GOtxttw3tPHJPMaOidfcxUsOCLRwXAPbTQK82eFc-igOSThtooHruocNQT2W0K0Q",
                    "a_bogus": "Qv-dkOZ2Msm1mfV4Ohkz9bTmCcu0YW4mgZEzws2CMUwp",
                }
                response = self.get_html(params)
                items = self.parse_fath_data(response)
                # fix: writerows writes the whole page in one call.
                writer.writerows(items)



if __name__ == "__main__":
    Fq = Fanqie()
    Fq.main()