import json
import os
import re

from django.core import serializers
from django.http import HttpResponse
import requests
from lxml import etree
import time
import random
from .models import bookDisplay, books
from utils.get_chapter import extract_chapter_number
from utils.apiResponse import ApiResponse
from django.conf import settings


# Create your views here.

def get_book_by_number(request):
    """Return the stored content of one chapter of a novel.

    Query params:
        novel_number:   novel identifier (query-string values are strings).
        chapter_number: chapter identifier within the novel.

    Returns an ApiResponse whose ``data`` is a list of dicts (one per
    matching bookDisplay row); 404 when no chapter matches, 500 on
    unexpected errors.
    """
    # Query-string values arrive as strings; the ORM coerces them for the filter.
    novel_number = request.GET.get('novel_number')
    chapter_number = request.GET.get('chapter_number')
    try:
        book_chapters = bookDisplay.objects.filter(
            novel_number=novel_number, chapter_number=chapter_number
        )
        # A missing chapter is an expected client-side miss: answer 404
        # directly instead of raising a ValueError only to re-catch it
        # below and report a server error (500).
        if not book_chapters:
            return ApiResponse(message="没有找到对应的书籍", success=False, status=404)
        # .values() yields plain dicts, which ApiResponse can serialize directly.
        book_chapters = list(book_chapters.values())
        return ApiResponse(data=book_chapters, success=True, status=200, message="获取成功")

    except Exception as e:
        # Genuinely unexpected failures (DB errors, etc.) still surface as 500.
        return ApiResponse(message=str(e), success=False, status=500)


def get_book_list(request):
    """Return ``cnt`` randomly chosen books.

    Query params:
        cnt: number of books to return (integer).

    Returns an ApiResponse whose ``data`` is a list of dicts; 400 for a
    missing/invalid ``cnt``, 500 on other errors (including no books found).
    """
    try:
        # Parse inside a try: previously ``int(request.GET.get('cnt'))`` ran
        # before the try block, so a missing or non-numeric ``cnt`` escaped
        # as an unhandled TypeError/ValueError (Django 500 page).
        cnt = int(request.GET.get('cnt'))
    except (TypeError, ValueError):
        return ApiResponse(message="cnt 参数无效", success=False, status=400)
    try:
        # order_by('?') delegates random ordering to the database — fine for
        # small tables, can be slow on large ones.
        book_list = books.objects.all().order_by('?')[:cnt]
        if not book_list:
            raise ValueError("没有找到对应的书籍")
        # .values() yields plain dicts, which ApiResponse can serialize directly.
        book_list = list(book_list.values())
        return ApiResponse(data=book_list, success=True, status=200, message="获取成功")

    except Exception as e:
        return ApiResponse(message=str(e), success=False, status=500)


def get_catalog(request):
    """Return the table of contents of a novel, ordered by chapter number.

    Query params:
        novel_number: novel identifier (integer).

    Each catalog entry carries ``id``, ``chapter_number`` and ``book_name``.
    Returns 400 for a missing/invalid ``novel_number``, 500 on other errors.
    """
    try:
        # Parse inside a try: previously the int() conversion ran before the
        # try block, so a missing or non-numeric ``novel_number`` escaped as
        # an unhandled TypeError/ValueError (Django 500 page).
        novel_number = int(request.GET.get('novel_number'))
    except (TypeError, ValueError):
        return ApiResponse(message="novel_number 参数无效", success=False, status=400)
    try:
        book_catalog = bookDisplay.objects.filter(novel_number=novel_number).order_by('chapter_number').values(
            'id',
            'chapter_number',
            'book_name'
        )
        if not book_catalog:
            raise ValueError("没有找到对应的书籍")
        book_catalog = list(book_catalog)
        return ApiResponse(data=book_catalog, success=True, status=200, message="获取成功")

    except Exception as e:
        return ApiResponse(message=str(e), success=False, status=500)


def _clean_chapter_paragraphs(paragraphs):
    """Normalize scraped chapter paragraphs.

    Each paragraph is stripped, any '本章未完' continuation trailer is cut
    off, and a full-width indent plus newline is added.

    Returns:
        (text, page_number): the joined chapter text and the total page
        count parsed from a '共N页' marker in the trailer (0 when absent).
    """
    page_number = 0
    cleaned = []
    for raw in paragraphs:
        head, marker, trailer = raw.strip().partition("本章未完")
        cleaned.append('　　' + head + '\n')
        if marker:
            match = re.search(r'共(\d+)页', trailer)
            # Guard: a trailer without a page count used to leave ``match``
            # as None and crash on ``match.group(1)``.
            if match:
                page_number = int(match.group(1))
    return ''.join(cleaned), page_number


def crawler_book(request, book_id, book_name):
    """Crawl chapters of novel *book_id* from diquge.com into bookDisplay.

    *book_name* is stripped out of each chapter title when embedded in it.
    Currently only the first 10 chapters are crawled (sampling). Returns a
    plain HttpResponse when done.
    """
    print(f'开始爬取{book_id}的小说')

    base = 'http://www.diquge.com'
    url = f'{base}/book/{book_id}/'
    ua_list = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.0 Safari/605.1.15"
    ]

    headers = {
        # Rotate the User-Agent so the crawler looks less like a bot.
        'User-Agent': random.choice(ua_list),
        'Cookie': 'Hm_lvt_5a95b9b036f3115877c6b70742e12656=1741168886; HMACCOUNT=3833EDEE3D71D336; kuxin_history=384999%2C1; Hm_lpvt_5a95b9b036f3115877c6b70742e12656=1741169031'
    }
    # timeout keeps one stalled connection from hanging the whole crawl.
    response = requests.get(url, headers=headers, timeout=10)
    html = etree.HTML(response.text)
    url_href = html.xpath('//*[@class="dirlist three clearfix"]/li[*]/a/@href')
    print(f'共{len(url_href)}章')

    # If the crawl was interrupted, check the console for the last chapter
    # printed, then resume by uncommenting and adjusting these two lines:
    # cnt = url_href.index('/book/9282/563.html')
    # url_href = url_href[cnt:]

    question_list = []

    url_href = url_href[0:10]  # only crawl 10 chapters as a sample

    for href in url_href:
        url = base + href
        response = requests.get(url, headers=headers, timeout=10)
        html = etree.HTML(response.text)
        title = html.xpath('//*[@class="title"]/h1/a/text()')

        print("本章标题：", title[0])
        # Strip the book name out of the chapter title when embedded in it.
        parts = title[0].split(book_name)
        title = parts[1].strip() if len(parts) == 2 else title[0].strip()

        content, page_number = _clean_chapter_paragraphs(
            html.xpath('//*[@id="chaptercontent"]/p/text()'))

        if page_number > 1:  # the chapter spans multiple pages
            # Build every page URL from the ORIGINAL href. The old code
            # reassigned ``href`` on each pass, so from page 3 onward it
            # requested broken URLs like '/book/9282/563_p2_p3.html'.
            stem = href.split('.')[0]
            for page in range(2, page_number + 1):
                url = f'{base}{stem}_p{page}.html'
                response = requests.get(url, headers=headers, timeout=10)
                html = etree.HTML(response.text)
                extra, _ = _clean_chapter_paragraphs(
                    html.xpath('//*[@id="chaptercontent"]/p/text()'))
                content += extra
        else:
            # Single-page chapters are collected and printed at the end —
            # presumably flagged for manual review; confirm the intent.
            question_list.append(title)

        # Chapter numbers are fixed up directly in the database afterwards.
        chapter_number = 0

        book_display = bookDisplay(chapter_number=chapter_number, novel_number=book_id, book_name=title,
                                   book_content=content)
        book_display.save()
        print(f'{title} + —— {url} 写入数据库成功')

        # Random pause between chapters to stay polite to the site.
        time.sleep(random.randint(1, 2))

    print(question_list)

    return HttpResponse('爬取完成')


def crawler_book_info(request, book_id):
    """Crawl the metadata page of novel *book_id* from diquge.com.

    Saves the cover image under static/book_cover/<book_id>.jpg and inserts
    a ``books`` row with author / genre / latest chapter / introduction.
    Returns a plain HttpResponse on success.
    """
    print(f'开始爬取{book_id}的小说信息')

    url = f'http://www.diquge.com/book/{book_id}/'
    ua_list = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.0 Safari/605.1.15"
    ]

    headers = {
        # Rotate the User-Agent so the crawler looks less like a bot.
        'User-Agent': random.choice(ua_list),
        'Cookie': 'Hm_lvt_5a95b9b036f3115877c6b70742e12656=1741168886; HMACCOUNT=3833EDEE3D71D336; kuxin_history=384999%2C1; Hm_lpvt_5a95b9b036f3115877c6b70742e12656=1741169031'
    }
    # timeout keeps a stalled connection from hanging the request.
    response = requests.get(url, headers=headers, timeout=10)
    html = etree.HTML(response.text)

    # The info list carries author / genre / latest chapter, in that order.
    info = html.xpath('//*[@class="novelinfo-l"]/ul/li[*]/a/text()')
    author = info[0]
    book_type = info[1]
    new_chapter = info[2].split(' ')[0]
    book_introduction = html.xpath('//*[@class="body novelintro"]/p/text()')[0].strip()
    book_name = html.xpath('//*[@class="header line"]/h1/text()')[0]
    novel_number = book_id

    # TODO(review): the cover URL is hard-coded to one specific image — it
    # should be scraped from the book page so each book gets its own cover.
    img_url = 'http://www.diquge.com/cover/96/b4/02/96b40292f8d66634286925efa56f3917.jpg'
    # Bug fix: fetch the IMAGE url. The old code re-fetched ``url`` (the
    # HTML page) and wrote HTML bytes into the .jpg file.
    response = requests.get(img_url, headers=headers, timeout=10)
    # Save the cover image to disk.
    file_path = os.path.join(settings.BASE_DIR, 'static', 'book_cover', f'{book_id}.jpg')
    with open(file_path, 'wb') as f:
        f.write(response.content)

    # One timestamp shared by both the created and updated columns.
    now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

    # Persist the book metadata.
    book = books(book_name=book_name, book_author=author, book_introduction=book_introduction,
                 novel_number=novel_number, create_time=now,
                 update_time=now, new_chapter=new_chapter,
                 book_cover=f'book_cover/{book_id}.jpg', novel_type=book_type)
    book.save()

    return HttpResponse('保存成功')


# Some chapters come out of the crawler with problems; this view re-crawls one.
def renew_crawler_question_book(request, book_id, q_id):
    """Re-crawl a single chapter page and overwrite an existing row.

    Fetches /book/<book_id>/<q_id>.html (plus its continuation pages) and
    replaces ``book_content`` of one bookDisplay row.

    Query params:
        record_id: primary key of the bookDisplay row to overwrite.
                   Defaults to 0, matching the old hard-coded placeholder
                   (which points at no row and raises DoesNotExist).
    """
    # The operator supplies the URL pieces for the broken chapter.
    url = f'http://www.diquge.com/book/{book_id}/{q_id}.html'
    ua_list = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.0 Safari/605.1.15"
    ]

    headers = {
        # Rotate the User-Agent so the crawler looks less like a bot.
        'User-Agent': random.choice(ua_list),
        'Cookie': 'Hm_lvt_5a95b9b036f3115877c6b70742e12656=1741168886; HMACCOUNT=3833EDEE3D71D336; kuxin_history=384999%2C1; Hm_lpvt_5a95b9b036f3115877c6b70742e12656=1741169031'
    }
    # timeout keeps a stalled connection from hanging the request.
    response = requests.get(url, headers=headers, timeout=10)
    html = etree.HTML(response.text)

    content = html.xpath('//*[@id="chaptercontent"]/p/text()')

    page_number = 0
    for i in range(len(content)):
        # Strip the '本章未完' trailer and add a full-width indent + newline.
        res = content[i].strip().split("本章未完")
        content[i] = '　　' + res[0] + '\n'
        if len(res) == 2:
            match = re.search(r'共(\d+)页', res[1])
            # Guard: a trailer without a '共N页' count used to leave ``match``
            # as None and crash on ``match.group(1)``.
            if match:
                page_number = int(match.group(1))

    content = ''.join(content)

    if page_number > 1:  # the chapter spans multiple pages — fetch the rest
        for j in range(2, page_number + 1):
            url = f'http://www.diquge.com/book/{book_id}/{q_id}_p{j}.html'
            response = requests.get(url, headers=headers, timeout=10)
            html = etree.HTML(response.text)
            content1 = html.xpath('//*[@id="chaptercontent"]/p/text()')

            for k in range(len(content1)):
                res1 = content1[k].strip().split("本章未完")
                content1[k] = '　　' + res1[0] + '\n'

            content += ''.join(content1)

    # Overwrite the target row. The old code hard-coded ``id=00000``; the
    # primary key now comes from the query string so no source edit is
    # needed per repair.
    record_id = int(request.GET.get('record_id', 0))
    book_display = bookDisplay.objects.get(id=record_id)
    book_display.book_content = content
    book_display.save()
    print(f'{url} 写入数据库成功')
    return HttpResponse('爬取完成')
