# -*- coding: utf-8 -*-

import requests
from django.http import HttpResponse
from lxml import etree
from django.shortcuts import render
from fake_useragent import UserAgent

from user_info_fun.models import Baidu_Data


def data_del():
    """Delete every Baidu_Data row from the database.

    Prints a confirmation message only when at least one row was removed.
    """
    # Delete directly on the queryset: `if qs:` would evaluate (load) the
    # entire queryset just to check emptiness; delete() returns the count.
    deleted_count, _ = Baidu_Data.objects.all().delete()
    if deleted_count:
        print('数据库清空成功!!!')


class TieBa_Spier():
    """Scraper for Baidu Tieba forum list pages.

    Fetches up to ``max_page`` pages of the forum named ``kw`` and extracts
    thread authors, thread titles, and author profile URLs via XPath.
    """

    def __init__(self, max_page, kw):
        self.max_page = max_page  # number of list pages to crawl
        self.kw = kw  # forum (tieba) name
        self.base_url = "https://tieba.baidu.com/f?kw={}&ie=utf-8&pn={}"
        # BUG FIX: the header key must be 'User-Agent' (with a hyphen).
        # The original 'UserAgent' key is not a real HTTP header, so the
        # random user agent was never actually sent to the server.
        self.headers = {
            'User-Agent': UserAgent().random
        }

    def get_url_list(self):
        """Return the list of page URLs; ``pn`` advances by 50 per page."""
        return [self.base_url.format(self.kw, pn)
                for pn in range(0, self.max_page * 50, 50)]

    def get_author(self, text):
        """Extract author names from the page HTML.

        The ``@title`` attribute looks like ``主题作者: name``; split on the
        first ':' only, so author names that themselves contain ':' survive.
        """
        html = etree.HTML(text)
        author_titles = html.xpath('.//span[@data-field]/@title')
        return [title.split(':', 1)[1] for title in author_titles]

    def get_title(self, text):
        """Extract thread titles from the page HTML.

        NOTE(review): the trailing space in the class name "j_th_tit " is
        intentional — it matches Tieba's markup exactly.
        """
        html = etree.HTML(text)
        return html.xpath('.//a[@class="j_th_tit "]/text()')

    def get_author_url(self, text):
        """Extract author profile links and make them absolute."""
        html = etree.HTML(text)
        hrefs = html.xpath('.//span[@class="frs-author-name-wrap"]/a/@href')
        url_head = 'https://tieba.baidu.com'
        return [url_head + href for href in hrefs]

    def get_text(self, url):
        """GET ``url`` and return its body decoded as UTF-8.

        ROBUSTNESS: a timeout is set so a stalled server cannot hang the
        request (and the whole view) forever.
        """
        response = requests.get(
            url=url,
            headers=self.headers,
            timeout=10,
        )
        response.encoding = 'utf-8'
        return response.text


def before_spider(request):
    """Render the spider page for logged-in users, else the jump page.

    Login state is determined by the presence of ``username`` in the session.
    """
    if not request.session.get('username'):
        return render(request, 'page_jump.html')
    return render(request, 'spider.html')


def spiders(request):
    """Run the Tieba crawl from the submitted form.

    On POST: validate the form, clear the old data, crawl ``max_page`` pages
    of forum ``kw_name`` and store each (author, title, author_url) row.
    On any other method: render the jump page.
    """
    if request.method != 'POST':
        return render(request, 'page_jump.html')

    # BUG FIX: int(request.POST.get('max_page', None)) raised TypeError on a
    # missing field and ValueError on a non-numeric one (HTTP 500). Validate
    # the input first — and only wipe the database once the request is known
    # to be well-formed (the original called data_del() unconditionally).
    kw_name = request.POST.get('kw_name')
    try:
        max_page = int(request.POST.get('max_page'))
    except (TypeError, ValueError):
        max_page = 0
    if max_page <= 0 or not kw_name:
        return HttpResponse('参数错误！！')

    data_del()
    spider = TieBa_Spier(max_page, kw_name)
    for url in spider.get_url_list():
        page_text = spider.get_text(url)
        authors = spider.get_author(page_text)
        titles = spider.get_title(page_text)
        author_urls = spider.get_author_url(page_text)
        # zip stops at the shortest list, so partially-parsed pages
        # cannot misalign rows.
        for author, title, au_url in zip(authors, titles, author_urls):
            Baidu_Data(author=author, title=title, author_url=au_url).save()
    return HttpResponse('成功！！')


def show_data(request):
    """Render the stored crawl results for the current session user."""
    # Explicit context instead of locals(): same two names, clearer intent.
    context = {
        'data_list': Baidu_Data.objects.values(),
        'username': request.session.get('username'),
    }
    return render(request, 'show_data.html', context)


def deldata(request):
    """Delete all stored Baidu_Data rows and report success.

    BUG FIX: the original iterated ``objects.values()``, which yields plain
    dicts — dicts have no ``delete()``, so the first iteration raised
    AttributeError. Deleting through the queryset does it in one query.
    BUG FIX: a Django view must return an HttpResponse; returning None makes
    the dispatcher raise ValueError.
    """
    Baidu_Data.objects.all().delete()
    return HttpResponse('删除成功！！')
