#!/usr/bin/python3
# -*- coding: utf-8 -*-
import httpx
from bs4 import BeautifulSoup
from lxml import etree
from pyquery import PyQuery as pq
import datetime


class HupuDetailSpider:
    """Scrape Hupu BBS post-detail pages and print their content.

    The class demonstrates three HTML-extraction approaches over the same
    kind of page: BeautifulSoup (``get_detail`` / ``get_shh_detail_bs4``),
    lxml XPath (``get_shh_detail_xpath``) and PyQuery (``get_shh_detail_pq``).
    All methods report results via ``print``; nothing is returned.
    """

    def __init__(self):
        # Base site URL (not fetched directly; detail URLs are passed in).
        self.url = "http://nba.hupu.com/"
        # Request headers: present a desktop-browser User-Agent so the
        # site serves the regular HTML page.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36"
        }

    def get_content(self, url):
        """Fetch *url* over HTTP/2 and return the raw response body as bytes.

        Fix: the original created an ``httpx.Client`` and never closed it,
        leaking the connection pool on every call; a ``with`` block now
        guarantees cleanup.  ``verify=False`` (TLS verification disabled) is
        kept for behavioral compatibility, but is a security risk — enable
        verification unless the target's certificate chain is truly broken.
        """
        with httpx.Client(http2=True, verify=False) as client:
            response = client.get(url=url, headers=self.headers)
            return response.content

    @staticmethod
    def _print_non_empty_text(tags):
        """Print the stripped text of each tag in *tags* that is non-blank.

        Shared helper extracted from four identical loops in ``get_detail``.
        """
        for item in tags:
            text = item.text.strip()
            if text:
                print(text)

    def get_detail(self, url):
        """Fetch a post page and print its body text.

        The last breadcrumb link decides the page category
        (``/4860`` news, ``/vote`` topic, ``/nba`` court); the body text is
        then pulled from ``<p>`` (or ``<div>``) children of the
        ``quote-content`` element.  Unrecognized categories print nothing.
        """
        html = self.get_content(url).decode('utf-8')
        soup = BeautifulSoup(html, 'lxml')
        breadcrumb = soup.find(itemprop="breadcrumb")
        # The last breadcrumb anchor's href identifies the board/category.
        br_href = breadcrumb.findAll('a')[-1].get('href')

        quote_content = soup.find(class_='quote-content')

        if br_href == '/4860':
            print('篮球新闻')
            self._print_non_empty_text(quote_content.findAll('p'))
        elif br_href == '/vote':
            print('湿乎乎的话题')
            self._print_non_empty_text(quote_content.findAll('p'))
        elif br_href == '/nba':
            # Court posts wrap text either in <div>s or in plain <p>s;
            # prefer <div> content when any is present.
            div_content = quote_content.findAll('div')
            print('div_content', div_content)
            if div_content:
                print('篮球场 div')
                self._print_non_empty_text(div_content)
            else:
                print('篮球场 p')
                self._print_non_empty_text(quote_content.findAll('p'))

    def get_shh_detail_xpath(self, url):
        """Print title and body of a post using lxml XPath.

        NOTE(review): *url* is only logged — the method parses the locally
        saved snapshot ``hupu_shh1.html`` instead of fetching the live page
        (the network path was deliberately disabled during development).
        """
        print('xpath url= ', url)
        element = etree.parse("hupu_shh1.html", etree.HTMLParser())

        # Post title(s).
        for h1_item in element.xpath('//div[@class="bbs-hd-h1"]//h1/text()'):
            print(h1_item)

        # Body text: span text nodes directly under quote-content paragraphs.
        for text_item in element.xpath('//div[@class="quote-content"]/p/span/text()'):
            if text_item.strip():
                print(text_item)

        # Highlighted-comment extraction is not implemented in this variant;
        # the header is printed for parity with the other parsers.
        print('亮评')

    def get_shh_detail_pq(self, url):
        """Print title, body, and comments of a post using PyQuery.

        NOTE(review): *url* is only logged — the method parses the locally
        saved snapshot ``hupu_shh.html`` instead of fetching the live page.
        Timestamps around the work allow rough timing comparison with the
        other parsing approaches.
        """
        print('pq url= ', url)
        print('start pq', datetime.datetime.now())
        with open("hupu_shh.html", encoding="utf-8") as f:
            content = f.read()
        doc = pq(content)

        # Post title.
        print(doc('.bbs-hd-h1').find('h1').text())

        # Body paragraphs (skip blank ones).
        for paragraph in doc('.quote-content').find('p').items():
            text = paragraph.text().strip()
            if text:
                print(text)

        # Highlighted comments — selector kept, output currently disabled.
        print('亮评')
        floor_box = doc('#readfloor .floor_box table p')

        print('全部评论')
        for comment_item in doc('.floor-show .floor_box tbody p').items():
            print(comment_item.text())
        print('end pq', datetime.datetime.now())

    def get_shh_detail_bs4(self, url):
        """Fetch a post page and print title, body, and highlighted comments
        using BeautifulSoup. Timestamps bracket the parsing for rough timing.
        """
        print('bs4 url= ', url)
        html = self.get_content(url).decode('utf-8')
        print('start', datetime.datetime.now())
        soup = BeautifulSoup(html, 'lxml')

        # Post title.
        print(soup.find(class_="bbs-hd-h1").find('h1').text)

        # Body paragraphs inside <div class="quote-content"> (skip blanks).
        quote_content = soup.find(class_='quote-content')
        self._print_non_empty_text(quote_content.findAll('p'))

        print('end', datetime.datetime.now())

        # Highlighted comments: every floor box under #readfloor.
        print('亮评')
        floor_box = soup.find(id='readfloor').find_all(class_='floor_box')
        for floor_box_item in floor_box:
            for box_item in floor_box_item.find('table').find_all('p'):
                print(box_item.text)
            print('----------------')

    def run(self):
        """Entry point: parse the currently selected sample post.

        Other sample URLs / parser variants used during development:
        44168023.html, 44205518.html via get_detail / get_shh_detail_bs4 /
        get_shh_detail_pq.
        """
        self.get_shh_detail_xpath('https://bbs.hupu.com/44206936.html')


if __name__ == '__main__':
    # Script entry point: build the spider and run the active detail parser.
    HupuDetailSpider().run()
