# Author: 王曾辉 (Wang Zenghui)
import re

import pymysql
import urllib3

#  Suppress "InsecureRequestWarning: Unverified HTTPS request is being made.
#  Adding certificate verification is strongly advised."
import requests
from bs4 import BeautifulSoup

urllib3.disable_warnings()
# A PoolManager instance generates requests; the instance handles connection
# pooling and all thread-safety details.
# NOTE(review): `http` is never used in the visible code — pages are fetched
# with requests.get() below; presumably left over from an earlier
# urllib3-based implementation. Confirm before removing.
http = urllib3.PoolManager()
# (Legacy note: requests were originally issued via http.request(); fetching
# now uses requests.get inside the class below.)
class spider2(object):
    """Scrape one Bilibili search-result page for video metadata.

    A class-level page counter ``n`` is shared by all instances; the
    ``flag`` argument of ``__init__`` selects how the counter moves:

    * ``0``  – reset to page 1
    * ``1``  – advance to the next page
    * ``-1`` – go back one page (clamped at page 1)
    * ``2``  – stay on the current page and enable MySQL storage

    ``go()`` fetches the page, parses it and returns a list of dicts,
    optionally persisting them via ``__storage``.
    """

    # Shared current-page counter across all instances.
    n = 1

    # HTTP headers sent with every search-page request.
    header = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Referer': 'https://search.bilibili.com/all?keyword=%E9%AC%BC%E7%95%9C&from_source=banner_search&spm_id_from=333.334.b_62616e6e65725f6c696e6b.1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    }

    def __init__(self, flag):
        """Adjust the shared page counter per ``flag`` (see class docstring).

        ``self.s == 1`` enables database storage in ``__storage``.
        """
        self.s = 0
        if flag == 0:
            spider2.n = 1
            self.x = spider2.n
        elif flag == 1:
            spider2.n += 1
            self.x = spider2.n
        elif flag == -1:
            spider2.n -= 1
            if spider2.n == 0:
                # Never page below 1.
                spider2.n += 1
            self.x = spider2.n
        elif flag == 2:
            self.x = spider2.n
            self.s = 1
        else:
            # Unknown flag: keep the current page instead of leaving
            # self.x unset (which would raise AttributeError later).
            self.x = spider2.n

    @property
    def x(self):
        # Current page number for this instance.
        return self.__x

    @x.setter
    def x(self, value):
        self.__x = value

    def __fetch_content(self):
        """Fetch and return the HTML text of the current search-result page."""
        base = (
            "https://search.bilibili.com/all?keyword=%E9%AC%BC%E7%95%9C"
            "&from_source=banner_search"
            "&spm_id_from=333.334.b_62616e6e65725f6c696e6b.2"
            "&order=click&duration=0&tids_1=0&page="
        )
        url = "%s%d" % (base, self.x)
        resObj = requests.get(url, headers=spider2.header)
        resObj.encoding = 'utf-8'
        return resObj.text

    def __analysis(self, html):
        """Parse a search-result page into a list of per-video dicts.

        Returns dicts keyed 标题/播放量/弹幕量/上传时间/链接
        (title / play count / danmaku count / upload date / link).
        """
        title = []
        href = []
        playtime = []
        subtitle = []
        date = []

        # Each result list lives in <ul class="video-contain clearfix">.
        products = re.findall(r'class="video-contain clearfix">([\s\S]*?)</ul>', html)

        # Each video card is an <li class="video matrix"> element.
        products_info = []
        for block in products:
            products_info.extend(
                re.findall(r'class="video matrix">([\s\S]*?)</li>', block))

        for info in products_info:
            titles = re.findall(r'title="(.+?)"', info)
            hrefs = re.findall(r'href="(.+?)"', info)
            # The whitespace in these patterns mirrors the page's exact
            # indentation around the icon <span> elements.
            playtimes = re.findall(
                r'class="icon-playtime"></i>\n          ([\s\S]*?)\n        </span>', info)
            subtitles = re.findall(
                r'class="icon-subtitle"></i>\n          ([\s\S]*?)\n        </span>', info)
            dates = re.findall(
                r'class="icon-date"></i>\n          ([\s\S]*?)\n        </span>', info)

            # Only the first title/href per card is the video's own;
            # later matches belong to nested markup.
            if titles:
                title.append(titles[0])
            if hrefs:
                href.append(hrefs[0])
            playtime.extend(playtimes)
            subtitle.extend(subtitles)
            date.extend(dates)

        # The last two cards are intentionally dropped (original behaviour —
        # presumably trailing promo entries; confirm against live page).
        # min() additionally guards against IndexError when any field list
        # came out shorter than the title list.
        count = min(max(len(title) - 2, 0),
                    len(playtime), len(subtitle), len(date), len(href))
        notebooks = []
        for i in range(count):
            notebooks.append({
                "标题": title[i],
                "播放量": playtime[i],
                "弹幕量": subtitle[i],
                "上传时间": date[i],
                "链接": href[i],
            })
        return notebooks

    def __show(self, products):
        """Print each scraped record (debug helper)."""
        for product in products:
            print(product)

    def __storage(self, products):  # 张聪 (Zhang Cong)
        """Insert scraped records into MySQL; no-op unless ``self.s == 1``.

        One connection is opened for the whole batch (the original opened
        and closed a connection per row). Each row is committed
        individually and rolled back on failure, preserving the original
        best-effort semantics.
        """
        if self.s != 1:
            return 0
        conn = pymysql.connect(
            host="localhost", user="root", password="",
            database="bilibilivideo", port=3306,
            charset='utf8'
        )
        try:
            cursor = conn.cursor()
            try:
                sql_insert = "insert into spider(title,playtime,subtitle,date,href) values(%s,%s,%s,%s,%s)"
                for product in products:
                    try:
                        cursor.execute(sql_insert, (product['标题'], product['播放量'],
                                                    product['弹幕量'], product['上传时间'], product['链接']))
                        conn.commit()
                    except Exception as e:
                        # Best-effort: report and keep going with the next row.
                        print(e)
                        conn.rollback()
            finally:
                cursor.close()
        finally:
            conn.close()

    def go(self):
        """Fetch, parse, optionally store, and return this page's records."""
        html = self.__fetch_content()
        products = self.__analysis(html)
        self.__storage(products)
        return products


if __name__ == "__main__":
    # Start from the first result page and run a single scrape cycle.
    bot = spider2(0)
    bot.go()

#print(notebooks)
#print(result3)



#print(title[0])
#print(products_info[0])
