import requests
from lxml import etree
import time
import random
import openpyxl
import pandas as pd
from settings import *

class WeiboCraw:
    """Crawl Weibo search results (hot and regular posts) and save them to Excel.

    Relies on module-level names star-imported from ``settings``:
    ``url``, ``headers``, ``word``, ``total_page``, ``excel_file``,
    ``hot_excel_file``.
    """

    # XPath suffixes, relative to one result card, for each field we extract.
    # Dict order determines key order in the per-post dicts.
    _FIELDS = {
        'nick_name': '/div[1]/div[2]/div[1]/div[2]/a/text()',
        'content': '/div[1]/div[2]/p[2]/text()',
        'comment_count': '/div[2]/ul/li[2]/a/text()',
        'transmit_count': '/div[2]/ul/li[1]/a/text()[2]',
        'liked_count': '/div[2]/ul/li[3]/a/button/span[2]/text()',
        'time': '/div[1]/div[2]/p[1]/a[1]/text()',
    }

    def __init__(self):
        # Defaults come from settings.py (star-imported at module level).
        self.word = word
        self.total_page = total_page

    def get_resp_obj(self, word, page):
        """Fetch one search-result page and return it as an lxml HTML tree.

        :param word: search keyword (sent as the ``q`` query parameter)
        :param page: 1-based page number
        :raises requests.HTTPError: if the server answers with an error status
        """
        params = {
            'q': word,
            'page': page,
        }
        # timeout keeps the crawler from hanging forever on a dead connection;
        # raise_for_status stops us from parsing an error page as results.
        resp = requests.get(url=url, headers=headers, params=params, timeout=10)
        resp.raise_for_status()
        return etree.HTML(resp.text)

    def _extract(self, resp_obj, card, prefix):
        """Shared extraction for hot and regular cards; one dict per post.

        :param resp_obj: lxml tree produced by :meth:`get_resp_obj`
        :param card: card-level path segment — ``'div[2]'`` for the hot
            block, ``'div'`` for regular posts
        :param prefix: prefix applied to every dict key (``'hot_'`` or ``''``)
        :return: list of dicts, one per post found on the page
        """
        base = f'//*[@id="pl_feedlist_index"]/div[2]/div[@class="card-wrap"]/{card}'
        columns = [resp_obj.xpath(base + suffix) for suffix in self._FIELDS.values()]
        keys = [prefix + name for name in self._FIELDS]
        # zip truncates to the shortest column, so a card missing one field no
        # longer raises IndexError as the old parallel-index loop did.
        return [dict(zip(keys, row)) for row in zip(*columns)]

    def get_hot_info(self, resp_obj):
        """Return the page's hot posts (up to ten) as dicts keyed
        ``hot_nick_name`` … ``hot_time``."""
        return self._extract(resp_obj, 'div[2]', 'hot_')

    def get_info(self, resp_obj):
        """Return the current page's regular (non-hot) posts as dicts keyed
        ``nick_name`` … ``time``."""
        return self._extract(resp_obj, 'div', '')

    def get_all_info(self, word, total_page):
        """Crawl pages 1..total_page and return every regular post found.

        :param word: search keyword
        :param total_page: number of result pages to fetch
        """
        total_list = []
        for page in range(1, total_page + 1):
            print(f'-------正在爬取第{page}页-------')
            resp_obj = self.get_resp_obj(word, page)
            total_list += self.get_info(resp_obj)
            # Polite random delay between requests so we don't hammer the site.
            time.sleep(random.randint(2, 3))
        return total_list

    def save_to_excel(self, hot_list, list):
        """Write regular and hot posts to the Excel files named in settings.

        NOTE: the old code passed ``'a'`` as the second positional argument of
        ``to_excel`` — that argument is ``sheet_name``, not a write mode, so
        the data silently landed on a sheet called "a". ``to_excel`` always
        overwrites the target file; it has no append mode.
        (``list`` kept as the parameter name for caller compatibility,
        although it shadows the builtin.)
        """
        pd.DataFrame(list).to_excel(excel_file, index=False)
        pd.DataFrame(hot_list).to_excel(hot_excel_file, index=False)




