import time
from lxml import etree

import os
import requests
import random
#可随机获取UserAgent
from fake_useragent import UserAgent

import sqlite3
from bin.pymysqlmod import Pymysqlmod
from bin.Sheet import Sheetmod
from myconfig import  *
"""
response = requests.get(url)中response是发送请求获取的响应对象；response响应对象中除了text、content获取响应内容以外还有其它常用的属性或方法：
response.url响应的url；有时候响应的url和请求的url并不一致
response.status_code 响应状态码
response.request.headers 响应对应的请求头
response.headers 响应头
response.request._cookies 响应对应请求的cookie；返回cookieJar类型
response.cookies 响应的cookie（经过了set-cookie动作；返回cookieJar类型
response.json()自动将json字符串类型的响应内容转换为python对象（dict or list）
r.status_code	HTTP请求的返回状态(HTTP是一个简单的请求-响应协议) 200为连接成功，否则，失败
r.text	响应内容的字符串形式，即url对应的页面内容
r.encoding	从HTTP header中猜测的响应内容编码方式
r.apparent_encoding	从内容中分析出的响应内容编码方式（备选编码方式）
r.content	HTTP响应内容的二进制形式
"""
class url():
    """
    Wraps the scraping workflow around a single base URL.

    Attributes:
        url: base URL to request.
        data: optional payload forwarded to requests.
        cookies: optional cookies forwarded to requests.
        headers: optional HTTP headers (e.g. a randomized User-Agent).
        proxylist: optional list of proxy dicts; one is chosen at random
            per request, with a direct retry on failure.
        urlkv: dict mirror of the five attributes above, keyed by name.
    """
    def __init__(self, url, data=None, cookies=None, headers=None, proxylist=None):
        self.url = url
        self.data = data
        self.cookies = cookies
        self.headers = headers
        self.proxylist = proxylist
        # Dict mirror kept so callers can introspect the configuration
        # via get_keys()/get_values()/get_all().
        self.urlkv = {
            "url": url,
            "data": data,
            "cookies": cookies,
            "headers": headers,
            "proxylist": proxylist,
        }

    def get_keys(self):
        """Return all configuration keys as a list."""
        return list(self.urlkv)

    def get_values(self):
        """Return all configuration values as a list (same order as get_keys)."""
        return list(self.urlkv.values())

    def get_all(self):
        """Return the whole configuration dict."""
        return self.urlkv

    def _request_kwargs(self):
        """Build the keyword arguments shared by get() and post().

        Only truthy settings are forwarded.  Unlike the previous
        eval()-based string assembly, values are passed as real objects,
        so dict payloads/cookies and URLs containing quotes work.
        """
        kwargs = {}
        for key in ("url", "data", "cookies"):
            if self.urlkv[key]:
                kwargs[key] = self.urlkv[key]
        if self.headers:
            kwargs["headers"] = self.headers
        return kwargs

    def _request(self, func, timeout):
        """Issue an HTTP request via *func* (requests.get or requests.post).

        When a proxy list is configured, a randomly chosen proxy plus
        *timeout* is tried first; on any failure the request is retried
        once without the proxy (and without a timeout), matching the
        original fallback behaviour.

        :param func: the requests function to call.
        :param timeout: timeout in seconds used only for the proxied attempt.
        :return: the requests response object.
        """
        kwargs = self._request_kwargs()
        if self.proxylist:
            proxy = random.choice(self.proxylist)
            try:
                r = func(timeout=timeout, proxies=proxy, **kwargs)
            except Exception as e:
                # Proxy failed: log the error and fall back to a direct request.
                print(e)
                print(proxy)
                r = func(**kwargs)
        else:
            r = func(**kwargs)
        print(r.status_code)
        return r

    def get(self):
        """Run requests.get with the stored configuration.

        :return: the requests response object.
        """
        # NOTE(review): the original used a 600s proxy timeout for GET but
        # 1s for POST; both values are preserved unchanged here.
        return self._request(requests.get, 600)

    def post(self):
        """Run requests.post with the stored configuration.

        :return: the requests response object.
        """
        return self._request(requests.post, 1)

    # Build a paginated listing URL.
    def newurl(self, page_num=None):
        """Return the base URL with Douban's 25-items-per-page offset.

        :param page_num: 1-based page number; when falsy the base URL is
            returned unchanged.
        """
        if page_num:
            return str(self.url) + "?start=" + str((int(page_num) - 1) * 25)
        return str(self.url)

    def pinlun_newurl(self, oldurl=None):
        """Turn a movie detail URL into its comments-page URL.

        Returns None when *oldurl* is falsy (original behaviour).
        """
        if oldurl:
            return str(oldurl) + "comments?status=P"

    # Send the request.
    def send_request(self, base_url=None):
        """GET *base_url* (or the stored URL) and return the response text.

        When *base_url* is given it replaces the stored URL for all
        subsequent requests as well.
        """
        if base_url:
            self.url = base_url
            self.urlkv["url"] = base_url
        return self.get().text

    # Parse the listing page.
    def paser_data(self, html_str=None):
        """Extract movie titles and ratings from a top250 listing page.

        :param html_str: raw HTML string; when None, None is returned.
        :return: flat list alternating title / rating (document order).
        """
        paser_data = None
        if html_str is not None:
            # Build an lxml HTML document object.
            html = etree.HTML(html_str)
            # Titles live under div.hd, ratings under div.bd.
            xpath_path = "//div[@class='hd']/a/span[1]/text()|//div[@class='bd']/div/span[2]/text()"
            paser_data = html.xpath(xpath_path)
        print(paser_data)
        return paser_data

    def pinlun_data(self, html_str=None):
        """Extract comment elements (<span class="short">) from a comments page.

        :param html_str: raw HTML string; when None, None is returned.
        :return: list of lxml elements whose .text is the comment body.
        """
        pinlun_data = None
        if html_str is not None:
            html = etree.HTML(html_str)
            print(html)
            pinlun_data = html.xpath("//div[@class='comment']/p/span[@class='short']")
        return pinlun_data

    # Persist parsed data.
    def save_data(self, paser_data, word):
        """Append (title, rating) pairs to file/<yy-mm-dd>/top250_<word>.txt.

        :param paser_data: flat list alternating title / rating.
        :param word: suffix for the output file name (e.g. the page number).
        """
        # One sub-directory per day.
        old_path = 'file/' + time.strftime('%y-%m-%d')
        if os.path.lexists(old_path) == False:
            os.makedirs(old_path)
        new_path = old_path + '/' + 'top250_' + word + '.txt'
        # "with" guarantees the handle is closed even if a write fails
        # (the original leaked it on error).  Append mode, tab-separated.
        with open(new_path, 'a+', encoding="utf-8") as file:
            for i in range(len(paser_data) // 2):
                file.write(paser_data[i * 2] + "\t" + paser_data[i * 2 + 1] + "\n")

    # Drive one scrape of a single listing page.
    def run(self):
        """Interactively scrape one top250 listing page and save it."""
        print('**********************')
        page_num = input("请输入爬取的页数")  # prompt: "enter the page number to crawl"
        base_url = self.newurl(page_num)
        html_str = self.send_request(base_url)
        paser_data = self.paser_data(html_str)
        if html_str is not None:
            self.save_data(paser_data, page_num)
            # NOTE(review): result of strftime is discarded — preserved as-is.
            time.strftime('结束爬取：%Y-%m-%d %H:%M:%S')

    def pinlun_run(self):
        """Crawl the comments page of every movie listed in the database
        and insert each comment into the bin_moviepinlun table.

        Stops (returns 0) once movie id 251 is reached.
        """
        i = 0
        talbe = Sheetmod(Pymysqlmod(HOST, USER, PASSWORD, NAME), "bin_goods",
                         "电影编号")
        rurl = talbe.getColmun("电影链接")
        movieid = talbe.getColmun("电影编号")
        name = talbe.getColmun('电影名称')
        talbe2 = Sheetmod(Pymysqlmod(HOST, USER, PASSWORD, NAME), "bin_moviepinlun",
                          "评论编号")
        for x in range(len(rurl)):
            time.sleep(5)  # throttle between movies to avoid being blocked
            base_url = self.pinlun_newurl(oldurl=rurl[x][0])
            html_str = self.send_request(base_url)
            pinlun_data = self.pinlun_data(html_str=html_str)
            if movieid[x][0] == 251:
                return 0
            # NOTE(review): purpose of this module-level progress flag is
            # not visible here — preserved as-is for any external reader.
            global flag
            flag = movieid[x][0] / 250
            print(movieid[x][0])
            for a in pinlun_data:
                i = i + 1
                # Truncate very long comments so they fit the DB column.
                # (Fixed off-by-one: the original sliced [1:14000], silently
                # dropping the first character of long comments.)
                text = a.text[:14000] if len(a.text) > 14000 else a.text
                talbe2.setdata((i, movieid[x][0], name[x][0], rurl[x][0], text))

def run():
    """Entry point: configure the scraper for Douban top250 and crawl
    every movie's comments into the database."""
    base_url = "https://movie.douban.com/top250"
    # Free proxy servers.
    # Fixed: the original entries were malformed ("https//host", missing
    # colon) — requests needs a full "scheme://host:port" proxy URL.
    proxy_list = [
        {"https": "https://78.46.200.37:80"}, {"https": "https://170.238.163.2:999"},
        {"https": "https://189.195.139.150:999"}, {"https": "https://210.211.122.169:8080"},
        {"https": "https://222.190.208.172:8089"}
    ]
    # Random browser User-Agent so requests look less like a bot.
    ua = UserAgent()
    user_agent = ua.firefox
    header = {
        'User-Agent': user_agent
    }
    u = url(base_url, proxylist=proxy_list, headers=header)
    # u.run()  # alternative: scrape the listing pages instead of comments
    u.pinlun_run()
# Only start the crawler when executed as a script, not on import.
if __name__ == "__main__":
    run()