import requests
from urllib.parse import urlencode, unquote
from config import *
from requests.exceptions import ConnectionError
from retrying import retry
from lxml import etree
from pyquery import PyQuery as pq


class WeixinSpider():
    """Crawl Sogou's Weixin search for a keyword and extract article data.

    Flow: build per-page search URLs -> fetch each result page through a
    random proxy -> collect article links -> fetch each article and parse
    title / content / publish time / author / account name.

    Requires a local proxy-pool service at http://localhost:5555/random,
    since sogou.com aggressively rate-limits repeated direct requests.
    """

    def __init__(self):
        # Search entry point; query parameters are appended by get_url_list().
        self.base_url = 'http://weixin.sogou.com/weixin?'
        self.keyword = KEY_WORD  # search keyword, from config
        self.page = PAGE         # number of result pages to crawl, from config
        # Browser-like headers plus a session cookie to reduce anti-spider blocks.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36",
            'Referer': 'http://weixin.sogou.com/',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,zh-TW;q=0.8,en-US;q=0.7,en;q=0.6',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Host': 'weixin.sogou.com',
            'Pragma': 'no-cache',
            'Upgrade-Insecure-Requests': '1',
            'Cookie': 'IPLOC=CN5101; SUID=B828D9AB5018910A000000005B1946D4; SUV=1528383188857647; ABTEST=0|1528383192|v1; SNUID=9303F5872C2945225D6CB9872CFB3D36; weixinIndexVisited=1; JSESSIONID=aaaQKTlQ71I3Ggmyjklnw; sct=3; pgv_pvi=9125960704; pgv_si=s2547306496; ld=ekllllllll2bpUhslllllV7BFaUlllll$d6omlllllGlllllxZlll5@@@@@@@@@@; LSTMV=197%2C71; LCLKINT=873'
        }
        self.proxy = None  # current proxy "ip:port", refreshed on every request

    def get_proxy(self):
        """Return a random proxy ("ip:port") from the local pool, or None.

        Returns None both on a non-200 answer and when the pool service is
        unreachable (fix: the original let ConnectionError propagate).
        """
        url = 'http://localhost:5555/random'
        try:
            r = requests.get(url)
        except ConnectionError:
            return None
        if r.status_code == 200:
            return r.text
        return None

    def parse_url(self, url, type='text', retries=3):
        """Fetch *url* through a random proxy and return its body.

        :param url: page to request
        :param type: 'text' -> decoded str, 'content' -> raw bytes
                     (name kept for caller compatibility although it
                     shadows the builtin)
        :param retries: remaining attempts before giving up with None
                        (fix: original recursed without bound and without
                        returning the recursive result)
        :return: response body, or None when all attempts fail
        """
        if retries <= 0:
            return None
        self.proxy = self.get_proxy()
        if self.proxy:
            print('use proxy:', self.proxy)
            proxies = {
                'http': 'http://' + self.proxy
            }
        else:
            print('没有获取到代理ip')
            # Fix: *proxies* was left unbound here, crashing the request below.
            # None makes requests go direct as a best-effort fallback.
            proxies = None
        try:
            # allow_redirects=False: a redirect means sogou flagged this
            # client, so treat anything but 200 as a failure and retry.
            r = requests.get(url=url, headers=self.headers,
                             proxies=proxies, allow_redirects=False)
        except ConnectionError as e:
            print('访问出错!!!', e)
            return self.parse_url(url, type, retries - 1)
        if r.status_code == 200:
            if type == 'text':
                print('获取成功')
                return r.content.decode()
            elif type == 'content':
                return r.content
            return None
        # Blocked or redirected: retry with a fresh proxy, preserving *type*
        # (fix: retries previously hard-coded 'text' and dropped the result).
        print(r.status_code)
        return self.parse_url(url, type, retries - 1)

    def get_url_list(self, page):
        """Build the search-result URLs for result pages 1..*page*."""
        url_list = []
        for i in range(1, page + 1):
            params = {
                'type': 2,  # 2 = search articles (1 would search accounts)
                'query': self.keyword,
                'page': i,
                's_from': 'input',
                'ie': 'utf8'
            }
            url_list.append(self.base_url + urlencode(params))
        return url_list

    def get_inner_url(self, html_str):
        """Extract article links from one search-result page.

        :param html_str: decoded HTML of a search-result page
        :return: list of article hrefs (also prints each title/href pair)
        """
        html = etree.HTML(html_str)
        article_urls = []
        for li in html.xpath('//div[@class="news-box"]/ul/li'):
            hrefs = li.xpath('.//h3/a/@href')
            if not hrefs:
                # Fix: skip malformed items instead of IndexError on [0].
                continue
            titles = li.xpath('.//h3/a//text()')
            title = titles[0] if titles else ''
            article_urls.append(hrefs[0])
            print(title, '\n', hrefs[0])
        return article_urls

    def get_detail(self, url):
        """GET an article page; return its text, or None on any failure."""
        try:
            r = requests.get(url)
            if r.status_code == 200:
                return r.text
            return None
        except Exception:  # narrowed from a bare except; still best-effort
            print('请求detail_url出错！')
            return None

    def get_data(self, html):
        """Parse an article page into a dict of metadata and content.

        Fix: the original assigned title/time twice, the second assignment
        clobbering the string with a raw xpath node *list*. Now the xpath
        text node is preferred, with pyquery's .text() as fallback, and
        plain strings are returned.
        """
        doc = pq(html)
        div = etree.HTML(html)
        titles = div.xpath('//h2[@id="activity-name"]/text()')
        title = titles[0].strip() if titles else doc('#activity-name').text()
        content = doc('.rich_media_content ').text()
        times = div.xpath('//em[@id="publish_time"]/text()')
        time = times[0].strip() if times else doc('#publish_time').text()
        autor = doc('#profileBt > a').text()
        wechat = doc('#js_profile_qrcode > div > p:nth-child(3) > span').text()
        return {
            'title': title,
            'content': content,
            'time': time,
            'autor': autor,  # key spelling kept for existing consumers
            'wechat': wechat
        }

    def save_data(self, datas):
        """Persist crawled article dicts (not implemented yet)."""
        pass

    def run(self):
        """Main loop: build page urls, fetch each, follow article links."""
        # 1. build search-result urls (fix: use the configured page count
        #    instead of a hard-coded 10)
        url_list = self.get_url_list(self.page)
        # 2. fetch each result page
        for url in url_list:
            html_str = self.parse_url(url, 'text')
            if not html_str:
                # Fix: original passed None into etree.HTML on fetch failure.
                continue
            article_urls = self.get_inner_url(html_str)
            # 3./4. extract and save data from every article on the page
            for inner_url in article_urls:
                article_html = self.get_detail(inner_url)
                if article_html:
                    article_data = self.get_data(article_html)
                    print(article_data)
                    self.save_data(article_data)


if __name__ == "__main__":
    weixin = WeixinSpider()
    weixin.run()
