#coding:utf-8

import requests
from urllib.parse import urlencode
from requests.exceptions import ConnectionError
from pyquery import PyQuery as pq
import pymongo
from lxml.etree import XMLSyntaxError
import time

MONGO_URI = 'localhost'
client =pymongo.MongoClient(MONGO_URI)
MONGO_DB = 'weixin'
db=client[MONGO_DB]

base_url="http://weixin.sogou.com/weixin?"

headers={
   'Cookie':'SUV=006417883A64B6955A9D3327F7DAC475; ABTEST=8|1520410506|v1; IPLOC=CN3301; SUID=DD2647654942910A000000005A9F9F8A; SUID=DD2647653220910A000000005A9F9F8A; weixinIndexVisited=1; JSESSIONID=aaaLD1LDeTZpdcliABwhw; sct=4; PHPSESSID=qfbhudj0rjcio4guiu3o8rmf50; SUIR=E01B7A593C385AF2146F04333D89E4EB; SNUID=1EE284A1C3C6A20C90439876C4CC524F; ppinf=5|1520429025|1521638625|dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZToxNjpmZWlzdGVsJUVFJTg0JThEfGNydDoxMDoxNTIwNDI5MDI1fHJlZm5pY2s6MTY6ZmVpc3RlbCVFRSU4NCU4RHx1c2VyaWQ6NDQ6bzl0Mmx1SkI5TXVFa09DTDFEb0VXbm8zQVgxTUB3ZWl4aW4uc29odS5jb218; pprdig=gVfoCc4uWUg-BttWIFvcr5Sh8vaIfiicOHbo3OMheUJAsJxiDIS3V8QlVPj0FxoKcTsvd3pfzvnByfIx8cZ95oVJWHsG03IUh_UDOFXgGvQ_08vnwSVGq8jBEzVElOLObQkT8S4RtJXujyGo1Vhu2fkGEpo__BTwfPemnahWE8A; sgid=30-33236001-AVqf5ibEztYZFC43nasfzpV8; ppmdig=15204290250000008a50745b446afe9b0e9d6dfd68551a5c',
   'Host':'weixin.sogou.com',
   'Upgrade-Insecure-Requests':'1',
   'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
}

keyword="中印洞朗"
proxy_pool_url='http://127.0.0.1:5000/get'

proxy=None
max_count=15 # maximum number of attempts per URL before giving up

def get_proxy():
    """Fetch a fresh proxy address from the local proxy-pool service.

    Returns the proxy as plain text (e.g. "1.2.3.4:8080"), or None when
    the pool is unreachable or replies with a non-200 status.
    """
    try:
        resp = requests.get(proxy_pool_url)
    except ConnectionError:
        return None
    if resp.status_code == 200:
        return resp.text
    return None


def get_html(url, count=1):
    """Download *url*, rotating through proxies when the anti-crawler kicks in.

    Args:
        url: the page to fetch.
        count: current attempt number; incremented on each retry.

    Returns:
        The page HTML on success, or None after ``max_count`` attempts,
        a failed proxy fetch, or an unexpected status code (e.g. 503,
        temporary server overload).
    """
    print('crawing ', url)
    print('trying times ', count)
    global proxy  # the current proxy is shared across calls
    if count >= max_count:
        print('tried so many times')
        return None
    try:
        if proxy:
            # Only an 'http' mapping is needed: the Sogou URLs are plain http.
            proxies = {
                'http': 'http://' + proxy
            }
            response = requests.get(url, allow_redirects=False, headers=headers, proxies=proxies)
        else:
            # No proxy yet: fall back to the machine's own IP.
            response = requests.get(url, allow_redirects=False, headers=headers)
        time.sleep(1)  # throttle to avoid hammering the server
        if response.status_code == 200:
            return response.text
        if response.status_code == 302:
            # 302 means Sogou flagged this IP; switch to a fresh proxy and retry.
            print(url + ' returns 302 ')
            proxy = get_proxy()
            if proxy:
                print('using proxy ', proxy)
                count += 1
                return get_html(url, count)
            print('get proxy failed!')
            return None
        # Any other status (e.g. 503: service temporarily unavailable due to
        # maintenance or overload) is treated as a failure for this URL.
        print(url + ' returns ' + str(response.status_code))
        return None
    except ConnectionError as e:
        print('error occurred!', e.args)
        proxy = get_proxy()  # connection error: rotate the proxy as well
        count += 1
        return get_html(url, count)


def get_index(keyword, page):
    """Fetch one page of Sogou Weixin search results for *keyword*.

    Returns the result page's HTML, or None when get_html gives up.
    """
    params = {
        'query': keyword,
        'type': 2,
        'page': page,
    }
    search_url = base_url + urlencode(params)
    return get_html(search_url)

def parse_index(html):
    """Yield the article URL of every search hit on a result page."""
    doc = pq(html)
    anchors = doc('.news-box .news-list .txt-box h3 a')
    for anchor in anchors.items():
        yield anchor.attr('href')


def get_content(url):
    """Fetch a WeChat article page's HTML.

    The article host has no anti-crawler measures here, so no proxy is
    used. Returns None on connection failure or non-200 status.
    """
    try:
        resp = requests.get(url)
    except ConnectionError:
        return None
    return resp.text if resp.status_code == 200 else None

def parse_content(html):
    """Extract title, body text and account metadata from an article page.

    Returns a dict with keys: title, content, date, nickname (the account
    name, read from the QR-code profile box), wechat (account description).
    """
    doc = pq(html)
    return {
        'title': doc('.rich_media_title').text(),
        'content': doc('.rich_media_content').text(),
        'date': doc('#post-date').text(),  # id selector — unique on the page
        'nickname': doc('#js_profile_qrcode >div>strong').text(),
        'wechat': doc('#js_profile_qrcode > div > p:nth-child(3) > span').text()
    }


def save_to_mongo(data):
    """Upsert an article into the 'articles' collection, keyed by title.

    ``Collection.update`` is deprecated in pymongo 3.x and removed in
    4.x, so use ``update_one`` with ``upsert=True``: insert when the
    title is new, overwrite the stored fields otherwise.
    """
    result = db['articles'].update_one(
        {'title': data['title']},  # the title acts as the natural key
        {'$set': data},
        upsert=True
    )
    if result:
        print('Save to Mongo', data['title'])
    else:
        print('save to mongo failed', data['title'])

def main():
    """Crawl search-result pages 1-10 and store every article found.

    Fix: the previous version fed a raw URL string into parse_index,
    which expects HTML — a debugging leftover that replaced the page
    loop. The loop is restored, a None result from get_index (e.g. on
    repeated 503s) is skipped, and the duplicated print is removed.
    """
    for page in range(1, 11):
        print('page= ' + str(page))
        html = get_index(keyword, page)  # None when get_html gives up
        if not html:
            continue
        for title_url in parse_index(html):  # generator of article URLs
            print(title_url)
            content_html = get_content(title_url)
            if not content_html:
                continue
            content_data = parse_content(content_html)
            print(content_data)
            if content_data:
                save_to_mongo(content_data)

if __name__ == '__main__':
    main()

