#coding:utf-8
import requests
from requests.exceptions import  ConnectionError
from urllib.parse import urlencode
import time

# Sogou WeChat-article search endpoint; per-page query strings are appended.
base_url='http://weixin.sogou.com/weixin?'

# Headers captured from a logged-in browser session. The Cookie carries the
# Sogou session (SNUID/SUID etc.) needed to pass anti-crawl checks — it is
# account/time bound and will expire, at which point requests start 302-ing.
headers={
'Cookie':'ABTEST=3|1520471325|v1; IPLOC=CN3301; SUID=DA0A0CDD4A42910A000000005AA08D1D; SUID=DA0A0CDD3320910A000000005AA08D21; weixinIndexVisited=1; SUV=00DE5AA4DD0C0ADA5AA08D2BC6D1B390; SNUID=4D929B459792FE44CA2CD02B98AE5332; JSESSIONID=aaaY082aTL56pdplOzwhw; ppinf=5|1520471441|1521681041|dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZToxNjpmZWlzdGVsJUVFJTg0JThEfGNydDoxMDoxNTIwNDcxNDQxfHJlZm5pY2s6MTY6ZmVpc3RlbCVFRSU4NCU4RHx1c2VyaWQ6NDQ6bzl0Mmx1SkI5TXVFa09DTDFEb0VXbm8zQVgxTUB3ZWl4aW4uc29odS5jb218; pprdig=A0iQL_bRH-lXrdagaalIymVtDWQ74J3m-TmGaLi-F60IWHdC0gxxd1ykplmMU-K71_h22JdAkD9GB9NY79wUZ94c0H4Ui2XDV7Np_RnRiclXYj9iShzglpgYpy0XzYEhUoTjpdMUyjkW342cu5AxeLVtGAneiY7299dDnNrb7zE; sgid=30-33236001-AVqgjZEMFLTVkV2oyS6YPkU; ppmdig=1520471442000000013839a66b561d4480d0b184cf8ac541',
'Host':'weixin.sogou.com',
'Referer':'http://weixin.sogou.com/weixin?query=%E4%B8%AD%E5%8D%B0%E6%B4%9E%E6%9C%97&type=2&page=2&ie=utf8',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
}

# Search keyword ("China-India Doklam" in Chinese).
keyword='中印洞朗'

# Local proxy-pool service; each GET returns one proxy address as plain text.
proxy_pool_url='http://localhost:5000/get'

proxy=None  # current proxy shared across get_html() retries; None = direct connection
max_count=15 # maximum number of request attempts per URL

def get_proxy():
    """Ask the local proxy-pool service for a usable proxy address.

    Returns the proxy as plain text (the response body, e.g. "host:port"),
    or None when the pool is unreachable or answers with a non-200 status.
    """
    try:
        resp = requests.get(proxy_pool_url)
    except ConnectionError:
        return None
    return resp.text if resp.status_code == 200 else None


def get_html(url,count=1):
    """Fetch the HTML source of *url*, rotating proxies on throttling.

    Sogou answers 302 (an anti-crawl redirect) when it throttles an IP;
    in that case a fresh proxy is pulled from the local pool and the
    request is retried recursively, up to max_count attempts in total.

    :param url: page URL to fetch
    :param count: current attempt number (internal, used by the recursion)
    :return: the page source as a string, or None on failure
    """
    print('crawing ',url)
    print('try times ',count)
    global proxy # the active proxy is shared across recursive retries
    if count>=max_count:
        print('tried so times')
        return None
    try:
        if proxy:
            proxies={
                # BUGFIX: was 'http"//'+proxy — a stray '"' instead of ':'
                # made the proxy URL invalid, so the proxy was never used.
                'http':'http://'+proxy
            }

            response=requests.get(url,allow_redirects=False,headers=headers,proxies=proxies)
            time.sleep(1)  # throttle ourselves between requests
        else:
            # No proxy assigned yet: connect directly.
            response = requests.get(url, allow_redirects=False, headers=headers)
            time.sleep(1)
        if response.status_code==200:
            return response.text  # page source for url
        if response.status_code==302:
            # Anti-crawl redirect: rotate to a fresh proxy and retry.
            print(url+' response 302')
            proxy=get_proxy()
            if proxy:
                print('using proxy',proxy)
                count += 1
                return get_html(url, count)  # retry with the new proxy
            else:
                print('get proxy failed!')
                return None
        # Any other status: report it and give up on this URL.
        print(url+' return '+str(response.status_code))
        return None
    except ConnectionError as e:
        # Connection failed (dead proxy or network hiccup): rotate and retry.
        print('error occurred!', e.args)
        proxy=get_proxy()
        count+=1
        return get_html(url,count)

def get_index(keyword,page):
    """Return the HTML source of one search-result page for *keyword*.

    Builds the Sogou query string (type=2 selects article search) for the
    given page number and delegates the fetch to get_html().
    """
    params = {'query': keyword, 'type': 2, 'page': page}
    target = base_url + urlencode(params)
    return get_html(target)

def main():
    """Crawl result pages 1 through 100 for the configured keyword,
    printing each page's HTML (or None on failure)."""
    for page_no in range(1, 101):
        html = get_index(keyword, page_no)
        print(html)

# Run the crawler only when executed as a script, not on import.
if __name__ == "__main__":
    main()

