from bs4 import BeautifulSoup
import requests
import time
import random
import urllib.request

# Pool of desktop browser User-Agent strings; one is picked at random per
# request (see get_html) to make the crawler look less like a bot.
# NOTE(review): these UA strings are quite dated — consider refreshing them.
user_agents = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)"
    ]

def get_html(url, headers, proxies, cookie):
    """Fetch *url* and return it parsed as a BeautifulSoup tree.

    A User-Agent is drawn at random from *headers* (a list of UA strings)
    and a proxy at random from *proxies* (a list of "host:port" strings),
    so repeated calls rotate identity.

    Args:
        url: page to fetch.
        headers: list of User-Agent strings to choose from.
        proxies: list of HTTP proxy addresses to choose from.
        cookie: dict passed to requests as cookies (e.g. {"Cookie": ...}).

    Returns:
        BeautifulSoup of the response body (decoded as UTF-8, lxml parser).
    """
    random_user_agent = random.choice(headers)
    random_proxy = random.choice(proxies)

    # FIX: the original assigned the UA to the *response* object after the
    # request had already completed (no effect), and passed the whole proxy
    # list to requests instead of a {"scheme": proxy} mapping. It also
    # installed a urllib opener that requests never uses; that dead code
    # (and its global urllib side effect) is removed.
    web_data = requests.get(
        url,
        headers={"User-Agent": random_user_agent},
        cookies=cookie,
        proxies={"http": random_proxy},
    )
    # Force UTF-8 before reading .text so the page decodes correctly.
    web_data.encoding = 'utf-8'
    return BeautifulSoup(web_data.text, 'lxml')


# Proxy IPs, one "host:port" per line in proxyIP.txt.
with open('proxyIP.txt', 'r', encoding='utf-8') as fr:
    myproxies = [line.strip('\n') for line in fr]

# url='http://www.dlmu.edu.cn/'
# html=get_html(url,user_agents,myproxies)
# print(html.read().decode('utf-8'))







# Record when the crawl started.
print("Start time : %s" % time.ctime())
# Alternative: prompt for the login cookie at runtime instead of hard-coding it.
# cookieDATA=input("输入登录信息cookie：")
cookieDATA="_T_WM=9e641c; SCF=Re24.; SUB=_2y8nyQ..; SUHB=033D; SSOLoginState=10; M_WEIBOCN_PARAMS=074"  # login cookie (content partially redacted for account safety)
cookie={"Cookie":cookieDATA}
url = 'http://weibo.cn/u/1061064063781'
print(url)
print('wbSpider准备就绪')

# Uids of the Weibo accounts to crawl, one per line in uiddata.txt.
with open('uiddata.txt', 'r', encoding='utf-8', errors='ignore') as f:
    uiddata = [line.strip('\n') for line in f]

uiddata_length = len(uiddata)
# For each uid: fetch the profile page, record name + stats, then walk up to
# 49 pages of posts, and finally dump everything to wbdata<uid>.txt.
for user_id in uiddata:  # FIX: iterate directly instead of index loop
    url = 'http://weibo.cn/u/%s' % user_id
    web_data_pre = requests.get(url, cookies=cookie)
    web_data_pre.encoding = 'utf-8'
    soup_pre = BeautifulSoup(web_data_pre.text, 'lxml')

    result = []
    # The second '.ctt' node holds the profile name line; strip the
    # leading verification label characters.
    weibo_name = soup_pre.select('.ctt')[1].text.lstrip('认证：')
    print('好了，我要开始看看：', weibo_name)
    result.append(weibo_name)

    # Profile stats block: total posts, following count, follower count.
    for tip2 in soup_pre.select('.tip2'):
        weibo_total = tip2.select('.tc')[0].text
        weibo_follow = tip2.select('a')[0].text
        weibo_fans = tip2.select('a')[1].text
        result.append(weibo_total)
        result.append(weibo_follow)
        result.append(weibo_fans)
    result.append('\n')

    for page in range(1, 50):
        url = 'http://weibo.cn/u/%s?page=%d' % (user_id, page)
        web_data = requests.get(url, cookies=cookie)
        web_data.encoding = 'utf-8'
        soup = BeautifulSoup(web_data.text, 'lxml')

        for c in soup.select('.c'):
            if len(c.select('.cmt')) > 3:  # entry is a repost
                result.append(c.select('.cmt')[0].text)  # repost source
                if len(c.select('div')) > 2:
                    result.append(c.select('div')[2].text)
                elif len(c.select('div')) > 1:
                    result.append(c.select('div')[1].text)  # full repost text
                result.append('\n')
            elif len(c.select('.ctt')) > 0:  # entry is an original post
                if len(c.select('div')) > 1:
                    result.append(c.select('.ctt')[0].text)  # post body
                    result.append(c.select('div')[1].text)  # publish time etc.
                elif len(c.select('div')) > 0:
                    result.append(c.select('div')[0].text)  # pro-edition post body + time
                result.append('\n')
        time.sleep(10 * random.random())  # random sleep to throttle request rate

    print(weibo_name, ':阅览结束- = - = - = - = - = - = - = ')
    print("主人别急，我先歇会儿O(∩_∩)O")
    # FIX: the original opened the output file without ever closing it
    # (leaked handle, potentially unflushed data). A context manager
    # guarantees close; the redundant str() around join is dropped.
    with open("wbdata%s.txt" % user_id, "w", encoding='utf-8') as fo:
        fo.write('\n'.join(result))
    time.sleep(20)  # wait 20 s before starting on the next account

print('全部执行完毕')
print("End time : %s" % time.ctime())