# -*- coding: utf-8 -*-
import requests
import random
import re
from bs4 import BeautifulSoup
import time

# Proxy candidates ("ip:port") scraped from xicidaili, and the subset that
# answered a probe request.
agent_ip = []
useful = []
# Scratch lists: ports and IPs pulled cell-by-cell off the proxy table.
zhongzhuan_port = []
zhongzhuan_ip = []
# Per-thread results collected during the forum crawl.
titles = set()
authors = []
replies = []
levels = []
timedates = []
sunrise = []  # flattened summary: titles + levels + dates + authors + replies
urlgroup = []  # NOTE(review): never written or read in this file — confirm before removing
urlbase = []   # thread hrefs already visited (dedup guard)

# All patterns are raw strings.  The stray mid-pattern "(?i)" was removed from
# the title pattern: re.I is already passed, and an inline flag that is not at
# the start of a pattern is a hard re.error on Python 3.11+.
ip_re = re.compile(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', re.I)
port_re = re.compile(r'^[1-9]\d*$', re.I)
ll_re = re.compile(r'(.*Love.*live.*sunshine.*)', re.I)
url_re = re.compile(r'(/p/\d{10})', re.I)
time_re = re.compile(r'(\d{1}\D)', re.I)
pages_re = re.compile(u'(.*\u5c3e\u9875.*)', re.I)  # \u5c3e\u9875 = "尾页" (last page)

# Module-level counters; presumably leftovers — get() uses its own locals.
i = 0
nums = 0


def _collect_candidate_proxies(page, headers):
    # Pull one page of the xicidaili free-proxy table and record every
    # unique "ip:port" combination in the module-level ``agent_ip`` list.
    url = 'http://www.xicidaili.com/nt/%d' % page
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.content, 'html5lib')
    # The table alternates <tr class="odd"> and <tr class=""> rows.
    row_groups = (soup.find_all("tr", class_="odd"),
                  soup.find_all("tr", class_=""))
    for rows in row_groups:
        for row in rows:
            for text in row.strings:
                try:
                    if port_re.match(text) is not None:
                        zhongzhuan_port.append(text)
                    if ip_re.match(text) is not None:
                        zhongzhuan_ip.append(text)
                except Exception:
                    pass  # best effort: skip cells that can't be matched
    for num in range(len(zhongzhuan_ip)):
        result = '%s:%s' % (zhongzhuan_ip[num], zhongzhuan_port[num])
        if result not in agent_ip:
            agent_ip.append(result)


def _verify_proxies():
    # Probe every candidate proxy against baidu.com and keep the ones that
    # answer 200 within one second in ``useful``.
    # (The original interleaved the candidates into ``count`` lists stored in
    # globals() and then sampled them with a stride of ``count``, which ended
    # up testing only a small fraction of them; we simply test them all.)
    for ip_daili in agent_ip:
        proxies = {"http": "http://%s" % ip_daili}
        try:
            if requests.get('http://www.baidu.com', proxies=proxies,
                            timeout=1).status_code == 200:
                useful.append(ip_daili)
        except Exception:
            pass  # dead or slow proxy: drop it


def _scrape_thread(thread_url, headers):
    # Fetch a single thread page and append its author(s), reply count,
    # poster level and last-reply time to the module-level result lists.
    # Best effort: any network or parse failure skips the thread silently,
    # matching the original behavior.
    try:
        r_new = requests.get(thread_url, headers=headers)
        thread_soup = BeautifulSoup(r_new.text, 'html5lib')
        author = thread_soup.find("li", class_="d_name").select('a[alog-group]')
        reply = thread_soup.find("li", class_="l_reply_num").select('span[style]')
        level = thread_soup.find('div', class_="d_badge_lv").string
        timedata = thread_soup.find_all('span', class_="tail-info")
        # If tail-info[1] starts "<digit><non-digit>" the timestamp lives one
        # element further on — assumes at least 2-3 tail-info spans exist;
        # an IndexError here is swallowed like any other parse failure.
        if time_re.match(timedata[1].string) is not None:
            timedates.append(timedata[2].string)
        else:
            timedates.append(timedata[1].string)
        levels.append(level)
        for author_name in author:
            authors.append(author_name.string)
        for num in reply:
            replies.append(num.string)
    except Exception:
        pass


def _print_summary():
    # Flatten the five per-thread lists into ``sunrise`` and print one group
    # of (title, level, date, author, reply-count) per collected thread.
    length = len(titles)
    sunrise.extend(titles)
    sunrise.extend(levels)
    sunrise.extend(timedates)
    sunrise.extend(authors)
    sunrise.extend(replies)
    for arc in range(length):
        for ids in range(5):
            print(sunrise[ids * length + arc])


def get(page, count):
    """Crawl the "lovelivesunshine" Baidu Tieba forum through free proxies.

    Scrapes proxy candidates from xicidaili page ``page``, keeps the ones
    that respond, then walks the forum thread list through those proxies,
    collecting titles, authors, reply counts, poster levels and timestamps
    into the module-level lists, and prints a summary at the end.

    ``count`` is kept for backward compatibility: it used to drive a buggy
    chunk-and-stride proxy verification scheme that skipped most candidates.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36'}
    _collect_candidate_proxies(page, headers)
    _verify_proxies()

    nums = 0       # index into ``useful``: advanced when a proxy fails
    pages = 0      # forum page index; Tieba's pn parameter is 50 * pages
    max_page = 50  # highest pn seen; refreshed from the last-page link

    while nums < len(useful) and 50 * pages < max_page + 1:
        ip_agent = useful[nums]
        proxies = {"http": "http://%s" % ip_agent}
        try:
            url = ('http://tieba.baidu.com/f?kw=lovelivesunshine&ie=utf-8&pn='
                   + str(50 * pages))
            r = requests.get(url, proxies=proxies, timeout=3)
            soup = BeautifulSoup(r.text, 'html5lib')
            question_links = soup.select('a[href]')

            # check error page (original only reported it; keep that behavior)
            if soup.find('div', class_='error') is not None:
                print('end.')

            for link in question_links:
                title = link.text.strip() + '\n'
                if pages_re.match(title) is not None:
                    # The last-page link's href carries the maximum pn value.
                    # Convert to int: it used to stay a string, which made
                    # ``max_page + 1`` in the loop condition raise TypeError.
                    my_text = re.findall(r'(\d\d+)', link['href'])
                    max_page = int(my_text[0])

                if ll_re.match(title) is not None and title not in titles and url_re.match(link["href"]) is not None:
                    if link["href"] not in urlbase:
                        print('matched:' , title[:-1])
                        titles.add(title)
                        new_urls = 'http://tieba.baidu.com%s' % link['href']
                        urlbase.append(link['href'])
                        _scrape_thread(new_urls, headers)
            # pn advances in steps of 50, i.e. one forum page at a time;
            # the old ``pages += 10`` skipped nine pages out of ten.
            pages += 1
        except KeyboardInterrupt:
            # Stop crawling; the summary below now prints exactly once
            # (it used to print here *and* again after the loop).
            print("\ntest end")
            break
        except Exception:
            nums += 1   # current proxy failed: move on to the next one
            print('a')  # progress/debug marker kept from the original
            continue

    _print_summary()
if __name__ == '__main__':
    # Script entry point: scrape xicidaili proxy page 1; the second argument
    # is the historical verification group size (see get()).  The guard keeps
    # a plain import of this module from launching a crawl.
    get(1, 50)
