#!/usr/bin/env python
# -*- coding:utf-8 -*-


import urllib2
from bs4 import BeautifulSoup
from mylog import MyLog


class Item(object):
    """Plain value holder for one scraped forum thread (all fields are text or None)."""
    title = None         # thread title
    first_author = None  # author of the opening post
    first_time = None    # creation time shown on the listing page
    return_time = None   # NOTE(review): never assigned in this file -- looks like a leftover field; verify
    content = None       # one-line abstract/preview of the opening post
    last_author = None   # author of the most recent reply
    last_time = None     # time of the most recent reply


class GetBarInfo(object):
    """Scrape thread summaries from a Baidu Tieba listing and dump them to a text file.

    Constructing the object runs the whole pipeline for its side effects:
    build page URLs -> fetch & parse each page -> write 'white_album_bar.txt'.
    """

    def __init__(self, url):
        """url: first listing-page URL whose last query parameter is the
        page offset (e.g. '...&pn=0'); the trailing value is rewritten
        for each subsequent page."""
        self.url = url
        self.log = MyLog()
        self.page_num = 5  # number of listing pages to crawl
        self.urls = self.get_urls(self.page_num)
        self.items = self.spider(self.urls)
        self.pipelines(self.items)

    def get_urls(self, page_num):
        """Return listing-page URLs for the first page_num pages.

        Tieba paginates via the 'pn' query parameter in steps of 50, so we
        rewrite the text after the last '=' of self.url for each page.
        """
        urls = []
        for page_index in xrange(page_num):
            parts = self.url.split('=')
            parts[-1] = str(page_index * 50)
            url = '='.join(parts)
            urls.append(url)
            self.log.info('Built page url: %s' % url)
        return urls

    def spider(self, urls):
        """Fetch each listing page and return a list of parsed Item objects.

        Pages that fail to download and threads with unexpected markup are
        skipped (and logged) instead of aborting the whole crawl.
        """
        items = []
        for url in urls:
            html_content = self.get_response_content(url)
            if not html_content:
                # download failed -- already logged in get_response_content
                continue
            soup = BeautifulSoup(html_content, 'lxml')
            tags_li = soup.find_all('li', attrs={'class': ' j_thread_list clearfix'})
            for tag in tags_li:
                item = self._parse_thread(tag)
                if item is not None:
                    items.append(item)
        return items

    def _parse_thread(self, tag):
        """Extract one Item from a thread <li> tag.

        Returns None when any expected node is missing (ad entries or a
        layout change), so the caller can skip just that entry.
        """
        item = Item()
        try:
            # .find() returns None on a miss, which raises AttributeError below.
            item.title = tag.find('a', attrs={'class': 'j_th_tit '}).get_text().strip()
            item.first_author = tag.find('span', attrs={'class': 'frs-author-name-wrap'}).a.get_text().strip()
            item.first_time = tag.find('span', attrs={'class': 'pull-right is_show_create_time'}).get_text().strip()
            item.content = tag.find('div', attrs={'class': 'threadlist_abs threadlist_abs_onlyline '}).get_text().strip()
            item.last_author = tag.find('span', attrs={'class': 'tb_icon_author_rely j_replyer'}).a.get_text().strip()
            item.last_time = tag.find('span', attrs={'class': 'threadlist_reply_date pull_right j_reply_data'}).get_text().strip()
        except AttributeError:
            self.log.error('Unexpected thread markup, skipping one entry')
            return None
        self.log.info('Parsed thread: %s' % item.title)
        return item

    def pipelines(self, items):
        """Write all scraped items to 'white_album_bar.txt' (UTF-8, overwriting)."""
        filename = 'white_album_bar.txt'

        with open(filename, 'w') as fp:
            for item in items:
                fp.write('first_author: ' + item.first_author.encode('utf8') + '\n')
                fp.write('first_time: ' + item.first_time.encode('utf8') + '\n')
                fp.write('title: ' + item.title.encode('utf8') + '\n')
                fp.write('last_author: ' + item.last_author.encode('utf8') + '\n')
                fp.write('last_time: ' + item.last_time.encode('utf8') + '\n')
                fp.write('content: ' + '\n')
                fp.write(item.content.encode('utf8') + '\n')
                fp.write('\n')

    def get_response_content(self, url):
        """Fetch url and return the raw response body.

        Returns '' (never None) when the request fails, so callers can
        test truthiness safely.  Failures are logged, not raised.
        """
        try:
            response = urllib2.urlopen(url)
        except urllib2.URLError:
            # narrow catch: bare except previously swallowed everything and
            # fell through returning None, which crashed spider()
            self.log.error('Url:%s Open Error' % url)
            return ''
        else:
            self.log.info('Url:%s Success' % url)
            return response.read()


if __name__ == '__main__':
    # Listing page of the "WHITE ALBUM 2" bar (kw is the URL-encoded bar
    # name); pn=0 is rewritten per page inside GetBarInfo.
    bar_url = 'https://tieba.baidu.com/f?kw=%E7%99%BD%E8%89%B2%E7%9B%B8%E7%B0%BF2&fr=wwwt&pn=0'
    crawler = GetBarInfo(bar_url)
