# coding:utf-8
import requests
import sys
from lxml import etree
import os
import json


class BaidutiebaImage(object):
    """Crawler that walks a Baidu Tieba forum's list pages, visits each
    thread's detail page, downloads the images it finds, and records one
    JSON line per thread.

    Output locations (relative to the working directory):
      - ``images/``                 downloaded image files
      - ``ext/E3_baidutieba.json``  one JSON record per thread
    """

    def __init__(self, tieba_name):
        # Start URL: first list page of the forum named *tieba_name*.
        self.url = 'http://tieba.baidu.com/f?kw={}'.format(tieba_name)
        # JSON-lines-style output file; records are appended one per thread.
        self.filename = 'ext/E3_baidutieba.json'

    def get_page(self, url):
        """GET *url* with a randomized User-Agent and return the raw bytes.

        The User-Agent pool lives in the sibling ``Utils`` package, which is
        imported lazily here — presumably so the script works when run from
        its own directory (TODO confirm against project layout).
        """
        _path = os.path.join(os.path.abspath('.'), '../')
        # Only extend sys.path once; the original appended on every request.
        if _path not in sys.path:
            sys.path.append(_path)
        _ei = __import__('Utils.C002_extract_info', fromlist=['C002_extract_info'])
        _ua_file = {'pc': '{0}Utils/{1}'.format(_path, _ei.ExtractInfo.ua_file['pc'])}
        _user_agent = _ei.ExtractInfo.get_useragent(file=_ua_file)
        _response = requests.get(url, headers=_user_agent)

        return _response.content

    def parse_list_page(self, list_page):
        """Extract thread entries and the next-page URL from a list page.

        :param list_page: raw UTF-8 bytes of a forum list page
        :return: tuple ``(detail_list, next_url)`` where *detail_list* is a
                 list of ``{'title': ..., 'link': ...}`` dicts and *next_url*
                 is ``None`` on the last page
        """
        # 将响应数据转换成elemenet对象 (convert the response into an element tree)
        _html = etree.HTML(list_page.decode(encoding='utf-8'))
        _node_list = _html.xpath('//ul[@id="thread_list"]/li/div/div[2]/div[1]/div[1]/a')
        _detail_list = []
        for _node in _node_list:
            _detail_list.append({
                'title': _node.xpath('./text()')[0],
                'link': 'http://tieba.baidu.com' + _node.xpath('./@href')[0],
            })
        # The pager's 10th anchor holds the next-page href; it is absent on
        # the final page. Check explicitly instead of a bare `except:` that
        # would also swallow KeyboardInterrupt/SystemExit.
        _next_nodes = _html.xpath('//*[@id="frs_list_pager"]/a[10]/@href')
        _next_url = 'http:' + _next_nodes[0] if _next_nodes else None

        return _detail_list, _next_url

    def parse_detail_page(self, detail_page):
        """Return the list of image URLs found in a thread detail page."""
        _html = etree.HTML(detail_page)
        _image_list = _html.xpath(
            '//div[@id="j_p_postlist"]//div[@class="d_post_content j_d_post_content  clearfix"]/img/@src')

        return _image_list

    def download(self, image_list):
        """Download every URL in *image_list* into the ``images/`` directory."""
        os.makedirs('images', exist_ok=True)
        for _url in image_list:
            # Filename = last path component, stripped of any query string.
            _filename = 'images/E3_baidutieba_{}'.format(_url.split('/')[-1].split('?')[0])
            _data = self.get_page(_url)
            with open(_filename, mode='wb') as f:
                f.write(_data)

    def save_data(self, data):
        """Append one record to ``self.filename`` as a JSON line.

        Opens in append mode — the original used ``'w+'``, which truncated
        the file on every call so only the last record survived. Also
        creates the output directory, which the original assumed existed.
        """
        _dir = os.path.dirname(self.filename)
        if _dir:
            os.makedirs(_dir, exist_ok=True)
        with open(self.filename, mode='a', encoding='utf-8') as f:
            _str_data = json.dumps(data, ensure_ascii=False) + ',\n'
            f.write(_str_data)

    def run(self):
        """Crawl list pages until there is no next page, saving as we go."""
        # 起始的url (start from the forum's first list page)
        _next_url = self.url
        while _next_url:
            # 发起请求获取响应 (fetch the list page)
            _list_page = self.get_page(_next_url)
            # 从列表页面的响应中抽取 详情页面的url和标题列表 下一页的链接
            # (extract detail titles/links and the next-page link)
            _detail_list, _next_url = self.parse_list_page(_list_page)
            # 遍历详情页面列表 (walk the detail pages)
            for _detail in _detail_list:
                _detail_url = _detail['link']
                # 获取详情页面的响应 (fetch the detail page)
                _detail_page = self.get_page(_detail_url)
                # 从详情页面中获取图片列表 (collect the image URLs)
                _image_list = self.parse_detail_page(_detail_page)
                # 下载图片 (download the images)
                self.download(_image_list)
                # 保存数据 (record this thread)
                _detail['images'] = _image_list
                self.save_data(_detail)
                # TODO 翻页


def main():
    """Entry point: crawl all images for the '校花' forum."""
    crawler = BaidutiebaImage('校花')
    crawler.run()


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
