# coding: utf-8
"""
爬取策略：
1.1min刷新一次标签
2.访问次数过快，不能得到数据(给了一份旧数据, 或者强行断开连接)
3.代码存在ajax里，并且数据为ascii码
4.系统默认是浅拷贝，引用导致只是存了一个值

数据结构：
nation = [{
    'p_name': province,
    'p_url': province_url,
    'gv_name': city,
    'gv_url': city_url,
    'gv_time': gonglv_time,
    'gv_download': gonglv_download,
    'gv_title': gonglv_title,
    'gv_desc': gonglv_desc,
    'gv_image': gonglv_image,
    'yj_url': youji_url,
    'yj_title': youji_title,
    'yj_desc': youji_desc,
    'yj_view': youji_view,
    'yj_praise': youji_praise,
    'yj_startday': youji_startday,
    'yj_day': youji_day,
    'yj_type': youji_type,
    'yj_cost': youji_cost,
    'yj_text': youji_text,
    'yj_image': youji_image,
}]
"""
import json
import os
import time
import random
from copy import deepcopy
import requests
import sys
from lxml import etree
from redis import StrictRedis


class MaFengWo(object):
    """Crawler for mafengwo.cn travel guides (攻略) and travel notes (游记).

    Walks province -> city -> city guide -> travel-note detail pages,
    building one dict per travel note (schema documented in the module
    docstring), pushing each record to Redis as JSON and finally dumping
    everything collected to a JSON file.
    """

    def __init__(self, url):
        # Site root, used to absolutize relative hrefs scraped from pages.
        self.domain = 'http://www.mafengwo.cn'
        # Entry page listing every province.
        self.start_url = url
        # Redis connection (defaults: localhost:6379, db 0).
        self.rc = StrictRedis()
        # Accumulates one dict per scraped travel note.
        self.scenary = []
        # Output path for the JSON dump written by run().
        self.filename = 'ext/E1_mafengwo.json'

        _path = os.path.join(os.path.abspath('.'), '../')
        sys.path.append(_path)
        # Project-local helper module providing rotating User-Agent headers;
        # imported dynamically because it lives one directory up.
        self.ei = __import__('Utils.C002_extract_info', fromlist=['C002_extract_info'])
        self.ua_file = {'pc': '{0}Utils/{1}'.format(_path, self.ei.ExtractInfo.ua_file['pc'])}
        self.user_agent = self.ei.ExtractInfo.get_useragent(file=self.ua_file)
        # AJAX endpoint returning the paginated travel-note list as JSON.
        self.data_url = 'http://www.mafengwo.cn/gonglve/ajax.php?act=get_travellist'
        # POST payload template for data_url; 'mddid' and 'page' are filled
        # in per request by parse_yj().
        self.data = {
            'mddid': '',
            'pageid': 'mdd_index',
            'sort': '1',
            'cost': '0',
            'days': '0',
            'month': '0',
            'tagid': '0',
            'page': '',
        }
        # Redis list key that finished records are pushed onto.
        self.rediskey = 'mafengwo'

    # 5. Parse the detail page of a single travel note.
    def parse_detail(self, city_url):
        """Fetch city_url['yj_url'], fill the yj_* detail fields into the
        shared record dict, and store a deep-copied snapshot in memory and
        in Redis.

        Returns early (record skipped) when the start-day field is absent,
        which usually indicates a different page layout or throttling.
        """
        _detail_url = city_url['yj_url']
        print(_detail_url)
        # Rotate the User-Agent on every request to reduce throttling
        # (strategy note 2 in the module docstring).
        self.user_agent = self.ei.ExtractInfo.get_useragent(file=self.ua_file)
        _response = requests.get(_detail_url, headers=self.user_agent)
        _html = etree.HTML(_response.content.decode())
        try:
            _yj_startday = _html.xpath('//li[@class="time"]/text()')[1]
        except IndexError:
            return
        # FIX: this lookup was previously unguarded and raised IndexError on
        # pages without a "day" element; default to '' like its siblings.
        try:
            _yj_day = _html.xpath('//li[@class="day"]/text()')[1]
        except IndexError:
            _yj_day = ''
        try:
            _yj_type = _html.xpath('//li[@class="people"]/text()')[1]
        except IndexError:
            _yj_type = ''
        try:
            _yj_cost = _html.xpath('//li[@class="cost"]/text()')[1]
        except IndexError:
            _yj_cost = ''
        _yj_text = _html.xpath('//div[@class="_j_content_box"]//text()')
        _yj_image = _html.xpath('//div[@class="_j_content_box"]//img/@data-rt-src')
        city_url['yj_startday'] = _yj_startday
        city_url['yj_day'] = _yj_day
        city_url['yj_type'] = _yj_type
        city_url['yj_cost'] = _yj_cost
        city_url['yj_text'] = _yj_text
        city_url['yj_image'] = _yj_image
        # Deep copy is required: city_url is one shared dict mutated in place
        # by the whole call chain, so a plain reference would make every
        # stored record point at the same (last) state -- see note 4 in the
        # module docstring.
        _tmp = deepcopy(city_url)
        self.scenary.append(_tmp)
        # FIX: redis-py cannot serialize a dict (raises DataError); push the
        # record as a JSON string instead.
        self.rc.rpush(self.rediskey, json.dumps(_tmp, ensure_ascii=False))
        # Randomized 1-3 s pause to stay under the site's rate limit.
        time.sleep(random.random() * 2 + 1)

    # 4. Fetch the travel-note list for one city, page by page.
    def parse_yj(self, first_url, city_url, page='1'):
        """POST the AJAX list endpoint for the city identified by first_url
        and walk its travel notes, recursing through subsequent pages.

        first_url: city guide URL whose basename encodes the mdd id.
        city_url:  shared record dict, already holding p_*/gv_* fields.
        page:      1-based page number as a string (endpoint expects text).
        """
        # The mdd id is the numeric basename of the guide URL.
        _c_id = first_url.split('/')[-1].split('.')[0]
        _c_url = city_url
        print(_c_id, page)
        self.data['mddid'] = _c_id
        self.data['page'] = page
        self.user_agent = self.ei.ExtractInfo.get_useragent(file=self.ua_file)
        _response = requests.post(self.data_url, data=self.data, headers=self.user_agent)
        # The endpoint returns JSON whose 'list'/'page' values are HTML
        # fragments (strategy note 3 in the module docstring).
        _dict_data = json.loads(_response.content.decode())
        _html = _dict_data['list']
        _html = etree.HTML(_html)
        _nodes = _html.xpath('//div[@class="tn-wrapper"]')
        # NOTE: deliberately limited to the first note per page while
        # developing; iterate over all of _nodes for a full crawl.
        for _node in _nodes[:1]:
            _yj_url = self.domain + _node.xpath('./dl/dt/a/@href')[-1]
            print(_yj_url)
            _yj_title = _node.xpath('./dl/dt/a/text()')[-1]
            city_url['yj_url'] = _yj_url
            city_url['yj_title'] = _yj_title
            try:
                _yj_desc = _node.xpath('./dl/dd/a/text()')[0]
                city_url['yj_desc'] = _yj_desc
            except IndexError:
                city_url['yj_desc'] = ''
            try:
                _yj_view = _node.xpath('./div/span[@class="tn-nums"]/text()')[0]
                city_url['yj_view'] = _yj_view
            except IndexError:
                city_url['yj_view'] = '0'
            try:
                _yj_praise = _node.xpath('./div/span[@class="tn-ding"]/em/text()')[0]
                city_url['yj_praise'] = _yj_praise
            except IndexError:
                city_url['yj_praise'] = '0'
            self.parse_detail(city_url)
        _max_page = etree.HTML(_dict_data['page']).xpath('//span[@class="count"]/span/text()')[0]
        # FIX: the recursion previously never terminated (the page-bound
        # check was commented out); stop once the last page is fetched.
        if int(page) < int(_max_page):
            _now_page = int(page) + 1
            self.parse_yj(first_url, _c_url, page=str(_now_page))

    # 3. Parse one city's guide page.
    def parse_gv(self, city_url):
        """Fetch city_url['gv_url'], fill the gv_title/desc/image fields,
        then descend into the city's travel notes via parse_yj()."""
        _c_url = city_url['gv_url']
        self.user_agent = self.ei.ExtractInfo.get_useragent(file=self.ua_file)
        _response = requests.get(_c_url, headers=self.user_agent)
        _html = etree.HTML(_response.content.decode())
        city_url['gv_title'] = _html.xpath('//div[@class="gl_title"]/span/text()')
        city_url['gv_desc'] = _html.xpath('//div[@class="jianjie"]/p/text()')[0]
        city_url['gv_image'] = _html.xpath('//li[@class="scroll-content-item"]/a/img/@src')
        # First destination link on the guide page; its basename carries the
        # mdd id that parse_yj needs for the AJAX endpoint.
        _first_url = self.domain + _html.xpath('//div[@class="mdd_m"]/dl/dt/a/@href')[0]
        self.parse_yj(_first_url, city_url)

    # 2. Parse the city list of one province.
    def parse_city(self, province_url):
        """Fetch province_url['p_url'] and iterate its city guide entries,
        filling the gv_name/url/time/download fields per city."""
        _p_url = province_url['p_url']
        self.user_agent = self.ei.ExtractInfo.get_useragent(file=self.ua_file)
        _response = requests.get(_p_url, headers=self.user_agent)
        _html = etree.HTML(_response.content.decode())
        _nodes = _html.xpath('//div[@class="gl_list"]')
        # NOTE: limited to the first city while developing; iterate over all
        # of _nodes for a full crawl.
        for _node in _nodes[:1]:
            province_url['gv_name'] = _node.xpath('./a/@title')[0]
            province_url['gv_url'] = self.domain + _node.xpath('./a/@href')[0]
            province_url['gv_time'] = _node.xpath('./div[@class="update_time"]/text()')[0]
            province_url['gv_download'] = _node.xpath('./div[@class="down_cout"]/p/text()')[0]
            self.parse_gv(province_url)

    # 1. Parse the national province list.
    def parse_province(self, html):
        """Extract every province name/url from the start page HTML and
        descend into each one via parse_city()."""
        _html = etree.HTML(html)
        _nodes = _html.xpath('//div[@class="wrapper"]/div[4]/ol/li')
        _tmp = {}
        # NOTE: limited to the first province while developing; iterate over
        # all of _nodes for a full crawl.
        for _node in _nodes[:1]:
            # Link text looks like "省名(数量)"; keep only the name.
            _tmp['p_name'] = _node.xpath('./a/text()')[0].split('(')[0]
            _tmp['p_url'] = self.domain + _node.xpath('./a/@href')[0]
            self.parse_city(_tmp)

    def run(self):
        """Crawl everything reachable from start_url, then dump the
        collected records (one JSON object per line, comma-terminated)
        to self.filename."""
        self.user_agent = self.ei.ExtractInfo.get_useragent(file=self.ua_file)
        # Fetch the start page and kick off the parse chain.
        _response = requests.get(self.start_url, headers=self.user_agent)
        self.parse_province(_response.content.decode())
        # Dump the in-memory records to disk (Redis was already populated
        # incrementally by parse_detail).
        with open(self.filename, 'w', encoding='utf-8') as f:
            for _detail in self.scenary:
                _str_data = json.dumps(_detail, ensure_ascii=False) + ',\n'
                f.write(_str_data)


def main():
    """Entry point: crawl mafengwo.cn starting from the national index."""
    entry_url = 'http://www.mafengwo.cn/gonglve/mdd-cn-0-0-1.html#list'
    crawler = MaFengWo(entry_url)
    crawler.run()


if __name__ == '__main__':
    main()
