# coding=utf-8

import json
import re
import time

# Python 2/3 compatibility: urlparse moved into urllib.parse on Python 3.
# Catch ImportError specifically -- a bare `except:` would also swallow
# SystemExit/KeyboardInterrupt.
try:
    import urlparse
except ImportError:
    import urllib.parse as urlparse

import requests
from lxml import html
from twisted.internet import reactor
from scrapy import Field, Item, Request, Spider
from scrapy.crawler import Crawler, CrawlerRunner
from scrapy.http import FormRequest
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, TakeFirst
from scrapy.selector import Selector

from common.mysql_client import MysqlClient

class MafengwoDbHandle(MysqlClient):
    """MysqlClient preconfigured for this project's database server."""

    # NOTE(review): credentials are hard-coded in source; consider moving
    # them to environment variables or a config file outside version control.
    def __init__(self):
        super(MafengwoDbHandle, self).__init__(
            host='123.207.142.111',
            port=3306,
            user='root',
            password='a78456780666',
            connection_timeout=3000,
        )

class ScenicItemLoader(ItemLoader):
    """ItemLoader whose fields keep only the first extracted value."""
    # TakeFirst collapses each field's list of extracted values to its
    # first non-empty element.
    default_output_processor = TakeFirst()

class ScenicItem(Item):
    """One scraped scenic spot (POI) from m.mafengwo.cn."""
    title = Field()          # POI page <h1> title
    url = Field()            # full POI detail-page URL
    intro = Field()          # introduction paragraph from the tips block
    open_time = Field()      # opening-hours text
    play_time = Field()      # suggested visit duration text
    cost = Field()           # ticket-price text
    address = Field()        # street address text
    traffic = Field()        # how-to-get-there text
    comment_count = Field()  # comment count shown on the page
    scenic_id = Field()      # numeric POI id parsed from the URL

class CommentItem(Item):
    """One user comment attached to a scenic spot."""
    content = Field()       # comment body text
    scenic_id = Field()     # id of the POI the comment belongs to
    user_name = Field()     # commenter's display name
    level = Field()         # commenter's site level text
    star = Field()          # star-rating text
    comment_date = Field()  # date the comment was posted
    from_url = Field()      # link to the source page of the comment
    from_text = Field()     # anchor text of that source link


class CustomPipeline(object):
    """Scrapy item pipeline that persists scraped items to MySQL.

    ScenicItem rows go to test.mafengwo_scenic; CommentItem rows go to
    test.mafengwo_comment.
    """

    def __init__(self):
        self.scenic_db = MafengwoDbHandle()
        self.scenic_db.set_db_table('test', 'mafengwo_scenic')

        self.comment_db = MafengwoDbHandle()
        self.comment_db.set_db_table('test', 'mafengwo_comment')

    def close_spider(self, spider):
        """Release both DB connections when the spider shuts down."""
        self.scenic_db.destroy()
        self.comment_db.destroy()

    @staticmethod
    def _row(item, fields):
        """Build an insert dict from *item* for *fields*, mapping missing or
        None values to '' (the DB columns expect strings, not NULLs)."""
        data = dict(item)
        return {key: data.get(key) if data.get(key) is not None else ''
                for key in fields}

    def process_item(self, item, spider):
        """Route each item to its table; always return the item so any
        later pipeline stage still receives it (the old code returned None)."""
        # isinstance() is robust against module aliasing; comparing
        # str(type(item)) with a hard-coded "__main__" path silently
        # stopped matching when this module was imported under its name.
        if isinstance(item, ScenicItem):
            info = self._row(item, (
                'title', 'url', 'open_time', 'play_time', 'intro', 'cost',
                'address', 'traffic', 'comment_count', 'scenic_id',
            ))
            self.scenic_db.insert(info)
            self.scenic_db.commit()
        elif isinstance(item, CommentItem):
            # Insert every declared CommentItem field, consistent with the
            # rows CommentSpider writes to the same table. The old code
            # inserted a 'scenic_url' key that CommentItem never defines
            # (always '') and dropped all of the real comment fields.
            info = self._row(item, (
                'content', 'scenic_id', 'user_name', 'level', 'star',
                'comment_date', 'from_url', 'from_text',
            ))
            self.comment_db.insert(info)
            self.comment_db.commit()
        return item



class MafengwoSpider(Spider):
    """Crawl Mafengwo mobile scenic-spot listings and POI detail pages.

    start_requests walks the Ajax listing endpoint synchronously (via
    requests) and yields a scrapy Request per POI; parse turns each POI
    detail page into a ScenicItem.
    """

    custom_settings = {
        'DNSCACHE_ENABLED': True,
        'ROBOTSTXT_OBEY': False,
        'RETRY_ENABLED': True,
        'DOWNLOAD_TIMEOUT': 20,
        'CONCURRENT_REQUESTS': 32,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 32,
        'CONCURRENT_REQUESTS_PER_IP': 32,
        'DOWNLOAD_DELAY': 4,
        'COOKIES_ENABLED': False,
        'ITEM_PIPELINES': {
            'mafengwo.CustomPipeline': 100
        },
    }
    name = "mafengwo"
    allowed_domains = []
    # Ajax listing endpoint; responds with JSON {"html": ..., "has_more": 0/1}.
    urls_format = 'https://m.mafengwo.cn/jd/10099/gonglve.html?page={page}&is_ajax=1'

    url_comment = 'https://m.mafengwo.cn/poi/poi/comment_page'

    HEADERS = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'Host': 'm.mafengwo.cn',
        'Referer': 'https://m.mafengwo.cn/poi/5504076.html',
        'Cache-Control': 'max-age=0',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Mobile Safari/537.36'
    }

    pattern_a = re.compile(r'<a href="(.*?)" class="poi-li">', re.S)
    pattern_comment_url = re.compile(r'/poi/(\d+)\.html')
    pattern_comment_content = re.compile(r'<div class="context">(.*?)</div>', re.S)

    def __init__(self, *args, **kwargs):
        # Forward to Spider.__init__ so scrapy finishes its own setup;
        # the old code skipped it entirely. Extra *args/**kwargs keep the
        # signature compatible with scrapy's spider instantiation.
        super(MafengwoSpider, self).__init__(*args, **kwargs)
        self.headers = self.HEADERS

    def _fetch_listing(self, page):
        """Fetch one listing page synchronously and return its parsed JSON."""
        resp = requests.get(self.urls_format.format(page=page),
                            headers=self.headers, verify=False)
        return json.loads(resp.text)

    def _poi_urls(self, data_json):
        """Yield absolute POI URLs found in one listing payload."""
        # Guard: "html" may be absent/None in an error payload.
        content = data_json.get("html") or ''
        for href in re.findall(self.pattern_a, content):
            yield urlparse.urljoin(self.urls_format, href)

    def start_requests(self):
        """Walk the paginated listing and request every POI detail page."""
        page = 1
        data_json = self._fetch_listing(page)
        for scenic_url in self._poi_urls(data_json):
            yield Request(scenic_url, headers=self.headers, callback=self.parse)

        while data_json.get("has_more", 0):
            # These listing fetches bypass scrapy's DOWNLOAD_DELAY, so
            # throttle them explicitly.
            time.sleep(5)
            page += 1
            print(page)
            data_json = self._fetch_listing(page)
            for scenic_url in self._poi_urls(data_json):
                yield Request(scenic_url, headers=self.headers,
                              callback=self.parse)

    def parse(self, response):
        """Parse one POI detail page into a ScenicItem."""
        print(response.status)

        item = ScenicItem()
        item['title'] = response.css(
            'body > div.wrapper > section.poiHead > div > h1::text').extract_first()
        item['url'] = response.url
        item['comment_count'] = response.css(
            '#dianping > h3 > strong > span::text').extract_first()

        # The tips block holds labelled <p> entries; the unlabelled one is
        # the introduction paragraph.
        for p in response.css('#gonglve > div > div.tips > p'):
            label = p.xpath("./strong/text()").extract()
            data = p.xpath("string(.)").extract()
            value = data[0] if data else ""
            if label:
                if "开放时间" in label[0]:
                    item['open_time'] = value
                elif "用时参考" in label[0]:
                    item['play_time'] = value
                elif "票" in label[0]:
                    item['cost'] = value
            else:
                item['intro'] = value

        for li in response.xpath('//*[@id="gonglve"]/div/div[2]/ul/li'):
            label = li.xpath("./strong/text()").extract()
            # Guard: the old code indexed label[0] unconditionally and
            # raised IndexError on an unlabelled <li>.
            if not label:
                continue
            data = li.xpath("./text()").extract()
            value = data[0] if data else ""
            if "地址" in label[0]:
                item['address'] = value
            elif "交通" in label[0]:
                item['traffic'] = value

        # POI id lives in the URL path; guard against unexpected URL shapes
        # (the old code raised AttributeError when the pattern didn't match).
        match = re.search(self.pattern_comment_url, response.url)
        item['scenic_id'] = match.group(1) if match else ""

        yield item

    def closed(self, reason):
        """Called by scrapy when the spider closes."""
        print(reason)

class CommentSpider(object):
    """Synchronous scraper that pulls comment pages for scenic ids stored
    in MySQL and inserts the parsed comments back into the comment table."""

    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'Host': 'm.mafengwo.cn',
        'Referer': 'https://m.mafengwo.cn/poi/comment_5504076.html',
        'Cache-Control': 'max-age=0',
        'X-Requested-With': 'XMLHttpRequest',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Mobile Safari/537.36'
    }
    pattern_comment_content = re.compile(r'<div class="context">(.*?)</div>', re.S)

    def __init__(self):
        self._scenic_db = MafengwoDbHandle()
        self._scenic_db.set_db_table('test', 'mafengwo_scenic')

        self._comment_db = MafengwoDbHandle()
        self._comment_db.set_db_table('test', 'mafengwo_comment')

    def close(self):
        """Release both DB connections."""
        self._comment_db.destroy()
        self._scenic_db.destroy()

    def _get_response(self, url_comment, scenid, page=1):
        """POST one comment-page request and return the raw response text."""
        formdata = {"poiid": scenid, "page": page}
        time.sleep(4)  # throttle: avoid hammering the endpoint
        resp = requests.post(url_comment, headers=self.headers, data=formdata)
        return resp.text

    def _store_comments(self, data_json, scenid):
        """Insert every comment parsed from one payload; skip rows that fail.

        Extracted because the identical insert/commit loop appeared twice
        in req().
        """
        for comment in self.parse_comment(data_json, scenid):
            try:
                self._comment_db.insert(comment)
                self._comment_db.commit()
            except Exception as e:
                # Best-effort insert: log and keep going, as before.
                print(e)
                continue

    def req(self, where="scenic_id=1364"):
        """Fetch and store all comment pages for scenic ids matching *where*.

        The WHERE clause was previously hard-coded; it is now a parameter
        whose default preserves the old behavior.
        """
        result = self._scenic_db.query(['scenic_id'], where)
        url_comment = 'https://m.mafengwo.cn/poi/poi/comment_page'

        for row in result:
            scenid = row["scenic_id"]
            page = 1
            content = self._get_response(url_comment, scenid)
            data_json = json.loads(content.strip())
            self._store_comments(data_json, scenid)

            # The payload advertises further pages via "moreComment".
            while data_json.get("moreComment"):
                page += 1
                content = self._get_response(url_comment, scenid, page)
                data_json = json.loads(content)
                self._store_comments(data_json, scenid)

    def parse_comment(self, data_json, scenid):
        """Yield one dict per comment <li> in the payload's HTML fragment."""
        content = data_json.get("html").strip()
        take_first = lambda nodes: nodes[0] if nodes else ""
        # Build the lxml tree once (the old code parsed `content` twice).
        tree = html.fromstring(content)
        for li in tree.xpath('/html/body/ul/li'):
            yield {
                'content': take_first(li.xpath("./div[1]/text()")),
                'scenic_id': scenid,
                'user_name': take_first(li.xpath("./dl/dd/p/text()")),
                'level': take_first(li.xpath("./dl/dd/p/span/text()")),
                'star': take_first(li.xpath("./dl/dd/div/text()")),
                'comment_date': take_first(li.xpath("./dl/dd/div[2]/text()")),
                'from_url': take_first(li.xpath("./dl/div[3]/a/@href")),
                'from_text': take_first(li.xpath("./dl/div[3]/a/text()")),
            }

def test_comment():
    """Ad-hoc manual check of the comment_page endpoint (does network I/O)."""
    headers = {
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Connection': 'keep-alive',
        'Host': 'm.mafengwo.cn',
        'Referer': 'https://m.mafengwo.cn/poi/comment_5504076.html',
        'Cache-Control': 'max-age=0',
        'X-Requested-With': 'XMLHttpRequest',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Mobile Safari/537.36'
    }
    data = {"poiid": "14333425", "page": "1"}
    resp = requests.post("https://m.mafengwo.cn/poi/poi/comment_page", headers=headers, data=data)
    content = resp.json().get("html").strip()
    # Bug fix: the original referenced `tree` without ever building it,
    # which raised NameError; parse the HTML fragment first.
    tree = html.fromstring(content)
    print(len(tree.xpath('/html/body/ul/li')))
    for li in tree.xpath('/html/body/ul/li'):
        pass

if __name__ == '__main__':
    # spider = CommentSpider()
    try:
        crawler_runner = CrawlerRunner()
        crawler_runner.crawl(MafengwoSpider)
        # join() returns a Deferred that fires when every crawl finishes;
        # use it to stop the twisted reactor on success or failure.
        finished = crawler_runner.join()
        finished.addBoth(lambda _: reactor.stop())
        reactor.run()
        # spider.req()
    except Exception as e:
        print(str(e))
    finally:
        # spider.close()
        pass