import pymysql.cursors
import requests
import scrapy
import scrapy.linkextractors
import scrapy.selector
import scrapy.spiders
from bs4 import BeautifulSoup

from viewspotcomments.items import ViewspotcommentsItem


class ViewSpotComments(scrapy.Spider):
    """Crawl Ctrip sight-review pages for every sight listed in MySQL.

    Seed links are read from ``cr_xiech_estimate_ss_list`` (column ``link``);
    each sight page URL ``....html`` is rewritten to its review page
    ``...-dianping.html``. ``parse`` yields one ``ViewspotcommentsItem`` per
    review, follows the "next page" link, and when a sight is exhausted moves
    on to the next URL in ``url_list``.
    """

    name = "viewspotcomments"
    allowed_domains = ["you.ctrip.com"]
    # start_urls=["http://you.ctrip.com/sight/kaiyang2342/21893.html"]

    # NOTE(review): everything below runs at class-definition (import) time,
    # so merely importing this module connects to MySQL and truncates the
    # comments table. Kept here for compatibility with the original design;
    # consider moving it into start_requests().
    # PyMySQL >= 1.0 requires keyword arguments for connect(); the original
    # positional call ('localhost', 'root', 'root', 'spider') no longer works.
    db = pymysql.connect(host='localhost', user='root',
                         password='root', database='spider')

    # Create a cursor for the connection.
    cur = db.cursor()
    # Empty the destination table before a fresh crawl.
    cur.execute("delete from cr_xiech_estimate_ss_comments")
    db.commit()
    cur.execute("select link from cr_xiech_estimate_ss_list")
    comment_urls = list(cur.fetchall())
    # fetchall() yields 1-tuples; take the first column directly instead of
    # slicing the tuple's str() representation, which breaks on any quote
    # character in the URL.
    url_list = [row[0] for row in comment_urls]
    # Seed with the first sight's review page; strip the ".html" suffix
    # (5 chars) and append "-dianping.html". Guard against an empty table so
    # class creation does not raise IndexError.
    start_urls = [url_list[0][:-5] + "-dianping.html"] if url_list else []
    print(start_urls)

    def parse(self, response):
        """Yield one item per review on this page, then follow pagination.

        When no "next page" link exists, pop the current sight from
        ``url_list`` and request the next sight's review page.
        """
        # Scrapy already downloaded this page; parse response.text instead of
        # re-fetching response.url with requests (the original issued a
        # second HTTP GET and a lossy encode/decode round-trip per page).
        soup = BeautifulSoup(response.text, 'lxml')
        v_name = soup.find("h1").string  # sight name from the page header
        print(v_name)

        for box in response.xpath("//div[@class='comment_ctrip']/div[@class='comment_single']"):
            item = ViewspotcommentsItem()
            item['view_name'] = v_name
            item['username'] = box.xpath(
                ".//div[@class='userimg']/span[@class='ellipsis']/a/@title"
            ).extract()[0].strip()
            # The star rating is encoded in an inline style attribute; [6:-1]
            # keeps only the numeric part of the value.
            item['view_star'] = box.xpath(
                ".//ul/li[@class='title cf']/span[@class='f_left']/span[@class='starlist']/span/@style"
            ).extract()[0].strip()[6:-1]

            line = box.xpath(
                ".//ul/li[@class='title cf']/span[@class='f_left']/span[@class='sblockline']")
            if not line:
                # No per-aspect score block on this review.
                item['sight_score'] = ""
                item['interest_score'] = ""
                item['costperformance_score'] = ""
            else:
                line = line.xpath("string(.)").extract()[0].strip()
                print(line.strip())
                # Sub-scores sit at fixed offsets relative to their Chinese
                # labels in the concatenated text.
                item['sight_score'] = line[3:4].strip()
                item['interest_score'] = line[line.find('味') + 2:line.find('味') + 3].strip()
                item['costperformance_score'] = line[line.find('比') + 2:line.find('比') + 3].strip()

            item['comment'] = box.xpath(
                ".//ul/li[@class='main_con']/span[@class='heightbox']"
            ).xpath("string(.)").extract()[0].strip()

            travel_time = box.xpath(
                ".//ul/li[@class='title cf']/span[@class='youcate']/text()")
            # [:-3] drops the trailing 3-character label from the travel-date
            # text; empty string when the element is absent.
            item['travel_time'] = travel_time.extract()[0].strip()[:-3] if travel_time else ""

            item['comment_time'] = box.xpath(
                ".//ul/li[@class='from_link']/span[@class='f_left']/span[@class='time_line']/em/text()"
            ).extract()[0].strip()
            item['usefultodo'] = box.xpath(
                ".//ul/li[@class='from_link']/span[@class='f_right']/span[@class='useful']/em/text()"
            ).extract()[0].strip()

            url = box.xpath(
                ".//ul/li[@class='from_link']/span[@class='f_right']/a[contains(text(),'详情')]/@href"
            ).extract()[0].strip()
            # urljoin handles both absolute and site-relative hrefs, unlike
            # blind string concatenation.
            item['detail_url'] = response.urljoin(url)
            yield item

        # Pagination: follow the "next page" link when present.
        nextpage = response.xpath(
            "//div[@class='ttd_pager cf']/div[@class='pager_v1']/a[contains(text(),'下一页')]/@href")

        if nextpage:
            url = nextpage.extract()[0].strip()
            print("下一页url************", url)
            # Build the absolute URL of the next review page.
            page = response.urljoin(url)
            print(page)
            yield scrapy.Request(page, callback=self.parse)
        else:
            # This sight is exhausted; advance to the next sight's reviews.
            self.url_list.pop(0)
            if self.url_list:
                next_url = self.url_list[0]
                yield scrapy.Request(next_url[:-5] + "-dianping.html", callback=self.parse)
