import pymysql.cursors
import scrapy
import scrapy.linkextractors
import scrapy.selector
import scrapy.spiders

from viewtags.items import ViewTagsItem


class ViewTags(scrapy.Spider):
    """Spider that scrapes the review-tag cloud for scenic spots on dianping.com.

    Reads the list of spot links from MySQL, clears the destination tags
    table, then walks the list one spot at a time: each `parse` extracts the
    tag name/count pairs and queues the next spot's review page.

    NOTE(review): everything below the class header runs at class-definition
    (import) time — DB connection, table truncation, cookie loading. Kept
    here to preserve the original crawl lifecycle, but moving it into
    ``start_requests``/``__init__`` would be cleaner.
    """
    name = "viewtags"
    allowed_domains = ["dianping.com"]

    # Open the database connection. Keyword arguments are required since
    # PyMySQL 1.0 (positional form was removed) and are clearer anyway.
    db = pymysql.connect(host='localhost', user='root',
                         password='root', database='spider')
    # Cursor used for both the cleanup and the link query.
    cur = db.cursor()
    # Empty the tags table so each crawl starts from a clean slate.
    cur.execute("delete from cr_dianping_comments_tags")
    db.commit()
    cur.execute("select link from cr_dianping_estimate_ss_list_copy")
    # fetchall() yields one-column tuples; take the column directly instead
    # of slicing str(tuple) — the old approach broke on quotes/escapes.
    url_list = [row[0] for row in cur.fetchall()]

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}

    # Load saved cookies ("k1=v1; k2=v2; ...") into a dict; `with` closes
    # the file (the original leaked the handle).
    cookies = {}
    with open(r'viewtags/cookies.txt', 'r') as f:
        for line in f.read().split(';'):
            # maxsplit=1 keeps '=' characters inside the cookie value intact.
            key, value = line.strip().split('=', 1)
            cookies[key] = value
    print(cookies)

    start_urls = [url_list[0] + "/review_all"]
    # start_urls = ["http://www.dianping.com/shop/2160487/review_all"]

    def start_requests(self):
        """Kick off the crawl with the first spot's review page."""
        print("*****start_requests*****")
        yield scrapy.Request(
            url=self.start_urls[0],
            headers=self.headers,
            cookies=self.cookies,
            callback=self.parse)

    def parse(self, response):
        """Extract tag name/count items for one spot, then queue the next spot."""
        print("*********parse**********")
        comments_count = response.xpath(
            "//div[@class='tabs']/ul/li/span[@class='active']/em[@class='col-exp']/text()"
        ).extract()[0].strip()[1:-1]
        # Only spots with at least 14 comments appear to expose a usable tag
        # cloud — TODO confirm this threshold against the page layout.
        if int(comments_count) >= 14:
            spot_name = response.xpath(
                "//div[@class='review-list-main']/div[@class='review-list-header']/h1/a/text()"
            ).extract()[0].strip()
            print(spot_name)
            print(response.xpath("//div[@class='content']/span"))
            for tag in response.xpath("//div[@class='content']/span"):
                item = ViewTagsItem()
                item['spot_name'] = spot_name
                tags = tag.xpath(".//a/text()").extract()[0].strip()
                # Tag text is "name(count)": split on the parentheses.
                item['comments_tags'] = tags[:tags.find("(")].strip()
                item['tags_count'] = tags[tags.find("(") + 1:tags.find(")")].strip()
                item['url'] = self.url_list[0]
                yield item
        # BUG FIX: advance to the next spot unconditionally. Previously this
        # lived in a for...else nested under the `if`, so any spot with fewer
        # than 14 comments never yielded a follow-up request and the crawl
        # stalled permanently.
        self.url_list.pop(0)
        if self.url_list:
            yield scrapy.Request(self.url_list[0] + "/review_all",
                                 headers=self.headers,
                                 cookies=self.cookies,
                                 callback=self.parse)

