import re

import scrapy

class SentimentSpider(scrapy.Spider):
    """Crawl guba.eastmoney.com in three phases.

    Phase 1: walk up to ``Maxp`` list pages, harvesting article links.
    Phase 2: once the list pages are exhausted, request the first article.
    Phase 3: strip the HTML from each article body, store the plain text in
    ``self.comments``, and follow the next collected article link.
    """

    name = 'sentiment'
    # NOTE(review): "*" is not a valid wildcard for Scrapy's offsite
    # middleware; kept for backward compatibility, but consider simply
    # removing the attribute to allow all domains.
    allowed_domains = ["*"]
    start_urls = ['http://guba.eastmoney.com']

    def __init__(self):
        super().__init__()
        self.Url = "http://guba.eastmoney.com"  # base for relative article links
        self.comments = []    # plain-text article bodies collected in phase 3
        self.Maxp = 9         # number of list pages to visit in phase 1
        self.cnt1 = 0         # responses processed so far (drives the phase)
        self.ripe_all = []    # absolute article URLs harvested in phase 1

    def parse(self, response):
        """Dispatch the response to the current crawl phase (see class doc).

        Yields follow-up ``scrapy.Request`` objects; results accumulate in
        ``self.ripe_all`` (links) and ``self.comments`` (article text).
        """
        if self.cnt1 < self.Maxp:
            # Phase 1: list page — harvest article links, follow pagination.
            self.cnt1 += 1
            raw_all = response.xpath("//ul[@class='newlist']/li/span/a").extract()

            for anchor in raw_all:
                # BUGFIX: the original searched for the misspelled 'herf="',
                # so find() returned -1 and the slice started at a bogus
                # offset; search for 'href="' and skip its 6 characters.
                link = anchor[anchor.find("href=\"") + 6: anchor.find("\" title=")]
                if link.find("\" class") != -1:
                    # No title attribute before the class attribute: not an
                    # article anchor we want.
                    continue

                if link.find("caifuhao") != -1:
                    # caifuhao links are protocol-relative ("//caifuhao...").
                    self.ripe_all.append("http:" + link)
                else:
                    self.ripe_all.append(self.Url + link)

            # Last pager anchor points at the next list page.
            nexturl = response.xpath("//ul[@class='pagernums']//a").extract()[-1]
            nexturl = 'http://guba.eastmoney.com/' + nexturl[nexturl.find("href=") + 6: nexturl.rfind("\" target=")]
            yield scrapy.Request(url=nexturl, callback=self.parse, dont_filter=True)

        elif self.cnt1 == self.Maxp:
            # Phase 2: list pages done — start with the first harvested article.
            self.cnt1 += 1
            yield scrapy.Request(url=self.ripe_all[0], callback=self.parse, dont_filter=True)

        elif self.cnt1 < self.Maxp + 7:
            # Phase 3: article page — keep the tag-stripped text, then follow
            # the next article link.
            raw_all = response.xpath("//div[@class='article-body']//p").extract()

            for fragment in raw_all:
                # BUGFIX: the original walked characters into `temp` but never
                # reset it between fragments and discarded the joined result,
                # so self.comments stayed empty forever.
                self.comments.append(self._strip_tags(fragment))

            # BUGFIX: cnt1 was never advanced here and the next index was the
            # constant Maxp - 11 (= -2), so the same article was re-requested
            # forever (dont_filter=True defeats the dupe filter).  Advance the
            # counter and step through ripe_all[1], ripe_all[2], ...
            next_index = self.cnt1 - self.Maxp
            self.cnt1 += 1
            yield scrapy.Request(url=self.ripe_all[next_index], callback=self.parse, dont_filter=True)

    @staticmethod
    def _strip_tags(html):
        """Return *html* with every <...> tag removed (plain text only)."""
        return re.sub(r"<[^>]*>", "", html)
