# -*- coding: utf-8 -*-
import scrapy
import re
import os
from scrapy import Request
from html.parser import HTMLParser
from scrapy.selector import Selector


class MyHTMLParser(HTMLParser):
    """Collect text nodes that contain no newline characters.

    Multi-line data chunks (typically inter-tag whitespace or formatted
    blocks) are discarded; every other text fragment is appended to
    ``self.data`` in document order.
    """

    def __init__(self):
        super().__init__()
        # Accumulates the accepted single-line text fragments.
        self.data = []

    def handle_startendtag(self, tag, attrs):
        # Self-closing tags carry no text content of interest.
        pass

    def handle_endtag(self, tag):
        # Closing tags are ignored; only text nodes matter here.
        pass

    def handle_data(self, data):
        # Keep only fragments without an embedded newline.
        if '\n' not in data:
            self.data.append(data)

class ExampleSpider(scrapy.Spider):
    """Crawl the jg.hbpu.edu.cn news index and save each article's plain
    text to ``output/<title>.txt``.
    """
    name = 'news'
    allowed_domains = ['jg.hbpu.edu.cn']
    start_urls = ['http://jg.hbpu.edu.cn']
    domain = 'http://jg.hbpu.edu.cn'

    def parse(self, response):
        """Parse the index page and schedule a request per article link.

        Article links are the anchors with ``target='_blank'``. The
        original code regex-scraped ``href="..."`` out of the serialized
        HTML and re-parsed each anchor through a fresh Selector to get the
        title; both are replaced with relative XPath attribute reads.
        """
        self.make_folder("output")
        # NOTE: the original walked a long table path first, but then used
        # an absolute '//a[...]' query, which searches the whole document
        # anyway — so the table prefix had no effect and is dropped.
        for anchor in response.xpath("//a[@target='_blank']"):
            # './@title' / './@href' stay relative to the current <a>
            # node; a bare '//@title' would scan the entire document.
            title = anchor.xpath("./@title").get()
            link = anchor.xpath("./@href").get()
            # Guard: anchors missing either attribute would otherwise
            # propagate None into the filename/URL and crash later.
            if title and link:
                # urljoin resolves relative hrefs correctly and avoids the
                # duplicate-slash risk of manual string concatenation.
                article_url = response.urljoin(link)
                yield Request(url=article_url,
                              callback=self.bookIndexParse,
                              meta={"title": title})

    def bookIndexParse(self, response):
        """Extract the article body text and write it to a .txt file."""
        title = response.meta["title"]
        # The body lives in the div with class 'v_news_content'. The
        # original chained an absolute '//div[...]' query after a long
        # table path, which already searched the whole document — query
        # the div directly.
        body_html = response.xpath("//div[@class='v_news_content']").get()
        if body_html is None:
            # Best-effort: skip pages without the expected body markup
            # instead of crashing on parser.feed(None).
            self.logger.warning("no article body found at %s", response.url)
            return

        parser = MyHTMLParser()
        parser.feed(body_html)
        result = ''.join(parser.data)
        # Path separators in a title would escape the output folder or
        # make the open() call fail — replace them.
        safe_title = title.replace('/', '_').replace('\\', '_')
        output_path = "output" + "/" + safe_title + ".txt"
        self.write_string_to_text(result, output_path)

    def write_string_to_text(self, content, path):
        """Write *content* to *path* as UTF-8 text, overwriting it.

        Fixes in this revision: the parameter no longer shadows the
        builtin ``str``; the file is opened via a context manager so the
        handle is closed even if the write raises; an explicit UTF-8
        encoding makes Chinese titles/body text portable (the platform
        default encoding may not be UTF-8).
        """
        with open(path, 'w', encoding='utf-8') as page_file:
            page_file.write(content)

    def make_folder(self, path):
        """Create the folder *path* if it does not exist yet."""
        if os.path.exists(path):
            print("文件已经存在")  # "folder already exists"
        else:
            os.makedirs(path)
   



