import scrapy
import json
import collections
from nlproject.items import SingleItem

"""
使用方法:
scrapy crawl table -a start_urls='["http://www.w3school.com.cn/xpath/xpath_syntax.asp"]' -a trXpath='tr' -a tdXpath='["th/text()","td/text()"]'     
"""
# http://www.miit.gov.cn/n1146312/n1146904/n1648374/c5473705/content.html
class TableSpider(scrapy.Spider):
    """
    Generic spider that extracts HTML tables via configurable XPaths.

    XPath targets of interest: thead / tbody / tr / th / td.

    Usage:
        scrapy crawl table \
            -a start_urls='["http://www.w3school.com.cn/xpath/xpath_syntax.asp"]' \
            -a trXpath='tr' \
            -a tdXpath='["th/text()","td/text()"]'
    """
    name = "table"

    def __init__(self, start_urls, trXpath, tdXpath, *args, **kwargs):
        """
        :param start_urls: JSON-encoded list of URLs to crawl.
        :param trXpath: XPath selecting the rows of each table,
                        e.g. 'tr' or '/tbody/tr'.
        :param tdXpath: JSON-encoded list of XPaths selecting cell text
                        within a row, e.g. '["th/text()","td/text()"]'
                        (default idea: '["td/text()"]').
        """
        super(TableSpider, self).__init__(*args, **kwargs)

        self.start_urls = json.loads(start_urls)  # type: list
        self.trXpath = trXpath  # type: str
        self.tdXpath = json.loads(tdXpath)  # type: list

    def parse(self, response):
        """
        Yield one OrderedDict per table row, mapping cell position -> text.

        Fixes over the previous revision:
        - A fresh dict is built for every row. The old code created one
          dict per table and yielded that same (mutated) object for each
          row, so all yielded items aliased one dict and stale values from
          longer rows leaked into shorter ones.
        - Cell keys increase continuously across all tdXpath expressions.
          The old inner enumerate restarted at 0 per expression, so a row
          matching both 'th' and 'td' had later texts overwrite earlier
          ones under the same keys.
        """
        for table in response.xpath('//table'):  # '//table' is fixed by design
            for tr in table.xpath(self.trXpath):
                rowDict = collections.OrderedDict()
                cellIdx = 0
                for tdXpath in self.tdXpath:
                    for text in tr.xpath(tdXpath).extract():
                        rowDict[cellIdx] = text
                        cellIdx += 1
                yield rowDict