# -*- coding: utf-8 -*-
import scrapy
from kst.items import DartItem

# DART (dart.fss.or.kr) disclosure-search endpoint and the fixed query
# parameters for a single company / date window. Hard-coded for one run;
# edit these to scrape a different company or period.
api_url = 'http://dart.fss.or.kr/dsab001/search.ax'
stock_num = '089230'  # KRX ticker of the target company — not referenced below; TODO confirm other modules use it
stock_name = 'THE E&M'  # company name, sent as the 'textCrpNm' form field
stock_id = '00519252'  # DART corporation code, sent as the 'textCrpCik' form field
date_start = '20170101'  # search window start, YYYYMMDD
date_end = '20170606'  # search window end, YYYYMMDD


class FssOrSpider(scrapy.Spider):
    """Spider that queries the DART (Korean FSS) disclosure-search API for a
    single company's filings and yields one ``DartItem`` per result row.

    The search parameters (company, date window) come from the module-level
    constants; the search is issued as a POST in :meth:`start_requests`.
    """
    name = 'fss_or'
    allowed_domains = ['dart.fss.or.kr']
    start_urls = [api_url]  # unused once start_requests is defined; kept for compatibility

    def start_requests(self):
        """Issue the initial POST search request against the DART API."""
        return [scrapy.FormRequest(api_url,
                                   formdata={'currentPage': '1',
                                             'maxResults': '100',
                                             'maxLinks': '10',
                                             'sort': 'date',
                                             'series': 'desc',
                                             'textCrpCik': stock_id,
                                             'textCrpNm': stock_name,
                                             'finalReport': 'recent',
                                             'startDate': date_start,
                                             'endDate': date_end})]

    def parse(self, response):
        """Dump the raw search response, then yield a ``DartItem`` per row.

        Rows without the expected five ``<td>`` columns (header rows, or the
        "no results" placeholder) are skipped instead of raising
        AttributeError/IndexError as the previous version did when
        ``extract_first()`` returned ``None`` or a link had fewer text nodes.
        """
        # Keep a copy of the raw HTML for offline inspection/debugging.
        filename = 'RESULT_STOCK'
        with open(filename, 'wb') as f:
            f.write(response.body)

        for tr in response.xpath('//table/tbody/tr'):
            tds = tr.xpath('.//td')
            if len(tds) < 5:
                continue  # malformed / header / "no data" row

            # The report title is the SECOND text node inside the link
            # (the first is decorative/whitespace in DART's markup).
            titles = tds[2].xpath('.//a/text()').extract()

            item = DartItem()
            item['order'] = (tds[0].xpath('.//text()').extract_first() or '').strip()
            item['name'] = (tds[1].xpath('.//span/a/text()').extract_first() or '').strip()
            item['title'] = titles[1].strip() if len(titles) > 1 else ''
            item['type'] = (tds[3].xpath('.//div/text()').extract_first() or '').strip()
            item['time'] = (tds[4].xpath('.//text()').extract_first() or '').strip()
            # Relative hrefs -> absolute URLs resolved against the response URL.
            item['name_url'] = response.urljoin(
                (tds[1].xpath('.//span/a/@href').extract_first() or '').strip())
            item['title_url'] = response.urljoin(
                (tds[2].xpath('.//a/@href').extract_first() or '').strip())
            yield item
            # Follow-up fetch of the filing document, currently disabled:
            # yield scrapy.Request(item['title_url'], callback=self.parse_stock)

    def parse_stock(self, response):
        """Persist an individual filing page, named after its first query value.

        NOTE(review): assumes the URL has exactly one '=' (e.g. ``?rcpNo=...``)
        — confirm against the actual detail-page URL format.
        """
        # print() works on both Python 2 and 3; the bare `print 'x'` statements
        # this replaces were a SyntaxError under Python 3.
        print('start write')
        filename = response.url.split("=")[1] + '.html'
        with open(filename, 'wb') as f:
            f.write(response.body)
        print('stop write')
