# -*- coding: utf-8 -*-
import sys
# NOTE(review): py2-only hack to force utf-8 as the default codec; this whole
# script is legacy Python 2 (see the reload() call). Drop when porting to py3.
reload(sys)
sys.setdefaultencoding('utf-8')
import scrapy


# Read the domain list. strip() removes any line ending ('\n' or '\r\n'),
# unlike the old replace('\r\n', '') which left a trailing '\n' on
# Unix-line-ended files and thus produced broken query URLs.
# Blank lines are skipped so they don't generate empty lookups.
with open('domains.txt') as file_obj:
    domains = [line.strip() for line in file_obj if line.strip()]

# Base whois endpoint; one request URL per domain.
whois_url = 'http://www.sfn.cn/whois/whoisResult?flag=flag&whoisName='
urls = [whois_url + domain for domain in domains]


class WhoisSpider(scrapy.Spider):
    """Fetch the sfn.cn whois result page for each domain and print a
    '=='-joined summary line: domain==status==sponsor==start==end."""

    name = 'whois'
    start_urls = urls

    def parse(self, response):
        """Extract whois fields from the result page.

        The page lays the fields out as child divs of div.left_def in a
        fixed order: index 1 = status, 3 = sponsor, 5 = registration
        (start) time, 6 = expiry (end) time. Indices beyond 6 carry no
        data — TODO confirm against a live page.
        """
        # Default every field so a short or malformed page cannot raise
        # NameError at the print below (the original left these unbound).
        status = sponsor = start_time = end_time = ''
        rows = response.css('div[class="left_def"]').xpath('div')
        for index, div in enumerate(rows):
            if index > 6:
                break  # nothing useful past the 7th row; stop scanning
            text = div.xpath('span[@class="vright"]/text()').extract()
            # Guard every row against an empty extract, not just index 5.
            value = text[0] if text else ''
            if index == 1:
                status = value
            elif index == 3:
                sponsor = value
            elif index == 5:
                start_time = value
            elif index == 6:
                end_time = value
        # The queried domain is the value of the whoisName= query parameter,
        # i.e. everything after the last '=' in the request URL.
        domain = response.url.split('=')[-1]
        # Single-argument print(...) is valid in both py2 and py3.
        print('=='.join([domain, status, sponsor, start_time, end_time]))
