# http://vip.stock.finance.sina.com.cn/quotes_service/view/CN_TransListV2.php?num=2&symbol=%s'

import scrapy
import datetime
import os
from scrapy import Request
from allstocksname import g_StockNameDict

class DmozSpider(scrapy.Spider):
    """Spider that downloads per-stock tick ("fenbi") transaction lists from
    Sina's quotes service and stores each raw response body on disk.

    One start URL is generated per stock symbol in ``g_StockNameDict``; each
    response body is written verbatim to ``E:/temp/fenbi/<today>/<symbol>``.
    """

    name = "fenbi"
    # Include the domain actually being crawled: the original value only
    # listed "10jqka.com.cn", which does not match the sina.com.cn start
    # URLs, so any follow-up requests would be dropped as offsite.
    allowed_domains = ["10jqka.com.cn", "sina.com.cn"]
    save_dirs = None   # output directory for the current run, set in __init__
    start_urls = []    # populated (rebuilt, not appended) in __init__

    headers = {
        # "keep - alive" (with spaces) is not a valid header token and would
        # be ignored or rejected by servers.
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'
    }

    def __init__(self):
        super().__init__()
        day = str(datetime.date.today())
        DmozSpider.save_dirs = r"E:/temp/fenbi/%s/" % (day)
        # exist_ok replaces the original broad try/except (Python 2 syntax
        # `except Exception, e` / `print e`, which does not parse under
        # Python 3) that silently swallowed every makedirs failure; genuine
        # errors (e.g. permissions) now surface instead of causing later
        # silent write failures.
        os.makedirs(DmozSpider.save_dirs, exist_ok=True)

        # Rebuild the list instead of appending to the shared class-level
        # attribute, so instantiating the spider more than once does not
        # duplicate every URL.
        DmozSpider.start_urls = [
            'http://vip.stock.finance.sina.com.cn/quotes_service/view/CN_TransListV2.php?num=10000&symbol=%s' % (key)
            for key in g_StockNameDict
        ]

    def make_requests_from_url(self, url):
        """Attach the shared browser-like headers to every start request."""
        return Request(url, headers=DmozSpider.headers)

    def parse(self, response):
        """Write the raw response body to <save_dirs>/<symbol>."""
        # The symbol is the value of the trailing "symbol=" query parameter.
        filename = response.url.split("=")[-1]
        filename = DmozSpider.save_dirs + filename

        with open(filename, 'wb') as f:
            f.write(response.body)
