import ast

import scrapy

class DmozSpider(scrapy.Spider):
    """Scrape index-constituent tables from eastmoney.com and write each
    index's weights out as an importable Python module (e.g. ``g_500.py``).

    One request is issued per index code; ``parse`` turns each response
    into a ``{'<sh|sz><code>': weight, ...}`` dict literal on disk.
    """

    name = "500"

    # Maps the index code (the trailing ``code=`` query parameter of the
    # request URL) to the variable/file name used for the generated module.
    namedict = {
        "000905": "g_500",
        "000016": "g_50",
        "000300": "g_300",
    }

    # Defined statically instead of being appended inside __init__:
    # mutating the class-level list there duplicated all three URLs every
    # time a new spider instance was created.
    start_urls = [
        "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?type=SHSZZS&sty=SHSZZS&st=0&sr=-1&p=0&ps=500&js=var%20ACdrDEml={pages:(pc),data:[(x)]}&code=000905",
        "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?type=SHSZZS&sty=SHSZZS&st=0&sr=-1&p=1&ps=300&js=var%20WSZsvHCE={pages:(pc),data:[(x)]}&code=000300",
        "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?type=SHSZZS&sty=SHSZZS&st=0&sr=-1&p=1&ps=50&js=var%20lqSKJlHA={pages:(pc),data:[(x)]}&code=000016",
    ]

    def parse(self, response):
        """Extract the bracketed data array from *response* and write one
        ``<name>.py`` file mapping prefixed stock codes to index weights.

        Raises KeyError if the URL's index code is not in ``namedict``.
        """
        # The index code is the value of the last '='-delimited field of
        # the URL, i.e. the ``code=NNNNNN`` query parameter.
        code = response.url.split("=")[-1]
        who = self.namedict[code]

        # response.body is bytes on Python 3 Scrapy (str on Python 2);
        # normalize to text before searching for the data array.
        body = response.body
        if isinstance(body, bytes):
            body = body.decode("utf-8", "replace")

        # Locate the JSON-ish array, e.g. ["rec1","rec2",...]; bail out
        # rather than parse a garbage slice when it is missing.
        pos1 = body.find('[')
        pos2 = body.find(']')
        if pos1 == -1 or pos2 == -1:
            return

        # ast.literal_eval is a safe replacement for eval() on untrusted
        # network data: it only accepts Python literal syntax.
        records = list(ast.literal_eval(body[pos1:pos2 + 1]))

        with open(who + ".py", 'w') as f:
            f.write("#!/usr/bin/env python2.7\n")
            f.write("# encoding: utf-8\n")

            f.write("\n\n%s = {\n" % (who))
            for record in records:
                fields = record.split(',')
                # Shanghai-listed codes start with '6'; everything else
                # returned here is Shenzhen-listed.
                if fields[0][0] == '6':
                    suffix = "sh"
                else:
                    suffix = "sz"

                # fields[12] is the weight column; fields[1] the name,
                # kept as a trailing comment in the generated file.
                line = "    '%s%s': %f,\t\t#%s\n" % (suffix, fields[0], float(fields[12]), fields[1])
                f.write(line)
            f.write("}")