# -*- coding: utf-8 -*-
import os
import time
import scrapy
from scrapy import Request
from scrapy import FormRequest

class BkSpider(scrapy.Spider):
    """Page through Alimama's auction-search JSON endpoint and dump each
    raw response body to ``data/<page>``.

    Cookies are loaded from ``./spiders/cookie.txt`` (a browser-style
    ``k=v; k2=v2`` string) relative to the process working directory.
    """
    name = "bk"
    page = 1  # kept for backward compatibility; parse prefers request meta
    allowed_domains = ["ikeepfit.cn"]
    # start_urls = ["http://pub.alimama.com/myunion.htm?spm=a219t.7473494.1998155387.6.f7MrUa#!/promo/self/items?q=9.9%E5%8C%85%E9%82%AE"]
    start_urls = ["http://pub.alimama.com/pubauc/searchAuctionList.json?spm=a219t.7473494.1998155387.6.f7MrUa&q=9.9%E5%8C%85%E9%82%AE&perPagesize=40&t=1447514627318&_tb_token_=i1fh1jzwRxxo&_input_charset=utf-8&toPage="]

    # Throttle requests without blocking the reactor (replaces time.sleep
    # inside start_requests, which stalled the whole event loop).
    custom_settings = {"DOWNLOAD_DELAY": 1}

    def start_requests(self):
        """Yield one request per result page (1..99), carrying the page
        number in ``meta`` so parse() names its output file correctly even
        though responses arrive out of order."""
        # NOTE(review): path is relative to the working directory, not to
        # this file — assumes the crawl is started from the project root.
        with open("./spiders/cookie.txt") as f:
            cookie_str = f.read()

        cookies = {}
        for pair in cookie_str.split(';'):
            # maxsplit=1 keeps '=' characters inside cookie values intact;
            # skip empty/malformed segments (e.g. a trailing ';').
            name, sep, value = pair.partition("=")
            if sep:
                cookies[name.strip()] = value.strip()

        base_url = self.start_urls[0]
        for i in range(1, 100):
            self.page = i  # kept for backward compatibility only
            url = base_url + ("%d" % i)
            self.logger.debug("requesting page %d: %s", i, url)
            yield Request(url, cookies=cookies, meta={"page": i})

    def parse(self, res):
        """Write the raw response body to data/<page>.

        The page number comes from request meta; falls back to self.page
        for requests issued without it.
        """
        page = res.meta.get("page", self.page)
        path = os.path.join("data", str(page))
        # res.body is bytes — binary mode is required on Python 3 and
        # avoids newline translation on Windows.
        with open(path, "wb") as f:
            f.write(res.body)

