#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import scrapy
import re
import json
import os
from scrapy.selector import Selector

class PandapiaSpider(scrapy.Spider):
    """Crawl pandapia.com panda profile pages.

    Reads tab-separated panda names from ./config/pandaName.txt, POSTs each
    name to the site's search endpoint, then saves the matching panda's
    profile page and every family-member page linked from it under
    ./data/pandapia/<id>.html.
    """
    name = 'pandapia'

    def start_requests(self):
        # One search request per configured panda; row[1] holds the keyword
        # (row[0] presumably an index/label — confirm against pandaName.txt).
        for row in self.getNames():
            yield scrapy.FormRequest(
                url='http://pandapia.com/Index/search_all.html',
                formdata={'keyword': row[1]},
                callback=self.getUniqu,
            )

    def getUniqu(self, response):
        """Parse the search JSON and request the first matching panda's page."""
        body = json.loads(response.body)
        panda_id = body['data']['panda'][0]['id']
        # The JSON id may be an int; coerce to str so URL concatenation
        # cannot raise TypeError.
        yield scrapy.Request(
            url='http://pandapia.com/panda/view.html?id=' + str(panda_id),
            callback=self.parse,
        )

    def parse(self, response):
        """Save a panda profile page and follow its family-member links."""
        filename = self.getPage(response.url)
        # Original `if isfile: pass` was a no-op; the intent was clearly to
        # avoid re-writing an already-saved page. Links are still followed
        # either way, preserving the crawl behavior.
        if not os.path.isfile(filename):
            self._save(filename, response.body)
        s = Selector(text=response.body)
        links = s.xpath('//a[contains(@href, "javascript")]/@onclick').extract()
        for link in links:
            linkinfo = re.match('.*?id=(.*?)\'$', link)
            if linkinfo is None:
                # onclick handler without an id= payload — nothing to follow
                continue
            url = 'http://pandapia.com/panda/view.html?id=' + linkinfo.group(1)
            yield scrapy.Request(url=url, callback=self.family)

    def family(self, response):
        """Save a family-member page; does not recurse further."""
        filename = self.getPage(response.url)
        if not os.path.isfile(filename):
            self._save(filename, response.body)

    def _save(self, filename, body):
        """Write raw response bytes to filename, creating the directory if needed."""
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'wb') as f:
            f.write(body)

    def getPage(self, url):
        """Map a view URL to its local cache path, keyed by the id parameter.

        BUG FIX: the original used ``pageinfo.groups(0)``, which returns a
        *tuple*, producing filenames like ``('123',).html`` and defeating
        the isfile() dedup checks. ``group(1)`` yields just the id string.
        """
        pageinfo = re.match(r'.*?id=(.*?)$', url)
        page = pageinfo.group(1)
        filename = './data/pandapia/%s.html' % page
        return filename

    def getNames(self):
        """Return the tab-split rows of ./config/pandaName.txt.

        Uses a context manager so the file handle is always closed
        (the original leaked it).
        """
        with open('./config/pandaName.txt', 'r', encoding='UTF-8') as fh:
            return [row.split('\t') for row in fh.read().splitlines()]
            