# -*- coding: utf-8 -*-
import json
import re

import scrapy
from bs4 import BeautifulSoup

from MySpiders.items import LaGouItem


class LaGouSpider(scrapy.Spider):
    """Spider that POSTs an empty search form and dumps the prettified HTML.

    NOTE(review): despite the Lagou naming, the current target is
    loldytt.com.cn's search endpoint.  The original Lagou JSON pagination
    logic (iterating `kds` keywords page by page and yielding LaGouItem)
    was commented out; it has been removed here — recover it from VCS
    history if needed.
    """

    name = "lagou"

    # BUGFIX: the trailing comma is required.  Without it the parentheses
    # are plain grouping and start_urls is a *string*, which Scrapy would
    # iterate character-by-character instead of URL-by-URL.
    start_urls = (
        'http://www.loldytt.com.cn/search.asp',
    )

    # Form endpoint used by start_requests (same URL as start_urls[0]).
    myurl = 'http://www.loldytt.com.cn/search.asp'

    # Candidate search keywords for the (currently disabled) Lagou job
    # crawl: big data, cloud computing, docker, middleware, Node.js,
    # data mining, NLP, search algorithms, recommendation, full-stack,
    # image processing, machine learning, speech recognition.
    kds = [u'大数据', u'云计算', u'docker', u'中间件', 'Node.js', u'数据挖掘',
           u'自然语言处理', u'搜索算法', u'精准推荐', u'全栈工程师', u'图像处理',
           u'机器学习', u'语音识别']

    totalPageCount = 0  # total pages for the current keyword (computed by the disabled pager)
    curpage = 1         # 1-based page cursor
    cur = 0             # index of the current keyword in kds
    kd = kds[0]         # current search keyword

    def start_requests(self):
        """Kick off the crawl with a single empty-keyword form POST."""
        return [scrapy.http.FormRequest(
            self.myurl,
            formdata={'searchword': u""},
            callback=self.parse,
        )]

    def parse(self, response):
        """Parse the search response and print the prettified HTML."""
        soup = BeautifulSoup(response.body, "html5lib")
        # Parenthesized call form behaves identically on Python 2 (single
        # argument) and Python 3, unlike the bare `print` statement.
        print(soup.prettify())
