import scrapy
import json
import math
from tongCheng.items import TongchengItem
class SpiderSpider(scrapy.Spider):
    """Crawl yuehui.163.com search results for both sexes and yield one
    TongchengItem per user profile (basic attributes + self-introduction).
    """

    name = 'spider'
    # allowed_domains = ['www.xxx.com']
    # start_urls = ['http://www.xxx.com/']

    # Session cookies copied from a logged-in browser; the site requires
    # authentication, so the spider stops working once these expire.
    cookie = {'mail_psc_fingerprint': 'c1be4ec6354df388b76d43600036e58d', ' _ntes_nnid': '0dd64ab7c73d5eceefa6f8f393a005c4,1601124817734', ' _ntes_nuid': '0dd64ab7c73d5eceefa6f8f393a005c4', ' idate_province': '19', ' idate_city': '0', ' NTES_OSESS': 'wiMsPPbSxWdi6KEJpNss__fPTECXD6HgiyWnkZCkJ4bXNNKp1ilCQ0SiJWtJeWp80Ny9JA4JcMc8DSZQiDwv6aFygyIANvU_GVvdcODFXSnlto9q4KqyzQI3isoshJG0mLRdSrIlWx1UVGZNK9Ai_zvf8oRjcgf8UqOOPu6q7UsflJeH4U5r5LWrGsQo3A2VTF_aGR8ObSDckhMDbsD8_bzCSygBvCzsjAwygCiZqEsyl', ' S_OINFO': '1608890343|0|##|uid_c497523f109428a7b852282ed30bd29c@qq.163.com', ' P_OINFO': 'uid_c497523f109428a7b852282ed30bd29c@qq.163.com|1608890343|0|yuehui|00&99|null#0|null|yuehui|uid_c497523f109428a7b852282ed30bd29c@qq.163.com', ' sid': '703992344g3wEpgmpuhYZoBe2qMCBFedawegSjhvZB8ENcSeqsKpO1QikyMWtBKXvRD2XjpcS', ' idateuid': '703992344', ' idate_emsex': '0', ' JSESSIONID': '7A3ED5E0190AFC580CC45F1B429D0FBD'}
    # Search URL for female users (sex=0); the page number is appended at the end.
    url_f = 'https://yuehui.163.com/searchusersrcm.do?ajax=1&ageBegin=18&ageEnd=89&aim=-1&marriage=-1&mode=4&order=0&province=19&city=2&district=-1&sex=0&userTag=0&vippage=-1&searchType=0&pagesize=81&page='
    # Search URL for male users (sex=1); the page number is appended at the end.
    url_m = 'https://yuehui.163.com/searchusersrcm.do?ajax=1&ageBegin=18&ageEnd=89&aim=-1&marriage=-1&mode=4&order=0&province=19&city=2&district=-1&sex=1&userTag=0&vippage=-1&searchType=0&pagesize=81&page='
    # HTTP proxy used for every request.
    ip = "183.220.145.3:80"

    def start_requests(self):
        """Start one paginated crawl per sex, beginning at page 1."""
        print("====爬虫开始====")
        for base_url in (self.url_f, self.url_m):
            page = 1
            if base_url == self.url_f:
                sex = "女性"
                print("==开始爬取 女性 ==")
            else:
                sex = "男性"
                print("==开始爬取 男性 ==")
            yield scrapy.Request(
                base_url + str(page),
                callback=self.parse,
                meta={"proxy": "http://" + self.ip, "url": base_url, "page": page, "sex": sex},
                cookies=self.cookie,
                dont_filter=True,
            )

    def parse(self, response):
        """Parse one JSON search-result page.

        Yields a detail-page request per listed user, then a request for the
        next result page until the last page is reached.
        """
        # The endpoint returns a JSON array whose first element holds the data.
        data = json.loads(response.text)[0]
        # NOTE: "total" is the total number of matching USERS, not pages.
        total = data.get("total")
        page = response.meta.get("page")
        sex = response.meta.get("sex")
        print("====开始爬取",sex,"第",str(page),"页，一共",total,"页====")

        # One detail request per user on this page.
        for user in data.get("list") or []:
            items = TongchengItem()
            user_id = user.get("id")  # renamed from `id` to avoid shadowing the builtin
            items['id'] = user_id
            url = 'https://yuehui.163.com/viewuser.do?id=' + str(user_id)
            print(url)
            yield scrapy.Request(url, callback=self.get_detailed,
                                 meta={"proxy": "http://" + self.ip, "items": items},
                                 cookies=self.cookie)

        # Pagination: 81 results per page, so the last page index is ceil(total/81).
        page += 1
        max_page = math.ceil(int(total) / 81)
        # BUGFIX: was `page < max_page`, which silently skipped the final page.
        if page <= max_page:
            base_url = response.meta.get("url")
            yield scrapy.Request(base_url + str(page), callback=self.parse,
                                 meta={"proxy": "http://" + self.ip, "url": base_url,
                                       "page": page, "sex": sex},
                                 cookies=self.cookie, dont_filter=True)

    def get_detailed(self, response):
        """Scrape one user's profile page into the TongchengItem carried in meta."""
        items = response.meta.get("items")
        # Sex
        items = self.get_str('sex','//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[1]/text()',items, response)
        # Marital status
        items = self.get_str('marital_status','//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[2]/text()',items, response)
        # Age
        items = self.get_str('age', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[3]/text()', items, response)
        # Education
        items = self.get_str('education', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[4]/text()', items, response)
        # Area — extract_first('') instead of extract()[0] so a missing node
        # yields an empty string rather than an IndexError.
        items['area'] = response.xpath('//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[5]/p[1]/span[2]//text()').extract_first('')
        # Job
        items['job'] = response.xpath('//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[5]/p[2]/span[2]//text()').extract_first('')
        # Height
        items = self.get_str('height', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[6]/text()', items, response)
        # Company type
        items = self.get_str('company_type', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[7]/text()', items, response)
        # Weight
        items = self.get_str('weight', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[8]/text()', items, response)
        # Average monthly income
        items = self.get_str('month_avg_income', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[9]/text()', items, response)
        # Horoscope
        items = self.get_str('horoscope', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[10]/text()', items, response)
        # Housing situation
        items = self.get_str('apartment', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[11]/text()', items, response)
        # Chinese zodiac
        items = self.get_str('animal', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[12]/text()', items, response)
        # Vehicle
        items = self.get_str('vehicle', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[3]/ul/li[13]/text()', items, response)
        # Nickname
        items['pet_name'] = response.xpath('//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[1]/p[1]/text()').extract_first('')
        # Self-introduction
        items = self.get_str('self_introduction', '//*[@id="page-viewuser"]/article/article[2]/section[1]/div[2]/div[2]/div[2]/p/text()', items, response)
        # BUGFIX: str.replace returns a NEW string; the original discarded the
        # result, so commas were never normalized. Assign it back.
        items['self_introduction'] = items['self_introduction'].replace(",", "，").replace("\n", "").replace(" ", "")
        # "He/she is looking for" — join all text nodes, strip whitespace,
        # normalize ASCII commas to full-width (split() already removed \n and spaces).
        items['search'] = ''.join(''.join(response.xpath('//*[@id="page-viewuser"]/article/aside/section[1]/div[2]/div[7]/div//text()').extract()).split()).replace(",", "，")
        print(items)
        yield items

    def get_str(self, name, x, items, response):
        """Store under items[name] the text of the first node matched by xpath
        `x`, taking the part after the full-width '：' label separator and
        stripping all whitespace. Missing node or separator yields '' / the
        whole text instead of raising IndexError (robustness fix).
        """
        nodes = response.xpath(x).extract()
        if nodes:
            parts = nodes[0].split("：")
            value = parts[1] if len(parts) > 1 else parts[0]
            items[name] = ''.join(value.split())
        else:
            items[name] = ''
        return items



