# -*- coding: utf-8 -*-
import re

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from zhenai.items import ZhenaiItem

class UmsgSpider(CrawlSpider):
    """Crawl zhenai.com marriage-seeking listings and scrape user profiles.

    Follows region index pages -> per-region user listing pages ->
    individual profile pages, yielding one ZhenaiItem per profile.
    """

    name = 'umsg'
    allowed_domains = ['zhenai.com']
    start_urls = ['http://www.zhenai.com/zhenghun/']

    # Matches all region index pages. Dots are escaped so '.' matches the
    # literal domain instead of any character.
    page_links = LinkExtractor(allow=(r"http://www\.zhenai\.com/zhenghun/\w+",))

    # Matches the user listing pages within a region.
    user_links = LinkExtractor(allow=(r"http://www\.zhenai\.com/zhenghun/\w+/nv",))

    # Matches a user's personal profile page.
    profile_links = LinkExtractor(allow=(r"http://album\.zhenai\.com/u/\d+",))

    rules = (
        Rule(page_links, follow=True),
        Rule(user_links, follow=True),
        Rule(profile_links, callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        """Parse one profile page into a ZhenaiItem and yield it."""
        item = ZhenaiItem()

        item["username"] = self.get_username(response)      # nickname
        item["age"] = self.get_age(response)                # age text
        item["header_url"] = self.get_header_url(response)  # avatar URL
        item["images_url"] = self.get_images_url(response)  # photo album URLs
        item["content"] = self.get_content(response)        # self-introduction
        item["place_from"] = self.get_place_from(response)  # place of residence
        item["education"] = self.get_education(response)    # education level
        item["hobby"] = self.get_hobby(response)            # hobby Q&A text
        item["source_url"] = response.url                   # profile page URL
        # NOTE(review): "sourec" looks like a typo for "source", but the key
        # must match the field declared on ZhenaiItem -- verify there before
        # renaming, otherwise the item assignment raises KeyError.
        item["sourec"] = "zhenai"                           # data source tag
        yield item

    def get_username(self, response):
        """Return the user's nickname, or None if absent."""
        return response.xpath("//div/h1[@class='nickName']/text()").get()

    def get_age(self, response):
        """Return the age text, or None if absent."""
        return response.xpath("//div[@class='purple-btns']//div[2]//text()").get()

    def get_header_url(self, response):
        """Return the avatar image URL parsed from an inline CSS style.

        Returns None instead of raising when the style attribute or the
        image URL inside it is missing (the original crashed with
        AttributeError on such pages).
        """
        style_link = response.xpath("//div[@class='m-userInfo']//div/div/@style").get()
        if not style_link:
            return None
        match = re.search(r"https.*?(jpg|png)", style_link)
        return match.group() if match else None

    def get_images_url(self, response):
        """Return the list of album photo links (possibly empty)."""
        return response.xpath("//div[@class='photoWrapper']//@href").getall()

    def get_content(self, response):
        """Return the self-introduction text, or None if absent."""
        return response.xpath("//div[@class='m-content-box m-des']/span/text()").get()

    def get_place_from(self, response):
        """Return the place of residence, or None if absent."""
        return response.xpath("//div[@class='purple-btns']/div[5]/text()").get()

    def get_education(self, response):
        """Return the education level, or None if absent."""
        return response.xpath("//div[@class='purple-btns']/div[8]/text()").get()

    def get_hobby(self, response):
        """Concatenate each hobby question/answer pair, one pair per line.

        Missing question or answer text is treated as "" so a partial
        entry no longer raises TypeError (str + None, as the original
        did when .get() returned None).
        """
        parts = []
        for node in response.xpath("//div/div[@class='item f-fl']"):
            question = node.xpath("./div[@class='question f-fl']/text()").get() or ""
            answer = node.xpath("./div[@class='answer f-fl']/text()").get() or ""
            parts.append(question + answer + '\n')
        return "".join(parts)
