# -*- coding: utf-8 -*-
import json
import re
import time

import scrapy

# https://m.weibo.cn/api/container/getIndex?type=uid&value=1870795791
from lxml import etree

from weiboSpider.items import WeibospiderItem


class WeiboSpider(scrapy.Spider):
    """Crawl m.weibo.cn hot-feed container pages, follow each post's author to
    their desktop profile page (weibo.com/u/<uid>), and extract the author's
    verification text, basic-info fields and tags into WeibospiderItem objects.
    """
    name = 'weibo'
    allowed_domains = ['weibo.com']
    # NOTE(review): the active container id contains literal spaces around the
    # dash ('_ - _'), unlike the commented-out ids below ('_-_') — confirm this
    # is intentional and not a typo.
    # , '102803_ctg1_4288_-_ctg1_4288', '102803_ctg1_4888_-_ctg1_4888'
    containerids = ['102803_ctg1_5088_ - _ctg1_5088']

    # Seed two pages (since_id 0 and 1) for every container id.
    start_urls = [
        "https://m.weibo.cn/api/container/getIndex?containerid=%s&openApp=0&since_id=%d"
        % (cid, page)
        for cid in containerids
        for page in range(2)
    ]

    # Session cookies for weibo.com.  Loop-invariant, so the dict is built
    # once at class level instead of once per card inside parse().
    # split('=', 1) keeps cookie values that themselves contain '=' intact
    # (the old split('=')[1] truncated them).
    user_cookies = 'SUB=_2AkMpfGl_f8NxqwJRmP8cy2nrb45zyQ3EieKfIJikJRMxHRl-yj9kqhwetRB6AvxHkL8cZPcIpBSohprRD0xCPuIuRzNX; SUBP=0033WrSXqPxfM72-Ws9jqgMF55529P9D9WFPWTXSB3Rc6zOgXfpUGYFK; SINAGLOBAL=3579043863435.597.1579214410571; ULV=1579218836203:2:2:2:6745014687324.452.1579218836055:1579214410797; YF-Page-G0=e57fcdc279d2f9295059776dec6d0214|1579218836|1579218676; _s_tentry=-; Apache=6745014687324.452.1579218836055'
    cookies = {
        pair.split('=', 1)[0]: pair.split('=', 1)[1]
        for pair in user_cookies.split('; ')
    }

    def parse(self, response):
        """Parse one getIndex JSON page: pull each card's author uid and
        request the author's desktop profile page with parse_info as callback.

        Cards without an 'mblog' key (e.g. ads/banners) are skipped instead of
        raising KeyError.
        """
        # response.body_as_unicode() is deprecated (removed in Scrapy 2.x);
        # response.text is the supported equivalent.
        cards = json.loads(response.text)['data']['cards']
        for card in cards:
            mblog = card.get('mblog')
            if not mblog:
                continue
            profile_url = mblog['user']['profile_url']
            # uid sits between 'uid=' and the next '&' (or end of string).
            marker = profile_url.find("uid=")
            if marker == -1:
                continue
            start = marker + 4
            end = profile_url.find("&", start)
            uid = profile_url[start:] if end == -1 else profile_url[start:end]
            url = response.urljoin("https://weibo.com/u/%s?is_hot=1" % uid)
            yield scrapy.Request(url, cookies=self.cookies,
                                 callback=self.parse_info)

    def parse_info(self, response):
        """Find the FM.view({...}) <script> payload that carries the profile
        page's homeFeed HTML fragment, and hand it to parse_info2().

        Returns the list of items produced (previously this list was built but
        never populated; the append has been restored).
        """
        items = []

        # Page <title> is '<username>的微博_微博'; strip the suffix.
        user_name = response.xpath('//title/text()').get()
        user_name = user_name[0:user_name.find('的微博_微博')]

        for script in response.xpath("//script/text()"):
            text = script.get()
            if text.find('Pl_Core_UserInfo') == -1:
                continue
            # Script body is 'FM.view({...})': drop the 8-char 'FM.view('
            # prefix and the trailing ')' to get bare JSON.
            datas = json.loads(text[8:-1])
            if datas['ns'] != 'pl.content.homeFeed.index':
                continue
            html = datas['html']
            # The embedded HTML is escaped; strip literal \r\n, \t and \\
            # sequences before parsing.
            html = html.replace(r'\r\n', '')
            html = html.replace(r'\t', '')
            html = html.replace(r'\\', '')
            item = self.parse_info2(user_name, html)
            if item is not None:
                items.append(item)

        return items

    def parse_info2(self, username, html):
        """Parse a homeFeed HTML fragment into a WeibospiderItem.

        Extracts the verification text ('微博认证'), the basic-info fields and
        the user tags; prints them (kept from the original behavior) and
        returns the item with its 'info' field set.
        """
        doc = etree.HTML(html)

        # Verified-account description; '无' ("none") when absent.
        verifly = doc.xpath('//p[@class="info"]/span/text()')
        verifly = verifly[0] if verifly else '无'
        user_info = username + '|-|' + '微博认证：' + verifly + '|-|'

        # Basic info fields, whitespace removed; each field keeps its
        # trailing ';' (original format preserved).
        for raw in doc.xpath('//span[@class="item_text W_fl"]/text()'):
            user_info = user_info + re.sub(r"[\r\n\t ]", "", raw) + ';'
        user_info = user_info + '|-|'

        # Tags, comma-joined.  BUG FIX: the old code sliced user_label[0:-1]
        # unconditionally, which turned the no-label sentinel '无' into ''.
        labels = doc.xpath('//a[@target="_blank"]/text()')
        user_label = ','.join(labels) if labels else '无'

        print(user_info)
        print(user_label)
        print('--------------------------------------------')

        item = WeibospiderItem()
        item['info'] = user_info
        return item