#!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
import re
import pymongo
import pandas as pd
import numpy as np
import json
# Module-level state shared by every crawler function below.
client = pymongo.MongoClient("localhost")
db = client["weibo"]

# Session cookies for weibo.cn, read from cookie.txt (a JSON object of
# cookie-name -> value). Falls back to an empty dict when the file is
# missing or malformed so the crawler still runs (unauthenticated).
cookies = {}
try:
    with open("cookie.txt", "r", encoding="utf-8") as f:
        cookies = json.load(f)
except (OSError, ValueError) as e:
    # OSError: file missing/unreadable; ValueError covers JSONDecodeError.
    print("could not load cookie.txt:", e)
print(cookies)


# Current proxy mapping for requests (e.g. {"http": "1.2.3.4:8080"});
# lazily populated / refreshed via get_proxy_ip() when requests fail.
proxy = None
def get_user_info(id, retries=0):
    """Fetch the weibo.cn profile page for one user id and store the
    extracted fields in MongoDB via saveToMongo.

    :param id: weibo numeric user id (string); None is ignored.
    :param retries: internal attempt counter for the retry logic; callers
        use the default. (The original code kept the counter in a local
        `count` that was reset to 0 on every recursive retry, so the
        3-attempt cap never took effect — passing it as a parameter makes
        the cap real while keeping the one-argument call signature.)
    """
    global proxy
    if id is None:
        return
    url = "https://weibo.cn/{id}/info".format(id=id)
    if proxy is None:
        proxy = get_proxy_ip()
    print(proxy, id)
    try:
        response = requests.get(url, cookies=cookies, proxies=proxy)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'lxml')
            # Each field is the list of text nodes containing its label;
            # downstream consumers deal with the raw label strings.
            user = {
                "gender": soup.find_all(text=re.compile("性别")),
                "name": soup.find_all(text=re.compile("昵称")),
                "location": soup.find_all(text=re.compile("地区")),
                "birth": soup.find_all(text=re.compile("生日")),
                "tags": soup.find_all(text=re.compile("达人")),
                "sig": soup.find_all(text=re.compile("简介")),
            }
            print(user)
            saveToMongo(user)
        elif retries < 3:
            # Non-200 usually means the proxy is blocked: rotate and retry.
            proxy = get_proxy_ip()
            get_user_info(id, retries + 1)
    except Exception as e:
        # Network errors: log, rotate the proxy, and retry up to 3 times.
        print(e)
        if retries < 3:
            proxy = get_proxy_ip()
            get_user_info(id, retries + 1)



def get_page_html(i):
    """Download page *i* of the weibo.cn keyword search results.

    Returns the page HTML on HTTP 200, or None on any error or non-200
    status (errors are printed, not raised).
    """
    print("currentpage", i)
    url = "https://weibo.cn/search/mblog?keyword=小猪佩奇&mp=100&page={page}".format(page=i)

    try:
        response = requests.get(url, cookies=cookies)
    except Exception as e:
        # Best-effort: a failed page is simply skipped by the caller.
        print(e)
        return None
    return response.text if response.status_code == 200 else None

def parse_page_detail(html):
    """Pull every (name, profile url) pair out of one search-result page
    and persist each as a small document via saveToMongo.
    """
    # Example profile link: https://weibo.cn/u/3900273583
    # Author links are anchors with class "nk" inside result rows (class "c").
    soup = BeautifulSoup(html, 'lxml')
    for anchor in soup.select(".c .nk"):
        record = {
            "name": anchor.get_text(),
            "url": anchor.attrs["href"],
        }
        print(record)
        saveToMongo(record)

def get_data(TABLE):
    """Return a cursor over all documents in collection *TABLE*.

    The Mongo-internal ``_id`` field is projected out so the results
    convert cleanly into a pandas DataFrame.
    """
    return db[TABLE].find({}, {'_id': False})

def saveToMongo(result):
    """Insert one document into the 小猪佩奇info collection.

    Errors are logged and swallowed so a single bad record does not
    abort the whole crawl.
    """
    try:
        # Collection.insert() was deprecated and removed in pymongo 4;
        # insert_one() is the supported single-document API.
        db["小猪佩奇info"].insert_one(result)
        print("save result success")
    except Exception as e:
        # Include the actual error instead of hiding it behind a bare except.
        print("save mongo error", e)

def get_proxy_ip(max_attempts=3):
    """Fetch a fresh proxy from the local proxy-pool service.

    :param max_attempts: how many times to ask the pool before giving up.
    :return: a dict suitable for requests' ``proxies=`` argument,
        e.g. {'http': '1.2.3.4:8080'}, or None when no proxy could be
        obtained.

    The original implementation recursed with no base case (infinite
    recursion whenever http://127.0.0.1:5000/get was down) and discarded
    the recursive call's return value; a bounded loop fixes both.
    """
    for _ in range(max_attempts):
        try:
            res = requests.get("http://127.0.0.1:5000/get")
            if res.status_code == 200:
                return {'http': res.text}
        except Exception as e:
            print(e)
    return None


def main():
    """Entry point: crawl the keyword search results for related users.

    The commented calls are alternate one-off tasks (single-profile fetch,
    or profile crawl driven by a previously collected url collection).
    """
    # get_user_info("5829937896")
    # get_user_info_date("小猪佩奇url")
    get_all_rel_users()



def get_user_info_date(TABLE):
    """Crawl profile details for every user url stored in collection *TABLE*.

    Each stored document's "url" field is reduced to a numeric user id
    (None for urls that don't match), then fetched one by one.
    """
    frame = pd.DataFrame(list(get_data(TABLE)))
    frame["id"] = frame["url"].apply(filter_url)
    ids = list(frame["id"])
    print(ids)
    for uid in ids:
        get_user_info(uid)


def filter_url(url):
    """Extract the numeric user id from a weibo.cn profile URL.

    :param url: e.g. "https://weibo.cn/u/3900273583"; may be None or any
        non-matching string.
    :return: the id string ("3900273583"), or None when *url* is not a
        string or does not match the profile-URL pattern.

    Fixes vs. the original: dots in the pattern are escaped (plain "."
    matched any character), ``match.group(1)`` replaces the roundabout
    ``groups(1)[0]``, and the non-string/non-matching cases are handled
    explicitly instead of via a bare ``except``.
    """
    if not isinstance(url, str):
        return None
    m = re.match(r"https://weibo\.cn/u/(.*)", url)
    return m.group(1) if m else None


def get_all_rel_users():
    """Walk search-result pages 1..99, storing every author link found."""
    page = 1
    while page < 100:
        content = get_page_html(page)
        if content:  # skip pages that failed to download
            parse_page_detail(content)
        page += 1


# Run the crawl only when executed as a script, not on import.
if __name__ == '__main__':
    main()