import requests
import json
import time
import csv
import xlrd
import xlwt
from urllib.parse import quote
import re
# HTTP headers shared by every request below: a desktop Chrome User-Agent
# (presumably so the requests look like a real browser to Weibo — confirm).
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
}


def get_uid_by_name(nickname):
    """Look up a Weibo user's uid via the m.weibo.cn search API.

    Args:
        nickname: screen name to search for; matched case-insensitively
            and ignoring spaces.

    Returns:
        (screen_name, uid) for an exact match, otherwise (nickname, '').
    """
    # Percent-encode the nickname: it is spliced into an already-encoded
    # `containerid` query value, so raw CJK/special characters (&, =, #,
    # spaces) would corrupt the URL. `quote` is imported at module top.
    url = ('https://m.weibo.cn/api/container/getIndex'
           '?containerid=100103type%3D3%26q%3D{}%26t%3D0&page_type=searchall'
           ).format(quote(nickname, safe=''))
    resp = requests.get(url=url, headers=headers, timeout=10)
    time.sleep(5)  # crude rate limiting between API calls
    resp.encoding = 'utf-8'
    data = json.loads(resp.text)
    if data['ok']:
        target = nickname.lower().replace(' ', '')
        for card in data['data']['cards']:
            # Search results nest users inside optional card_group entries.
            for c in card.get('card_group') or []:
                user = c.get('user')
                if user and user['screen_name'].lower().replace(' ', '') == target:
                    return user['screen_name'], user['id']
    # No exact match: echo the nickname back with an empty uid.
    return nickname, ''

def get_uid_by_name2(nickname):
    """Look up a Weibo user's uid via the s.weibo.com suggestion API.

    Retries the request once after a back-off if the first response
    carries no data.

    Args:
        nickname: screen name to search for; matched case-insensitively
            and ignoring spaces.

    Returns:
        (u_name, u_id) for an exact match, otherwise (nickname, '').
    """
    # Percent-encode the nickname so special characters cannot break the
    # query string (`quote` is imported at module top).
    url = ('https://s.weibo.com/ajax/topsuggest.php'
           '?key={}&_k=155011120259629&uid=5447625353').format(quote(nickname, safe=''))

    def _fetch():
        # The endpoint returns a JSONP-style wrapper; strip it off to get
        # the raw JSON payload before parsing.
        resp = requests.get(url=url, headers=headers, timeout=10)
        resp.encoding = 'utf-8'
        real_str = resp.text.replace('try{window.&(', '').replace(');}catch(e){}', '')
        return json.loads(real_str)

    data = _fetch()
    time.sleep(2)  # crude rate limiting between API calls
    if not data['data']:
        time.sleep(5)  # back off, then retry exactly once
        data = _fetch()
        if not data['data']:
            return nickname, ''
    if data['code'] == 100000 and data['data']['user']:
        target = nickname.lower().replace(' ', '')
        for user in data['data']['user']:
            if user['u_name'].lower().replace(' ', '') == target:
                return user['u_name'], user['u_id']
    # No exact match: echo the nickname back with an empty uid.
    return nickname, ''

def main(file_path, save_path):
    """Crawl Weibo uids for every nickname listed in an Excel sheet.

    Reads nicknames (column 9) and their recorded uids (column 8) from the
    first sheet, looks each nickname up via `get_uid_by_name`, appends one
    JSON line per user to `save_path`, collects usable uids in 'ids.txt',
    and logs rows whose crawled identity disagrees with the sheet to
    'error.json'.

    Args:
        file_path: path of the input Excel workbook.
        save_path: path of the JSON-lines result file (opened in append mode).
    """
    with xlrd.open_workbook(file_path) as f:
        sheet = f.sheet_by_index(0)
    unames = sheet.col_values(9)[1:]  # skip the header row
    uids = sheet.col_values(8)[1:]
    for i in range(len(uids)):
        # Normalise uid cells: Excel numbers arrive as floats, and some
        # cells are free text with a trailing numeric id.
        try:
            uids[i] = int(uids[i])
        except (ValueError, TypeError):
            pass
        try:
            # Pull a trailing numeric run out of text cells (raw string
            # avoids the invalid-escape warning '\d' would raise).
            uids[i] = int(re.findall(r'[\d.]*$', str(uids[i]))[0])
        except (ValueError, IndexError):
            pass
        if uids[i] == '(空)' or uids[i] == 3:  # placeholder cells → no uid
            uids[i] = ''
    dic = dict(zip(unames, uids))
    # Drop a known junk row key; tolerate its absence instead of raising.
    dic.pop(-3.0, None)
    print(dic)
    # `with` guarantees the result file is closed even if the loop raises.
    with open(save_path, 'a', encoding='utf-8') as save_file:
        for nickname, user_id in dic.items():
            try:
                uname, uid = get_uid_by_name(nickname)
            except Exception as e:
                # Best-effort crawl: log, back off, continue with next name.
                print(e)
                time.sleep(10)
                continue
            data = {'origin_uname': nickname, 'origin_uid': user_id,
                    'crawl_uname': uname, 'crawl_uid': uid}
            str_data = json.dumps(data, ensure_ascii=False) + '\n'
            save_file.write(str_data)
            save_file.flush()
            with open('ids.txt', 'a', encoding='utf-8') as fp1:
                # Prefer the freshly crawled uid; fall back to the sheet's.
                if data['crawl_uid']:
                    fp1.write(str(data['crawl_uid']) + '\n')
                elif data['origin_uid']:
                    fp1.write(str(data['origin_uid']) + '\n')
                fp1.flush()
            # Record rows whose crawled identity disagrees with the sheet.
            if (data['origin_uname'] == '无'
                    or (data['origin_uid'] and data['origin_uid'] != data['crawl_uid'])
                    or (data['origin_uid'] == '' and data['crawl_uid'] == '')
                    or (data['origin_uname'] != data['crawl_uname']
                        and data['origin_uid'] != data['crawl_uid'])):
                with open('error.json', 'a', encoding='utf-8') as fp2:
                    fp2.write(str_data)
                    fp2.flush()

if __name__ == '__main__':
    # nicknames_path: txt file with the nicknames to crawl, one per line.
    # (For the Excel workflow instead, call main(xlsx_path, 'result.json');
    # results are saved as JSON lines.)
    nicknames_path = 'nicknames.txt'
    with open(nicknames_path, 'r', encoding='utf-8') as fp:
        names = [line.strip() for line in fp]
    for name in names:
        # Try the fast suggestion API first; fall back to the mobile
        # search API when it yields no uid.
        uname, uid = get_uid_by_name2(name)
        if not uid:
            uname, uid = get_uid_by_name(name)
        print(uname, uid)