# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 14:24:07 2019

@author: Lenovo
"""

#%%
import requests
import pandas as pd
import json
import re
from bs4 import BeautifulSoup
import copy


def get_word(tmp_str):
    """Tokenize *tmp_str* into Chinese characters and ASCII word tokens.

    Parameters
    ----------
    tmp_str : str
        Mixed Chinese/English text (e.g. a Weibo post body).

    Returns
    -------
    dict with keys:
        ch_word     -- list of individual CJK characters found in the input
        eng_word    -- list of ASCII letter/digit tokens
        only_word   -- number of characters left after stripping CJK,
                       ASCII alphanumerics/spaces and recognized punctuation
        remain_char -- those leftover characters themselves
    """
    print(tmp_str)
    # Strings are immutable, so the original copy.copy() was unnecessary.
    str_split = tmp_str
    # Each match of a single-character class is already a 1-char string,
    # so no extra emptiness filter is needed.
    ch_word = re.findall(u'[\u4e00-\u9fa5]', str_split)

    # Raw string stops Python from warning about invalid escapes such as
    # "\s"; the regex itself is unchanged.  Collapses whitespace and both
    # ASCII and full-width punctuation into single spaces.
    str_split = re.sub(r"[\s+\.\:\!\-\|\/_,\)$%^*(+\"\']+|[+——！：，。？“”《》【】​、~@#￥%……&*（）]+", " ", str_split)
    print("str1_step1", str_split)
    # Drop the CJK runs, leaving only the Latin tokens separated by spaces.
    str_split = re.sub(u'[\u4e00-\u9fa5]+', " ", str_split)
    print("str1_step2", str_split)
    eng_word = [word for word in str_split.split(" ") if len(word) > 0]
    # Whatever survives this strip is an unrecognized character.
    str_split = re.sub(r'[a-zA-Z0-9 ]+', "", str_split)
    print("str1_step3", str_split)

    return {'ch_word': ch_word, 'eng_word': eng_word,
            'only_word': len(str_split), 'remain_char': str_split}

def get_weibo_content(kw):
    """Scrape the first result page of a Weibo keyword search.

    Parameters
    ----------
    kw : str
        Search keyword, interpolated into the s.weibo.com search URL.

    Returns
    -------
    list of dict
        One row per post containing the plain post text, its source URL
        and timestamp string, plus the word statistics from get_word().
    """
    search_url = "https://s.weibo.com/weibo/{}?topnav=1&wvr=6&b=1&page=1".format(kw)
    result = []
    # Bug fix: without a timeout one stalled connection hangs the whole run.
    r = requests.get(search_url, timeout=10)

    html_content = BeautifulSoup(r.text, 'lxml')
    weibo_div_content = html_content.find_all('div', 'content')

    for item in weibo_div_content:
        txt = item.find('p', 'txt')
        # Bug fix: some 'content' divs carry no text paragraph; the original
        # crashed with AttributeError on txt.find_all(...).
        if txt is None:
            continue
        tag_as = txt.find_all('a')
        print("tag_as:>>>>", tag_as)
        # Remove embedded <a> tags (topics, mentions, "full text" links) so
        # that get_text() returns only the plain post body.
        for tag_a in tag_as:
            tag_a.extract()

        p_from = item.find('p', 'from')
        if p_from is None:
            # No source/time line -> skip this post, as before.
            continue
        p_from_a = p_from.find('a')
        post_url = p_from_a['href']
        post_time = p_from_a.get_text().strip()
        print("p_from:>>>>>", post_url)
        print("p_from:>>>>>", post_time)
        # NOTE(review): the original had a dead `if "年" not in time: if "月"
        # in time: pass` branch here (a no-op, presumably an unfinished
        # date-normalization attempt); removed.

        content_text = txt.get_text().strip()
        wordlist = get_word(content_text)
        print("time_ur:>>>>", post_url)

        result.append({
            "kw": kw,
            "text": content_text,
            "url": post_url,
            "time": post_time,
            "ch_word": wordlist['ch_word'],
            "eng_word": wordlist['eng_word'],
            "ch_word_len": len(wordlist['ch_word']),
            "eng_word_len": len(wordlist['eng_word']),
            "only_word": wordlist['only_word'],
            "remain_char": wordlist['remain_char'],
            "stat_len": len(wordlist['ch_word']) + len(wordlist['eng_word']),
        })
    return result
# Demo run: scrape one keyword, flatten the rows into a DataFrame and save.
result = get_weibo_content('airports')
# Round-trips the records through JSON so the list-valued cells
# (ch_word/eng_word) load cleanly into the DataFrame.
result_df = pd.read_json(json.dumps(result), orient='records')
print(result_df)
result_df.to_excel('D:\\vs_project\\test\\result.xlsx')

#%%
from random import sample
import time

with open('D:\\vs_project\\test\\words.txt', 'r') as f:
    words = f.readlines()

# Bug fix: readlines() keeps the trailing '\n' on every keyword, which ended
# up embedded in the search URL; strip each line (and drop blanks) first.
word_sample = sample([w.strip() for w in words if w.strip()], 50)
result = []
# enumerate() replaces the manual `i = 0; i = i + 1` counter.
for i, word in enumerate(word_sample):
    print("#######################", i)
    result = result + get_weibo_content(word)
    # Throttle requests so the search endpoint is not hammered.
    time.sleep(0.5)
result_df = pd.read_json(json.dumps(result), orient='records')
result_df.to_excel('D:\\vs_project\\test\\result.xlsx')


#%%
# Standalone sanity check of the tokenizing regexes on a mixed zh/en snippet.
str_split = '''有的人，来约克去参观景点，而有的人就住在了景点里，
酒店格雷斯酒店（Grays Court），，。？、
让您感受英国国王詹姆斯一世来过的地方，看过的风景，品尝过的美
'''

print(re.findall(u'[\u4e00-\u9fa5]', str_split))
# Individual CJK characters of the snippet.
ch_word = re.findall(u'[\u4e00-\u9fa5]', str_split)
# Bug fix: raw string avoids invalid-escape warnings (e.g. "\s") on modern
# Pythons; the regex itself is unchanged.  Punctuation/whitespace -> space.
str_split = re.sub(r"[\s+\.\!\/_,$%^*(+\"\']+|[+——！，。？、~@#￥%……&*（）]+", " ", str_split)
# Drop the CJK runs, leaving only the Latin tokens.
str_split = re.sub(u'[\u4e00-\u9fa5]+', "", str_split)
eng_word = [word for word in str_split.split(" ") if len(word) > 0]
print(eng_word)