{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [],
   "source": [
    "from lxml import etree\n",
    "import numpy as np\n",
    "import os\n",
    "import csv\n",
    "import re\n",
    "import requests\n",
    "import datetime\n",
    "import time\n",
    "import random\n",
    "from multiprocessing.dummy import Pool as TreadPool\n",
    "from threading import Semaphore\n",
    "import concurrent.futures\n",
    "from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, wait, ALL_COMPLETED"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fetch a weibo.cn page once, with no politeness delay (used for the page-count probe).\n",
    "def getHTMLResponse(url):\n",
    "    \"\"\"GET `url` with browser-like headers and return the requests.Response.\n",
    "\n",
    "    The login cookie is read from the WEIBO_COOKIE environment variable;\n",
    "    a hardcoded session cookie (a leaked credential) used to live here.\n",
    "    \"\"\"\n",
    "    headers = {\n",
    "        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',\n",
    "        # Never hardcode credentials -- supply your own login cookie via the environment.\n",
    "        'cookie': os.environ.get('WEIBO_COOKIE', ''),\n",
    "    }\n",
    "    # A timeout keeps the crawler from hanging forever on a stalled connection.\n",
    "    response = requests.get(url, headers=headers, timeout=30)\n",
    "    return response"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fetch a weibo.cn page for the thread pool, then sleep to throttle requests.\n",
    "def getHTMLTread(url):\n",
    "    \"\"\"GET `url` with browser-like headers, then sleep a random 3-6 seconds.\n",
    "\n",
    "    The sleep is a crude rate limit, since no crawl framework manages delays\n",
    "    here. The login cookie is read from the WEIBO_COOKIE environment variable;\n",
    "    a hardcoded session cookie (a leaked credential) used to live here.\n",
    "    \"\"\"\n",
    "    headers = {\n",
    "        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',\n",
    "        # Never hardcode credentials -- supply your own login cookie via the environment.\n",
    "        'cookie': os.environ.get('WEIBO_COOKIE', ''),\n",
    "    }\n",
    "    # A timeout keeps the crawler from hanging forever on a stalled connection.\n",
    "    response = requests.get(url, headers=headers, timeout=30)\n",
    "    # Politeness delay between requests issued by the worker threads.\n",
    "    time.sleep(random.randint(3, 6))\n",
    "    return response"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the list of daily search URLs.\n",
    "def init_url_by_keywords(keywords=None, start_date=\"2019-12-30\", end_date=\"2020-01-10\"):\n",
    "    \"\"\"Return one weibo.cn search URL per keyword per day in [start_date, end_date).\n",
    "\n",
    "    Args:\n",
    "        keywords: list of search terms; defaults to ['新冠疫情'].\n",
    "        start_date, end_date: 'YYYY-MM-DD' strings bounding the crawl window\n",
    "            (end_date is exclusive).\n",
    "\n",
    "    Returns:\n",
    "        List of page-1 search URLs, one per (day, keyword) pair.\n",
    "    \"\"\"\n",
    "    if keywords is None:\n",
    "        keywords = ['新冠疫情']\n",
    "    date_start = datetime.datetime.strptime(start_date, '%Y-%m-%d')\n",
    "    date_end = datetime.datetime.strptime(end_date, '%Y-%m-%d')\n",
    "    one_day = datetime.timedelta(days=1)\n",
    "    url_format = \"https://weibo.cn/search/mblog?hideSearchFrame=&keyword={}\" \\\n",
    "                 \"&advancedfilter=1&starttime={}&endtime={}&sort=time&page=1\"\n",
    "    urls = []\n",
    "    while date_start < date_end:\n",
    "        next_day = date_start + one_day\n",
    "        for keyword in keywords:\n",
    "            urls.append(url_format.format(keyword, date_start.strftime(\"%Y%m%d\"), next_day.strftime(\"%Y%m%d\")))\n",
    "        # Bug fix: the date used to be advanced *inside* the keyword loop and by\n",
    "        # two days at a time (next_time + time_spread), silently skipping every\n",
    "        # other day of the requested window.\n",
    "        date_start = next_day\n",
    "    return urls"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parse every tweet node on one search-result page and write rows to the CSV.\n",
    "def printWeibo(html_content,csv_file):\n",
    "    \"\"\"Extract tweet fields from one weibo.cn search-result page.\n",
    "\n",
    "    For each <div class=\"c\" id=...> tweet node, pulls: author screen name,\n",
    "    concatenated text content, like/repost/comment counts, post time and\n",
    "    publishing tool; prints each as progress output and appends one row via\n",
    "    csv_file.writerow(...). A tweet that fails to parse is skipped after its\n",
    "    exception args are printed.\n",
    "\n",
    "    Args:\n",
    "        html_content: raw HTML (bytes or str) of one result page.\n",
    "        csv_file: csv.writer-like object exposing writerow(list).\n",
    "    \"\"\"\n",
    "    tree_node = etree.HTML(html_content)\n",
    "    tweet_nodes = tree_node.xpath('//div[@class=\"c\" and @id]')\n",
    "    for tweet_node in tweet_nodes:\n",
    "        try:\n",
    "            # The last <a class=\"nk\"> in the node is the author's screen name.\n",
    "            user_id = tweet_node.xpath('.//a[@class=\"nk\"]/text()')[-1]\n",
    "            print(user_id)\n",
    "\n",
    "            infos = tweet_node.xpath('.//span[@class=\"ctt\"]/text()')\n",
    "            content = \"\"\n",
    "            for i in infos:\n",
    "                content = content + i.strip(' ')\n",
    "            print(content)\n",
    "            # Likes: first number in link text like \"赞[212]\".\n",
    "            like_num_text = tweet_node.xpath('.//a[contains(text(),\"赞[\")]/text()')[-1]\n",
    "            like_match = re.search(r'\\d+',like_num_text)\n",
    "            like_num = like_match.group(0)\n",
    "            print(like_num)\n",
    "            # Reposts: first number in link text like \"转发[21]\".\n",
    "            repost_num_text = tweet_node.xpath('.//a[contains(text(),\"转发[\")]/text()')[-1]\n",
    "            repost_match = re.search(r'\\d+',repost_num_text)\n",
    "            repost_num = repost_match.group(0)\n",
    "            print(repost_num)\n",
    "            \n",
    "            # Comments: skip the \"原文\" (original-post) link, keep the last number.\n",
    "            comment_num_text = tweet_node.xpath('.//a[contains(text(),\"评论[\") and not(contains(text(),\"原文\"))]/text()')[-1]\n",
    "            comment_match = re.findall(r'\\d+',comment_num_text)\n",
    "            comment_num = comment_match[-1]\n",
    "            print(comment_num)\n",
    "            \n",
    "            # Post time and publishing tool share one <span class=\"ct\">,\n",
    "            # separated by a non-breaking space (\\xa0).\n",
    "            time_tool_info = tweet_node.xpath('.//span[@class=\"ct\"]/text()')[-1]\n",
    "            match = re.split(r'(\\xa0)',time_tool_info)\n",
    "            # re.split always yields a non-empty list; an IndexError on match[2]\n",
    "            # means no tool segment exists, so tool_info falls back to NaN.\n",
    "            if match:\n",
    "                try:\n",
    "                    time_info,tool_info = match[0],match[2]\n",
    "                    print(time_info)\n",
    "                    print(tool_info)\n",
    "                except:\n",
    "                    time_info = match[0]\n",
    "                    tool_info = np.nan\n",
    "                    print(time_info)\n",
    "            \n",
    "            csv_file.writerow([user_id,content,like_num,repost_num,comment_num,time_info,tool_info])\n",
    "        \n",
    "        except Exception as reason:\n",
    "            print(reason.args)\n",
    "            "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 93,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "url:https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20191230&endtime=20191231&sort=time&page=1 总页数：1\n",
      "url:https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200101&endtime=20200102&sort=time&page=1 总页数：1\n",
      "url:https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200103&endtime=20200104&sort=time&page=1 总页数：1\n",
      "url:https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200105&endtime=20200106&sort=time&page=1 总页数：1\n",
      "url:https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200107&endtime=20200108&sort=time&page=1 总页数：1\n",
      "url:https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200109&endtime=20200110&sort=time&page=1 总页数：1\n",
      "https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20191230&endtime=20191231&sort=time&page=1\n",
      "https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200101&endtime=20200102&sort=time&page=1\n",
      "https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200103&endtime=20200104&sort=time&page=1\n",
      "https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200105&endtime=20200106&sort=time&page=1\n",
      "https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200107&endtime=20200108&sort=time&page=1\n",
      "https://weibo.cn/search/mblog?hideSearchFrame=&keyword=新冠疫情&advancedfilter=1&starttime=20200109&endtime=20200110&sort=time&page=1\n",
      "开始写入\n",
      "钱辉_医学美容\n",
      ":随着年龄逐增，事情增多，精力和时间更加有限，微博私信回复比较少，还请大家谅解，除了前面微博提到的咨询途径如桃花源部落微博私信，还可以通过爱问医生平台我的个人主页简短沟通和咨询；如果大家需要预约（尤其是胎记方面），可以联系，希望对大家有帮助\n",
      "212\n",
      "21\n",
      "99\n",
      "01月01日 14:00\n",
      "来自钱辉_医学美容超话\n",
      "秋月仰望\n",
      ":2020年核心股票池（2020.4.26更新）受新冠疫情影响，全球金融市场激烈动荡，3月全球股市经历了几十年难见的股灾，中国股市也不能幸免，在欧美主要发达经济体采取强烈救市措施后，欧美股市大幅反弹，A股弱势反弹，目前依然处于惊慌后的底部震荡阶段，现在国内疫情基本控制住，欧美疫情高峰已过，5、6月份即将进入复工复产，但疫情对经济的影响依然存在不确定性，特别是供应链重构和需求恢复方面，中国做为全球工厂的不确定性特别明显，这也许是近期A股反弹远弱于欧美的原因吧，在疫情完全控制经济复苏前，A股估计会较长时间维持弱势或结构性行情，但最恐慌时机已过，投资者应该从恐慌中走出来，疫情的影响只是短期和阶段性的，中国做为全球的最大制造市场和消费市场，自我调节的能力特别强，同时国内目前已完全复工复产，但欧美主要经济体还处于疫情中，在某些方面中国反而从中受益，从近期国际局势看，美国在疫情后可能会加大对中国的敌对行动，中美存在脱钩的可能，国产替代会是未来的热点之一，投资以确定性方向为主，关注内需消费和确定性科技成长股为主线，重点关注5G、芯片、半导体、消费电子、新能源（含特斯拉）、软件、人工智能、生物医药等领域，下面是核心股票池：1、芯片、半导体（含设备、材料）:韦尔股份、闻泰科技、兆易创新、圣邦股份、三安光电、北方华创、华润微、中微公司、澜起科技、北京君正、汇顶科技、睿创微纳、华天科技、捷佳伟创、中颖电子；2、消费电子:立讯精密、信维通信、歌尔股份、蓝思科技；3、新能源、特斯拉等:宁德时代、亿纬锂能、恩捷股份、新宙邦、赣锋锂业、隆基股份、中环股份、三花智控、汇川技术；4、5G:中兴通讯、光环新网、深南电路、生益科技、东山精密；5、软件、计算机、云计算、网络安全等:金山办公、安恒信息、用友网络、浪潮信息、深信服、启明星辰、紫光股份、宝兰德、东华软件、华宇软件、恒生电子、创业慧康；6、人工智能:科大讯飞、中科创达、大华股份；7、游戏、传媒:芒果超媒、完美世界、三七互娱；8、医药:凯利泰、卫宁健康、泰格医药、生物股份、迈瑞医疗、乐普医疗、欧普康视、药明康德、恒瑞医药、爱尔眼科、康龙化成、南微医学、华兰生物、华海药业、普洛药业、健康元。\n",
      "113\n",
      "62\n",
      "24\n",
      "01月04日 17:24\n",
      "来自nova5Pro人像超级夜景\n",
      "finish get！耗时：{} 14.379142045974731\n"
     ]
    }
   ],
   "source": [
    "# CSV header row.\n",
    "HEADERS = [\"user_id\",\"content\",\"like_num\",\"repost_num\",\"comment_num\",\"time_info\",\"tool_info\"]\n",
    "# Output file path, relative to the notebook's working directory.\n",
    "FILE_PATH = \"WB_WH_Epidemic_situation.csv\"\n",
    "# One page-1 search URL per keyword per day.\n",
    "urls = init_url_by_keywords()\n",
    "# Fetched page HTML and the per-page URLs discovered via pagination.\n",
    "contents = []\n",
    "page_urls = []\n",
    "# Wall-clock timer for the whole crawl.\n",
    "begin_time = time.time()\n",
    "\n",
    "# Probe each daily URL once to learn how many result pages it has.\n",
    "for url in urls:\n",
    "    response = getHTMLResponse(url)\n",
    "    source = etree.HTML(response.content)\n",
    "    is_pages = source.xpath(r'//div[@id=\"pagelist\"]/form/div/text()')\n",
    "    # Bug fix: reset to 1 for every URL; previously a single-page day silently\n",
    "    # inherited the page count of the preceding multi-page day.\n",
    "    total_pagenum = 1\n",
    "    if len(is_pages):\n",
    "        page_text = is_pages[1]\n",
    "        total_pagenum = int(re.findall(r'/(.*?)页',page_text)[-1])\n",
    "    print(f\"url:{url} 总页数：{total_pagenum}\")\n",
    "    # Expand the day URL into one URL per result page.\n",
    "    for page_num in range(1,total_pagenum+1):\n",
    "        page_urls.append(response.url.replace('page=1', 'page={}'.format(page_num)))\n",
    "\n",
    "# Create the CSV file and stream parsed rows into it.\n",
    "with open(file=FILE_PATH,mode=\"w\",newline=\"\",encoding=\"utf-8\") as file:\n",
    "    csv_file = csv.writer(file)\n",
    "    csv_file.writerow(HEADERS)\n",
    "    # Fetch all pages concurrently (3 workers, each sleeping 3-6s per request).\n",
    "    with ThreadPoolExecutor(max_workers=3) as executor:\n",
    "        # Bug fix: zip against page_urls, not urls -- zipping with the shorter\n",
    "        # daily-URL list truncated the results and dropped every page beyond it.\n",
    "        for page_url,source in zip(page_urls, executor.map(getHTMLTread,page_urls)):\n",
    "            print(page_url)\n",
    "            contents.append(source.content)\n",
    "    print(\"开始写入\")\n",
    "    for content in contents:\n",
    "        printWeibo(content,csv_file)\n",
    "\n",
    "# Bug fix: use str.format -- the comma variant printed a literal '{}' before\n",
    "# the elapsed time (visible in the cell's stored output).\n",
    "print(\"finish get！耗时：{}\".format(time.time()-begin_time))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
