{
 "cells": [
  {
   "cell_type": "code",
   "id": "6dff02b91603fdee",
   "metadata": {},
   "source": [
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "import time\n",
    "import re\n",
    "import random\n",
    "import concurrent.futures\n",
    "\n",
    "MAX_RETRIES = 3\n",
    "USER_AGENTS = [\n",
    "    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',\n",
    "    # Add more user-agents as needed\n",
    "]\n",
    "\n",
    "headers = {'User-Agent': random.choice(USER_AGENTS)}\n",
    "\n",
    "def safe_request(url, timeout=10):\n",
    "    \"\"\"Fetch ``url`` and return the response body as text.\n",
    "\n",
    "    Retries up to MAX_RETRIES times on any requests error, sleeping 1s\n",
    "    between attempts.  Returns None when every attempt fails, so callers\n",
    "    must check for None before parsing the result.\n",
    "\n",
    "    :param url: absolute URL to fetch\n",
    "    :param timeout: per-request timeout in seconds; without an explicit\n",
    "        timeout, requests.get() can block indefinitely\n",
    "    :return: response body text, or None after MAX_RETRIES failures\n",
    "    \"\"\"\n",
    "    for _ in range(MAX_RETRIES):\n",
    "        try:\n",
    "            # An explicit timeout prevents one dead connection from\n",
    "            # hanging the whole scrape (requests has no default timeout).\n",
    "            response = requests.get(url, headers=headers, timeout=timeout)\n",
    "            response.raise_for_status()\n",
    "            return response.text\n",
    "        except requests.exceptions.RequestException:\n",
    "            time.sleep(1)  # brief pause before retrying\n",
    "    return None\n",
    "\n",
    "\n",
    "# Resolve an editions href to the corresponding game name\n",
    "def Judge_Olympic_Game(id):\n",
    "    \"\"\"Resolve an editions href (e.g. \"/editions/61\") to a game name.\n",
    "\n",
    "    Scrapes the Olympedia editions index and returns\n",
    "    \"<City> <Year> Olympic Games\" for the row whose city link matches\n",
    "    ``id``, or \"\" when the id is not found or the index page cannot\n",
    "    be fetched.\n",
    "    \"\"\"\n",
    "    index_url = \"https://www.olympedia.org/editions/\"\n",
    "    htmltext = safe_request(index_url)\n",
    "    # safe_request returns None after exhausting its retries; without\n",
    "    # this guard BeautifulSoup(None) would raise a TypeError.\n",
    "    if htmltext is None:\n",
    "        return \"\"\n",
    "    soup = BeautifulSoup(htmltext, 'html.parser')\n",
    "    # All rows of the Olympic-editions table.\n",
    "    rows = soup.find_all('table', class_='table table-striped')[0].find_all('tr')\n",
    "    game_name = \"\"\n",
    "    # Skip the header row (rows[0]).\n",
    "    for row in rows[1:]:\n",
    "        td = row.find_all('td')\n",
    "        year = td[1].get_text().strip()\n",
    "        city = td[2].get_text().strip()\n",
    "        # The city cell links to /editions/<n>; compare it to the id.\n",
    "        if id == td[2].find_all('a')[0]['href']:\n",
    "            game_name = city + \" \" + year + \" Olympic Games\"\n",
    "            break\n",
    "    return game_name\n",
    "\n",
    "# Fetch detailed data for every athlete of one country\n",
    "def personal_information(html, game_name, country):\n",
    "    \"\"\"Parse one country's results page and append one CSV line per athlete.\n",
    "\n",
    "    :param html: HTML of a country's results page for one edition\n",
    "    :param game_name: e.g. \"Tokyo 1964 Olympic Games\"; also the CSV filename\n",
    "    :param country: country abbreviation written into each CSV line\n",
    "\n",
    "    Side effect: appends rows to \"./<game_name>.csv\" (no header row is\n",
    "    written).  Fetches each athlete's biography page via safe_request.\n",
    "    \"\"\"\n",
    "    # Parse the country's participants page\n",
    "    soup = BeautifulSoup(html, 'html.parser')\n",
    "    # All rows of the participants table\n",
    "    tr = soup.find_all('table', class_='table')[0].find_all('tr')\n",
    "    # Event name; carried over to later rows whose event cell is blank\n",
    "    Event = \"\"\n",
    "    # Medal, if any (Gold, Silver or Bronze)\n",
    "    medal = \"\"\n",
    "    # Sport (discipline); set by heading rows that contain an <h2>\n",
    "    Sports = \"\"\n",
    "    # Extract the 4-digit year from the game name\n",
    "    pr = \"([0-9]{4})\"\n",
    "    game_year = re.findall(pr, game_name)[0]\n",
    "    # Create/append a CSV file in the current folder named after the game\n",
    "    with open(\"./\" + game_name + \".csv\", \"a+\", encoding=\"utf-8\") as f:\n",
    "        # Iterate every row, including row 0 (the table has no real header)\n",
    "        for j in tr[0:]:\n",
    "            # All cells of this row\n",
    "            td = j.find_all('td')  # table cells\n",
    "            # An <h2> in the first cell marks a sport-heading row\n",
    "            h2 = td[0].find_all('h2')\n",
    "            # No <h2> means this row holds event/athlete data\n",
    "            if len(h2) == 0:\n",
    "                # Rows of length 6 (team) or 4 (individual) carry a medal cell\n",
    "                if len(td) == 6 or len(td) == 4:\n",
    "                    medal = td[3].get_text().strip()\n",
    "                # Event name of the current row\n",
    "                Event_Name = td[0].get_text().strip()\n",
    "                # A blank event cell usually means a team continuation row\n",
    "                if Event_Name == \"\":\n",
    "                    # Blank: reuse the event name of the previous row\n",
    "                    Event_Name = Event\n",
    "                else:\n",
    "                    # Not blank: remember it for following rows\n",
    "                    Event = Event_Name\n",
    "                # Links in the second cell (0 = continuation of a team row,\n",
    "                # 1 = a single athlete, >1 = a whole team roster)\n",
    "                a = td[1].find_all('a')\n",
    "                if len(a) != 0:\n",
    "                    # Walk the link list: one athlete per <a>\n",
    "                    for i in range(0, len(a)):\n",
    "                        # Athlete name\n",
    "                        Athlete = a[i].get_text().strip()\n",
    "                        # Athlete id (second path segment of the href)\n",
    "                        Athlete_id = a[i]['href'].split(\"/\")[2]\n",
    "                        # Build the athlete's page URL from the href\n",
    "                        Href = \"http://www.olympedia.org\" + a[i]['href']\n",
    "                        # Fetch the athlete's page\n",
    "                        Athlete_html = safe_request(Href)\n",
    "                        # Parse the athlete's page\n",
    "                        soup2 = BeautifulSoup(Athlete_html, 'html.parser')\n",
    "                        # Rows of the athlete's biodata table\n",
    "                        tr2 = soup2.find_all('table', class_=\"biodata\")[0].find_all(\"tr\")\n",
    "                        # Athlete's sex\n",
    "                        Sex = \"\"\n",
    "                        # Athlete's date of birth\n",
    "                        Born = \"\"\n",
    "                        # Athlete's height\n",
    "                        Height = \"\"\n",
    "                        # Athlete's weight\n",
    "                        Weight = \"\"\n",
    "                        # Athlete's nationality (NOC)\n",
    "                        Nationality = \"\"\n",
    "                        # Walk all biodata rows (every row is a data row)\n",
    "                        for j2 in tr2[0:]:\n",
    "                            # Label cell (<th>) of this biodata row\n",
    "                            th2 = j2.find_all('th')[0].get_text().strip()\n",
    "                            # Value cell (<td>) that belongs to the label\n",
    "                            td2 = j2.find_all('td')[0].get_text().strip()\n",
    "                            # \"Sex\" row: the athlete's sex\n",
    "                            if th2 == \"Sex\":\n",
    "                                Sex = td2\n",
    "                            # \"Born\" row: date of birth (commas would break the CSV)\n",
    "                            elif th2 == \"Born\":\n",
    "                                Born = td2.replace(\",\", \" \")\n",
    "                            # \"Measurements\" row: height and/or weight\n",
    "                            elif th2 == \"Measurements\":\n",
    "                                Measurements = td2\n",
    "                                # A \"/\" separates height from weight\n",
    "                                if \"/\" in Measurements:\n",
    "                                    # Height before the slash\n",
    "                                    Height = Measurements.split(\"/\")[0].strip()\n",
    "                                    # Weight after the slash\n",
    "                                    Weight = Measurements.split(\"/\")[1].strip()\n",
    "                                else:\n",
    "                                    # Only one measurement present:\n",
    "                                    # \"cm\" marks a height value\n",
    "                                    if \"cm\" in Measurements:\n",
    "                                        Height = Measurements.strip()\n",
    "                                    # \"kg\" marks a weight value\n",
    "                                    elif \"kg\" in Measurements:\n",
    "                                        Weight = Measurements.strip()\n",
    "                            # \"NOC\" row: nationality\n",
    "                            elif th2 == \"NOC\":\n",
    "                                # Replace commas so the CSV stays intact\n",
    "                                Nationality = td2.replace(\",\", \" \")\n",
    "                        # Assemble the CSV line (commas in the event name are\n",
    "                        # replaced with spaces to keep the CSV well-formed)\n",
    "                        information = Athlete_id + \",\" + Athlete + \",\" + Sex + \",\" + Born + \",\" + Height + \",\" + Weight + \",\" + Nationality + \",\" + country + \",\" + game_name + \",\" + game_year + \",\" + Sports + \",\" + Event_Name.replace(\n",
    "                            \",\", \" \") + \",\" + medal + \"\\n\"\n",
    "                        # Write the athlete's record to the file\n",
    "                        f.write(information)\n",
    "\n",
    "            else:\n",
    "                # An <h2> row carries the sport name for the rows below it\n",
    "                Sports = h2[0].get_text().strip()\n",
    "\n",
    "def fetch_country_info(country_url, game_name, country):\n",
    "    \"\"\"Download one country's results page and record its athletes.\n",
    "\n",
    "    Delegates parsing and CSV output to personal_information; silently\n",
    "    skips the country when the page could not be fetched.\n",
    "    \"\"\"\n",
    "    page_html = safe_request(country_url)\n",
    "    if not page_html:\n",
    "        return\n",
    "    personal_information(page_html, game_name, country)\n",
    "    print(country + \"信息获取成功\")\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # Wall-clock start time for the whole scrape.\n",
    "    s_time = time.time()\n",
    "    # Olympedia edition id to scrape.\n",
    "    id = \"61\"\n",
    "    # Full URL of the edition page.\n",
    "    url = \"https://www.olympedia.org/editions/\" + id\n",
    "    # Relative href (\"/editions/<id>\") as it appears in anchor tags.\n",
    "    a_url = url.replace(\"https://www.olympedia.org\", \"\")\n",
    "    # Resolve the edition href to a human-readable game name.\n",
    "    game_name = Judge_Olympic_Game(a_url)\n",
    "    # Fetch the page listing every participating country.\n",
    "    html = safe_request(\"https://www.olympedia.org/editions/\" + id + \"/countries/\")\n",
    "    # safe_request returns None on failure; BeautifulSoup(None) would\n",
    "    # otherwise raise a TypeError, so abort with a clear message.\n",
    "    if html is None:\n",
    "        raise SystemExit(\"Failed to fetch the countries page; aborting.\")\n",
    "    soup = BeautifulSoup(html, 'html.parser')\n",
    "    # All rows of the countries table.\n",
    "    tr = soup.find_all('table', class_='table')[0].find_all('tr')\n",
    "    # URLs of each country's results page for this edition.\n",
    "    list_ = []\n",
    "    # Skip the header row (tr[0]).\n",
    "    for j in tr[1:]:\n",
    "        td = j.find_all('td')\n",
    "        # The first cell links to the country page when the country competed.\n",
    "        a = td[0].find_all('a')\n",
    "        if len(a) != 0:\n",
    "            # Country abbreviation (NOC code).\n",
    "            country = a[0].get_text().strip()\n",
    "            # Build the country's results-page URL for this edition.\n",
    "            country_url = \"https://www.olympedia.org/countries/\" + country + \"/editions/\" + id\n",
    "            list_.append(country_url)\n",
    "    # Number of participating countries found.\n",
    "    print(len(list_))\n",
    "\n",
    "    with concurrent.futures.ThreadPoolExecutor() as executor:\n",
    "        # Fetch all countries concurrently; the NOC code is the 5th path\n",
    "        # segment of each URL (index 4 after splitting on \"/\").\n",
    "        futures = [executor.submit(fetch_country_info, li, game_name, li.split(\"/\")[4]) for li in list_]\n",
    "\n",
    "        # Block until every country has been processed.\n",
    "        concurrent.futures.wait(futures)\n",
    "\n",
    "    print('总耗时: %s' % (time.time() - s_time))"
   ],
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
