import json
import pandas as pd
from bs4 import BeautifulSoup
from common import get_structured_text, save_file, setup_session
import requests

from guanghua.common import save_txt_file

# Academic year -> site-internal query id, sent as the "yid" POST parameter
# to showlist.ashx by resolveWhereGo. Ids come from the site and must be
# extended manually when a new graduating year is published.
yearList = [
            {"year": "2024", "id": "277"},
            {"year": "2023", "id": "258"},
            {"year": "2022", "id": "229"},
            {"year": "2021", "id": "150"},
            {"year": "2020", "id": "147"},
            {"year": "2019", "id": "145"},
            {"year": "2018", "id": "143"},
            {"year": "2017", "id": "141"},
            {"year": "2016", "id": "139"},
            {"year": "2015", "id": "137"},
            {"year": "2014", "id": "135"},
            {"year": "2013", "id": "134"}
]

# countryList = [{"country": "英国", "id": "274"},
#                {"country": "美国", "id": "275"},
#                {"country": "加拿大", "id": "276"},
#                {"country": "中国香港", "id": "278"},
#                {"country": "澳大利亚及其他", "id": "279"}]


def resolveDuty(obj):
    """Parse the department-duty page and save each duty as a text record.

    Args:
        obj: dict carrying the scraped page; only obj["content"] (raw HTML)
             is read here.

    Side effects:
        Writes the flattened records to '部门职责.txt' via save_txt_file.
    """
    soup = BeautifulSoup(obj["content"], 'html.parser')
    lis = soup.find_all('li', class_='wow slideInUp')
    results = []

    for li in lis:
        # "####" is the record separator understood by downstream consumers.
        results.append("####")
        text = li.find('h3').get_text(strip=True)
        desc = li.find('p', class_='fontp').get_text(strip=True)

        results.append(f"部门职责: {text}")
        results.append(f"职责内容: {desc}")

    save_txt_file(results, '部门职责.txt')


def resolvePlan(obj):
    """Parse the study-plan page and save each term's plan as a text record.

    Reads obj["content"] (raw HTML); writes '升学规划.txt' via save_txt_file.
    """
    href = obj["href"]      # not used below; lookup kept so missing keys still fail fast
    title = obj["title"]    # not used below
    soup = BeautifulSoup(obj["content"], 'html.parser')
    results = []

    for entry in soup.find_all('li', class_='clearfix wow slideInUp'):
        # Record separator line for the output file.
        results.append("####")
        paragraph = entry.find('p', class_='f16p')
        span_tag = paragraph.find('span')
        term = span_tag.get_text(strip=True)
        # Pull the <span> out of the tree so the remaining text is the plan only.
        span_tag.extract()
        plan = paragraph.get_text(strip=True)

        results.append(f"学期: {term}")
        results.append(f"计划: {plan}")

    save_txt_file(results, '升学规划.txt')


def resolveWhereGo(obj):
    """Fetch graduate destinations for every year and save them as one CSV.

    For each year in yearList, queries the site for that year's destination
    countries, then fetches the per-country graduate page, parses it with
    resolveOnePage, and flattens everything into 'university_admissions.csv'.

    Args:
        obj: unused; kept for signature parity with the other resolve* handlers.

    Side effects:
        Network POSTs to ghcis.com; writes 'university_admissions.csv'.
    """
    url = "https://ghcis.com/ashx/showlist.ashx"
    results = []

    for yearItem in yearList:
        country_payload = {
            "action": "getGraduateGoCountry",
            "yid": yearItem['id'],
        }
        # timeout keeps the scraper from hanging forever on a dead server;
        # raise_for_status surfaces HTTP errors instead of failing later on
        # unparseable JSON/HTML.
        country_response = requests.post(url, data=country_payload, timeout=30)
        country_response.raise_for_status()
        country_list = country_response.json()['list']

        for countryItem in country_list:
            print(f"{yearItem['year']}---{countryItem['Title']}")
            page_payload = {
                "action": "getGraduateGo",
                "yid": yearItem['id'],
                "cid": countryItem['ID'],
            }
            response = requests.post(url, data=page_payload, timeout=30)
            response.raise_for_status()
            results += resolveOnePage(yearItem, countryItem, response.text)

    # utf-8-sig so Excel opens the Chinese text correctly.
    df = pd.DataFrame(results)
    df.to_csv("university_admissions.csv", index=False, encoding="utf-8-sig")



def getContent(objs):
    """Format parsed admission records into the flat text-record layout.

    Args:
        objs: iterable of dicts with keys schoolChineseName,
              schoolEnglishName, studentName, studentCount.

    Returns:
        Flat list of lines; each record is introduced by a "####" separator.
    """
    lines = []
    for record in objs:
        lines.extend([
            "####",
            f"入读学校(中文): {record['schoolChineseName']}",
            f"入读学校(英文): {record['schoolEnglishName']}",
            f"{record['studentName']}",
            f"{record['studentCount']}",
        ])
    return lines


def resolveOnePage(yearItem, countryItem, content):
    """Parse one year/country graduate-destination page.

    Args:
        yearItem: dict with 'year' (label) and 'id' (site query id).
        countryItem: dict with 'Title' (country name) and 'ID'.
        content: raw HTML returned by the getGraduateGo endpoint.

    Returns:
        List of row dicts: year, country, school names (zh/en), the student
        names text and the student-count text (may be '' when absent).
    """
    soup = BeautifulSoup(content, 'html.parser')
    titleNodes = soup.find_all('section', attrs={'data-id': '106349'})
    studentNodes = soup.find_all('section', attrs={'data-id': '106351'})
    # Some pages carry one extra leading title section with no matching
    # student section — drop it so the two lists align.
    # (del by index instead of list.remove, which re-scans by equality.)
    if len(studentNodes) < len(titleNodes):
        del titleNodes[0]

    pageResults = []
    # zip pairs each title with its student section and, unlike index-based
    # access, cannot raise IndexError if the lists still differ in length.
    for titleNode, studentNode in zip(titleNodes, studentNodes):
        schoolChineseName = get_structured_text(titleNode.find('strong'))
        # The second '135brush' section holds the English school name.
        englishNameNode = titleNode.find_all('section', class_='135brush')[1]
        schoolEnglishName = get_structured_text(englishNameNode)

        paragraphs = studentNode.find_all('p')
        # First <p>: student names; second <p> (when present): student count.
        students_text = paragraphs[0].get_text(strip=True)
        number_text = paragraphs[1].get_text(strip=True) if len(paragraphs) > 1 else ''

        pageResults.append({
            'year': yearItem['year'],
            'country': countryItem['Title'],
            'schoolChineseName': schoolChineseName,
            'schoolEnglishName': schoolEnglishName,
            'studentName': students_text,
            'studentCount': number_text,
        })

    return pageResults
