import gzip
import os
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
import redis
import json
import re
from collections import Counter

# Redis connection used to persist the scraped sitemap data.
# NOTE(security): host/password used to be hard-coded in source; they are now
# read from the environment, falling back to the original values so existing
# deployments keep working. Move the fallbacks out of source control when possible.
redis_db = redis.Redis(
    host=os.environ.get('REDIS_HOST', '98.82.254.89'),
    port=int(os.environ.get('REDIS_PORT', '6379')),
    db=int(os.environ.get('REDIS_DB', '5')),
    password=os.environ.get('REDIS_PASSWORD', '953058009'),
)


# Decompress a gzipped sitemap and collect URLs modified on the target day.
def unzip(file_path, list, day=1):
    """Extract matching <url> entries from a gzipped sitemap XML file.

    Appends a dict ``{"page_url", "date", "category"}`` to *list* for every
    ``<url>`` whose ``<lastmod>`` date equals today minus *day* days.

    Args:
        file_path: Path to the ``.xml.gz`` sitemap file.
        list: Accumulator the matching entries are appended to.
            (NOTE: the name shadows the builtin ``list``; kept unchanged
            for backward compatibility with existing callers.)
        day: Days back from today to match (default 1, i.e. yesterday).

    Returns:
        The same accumulator, with any matching entries appended.
    """
    # Decompress and read the whole XML document as text.
    with gzip.open(file_path, 'rt', encoding='utf-8') as f:
        content = f.read()
    root = ET.fromstring(content)
    # Sitemap protocol namespace.
    ns = {'ns': 'http://www.sitemaps.org/schemas/sitemap/0.9'}
    # Loop-invariant work hoisted out of the per-URL loop: the target date
    # and the category (derived from the file name) never change per entry.
    target_date = (datetime.now() - timedelta(days=day)).date()
    category = getFilename(file_path)
    for url in root.findall('ns:url', ns):
        loc = url.find('ns:loc', ns)
        lastmod = url.find('ns:lastmod', ns)
        # Skip malformed entries instead of crashing with AttributeError
        # when <loc> or <lastmod> is missing or empty.
        if loc is None or loc.text is None or lastmod is None or lastmod.text is None:
            continue
        # Keep only the YYYY-MM-DD part of the timestamp.
        lastmod_time = lastmod.text[:10]
        if datetime.strptime(lastmod_time, "%Y-%m-%d").date() == target_date:
            detail = {"page_url": loc.text, 'date': lastmod_time, 'category': category}
            list.append(detail)
    return list


# List the regular files (not subdirectories) directly inside *directory*.
def getFileList(directory='C:/Users/Administrator/Desktop/sitemap/'):
    """Return the full paths of all regular files in *directory*.

    Args:
        directory: Directory to scan (default: the sitemap drop folder).

    Returns:
        list[str]: One joined path per regular file found.
    """
    file_list = []
    for filename in os.listdir(directory):
        # os.path.join handles a missing trailing separator; the original
        # string concatenation (directory + filename) produced broken paths
        # whenever the directory argument lacked a trailing '/'.
        full_path = os.path.join(directory, filename)
        if os.path.isfile(full_path):
            file_list.append(full_path)
    return file_list


# Derive a category name from a sitemap file path.
def getFilename(filenames=''):
    """Strip the directory, an optional ``-<digits>`` counter, and the
    ``.xml.gz`` extension from a sitemap path.

    An empty string is returned unchanged.
    """
    if filenames == '':
        return filenames

    # Keep only the final path component (split on the last '/').
    base = filenames.rsplit('/', 1)[-1]

    # Remove a trailing "-<digits>.xml.gz"; when the numeric counter is
    # absent, the optional group lets the same pattern strip a plain
    # ".xml.gz" instead.
    return re.sub(r'(?:-\d+)?\.xml\.gz$', '', base)


# Entry point: unpack every sitemap archive, collect yesterday's URLs,
# report per-date counts, and persist the result to Redis.
file_list = getFileList()
# Renamed from `list` — the original name shadowed the builtin.
url_entries = []
if not file_list:
    print('暂无压缩文件')
else:
    # Decompress each archive and accumulate matching URL entries.
    for num, file_path in enumerate(file_list, start=1):
        url_entries = unzip(file_path, url_entries)
        print("正在执行第：" + str(num) + "个解压")

# Count how many collected URLs carry each lastmod date.
date_counts = Counter(entry['date'] for entry in url_entries)
for date, count in date_counts.items():
    print(f"{date}: {count}")

# Persist yesterday's collected URLs as a JSON array.
redis_db.set('yesterday_data', json.dumps(url_entries))
