import re
import time
import collections
from CrawlXiecheng import CrawlXiecheng
from DataOutput import DataOutput


class CrawlCommentService:
    """Crawl attraction comments for a configured list of cities and
    persist them through a DataOutput writer.

    The city list is read from ./resources/target_cities.txt (one city
    name per line) and resolved to site city ids at construction time.
    """

    def __init__(self, data_dir):
        """
        :param data_dir: directory passed to DataOutput for result files.
        """
        self.target_cities = None  # list of (city_name, city_id) tuples
        self.spider = CrawlXiecheng()
        self.writer = DataOutput(data_dir)
        self.get_crawl_cities()

    def get_crawl_cities(self):
        """Load target city names from the resource file and resolve each
        name to its city id; unknown names are reported and skipped."""
        cities = self.crawl_all_cities()
        self.target_cities = []
        with open("./resources/target_cities.txt", mode="r", encoding="utf-8") as f:
            for city_name in f.read().split("\n"):
                # split("\n") yields an empty string for trailing/blank
                # lines; skip them instead of looking them up.
                if not city_name:
                    continue
                # BUGFIX: was cities[city_name], which raises KeyError for
                # an unknown city and made the None check below unreachable.
                city_id = cities.get(city_name)
                if city_id is None:
                    print(f"不存在城市{city_name}")
                    continue
                self.target_cities.append((city_name, city_id))

    def crawl(self, pages=-1):
        """Crawl every attraction of every target city, then merge results.

        :param pages: comment pages to fetch per attraction; -1 means all.
        """
        print("----------")
        for city_name, city_id in self.target_cities:
            print(f"正在爬取城市： {city_name}")
            self.writer.city_name = city_name
            attractions = self.spider.crawl_spot_list(city_id)
            for attract in attractions:
                poi_id, poi_name = attract['card']['poiId'], attract['card']['poiName']
                print(f"正在爬取{city_name}的景点 {poi_name}")
                self.writer.spot_name = poi_name
                self.crawl_save_comments(poi_id, pages)
                print(f"爬取{city_name}的景点 {poi_name}完毕")
                time.sleep(2)  # throttle between attractions to be polite
        print("正在合并评论")
        self.writer.merge_comment()

    def crawl_save_comments(self, poi_id, pages=-1):
        """Fetch up to ``pages`` comment pages for one attraction and write
        the comment texts and per-region counts via the writer.

        :param poi_id: site identifier of the attraction.
        :param pages: page count to fetch; -1 means use the total reported
            by the first response.
        """
        cur_page = 1
        # The first page also reports the total page count.
        comments, total = self.spider.crawl_spot_comment(poi_id, cur_page)
        if pages == -1:
            pages = total

        while cur_page < pages:
            cur_page += 1
            print(f"正在爬取第{cur_page}页")
            try:
                new_comments, _ = self.spider.crawl_spot_comment(poi_id, cur_page)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit still propagate; keep what was fetched so far.
                print(f"爬取第{cur_page}页失败")
                break
            comments += new_comments
            time.sleep(2)  # throttle between page requests
        print(f"爬取完毕,共{len(comments)}条评论")
        self.writer.write_comment(parse_content(comments))
        print(f"写入评论内容完毕")
        self.writer.write_region(parse_region(comments))
        print(f"写入ip统计完毕")

    def crawl_all_cities(self):
        """Fetch the site's city index page and return {city_name: city_id}."""
        xc_html = self.spider.crawl_xiecheng_page()
        return parse_cities(xc_html)


def parse_cities(xc_html):
    """Parse the city index page and return a ``{city_name: city_id}`` map.

    Ids are kept as strings exactly as they appear in the page source;
    if a name occurs more than once, the last occurrence wins.
    """
    # Each city appears in the page as `"id":<digits>,"name":"<name>"`.
    id_name_re = re.compile(r'"id":(\d+),"name":"([^"]+)"')
    return {name: city_id for city_id, name in id_name_re.findall(xc_html)}


def parse_content(comments):
    """Return the text body of every comment, in original order."""
    texts = []
    for entry in comments:
        texts.append(entry['content'])
    return texts


def parse_region(comments):
    """Count comments per IP-location region.

    Returns ``[(region, count), ...]`` ordered by first occurrence of
    each region in the comment list.
    """
    tally = {}
    for entry in comments:
        region = entry['ipLocatedName']
        tally[region] = tally.get(region, 0) + 1
    return list(tally.items())
