from typing import List
import spacy
import utils

class Focus:
    def __init__(self, text, length = 5):
        self.focus_words = []
        self.focus_length = length
        self.origin = text
        self.focused = ""
    
    @property
    def length(self):
        return len(self.origin)


    def focus(self, words: List[str]):
        try:
            self.nlp = spacy.load("en_core_web_md")
        except OSError:
            raise ImportError("请先安装模型：python -m spacy download en_core_web_md")
        doc = self.nlp(self.origin)
        sentences = list(doc.sents)
        matched_contexts = set()  # 用于存储去重后的上下文
        
        word_tokens = [self.nlp(word) for word in words]

        for i, sent in enumerate(sentences):
            if any(word_token.similarity(sent) > 0.5 for word_token in word_tokens):
                start = max(0, i - self.focus_length)
                end = min(len(sentences), i + self.focus_length + 1)

                context = ' '.join(s.text for s in sentences[start:end])
                matched_contexts.add(context)  # 使用 set 进行去重

        self.focus_words = words
        self.focused = '\n\n'.join(matched_contexts)
        return self.focused
    
    def to_dict(self):
        return {
            "focus_words": self.focus_words,
            "length": self.length,
            "origin": self.origin,
            "focused": self.focused,
        }
    
    def dump(self, path=""):
        j = self.to_dict()
        if not path:
            utils.dump_json(j, prefix=f"focus")
        else:
            utils.dump_json(j, path)


class Entity:
    def __init__(self, paper_id, paper_title, chunk_id, chunk_text, year):
        self.paper_id = paper_id
        self.paper_title = paper_title
        self.chunk_id = chunk_id
        self.chunk_text = Focus(chunk_text)
        self.year = year

    @classmethod
    def from_dict(cls, data: dict):
        return cls(
            paper_id=data.get('paper_id'),
            paper_title=data.get('paper_title'),
            chunk_id=data.get('chunk_id'),
            chunk_text=data.get('chunk_text'),
            year=data.get('year')
        )

    def to_dict(self):
        return {
            "paper_id": self.paper_id,
            "paper_title": self.paper_title,
            "chunk_id": self.chunk_id,
            "chunk_text": self.chunk_text.origin,
            "year": self.year
        }

    def __str__(self):
        return f"Entity(paper_id={self.paper_id}, paper_title={self.paper_title}, chunk_id={self.chunk_id}, chunk_text_len={self.chunk_text.length})"
    
    def dump(self, path=""):
        j = self.to_dict()
        if not path:
            utils.dump_json(j, prefix=f"entity")
        else:
            utils.dump_json(j, path)


class Paper:
    def __init__(self, paper_id: str, paper_title: str, year: int, chunks: List[dict]):
        self.paper_id = paper_id
        self.paper_title = paper_title
        self.year = year
        self.chunks: List[Entity] = chunks

    @classmethod
    def from_list(cls, data: list):
        base = data[0]
        chunks = []
        for c in data:
            chunks.append(Entity.from_dict(c))
        return cls(
            paper_id=base.get("paper_id"),
            paper_title=base.get("paper_title"),
            year=base.get("year"),
            chunks=chunks
        )

    def to_dict(self):
        return {
            "paper_id": self.paper_id,
            "paper_title": self.paper_title,
            "year": self.year,
            "chunks": [c.to_dict() for c in self.chunks]
        }

    def dump(self, path=""):
        j = self.to_dict()
        if not path:
            utils.dump_json(j, prefix=f"paper_{self.paper_id}")
        else:
            utils.dump_json(j, path)


class Doc:
    def __init__(self, id, distance, entity, strong=None, summary=None):
        self.id = id
        self.distance = distance
        self.focused = ""
        if isinstance(entity, Entity):
            self.entity = entity
            self.title = entity.paper_title
        else:
            self.entity = Entity.from_dict(entity)
            self.title = self.entity.paper_title
        self.strong = strong
        self.summary = summary

    def __str__(self):
        return f"Doc(id={self.id}, distance={self.distance}, entity={self.entity})"
    
    def focus(self, words: List[str]):
        self.entity.chunk_text.focus(words)
        self.focused = self.entity.chunk_text.focused
        return self.focused
    
    @property
    def focus_words(self):
        return self.entity.chunk_text.focus_words
    
    @property
    def text(self):
        return self.entity.chunk_text.origin
    
    @classmethod
    def from_dict(cls, data: dict):
        return cls(
            id=data.get('id'),
            distance=data.get('distance'),
            entity=Entity.from_dict(data.get('entity')),
            strong=data.get('strong'),
            summary=data.get('summary')
        )
    
    def to_dict(self):
        return {
            "id": self.id,
            "distance": self.distance,
            "entity": self.entity.to_dict(),
            "strong": self.strong,
            "summary": self.summary
        }

    def dump(self, path=""):
        j = self.to_dict()
        if not path:
            utils.dump_json(j, prefix=f"doc")
        else:
            utils.dump_json(j, path)


class Query:
    def __init__(self, query: List[Doc]):
        self.query = query

    @classmethod
    def from_list(cls, data: list):
        return cls(
            query=[Doc.from_dict(d) for d in data]
        )
    
    @property
    def length(self):
        return len(self.query)
    
    @property
    def title_list(self):
        return [doc.title for doc in self.query]
    
    @property
    def doc_list(self):
        return [doc.entity for doc in self.query]
    
    @property
    def doc_id_list(self):
        return [doc.id for doc in self.query]
    
    def to_dict(self):
        return [doc.to_dict() for doc in self.query]
    
    def dump(self, path=""):
        j = self.to_dict()
        if not path:
            utils.dump_json(j, prefix=f"query")
        else:
            utils.dump_json(j, path)


class SearchResult:
    def __init__(self, docs: List[Doc]):
        self.docs = docs

    @classmethod
    def from_list(cls, data: list):
        return cls(
            docs=[Doc.from_dict(d) for d in data]
        )

    def to_dict(self):
        return [doc.to_dict() for doc in self.docs]
    
    def dump(self, path=""):
        j = self.to_dict()
        if not path:
            utils.dump_json(j, prefix=f"search_result")
        else:
            utils.dump_json(j, path)


class Collection:
    def __init__(self):
        self.docs: List[Doc] = []

    def to_dict(self):
        return [doc.to_dict() for doc in self.docs]
    
    def dump(self, path=""):
        j = self.to_dict()
        if not path:
            utils.dump_json(j, prefix=f"collection")
        else:
            utils.dump_json(j, path)
    
    def add(self, doc: Doc):
        self.docs.append(doc)
    
    def add_list(self, docs: List[Doc]):
        self.docs.extend(docs)

    def find_by_title(self, title: str):
        return [doc for doc in self.docs if doc.title == title]

    def append_query(self, query: Query):
        self.docs.extend(query.doc_list)
    
    def remove_duplicates(self):
        self.docs = list(set(self.docs))
    
    @classmethod
    def from_list(cls, data: list):
        c = cls()
        c.docs = [Doc.from_dict(d) for d in data]
        return c

    @classmethod
    def from_query(cls, query: Query):
        return cls(
            docs=query.doc_list
        )
    
    @classmethod
    def from_file(cls, path: str):
        return cls.from_list(utils.load_json(path))

    def get_title_list(self):
        return [doc.title for doc in self.docs]
    
    def get_dict_list(self, title: str):
        return [doc.to_dict() for doc in self.docs if title in doc.title]
    
    def get_chunk_desc(self):
        retstr = ""
        for index, doc in enumerate(self.docs):
            if doc.strong:
                retstr += f"Paper{index+1}:{doc.title}\n{doc.text}\n\n"
            else:
                retstr += f"Paper{index+1}:{doc.title}\n{doc.summary}\n\n"
        return retstr

    def get_chunk_summary(self):
        retstr = ""
        for index, doc in enumerate(self.docs):
            retstr += f"Paper{index+1}:{doc.title}\n{doc.summary}\n\n"
        return retstr


class SubSection:
    def __init__(self, title, description, refs: Collection):
        self.title = title
        self.description = description
        self.refs = refs
    
    def to_dict(self):
        return {
            "title": self.title,
            "description": self.description,
            "refs": self.refs.to_dict()
        }


class Section:
    def __init__(self, title, description, subsections: List[SubSection]): 
        self.title = title
        self.description = description
        self.subsections = subsections

    def to_dict(self):
        return {
            "title": self.title,
            "description": self.description,
            "subsections": [s.to_dict() for s in self.subsections]
        }


class Review:
    def __init__(self, sections: List[Section]):
        self.sections = sections

    def to_dict(self):
        return [s.to_dict() for s in self.sections]
        

if __name__ == "__main__":
    origin_doc = {
        "id": 454846772095316628,
        "distance": 0.6154661774635315,
        "entity": {
            "paper_id": "6516338d3fda6d7f065e50d0",
            "paper_title": "Resisting Backdoor Attacks in Federated Learning Via Bidirectional Elections and Individual Perspective",
            "chunk_id": 6,
            "chunk_text": "# 6 Conclusion\nThis work focuses on defending against backdoor attacks in FL and proposes a novel approach named Snowball. It enables an individual perspective that treats each model update as an agent electing model updates for aggregation, and conducts bidirectional election to select models to be aggregated, i.e., a) bottom-up election where each model update votes to several peers such that a few model updates are elected as selectees for aggregation; and b) top-down election, where selectees progressively enlarge themselves focusing on differences between model updates. Extensive experiments conducted on five real-world datasets demonstrate the superior performance of Snowball to resist backdoor attacks compared with SOTA approaches when dealing with the situations in which 1) the non-IIDness of data is complex and the PDR is not high such that the benign and infected model updates do not obviously gather in different positions, and 2) the ratio of attackers to all clients is not low. Besides, Snowball is easy to be integrated into existing FL systems.\n#\nAn, J.; and Cho, S. 2015. Variational autoencoder based anomaly detection using reconstruction probability. Special lecture on IE , 2(1): 1–18.   \nBagdasaryan, E.; Veit, A.; Hua, Y.; Estrin, D.; and Shmatikov,V. 2020. How to backdoor federated learning. In International Conference on Artificial Intelligence and Statistics ,2938–2948. PMLR.   \nBlanchard, P.; El Mhamdi, E. M.; Guerraoui, R.; and Stainer, J. 2017. Machine learning with adversaries: Byzantine tolerant gradient descent. Advances in neural information processing systems , 30.   \nCaldas, S.; Duddu, S. M. K.; Wu, P.; Li, T.; Koneˇcn y, J.; McMahan, H. B.; Smith, V.; and Talwalkar, A. 2018. Leaf: A benchmark for federated settings. arXiv preprint arXiv:1812.01097 .  \nCali´nski, T.; and Harabasz, J. 1974. A dendrite method for cluster analysis. Communications in Statistics-theory and Methods , 3(1): 1–27.   \nCao, X.; Jia, J.; and Gong, N. Z. 2021. Provably secure federated learning against malicious clients. In Proceedings of the AAAI conference on artificial intelligence , volume 35, 6885–6893.  \nDeng, L. 2012. The mnist database of handwritten digit images for machine learning research. IEEE Signal Processing Magazine , 29(6): 141–142.   \nFallah, A.; Mokhtari, A.; and Ozdaglar, A. 2020. Personalized federated learning with theoretical guarantees: A modelagnostic meta-learning approach. Advances in Neural Information Processing Systems , 33: 3557–3568.   \nFung, C.; Yoon, C. J.; and Beschastnikh, I. 2018. Mitigating sybils in federated learning poisoning. arXiv preprint arXiv:1808.04866 .  \nGhosh, A.; Chung, J.; Yin, D.; and Ramchandran, K. 2020. An efficient framework for clustered federated learning. Advances in Neural Information Processing Systems , 33: 19586– 19597.   \nGlorot, X.; Bordes, A.; and Bengio, Y. 2011. Deep sparse rectifier neural networks. In Proceedings of the fourteenth international conference on artificial intelligence and statistics ,315–323. JMLR Workshop and Conference Proceedings. He, K.; Zhang, X.; Ren, S.; and Sun, J. 2015. Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. In Proceedings of the IEEE international conference on computer vision , 1026–1034.   \nHe, K.; Zhang, X.; Ren, S.; and Sun, J. 2016. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition , 770– 778.   \nHsu, T.-M. H.; Qi, H.; and Brown, M. 2019. Measuring the effects of non-identical data distribution for federated visual classification. arXiv preprint arXiv:1909.06335 .  \nHuang, Y.; Chu, L.; Zhou, Z.; Wang, L.; Liu, J.; Pei, J.; andZhang, Y. 2021. Personalized Cross-Silo Federated Learning on Non-IID Data. In AAAI Conference on Artificial Intelligence , 7865–7873. Kingma, D. P.; and Welling, M. 2014. Auto-Encoding Variational Bayes. In Bengio, Y.; and LeCun, Y., eds., International Conference on Learning Representations, ICLR .Krizhevsky, A.; Hinton, G.; et al. 2009. Learning multiple layers of features from tiny images.   \nLi, Q.; Diao, Y.; Chen, Q.; and He, B. 2021. Federated learning on non-iid data silos: An experimental study. arXiv preprint arXiv:2102.02079 .  \nLi, S.; Cheng, Y.; Wang, W.; Liu, Y.; and Chen, T. 2020a. Learning to detect malicious clients for robust federated learning. arXiv preprint arXiv:2002.00211 .  \nLi, X.; Huang, K.; Yang, W.; Wang, S.; and Zhang, Z. 2020b. On the Convergence of FedAvg on Non-IID Data. In International Conference on Learning Representations, ICLR .Li, Y.; Jiang, Y.; Li, Z.; and Xia, S.-T. 2022. Backdoor learning: A survey. IEEE Transactions on Neural Networks and Learning Systems .  \nLiu, X.; Li, H.; Xu, G.; Chen, Z.; Huang, X.; and Lu, R.2021. Privacy-enhanced federated learning against poisoning adversaries. IEEE Transactions on Information Forensics and Security , 16: 4574–4588.   \nLu, S.; Li, R.; Liu, W.; and Chen, X. 2022. Defense against backdoor attack in federated learning. Computers & Security ,121: 102819.   \nMcMahan, B.; Moore, E.; Ramage, D.; Hampson, S.; and y Arcas, B. A. 2017. Communication-Efficient Learning of Deep Networks from Decentralized Data. In the International Conference on Artificial Intelligence and Statistics ,volume 54, 1273–1282.   \nMikolov, T.; Sutskever, I.; Chen, K.; Corrado, G. S.; and Dean, J. 2013. Distributed representations of words and phrases and their compositionality. In Advances in Neural Information Processing Systems , 3111–3119.   \nNguyen, T. D.; Rieger, P.; Chen, H.; Yalame, H.; Möllering, H.; Fereidooni, H.; Marchal, S.; Miettinen, M.; Mirhoseini, A.; Zeitouni, S.; Koushanfar, F.; Sadeghi, A.; and Schneider, T. 2022. FLAME: Taming Backdoors in Federated Learning. In USENIX Security Symposium , 1415–1432.   \nOzdayi, M. S.; Kantarcioglu, M.; and Gel, Y. R. 2021. Defending against backdoors in federated learning with robust learning rate. In Proceedings of the AAAI Conference on Artificial Intelligence , volume 35, 9268–9276.   \nPaszke, A.; Gross, S.; Massa, F.; Lerer, A.; Bradbury, J.; Chanan, G.; Killeen, T.; Lin, Z.; Gimelshein, N.; Antiga, L.; et al. 2019. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems , 32.   \nQin, Z.; Deng, S.; Zhao, M.; and Yan, X. 2023a. FedAPEN: Personalized Cross-silo Federated Learning with Adaptability to Statistical Heterogeneity. In Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining , 1954–1964.   \nQin, Z.; Yan, X.; Zhou, M.; Zhao, P.; and Deng, S. 2023b. BlockDFL: A Blockchain-based Fully Decentralized Federated Learning Framework. arXiv preprint arXiv:2205.10568 .Rieger, P.; Nguyen, T. D.; Miettinen, M.; and Sadeghi, A. 2022. DeepSight: Mitigating Backdoor Attacks in Federated Learning Through Deep Model Inspection. In Annual Network and Distributed System Security Symposium, NDSS .Sattler, F.; Müller, K.-R.; and Samek, W. 2020. Clustered federated learning: Model-agnostic distributed multitask optimization under privacy constraints. IEEE transactions on neural networks and learning systems , 32(8): 3710–3722. Shayan, M.; Fung, C.; Yoon, C. J. M.; and Beschastnikh, I. 2021. Biscotti: A Blockchain System for Private and Secure Federated Learning. IEEE Trans. Parallel Distributed Syst. ,32(7): 1513–1525.   \nShejwalkar, V.; and Houmansadr, A. 2021. Manipulating the byzantine: Optimizing model poisoning attacks and defenses for federated learning. In NDSS .  \nShi, S.; Hu, C.; Wang, D.; Zhu, Y.; and Han, Z. 2022. Federated Anomaly Analytics for Local Model Poisoning Attack. IEEE J. Sel. Areas Commun. , 40(2): 596–610.   \nSun, Z.; Kairouz, P.; Suresh, A. T.; and McMahan, H. B. 2019. Can you really backdoor federated learning? arXiv preprint arXiv:1911.07963 .  \nTan, A. Z.; Yu, H.; Cui, L.; and Yang, Q. 2022. Towards personalized federated learning. IEEE Transactions on Neural Networks and Learning Systems .  \nTibshirani, R.; Walther, G.; and Hastie, T. 2001. Estimating the number of clusters in a data set via the gap statistic. Journal of the Royal Statistical Society: Series B (Statistical Methodology) , 63(2): 411–423.   \nWang, H.; Sreenivasan, K.; Rajput, S.; Vishwakarma, H.; Agarwal, S.; Sohn, J.; Lee, K.; and Papailiopoulos, D. S. 2020. Attack of the Tails: Yes, You Really Can Backdoor Federated Learning. In Larochelle, H.; Ranzato, M.; Hadsell, R.; Balcan, M.; and Lin, H., eds., Advances in Neural Information Processing Systems .  \nXiao, H.; Rasul, K.; and Vollgraf, R. 2017. Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms. arXiv preprint arXiv:1708.07747 .  \nXie, C.; Chen, M.; Chen, P.-Y.; and Li, B. 2021. CRFL: Certifiably robust federated learning against backdoor attacks. In International Conference on Machine Learning , 11372– 11382.   \nXie, C.; Huang, K.; Chen, P.-Y.; and Li, B. 2020. DBA: Distributed backdoor attacks against federated learning. In International conference on learning representations .  \nYin, D.; Chen, Y.; Kannan, R.; and Bartlett, P. 2018. Byzantine-robust distributed learning: Towards optimal statistical rates. In International Conference on Machine Learning ,5650–5659. PMLR.   \nYu, D.; Zhang, H.; Chen, W.; and Liu, T. 2021. Do not Let Privacy Overbill Utility: Gradient Embedding Perturbation for Private Learning. In International Conference on Learning Representations, ICLR .  \nZawad, S.; Ali, A.; Chen, P.-Y.; Anwar, A.; Zhou, Y.; Bara-caldo, N.; Tian, Y.; and Yan, F. 2021. Curse or redemption? how data heterogeneity affects the robustness of federated learning. In Proceedings of the AAAI conference on artificial intelligence , volume 35, 10807–10814. Zeng, H.; Zhou, T.; Wu, X.; and Cai, Z. 2022. Never Too Late: Tracing and Mitigating Backdoor Attacks in Federated Learning. In 2022 41st International Symposium on Reliable Distributed Systems (SRDS) , 69–81.   \nZhang, K.; Tao, G.; Xu, Q.; Cheng, S.; An, S.; Liu, Y.; Feng, S.; Shen, G.; Chen, P.; Ma, S.; and Zhang, X. 2023. FLIP: A Provable Defense Framework for Backdoor Mitigation in Federated Learning. In International Conference on Learning Representations, ICLR .  \nZhang, Z.; Cao, X.; Jia, J.; and Gong, N. Z. 2022. FLDetector: Defending federated learning against model poisoning attacks via detecting malicious clients. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining , 2545–2555.   \nZhao, Y.; Li, M.; Lai, L.; Suda, N.; Civin, D.; and Chandra, V. 2018. Federated learning with non-iid data. arXiv preprint arXiv:1806.00582 .",
            "original_filename": "Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db"
        }
    }
    d = Doc.from_dict(origin_doc)
    print(d.entity.chunk_text.length)
    print(len(d.focus(["Poisoning Attacks"])))
    print(d.entity.chunk_text.focused)
    d.dump()