import feedparser
import getpass
import os
import hashlib
from langchain_openai import OpenAIEmbeddings
from langchain_chroma import Chroma
import chromadb
from langchain_core.documents import Document
import rss_config
import datetime

def parse_news():
    """Fetch the configured RSS feed and index new entries into Chroma.

    Reads the feed URL, OpenAI credentials, and Chroma settings from
    ``rss_config``. Each entry with a non-empty summary and link becomes a
    ``Document`` whose id is the MD5 hex digest of its link (stable across
    runs, so re-adding the same entry upserts rather than duplicates).
    Prints a timestamped status line when done.
    """
    data = feedparser.parse(rss_config.rss_url)

    embeddings = OpenAIEmbeddings(
        base_url=rss_config.openai_api_url,
        model=rss_config.openai_embedding_model,
        api_key=rss_config.openai_api_key,
        dimensions=1536,
    )

    vector_store = Chroma(
        collection_name=rss_config.chromadb_name,
        embedding_function=embeddings,
        persist_directory=rss_config.chromadb_path,  # Where to save data locally, remove if not necessary
    )

    documents = []
    seen_ids = set()  # a feed can repeat a link; duplicate ids in one batch would collide

    for entry in data.entries:
        # feedparser entries are FeedParserDicts: fields like summary/link/
        # published/title are NOT guaranteed to exist, so use .get() to avoid
        # AttributeError on sparse entries.
        summary = entry.get('summary', '')
        link = entry.get('link', '')
        if not summary or not link:
            continue

        doc_id = hashlib.md5(link.encode('utf-8')).hexdigest()
        if doc_id in seen_ids:
            continue
        seen_ids.add(doc_id)

        documents.append(Document(
            page_content=summary,
            metadata={
                'link': link,
                'date': entry.get('published', ''),
                'title': entry.get('title', ''),
            },
            id=doc_id,
        ))

    now = datetime.datetime.now()
    format_time = now.strftime('%Y-%m-%d %H:%M:%S')

    # chromadb rejects an empty add() batch — skip the call when the feed
    # yielded nothing usable instead of crashing.
    if not documents:
        print(f'{format_time} | add 0 to chromadb ...')
        return

    ids = [doc.id for doc in documents]
    vector_store.add_documents(documents=documents, ids=ids)
    print(f'{format_time} | add {len(documents)} to chromadb ...')
