# import logging
#
# import feedparser
# from scraper.ScrapeConfigManager import ScrapeConfigManager
# from util.DatabaseManager import DatabaseManager
#
#
# class RssUrlExtractor:
#
#     def __init__(self, sourceId):
#         """Extracts new URLs from RSS feeds.
#
#         Args:
#             sourceId (str): Identifier of the scrape source; used to look up
#                 the scrape configuration and to scope the database manager.
#
#         """
#         self.sourceId = sourceId
#         self.config = ScrapeConfigManager().get_config(sourceId)
#         self.rssUrls = None
#         self.dbManager = DatabaseManager(self.sourceId)
#
#     def getNewUrlsFromRss(self):
#         """Return a list of all entry URLs from the configured RSS feeds."""
#         urls = []
#         self.rssUrls = self.config["rss"]["urls"]
#
#         for rssUrl in self.rssUrls:
#             rss = feedparser.parse(rssUrl)
#             for entry in rss.entries:
#                 link = entry['link']
#                 urls.append(link)
#
#         return list(urls)
#
#     def start(self):
#         """Run by scrape script to extract urls."""
#         urls = self.getNewUrlsFromRss()
#         logging.info('Adding Urls from Rss Feeds')
#         self.dbManager.addToQueueDatabase(urls)
