James Stevenson committed on
Commit
246df79
1 Parent(s): 3562beb
Files changed (32)
  1. .gitattributes +0 -27
  2. LIWC2015 Results (Storm_Front_Posts).csv +0 -0
  3. Pinpoint_Internal/Aggregator_NGram.py +103 -0
  4. Pinpoint_Internal/Aggregator_TfIdf.py +41 -0
  5. Pinpoint_Internal/Aggregator_Word2Vec.py +31 -0
  6. Pinpoint_Internal/Aggregator_WordingChoice.py +51 -0
  7. Pinpoint_Internal/ConfigManager.py +21 -0
  8. Pinpoint_Internal/FeatureExtraction.py +793 -0
  9. Pinpoint_Internal/Grapher.py +60 -0
  10. Pinpoint_Internal/Logger.py +21 -0
  11. Pinpoint_Internal/RandomForest.py +374 -0
  12. Pinpoint_Internal/Sanitizer.py +131 -0
  13. Pinpoint_Internal/Serializer.py +20 -0
  14. Pinpoint_Internal/Twitter_api.py +215 -0
  15. Pinpoint_Internal/__pycache__/Aggregator_NGram.cpython-38.pyc +0 -0
  16. Pinpoint_Internal/__pycache__/Aggregator_TfIdf.cpython-38.pyc +0 -0
  17. Pinpoint_Internal/__pycache__/Aggregator_Word2Vec.cpython-38.pyc +0 -0
  18. Pinpoint_Internal/__pycache__/Aggregator_WordingChoice.cpython-38.pyc +0 -0
  19. Pinpoint_Internal/__pycache__/ConfigManager.cpython-38.pyc +0 -0
  20. Pinpoint_Internal/__pycache__/FeatureExtraction.cpython-38.pyc +0 -0
  21. Pinpoint_Internal/__pycache__/Grapher.cpython-38.pyc +0 -0
  22. Pinpoint_Internal/__pycache__/Logger.cpython-38.pyc +0 -0
  23. Pinpoint_Internal/__pycache__/RandomForest.cpython-38.pyc +0 -0
  24. Pinpoint_Internal/__pycache__/Sanitizer.cpython-38.pyc +0 -0
  25. Pinpoint_Internal/__pycache__/Twitter_api.cpython-38.pyc +0 -0
  26. Pinpoint_Internal/centrality-v2.py +325 -0
  27. Pinpoint_Internal/far-right-core.py +65 -0
  28. README.md +0 -13
  29. app.py +0 -22
  30. far-right-radical-language.model +0 -3
  31. predictor.py +0 -96
  32. requirements.txt +0 -8
.gitattributes DELETED
@@ -1,27 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
LIWC2015 Results (Storm_Front_Posts).csv DELETED
The diff for this file is too large to render. See raw diff
 
Pinpoint_Internal/Aggregator_NGram.py ADDED
@@ -0,0 +1,103 @@
+ from sklearn.feature_extraction.text import CountVectorizer
+
+ from Pinpoint_Internal.Logger import *
+
+ c_vec = CountVectorizer(ngram_range=(1, 5))
+
+
+ class n_gram_aggregator():
+     """
+     This class is used to retrieve the most common NGrams for a given dataset corpus.
+     """
+
+     def _get_average_ngram_count(self, n_grams_dict):
+         """
+         Takes a dict of ngrams and identifies the average weighting
+         :param n_grams_dict:
+         :return:
+         """
+         all_count = []
+         for n_gram in n_grams_dict:
+             ng_count = n_grams_dict[n_gram]
+             all_count.append(ng_count)
+
+         average_count = sum(all_count) / len(all_count)
+         # print(all_count)
+         return average_count
+
+     def _get_all_ngrams(self, data):
+         """
+         Returns all ngrams (tri, bi, and uni) for a given piece of text
+         :param data:
+         :return:
+         """
+
+         if type(data) is not list:
+             data = [data]
+
+         # input to fit_transform() should be an iterable with strings
+         ngrams = c_vec.fit_transform(data)
+
+         # needs to happen after fit_transform()
+         vocab = c_vec.vocabulary_
+
+         count_values = ngrams.toarray().sum(axis=0)
+
+         # output n-grams
+         uni_grams = {}
+         bi_grams = {}
+         tri_grams = {}
+
+         for ng_count, ng_text in sorted([(count_values[i], k) for k, i in vocab.items()], reverse=True):
+             sentence_length = len(ng_text.split(" "))
+
+             if sentence_length == 3:
+                 tri_grams[ng_text] = ng_count
+             elif sentence_length == 2:
+                 bi_grams[ng_text] = ng_count
+             elif sentence_length == 1:
+                 uni_grams[ng_text] = ng_count
+
+         return uni_grams, bi_grams, tri_grams
+
+     def _get_popular_ngrams(self, ngrams_dict):
+         """
+         Returns ngrams for a given piece of text that are the most popular (i.e. their weighting is
+         above the average ngram weighting)
+         :param ngrams_dict:
+         :return:
+         """
+         average_count = self._get_average_ngram_count(ngrams_dict)
+
+         popular_ngrams = {}
+         for n_gram in ngrams_dict:
+             ng_count = ngrams_dict[n_gram]
+
+             if ng_count >= average_count:
+                 popular_ngrams[n_gram] = ng_count
+         return popular_ngrams
+
+     def get_ngrams(self, data=None, file_name_to_read=None):
+         """
+         Wrapper function for returning uni, bi, and tri grams that are the most popular (above the average weighting in
+         a given piece of text).
+         :param data:
+         :param file_name_to_read:
+         :return:
+         """
+         logger().print_message("Getting Ngrams")
+
+         if data is None and file_name_to_read is None:
+             raise Exception("No data supplied to retrieve n_grams")
+
+         if data is None and file_name_to_read is not None:
+             with open(file_name_to_read, 'r') as file_to_read:
+                 data = file_to_read.read()
+
+         uni_grams, bi_grams, tri_grams = self._get_all_ngrams(data)
+
+         popular_uni_grams = list(self._get_popular_ngrams(uni_grams).keys())
+         popular_bi_grams = list(self._get_popular_ngrams(bi_grams).keys())
+         popular_tri_grams = list(self._get_popular_ngrams(tri_grams).keys())
+
+         return popular_uni_grams, popular_bi_grams, popular_tri_grams
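A minimal usage sketch for the n-gram aggregator added above (the corpus string is illustrative only, not part of the commit):

from Pinpoint_Internal.Aggregator_NGram import n_gram_aggregator

corpus = "the quick brown fox jumps over the lazy dog the quick brown fox"
# Returns the uni, bi, and tri grams whose counts are at or above the average count
popular_uni, popular_bi, popular_tri = n_gram_aggregator().get_ngrams(data=corpus)
print(popular_uni, popular_bi, popular_tri)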
Pinpoint_Internal/Aggregator_TfIdf.py ADDED
@@ -0,0 +1,41 @@
+ from sklearn.feature_extraction.text import TfidfVectorizer
+
+ from Pinpoint_Internal.Logger import *
+
+
+ class tf_idf_aggregator():
+     """
+     A wrapper class around SKlearn for retrieving TF-IDF scores.
+     """
+
+     def get_tf_idf_scores(self, ngrams_vocabulary, corpus_data=None, file_name_to_read=None):
+         """
+         Used to generate a TF-IDF score based on a vocabulary of ngrams and a data corpus.
+         :param ngrams_vocabulary:
+         :param corpus_data:
+         :param file_name_to_read:
+         :return: a dictionary of the pairing name and their score
+         """
+         logger.print_message("Getting TF IDF scores")
+
+         if corpus_data is None and file_name_to_read is None:
+             raise Exception("No data supplied to retrieve n_grams")
+
+         if corpus_data is None and file_name_to_read is not None:
+             with open(file_name_to_read, 'r') as file_to_read:
+                 corpus_data = file_to_read.read()
+
+         tfidf = TfidfVectorizer(vocabulary=ngrams_vocabulary, stop_words='english', ngram_range=(1, 2))
+         tfs = tfidf.fit_transform([corpus_data])
+
+         feature_names = tfidf.get_feature_names()
+         corpus_index = [n for n in corpus_data]
+         rows, cols = tfs.nonzero()
+
+         dict_of_scores = {}
+
+         for row, col in zip(rows, cols):
+             dict_of_scores[feature_names[col]] = tfs[row, col]
+             logger.print_message((feature_names[col], corpus_index[row]), tfs[row, col])
+
+         return dict_of_scores
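The TF-IDF aggregator is fed the n-grams produced by Aggregator_NGram, which is how FeatureExtraction.py wires the two together. A hedged sketch follows; note that newer scikit-learn releases replace get_feature_names() with get_feature_names_out(), so the code above assumes an older scikit-learn:

from Pinpoint_Internal.Aggregator_NGram import n_gram_aggregator
from Pinpoint_Internal.Aggregator_TfIdf import tf_idf_aggregator

corpus = "violent rhetoric appears here alongside ordinary discussion of everyday topics"
uni, bi, tri = n_gram_aggregator().get_ngrams(data=corpus)
# Score every popular n-gram against the corpus it came from
scores = tf_idf_aggregator().get_tf_idf_scores(uni + bi + tri, corpus_data=corpus)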
Pinpoint_Internal/Aggregator_Word2Vec.py ADDED
@@ -0,0 +1,31 @@
+ from gensim.models import Word2Vec
+
+
+ class word_2_vec_aggregator():
+     """
+     A wrapper class around gensim used for creating a word2vec model
+     """
+
+     def get_model(self, list_of_sentences):
+         """
+         Used to retrieve the model
+         :param list_of_sentences:
+         :return: the model
+         """
+
+         list_of_sentences_in_nested_list = []
+
+         for sentence in list_of_sentences:
+
+             # Skip unigrams
+             if " " not in sentence:
+                 continue
+
+             list_of_sentences_in_nested_list.append(sentence.split(" "))
+
+         model = Word2Vec(min_count=1, window=5)  # vector size of 100 and window size of 5?
+         model.build_vocab(list_of_sentences_in_nested_list)  # prepare the model vocabulary
+         model.train(list_of_sentences_in_nested_list, total_examples=model.corpus_count,
+                     epochs=model.epochs)  # train word vectors
+
+         return model
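A sketch of how this wrapper is used downstream: FeatureExtraction.py passes it the most important multi-word n-grams and later looks words up via model.wv. The phrases below are placeholders:

from Pinpoint_Internal.Aggregator_Word2Vec import word_2_vec_aggregator

phrases = ["far right", "far right rhetoric", "violent language", "violent language online"]
model = word_2_vec_aggregator().get_model(phrases)  # single-word entries are skipped
vector = model.wv["violent"]  # a 100-dimensional vector (gensim's default size)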
Pinpoint_Internal/Aggregator_WordingChoice.py ADDED
@@ -0,0 +1,51 @@
+ import os
+
+
+ class wording_choice_aggregator():
+     """
+     A class used for retrieving frequencies based on wording in a message
+     """
+
+     def get_frequency_of_capatalised_words(self, text):
+         """
+         A function used to retrieve the frequency of capitalised words in a dataset
+         :param text:
+         :return: the frequency of capitalised words in a dataset
+         """
+         number_of_capatalised_words = 0
+         for word in text.split(" "):
+             if word.isupper():
+                 number_of_capatalised_words = number_of_capatalised_words + 1
+
+         total_number_of_words = len(text.split(" "))
+         frequency = number_of_capatalised_words / total_number_of_words
+
+         return frequency
+
+     def get_frequency_of_violent_or_curse_words(self, text, violent_words_datasets_location):
+         """
+         A function used for retrieving the frequency of violent words in a dataset
+         :param text:
+         :return: the frequency of violent words in a dataset
+         """
+
+         dataset_folder = os.path.join(os.getcwd(), violent_words_datasets_location)
+
+         list_of_violent_or_curse_words = []
+
+         # Retrieves all words in all of the files in the violent or curse word datasets
+         for filename in os.listdir(dataset_folder):
+             with open(os.path.join(dataset_folder, filename), 'r') as file:
+
+                 for line in file.readlines():
+                     line = line.strip().replace("\n", " ").replace(",", "")
+                     list_of_violent_or_curse_words.append(line)
+
+         number_of_swear_words = 0
+         for word in text.split(" "):
+             if word in list_of_violent_or_curse_words:
+                 number_of_swear_words = number_of_swear_words + 1
+
+         total_number_of_words = len(text.split(" "))
+         frequency = number_of_swear_words / total_number_of_words
+         return frequency
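A short sketch of the two frequency helpers; the directory name is a placeholder and should contain newline-separated word lists, as FeatureExtraction.py expects:

from Pinpoint_Internal.Aggregator_WordingChoice import wording_choice_aggregator

aggregator = wording_choice_aggregator()
caps = aggregator.get_frequency_of_capatalised_words("THIS is ONE angry post")  # 2 of 5 words -> 0.4
# 'violent-words' is a hypothetical folder of newline-separated violent/curse word lists
violent = aggregator.get_frequency_of_violent_or_curse_words("some text here", "violent-words")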
Pinpoint_Internal/ConfigManager.py ADDED
@@ -0,0 +1,21 @@
+ import json
+ from pathlib import Path
+
+
+ class ConfigManager:
+     """
+     A wrapper class used to abstract Twitter config options. """
+
+     @staticmethod
+     def _get_config(config_path):
+         if not Path(config_path).is_file():
+             raise Exception("The {} config file was not found.".format(config_path))
+
+         with open(config_path) as json_file:
+             twitter_config_dict = json.load(json_file)
+
+         return twitter_config_dict
+
+     @staticmethod
+     def getTwitterConfig():
+         return ConfigManager._get_config("twitterConfig.json")
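ConfigManager.getTwitterConfig() loads twitterConfig.json from the current working directory; the expected keys are not defined anywhere in this commit, so the sketch below only shows the call:

from Pinpoint_Internal.ConfigManager import ConfigManager

config = ConfigManager.getTwitterConfig()  # raises if twitterConfig.json is missing
# config is the parsed JSON dict; its key names depend on the (untracked) config file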
Pinpoint_Internal/FeatureExtraction.py ADDED
@@ -0,0 +1,793 @@
1
+ import ast
2
+ import base64
3
+ import codecs
4
+ import csv
5
+ import gc
6
+ import json
7
+ import os
8
+ import pickle
9
+ import re
10
+ import shutil
11
+ import time
12
+
13
+ import easy_db
14
+ import numpy
15
+ import pandas as pd
16
+ import uuid
17
+ from scipy.spatial import distance
18
+
19
+ from Pinpoint_Internal.Aggregator_NGram import n_gram_aggregator
20
+ from Pinpoint_Internal.Aggregator_TfIdf import tf_idf_aggregator
21
+ from Pinpoint_Internal.Aggregator_Word2Vec import word_2_vec_aggregator
22
+ from Pinpoint_Internal.Aggregator_WordingChoice import wording_choice_aggregator
23
+ from Pinpoint_Internal.Grapher import grapher
24
+ from Pinpoint_Internal.Logger import logger
25
+ from Pinpoint_Internal.Sanitizer import sanitization, sys
26
+
27
+
28
+ class feature_extraction():
29
+ """
30
+ This class is used to wrap the functionality of aggregating tweets from CSV files and extracting features pertinent
31
+ to building a random forest extremist classifier.
32
+ """
33
+
34
+ # A graph used to store connections between aggregated users
35
+ graph = grapher()
36
+ archived_graphs = [] # an archive of the previous graphs
37
+ # A list storing dictionaries of user ids and their features.
38
+ tweet_user_features = []
39
+ completed_tweet_user_features = [] # has centrality added
40
+ # the global TF IDF model used for the Word 2 Vec model
41
+ saved_tf_idf_model = None
42
+ # A dictionary used for the translation of actual Twitter username to UUID
43
+ dict_of_users = {}
44
+
45
+ # The max size for all data entries (i.e. baseline tweets)
46
+ MAX_RECORD_SIZE = sys.maxsize # 3050
47
+
48
+ # Datasets for training
49
+ violent_words_dataset_location = None
50
+ tf_idf_training_dataset_location = None
51
+ outputs_location = None
52
+
53
+ # Used for knowing which columns to access data from. For Twitter data.
54
+ # Summary variables
55
+ DEFAULT_USERNAME_COLUMN_ID = 0
56
+ DEFAULT_DATE_COLUMN_ID = 1
57
+ DEFAULT_MESSAGE_COLUMN_ID = 2
58
+ DEFAULT_ANALYTIC_COLUMN_ID = 4
59
+ DEFAULT_CLOUT_COLUMN_ID = 5
60
+ DEFAULT_AUTHENTIC_COLUMN_ID = 6
61
+ DEFAULT_TONE_COLUMN_ID = 7
62
+ # Emotional Analysis
63
+ DEFAULT_ANGER_COLUMN_ID = 36
64
+ DEFAULT_SADNESS_COLUMN_ID = 37
65
+ DEFAULT_ANXIETY_COLUMN_ID = 35
66
+ # Personal Drives:
67
+ DEFAULT_POWER_COLUMN_ID = 62
68
+ DEFAULT_REWARD_COLUMN_ID = 63
69
+ DEFAULT_RISK_COLUMN_ID = 64
70
+ DEFAULT_ACHIEVEMENT_COLUMN_ID = 61
71
+ DEFAULT_AFFILIATION_COLUMN_ID = 60
72
+ # Personal pronouns
73
+ DEFAULT_P_PRONOUN_COLUMN_ID = 13
74
+ DEFAULT_I_PRONOUN_COLUMN_ID = 19
75
+
76
+ # Constants for the fields in the baseline data set (i.e. ISIS magazine/ Stormfront, etc)
77
+ DEFAULT_BASELINE_MESSAGE_COLUMN_ID = 5
78
+ # Summary variables
79
+ DEFAULT_BASELINE_CLOUT_COLUMN_ID = 10
80
+ DEFAULT_BASELINE_ANALYTIC_COLUMN_ID = 9
81
+ DEFAULT_BASELINE_TONE_COLUMN_ID = 12
82
+ DEFAULT_BASELINE_AUTHENTIC_COLUMN_ID = 11
83
+ # Emotional Analysis
84
+ DEFAULT_BASELINE_ANGER_COLUMN_ID = 41
85
+ DEFAULT_BASELINE_SADNESS_COLUMN_ID = 42
86
+ DEFAULT_BASELINE_ANXIETY_COLUMN_ID = 40
87
+ # Personal Drives
88
+ DEFAULT_BASELINE_POWER_COLUMN_ID = 67
89
+ DEFAULT_BASELINE_REWARD_COLUMN_ID = 68
90
+ DEFAULT_BASELINE_RISK_COLUMN_ID = 69
91
+ DEFAULT_BASELINE_ACHIEVEMENT_COLUMN_ID = 66
92
+ DEFAULT_BASELINE_AFFILIATION_COLUMN_ID = 65
93
+ # Personal pronouns
94
+ DEFAULT_BASELINE_P_PRONOUN_COLUMN_ID = 18
95
+ DEFAULT_BASELINE_I_PRONOUN_COLUMN_ID = 24
96
+
97
+ # Used for Minkowski distance
98
+ _average_clout = 0
99
+ _average_analytic = 0
100
+ _average_tone = 0
101
+ _average_authentic = 0
102
+ _average_anger = 0
103
+ _average_sadness = 0
104
+ average_anxiety = 0
105
+ average_power = 0
106
+ average_reward = 0
107
+ average_risk = 0
108
+ average_achievement = 0
109
+ average_affiliation = 0
110
+ average_p_pronoun = 0
111
+ average_i_pronoun = 0
112
+
113
+ # Used to cache messages to free memory
114
+ MESSAGE_TMP_CACHE_LOCATION = "message_cache"
115
+
116
+ def __init__(self, violent_words_dataset_location=None
117
+ , baseline_training_dataset_location=None,
118
+ outputs_location=r"outputs"):
119
+ """
120
+ Constructor
121
+
122
+ The feature_extraction() class can be initialised with violent_words_dataset_location,
123
+ tf_idf_training_dataset_location, and outputs_location locations. All files in the violent_words_dataset_location
124
+ will be read (one line at a time) and added to the corpus of violent and swear words. The csv file at
125
+ baseline_training_dataset_location is used to train the TFIDF model and a Minkowski distance score is calculated based on the LIWC scores present.
126
+
127
+ If the constant variables need to be changed, do so by setting the member variables.
128
+ """
129
+
130
+ # Error if datasets not provided
131
+ if violent_words_dataset_location is None:
132
+ raise Exception("No Violent Words dir provided. Provide a directory that contains newline separated "
133
+ "files where each line is a violent, extremist, etc word")
134
+
135
+ if baseline_training_dataset_location is None:
136
+ raise Exception("No baseline (TF-IDF/ Minkowski) dataset provided. This should be a csv file containing "
137
+ "extremist content and LIWC scores.")
138
+
139
+ # Set datasets to member variables
140
+ self.violent_words_dataset_location = violent_words_dataset_location
141
+ self.tf_idf_training_dataset_location = baseline_training_dataset_location
142
+ self.outputs_location = outputs_location
143
+
144
+ # Attempt to make the outputs folder if it doesn't exist
145
+ try:
146
+ os.makedirs(outputs_location)
147
+ except:
148
+ pass
149
+
150
+ def _reset_stored_feature_data(self):
151
+ """
152
+ Resets member variables from a previous run. Importantly, this does not reset the TF-IDF model.
153
+ :return:
154
+ """
155
+
156
+ # A graph used to store connections between aggregated users
157
+ self.graph = grapher()
158
+ archived_graphs = [] # an archive of the previous graphs
159
+ # A list storing dictionaries of user ids and their features.
160
+ self.tweet_user_features = []
161
+ self.completed_tweet_user_features = [] # has centrality added
162
+ # the global TF IDF model used for the Word 2 Vec model
163
+ self.dict_of_users = {}
164
+
165
+ # Used for Minkowski distance
166
+ self._average_clout = 0
167
+ self._average_analytic = 0
168
+ self._average_tone = 0
169
+ self._average_authentic = 0
170
+ self._average_anger = 0
171
+ self._average_sadness = 0
172
+ self.average_anxiety = 0
173
+ self.average_power = 0
174
+ self.average_reward = 0
175
+ self.average_risk = 0
176
+ self.average_achievement = 0
177
+ self.average_affiliation = 0
178
+ self.average_p_pronoun = 0
179
+ self.average_i_pronoun = 0
180
+
181
+ def _get_unique_id_from_username(self, username):
182
+ """
183
+ A function used to retrieve a UUID based on a twitter username. If a username has been used before the same UUID
184
+ will be returned as it is stored in a dictionary.
185
+ :param username:
186
+ :return: a string representation of a UUID relating to a Twitter username
187
+ """
188
+
189
+ if username in self.dict_of_users:
190
+ # username already in dictionary
191
+ unique_id = self.dict_of_users[username]
192
+ else:
193
+ # make new UUID
194
+ unique_id = uuid.uuid4().hex
195
+ # stops uuid collisions
196
+ while unique_id in self.dict_of_users.values():
197
+ unique_id = uuid.uuid4().hex
198
+
199
+ # Add new user id to dictionary
200
+ self.dict_of_users[username] = unique_id
201
+
202
+ # todo it's less efficient writing the whole file every run
203
+ path = os.path.join(self.outputs_location, "users.json")
204
+
205
+ with open(path, 'w') as outfile:
206
+ json.dump(self.dict_of_users, outfile)
207
+
208
+ return unique_id
209
+
210
+ def _add_to_graph(self, originating_user_name, message):
211
+ """
212
+ A wrapper function used for adding a node/ connection to the graph.
213
+ :param originating_user_name: the Twitter username
214
+ :param message: The Tweet
215
+ """
216
+
217
+ # Adds node to graph so that if they don't interact with anyone they still have a centrality
218
+ self.graph.add_node(originating_user_name)
219
+
220
+ # Process mentions
221
+ mentions = re.findall("\@([a-zA-Z\-\_]+)", message)
222
+
223
+ # For all mentions in the tweet add them to the graph as a node
224
+ for mention in mentions:
225
+ self.graph.add_edge_wrapper(originating_user_name, mention, 1, "mention")
226
+
227
+ # process hashtags
228
+ hashtags = re.findall("\#([a-zA-Z\-\_]+)", message)
229
+
230
+ # For all hashtags in the tweet add them to the graph as a node
231
+ for hashtag in hashtags:
232
+ self.graph.add_edge_wrapper(originating_user_name, hashtag, 1, "hashtag")
233
+
234
+ def _get_capitalised_word_frequency(self, message):
235
+ """
236
+ A wrapper function for returning the frequency of capitalised words in a message.
237
+ :param message:
238
+ :return: the frequency of capitalised words in a message.
239
+ """
240
+ return wording_choice_aggregator().get_frequency_of_capatalised_words(
241
+ message) # NEEDS TO BE DONE before lower case
242
+
243
+ def _get_violent_word_frequency(self, message):
244
+ """
245
+ A wrapper function used to retrieve the frequency of violent words in a message.
246
+ :param message: a string representation of a social media message
247
+ :return: The frequency of violent words in the message
248
+ """
249
+ return wording_choice_aggregator().get_frequency_of_violent_or_curse_words(message,
250
+ self.violent_words_dataset_location)
251
+
252
+ def _get_tweet_vector(self, message):
253
+ """
254
+ A wrapper function used to retrieve the 200-size vector representation (average and max vectors concatenated)
255
+ of that message.
256
+ :param message: a string representation of a message
257
+ :param tf_idf_model:
258
+ :return: a 200 size vector of the tweet
259
+ """
260
+ vectors = []
261
+ tf_idf_model = self._get_tf_idf_model()
262
+
263
+ for word in message.split(" "):
264
+ # todo add back word = sanitization().sanitize(word, self.outputs_location, force_new_data_and_dont_persisit=True)
265
+ try:
266
+ vectors.append(tf_idf_model.wv[word])
267
+ logger().print_message("Word '{}' in vocabulary...".format(word))
268
+ except KeyError as e:
269
+ pass
270
+ logger().print_message(e)
271
+ logger().print_message("Word '{}' not in vocabulary...".format(word))
272
+
273
+ # Lists of the values used to store the max and average vector values
274
+ max_value_list = []
275
+ average_value_list = []
276
+
277
+ # Check for if at least one word in the message is in the vocabulary of the model
278
+ final_array_of_vectors = pd.np.zeros(100)
279
+ if len(vectors) > 0:
280
+
281
+ # Loop through the elements in the vectors
282
+ for iterator in range(vectors[0].size):
283
+
284
+ list_of_all_values = []
285
+
286
+ # Loop through each vector
287
+ for vector in vectors:
288
+ value = vector[iterator]
289
+ list_of_all_values.append(value)
290
+
291
+ average_value = sum(list_of_all_values) / len(list_of_all_values)
292
+ max_value = max(list_of_all_values)
293
+ max_value_list.append(max_value)
294
+ average_value_list.append(average_value)
295
+
296
+ final_array_of_vectors = pd.np.append(pd.np.array([max_value_list]), pd.np.array([average_value_list]))
297
+
298
+ # Convert array to list
299
+ list_of_vectors = []
300
+ for vector in final_array_of_vectors:
301
+ list_of_vectors.append(vector)
302
+
303
+ return list_of_vectors
304
+
305
+ def _process_tweet(self, user_name, message, row):
306
+ """
307
+ Wrapper function for taking a username and tweet and extracting the features.
308
+ :param user_name:
309
+ :param message:
310
+ :return: a dictionary of all features from the message
311
+ """
312
+ self._add_to_graph(user_name, message)
313
+
314
+ features_dict = {"cap_freq": self._get_capitalised_word_frequency(message),
315
+ "violent_freq": self._get_violent_word_frequency(message),
316
+ "message_vector": self._get_tweet_vector(message)}
317
+
318
+
319
+ return features_dict
320
+
321
+ def _get_average_liwc_scores_for_baseline_data(self):
322
+ """
323
+ Calculate the LIWC scores for the baseline dataset and the minkowski dataset.
324
+ """
325
+
326
+ # Checks if the values have already been set this run, if so don't calculate again
327
+ # TODO what of the edge case where average clout is 0?
328
+ if self._average_clout == 0:
329
+ logger.print_message("Opening dataset {} for LIWC feature extraction and Minkowski distance".format(
330
+ self.tf_idf_training_dataset_location))
331
+ baseline_data_set_name = self.tf_idf_training_dataset_location
332
+
333
+ clout_list = []
334
+ analytic_list = []
335
+ tone_list = []
336
+ authentic_list = []
337
+ anger_list = []
338
+ sadness_list = []
339
+ anxiety_list = []
340
+ power_list = []
341
+ reward_list = []
342
+ risk_list = []
343
+ achievement_list = []
344
+ affiliation_list = []
345
+ p_pronoun_list = []
346
+ i_pronoun_list = []
347
+
348
+ with open(baseline_data_set_name, 'r', encoding='cp1252') as file:
349
+ reader = csv.reader(file)
350
+
351
+ is_header = True
352
+ for row in reader:
353
+
354
+ if is_header:
355
+ is_header = False
356
+ continue
357
+
358
+ # Try and access columns, if can't then LIWC fields haven't been set and should be set to 0
359
+ try:
360
+ clout = row[self.DEFAULT_BASELINE_CLOUT_COLUMN_ID]
361
+ analytic = row[self.DEFAULT_BASELINE_ANALYTIC_COLUMN_ID]
362
+ tone = row[self.DEFAULT_BASELINE_TONE_COLUMN_ID]
363
+ authentic = row[self.DEFAULT_BASELINE_AUTHENTIC_COLUMN_ID]
364
+ anger = row[self.DEFAULT_BASELINE_ANGER_COLUMN_ID]
365
+ sadness = row[self.DEFAULT_BASELINE_SADNESS_COLUMN_ID]
366
+ anxiety = row[self.DEFAULT_BASELINE_ANXIETY_COLUMN_ID]
367
+ power = row[self.DEFAULT_BASELINE_POWER_COLUMN_ID]
368
+ reward = row[self.DEFAULT_BASELINE_REWARD_COLUMN_ID]
369
+ risk = row[self.DEFAULT_BASELINE_RISK_COLUMN_ID]
370
+ achievement = row[self.DEFAULT_BASELINE_ACHIEVEMENT_COLUMN_ID]
371
+ affiliation = row[self.DEFAULT_BASELINE_AFFILIATION_COLUMN_ID]
372
+ p_pronoun = row[self.DEFAULT_BASELINE_P_PRONOUN_COLUMN_ID]
373
+ i_pronoun = row[self.DEFAULT_BASELINE_I_PRONOUN_COLUMN_ID]
374
+ except:
375
+ clout = 0
376
+ analytic = 0
377
+ tone = 0
378
+ authentic = 0
379
+ anger = 0
380
+ sadness = 0
381
+ anxiety = 0
382
+ power = 0
383
+ reward = 0
384
+ risk = 0
385
+ achievement = 0
386
+ affiliation = 0
387
+ p_pronoun = 0
388
+ i_pronoun = 0
389
+
390
+ clout_list.append(float(clout))
391
+ analytic_list.append(float(analytic))
392
+ tone_list.append(float(tone))
393
+ authentic_list.append(float(authentic))
394
+ anger_list.append(float(anger))
395
+ sadness_list.append(float(sadness))
396
+ anxiety_list.append(float(anxiety))
397
+ power_list.append(float(power))
398
+ reward_list.append(float(reward))
399
+ risk_list.append(float(risk))
400
+ achievement_list.append(float(achievement))
401
+ affiliation_list.append(float(affiliation))
402
+ p_pronoun_list.append(float(p_pronoun))
403
+ i_pronoun_list.append(float(i_pronoun))
404
+
405
+ # Get average for variables, used for distance score. These are member variables so that they don't
406
+ # have to be re-calculated on later runs
407
+ self._average_clout = sum(clout_list) / len(clout_list)
408
+ self._average_analytic = sum(analytic_list) / len(analytic_list)
409
+ self._average_tone = sum(tone_list) / len(tone_list)
410
+ self._average_authentic = sum(authentic_list) / len(authentic_list)
411
+ self._average_anger = sum(anger_list) / len(anger_list)
412
+ self._average_sadness = sum(sadness_list) / len(sadness_list)
413
+ self.average_anxiety = sum(anxiety_list) / len(anxiety_list)
414
+ self.average_power = sum(power_list) / len(power_list)
415
+ self.average_reward = sum(reward_list) / len(reward_list)
416
+ self.average_risk = sum(risk_list) / len(risk_list)
417
+ self.average_achievement = sum(achievement_list) / len(achievement_list)
418
+ self.average_affiliation = sum(affiliation_list) / len(affiliation_list)
419
+ self.average_p_pronoun = sum(p_pronoun_list) / len(p_pronoun_list)
420
+ self.average_i_pronoun = sum(i_pronoun_list) / len(i_pronoun_list)
421
+
422
+ return [self._average_clout, self._average_analytic, self._average_tone, self._average_authentic,
423
+ self._average_anger, self._average_sadness, self.average_anxiety,
424
+ self.average_power, self.average_reward, self.average_risk, self.average_achievement,
425
+ self.average_affiliation,
426
+ self.average_p_pronoun, self.average_i_pronoun]
427
+
428
+ def _get_tf_idf_model(self):
429
+ """
430
+ A function used to retrieve the TFIDF model trained on the extremist dataset. If the model has already been
431
+ created then the previously created model will be used.
432
+ :return: a TF-IDF model
433
+ """
434
+
435
+ # if already made model, reuse
436
+ if self.saved_tf_idf_model is None:
437
+ logger.print_message("Opening dataset {} for TF-IDF".format(self.tf_idf_training_dataset_location))
438
+ baseline_data_set_name = self.tf_idf_training_dataset_location
439
+
440
+ data_set = ""
441
+
442
+ with open(baseline_data_set_name, 'r', encoding='cp1252') as file:
443
+ reader = csv.reader(file)
444
+
445
+ is_header = True
446
+ for row in reader:
447
+
448
+ if is_header:
449
+ is_header = False
450
+ continue
451
+
452
+ # take quote from dataset and add it to dataset
453
+ message = row[self.DEFAULT_BASELINE_MESSAGE_COLUMN_ID] # data column
454
+ data_set = data_set + message + "/n"
455
+
456
+ # clean data set
457
+ # todo should we be doing sanitization clean_data = sanitization().sanitize(data_set, self.outputs_location) # if so remove line below
458
+ clean_data = data_set
459
+
460
+ # get ngrams
461
+ uni_grams, bi_grams, tri_grams = n_gram_aggregator().get_ngrams(clean_data)
462
+ ngrams = uni_grams + bi_grams + tri_grams
463
+
464
+ # todo The TF_IDF most important ngrams aren't being used. Should these be used instead of the other ngrams
465
+ tf_idf_scores = tf_idf_aggregator().get_tf_idf_scores(ngrams, data_set)
466
+ number_of_most_important_ngrams = int(len(ngrams) / 2) # number is half all ngrams
467
+ list_of_most_important_ngrams = sorted(tf_idf_scores, key=tf_idf_scores.get, reverse=True)[
468
+ :number_of_most_important_ngrams]
469
+
470
+ # create a word 2 vec model
471
+ model = word_2_vec_aggregator().get_model(list_of_sentences=list_of_most_important_ngrams)
472
+ self.saved_tf_idf_model = model
473
+ else:
474
+ model = self.saved_tf_idf_model
475
+
476
+ return model
477
+
478
+ def open_wrapper(self, location, access_type, list_of_encodings=["utf-8", 'latin-1', 'cp1252']):
479
+ """
480
+ A wrapper around the open built in function that has fallbacks for different encodings.
481
+ :return:
482
+ """
483
+
484
+ for encoding in list_of_encodings:
485
+ try:
486
+ file = open(location, access_type, encoding=encoding)
487
+ # Attempt to read file, if fails try other encoding
488
+ file.readlines()
489
+ file.seek(0)
490
+ file.close()
491
+ file = open(location, access_type, encoding=encoding)
492
+ return file
493
+ except LookupError as e:
494
+ continue
495
+ except UnicodeDecodeError as e:
496
+ continue
497
+
498
+ raise Exception(
499
+ "No valid encoding provided for file: '{}'. Encodings provided: '{}'".format(location, list_of_encodings))
500
+
501
+ def _add_user_post_db_cache(self, user_id, dict_to_add):
502
+ """
503
+ Used to add data to the post message db cache used to free up memory.
504
+ """
505
+
506
+ if not os.path.isdir(self.MESSAGE_TMP_CACHE_LOCATION):
507
+ os.mkdir(self.MESSAGE_TMP_CACHE_LOCATION)
508
+
509
+ # Save file as pickle
510
+ file_name = "{}-{}.pickle".format(user_id,int(time.time()))
511
+ file_name = os.path.join(self.MESSAGE_TMP_CACHE_LOCATION, file_name)
512
+ with open(file_name, 'wb') as pickle_handle:
513
+ pickle.dump({"description":"a temporary file used for saving memory",
514
+ "data":dict_to_add}, pickle_handle, protocol=pickle.HIGHEST_PROTOCOL)
515
+
516
+ def _get_user_post_db_cache(self, file_name):
517
+ """
518
+ Retrieves data from the cache database used to free up memory.
519
+ """
520
+ if not os.path.isdir(self.MESSAGE_TMP_CACHE_LOCATION):
521
+ raise Exception("Attempted to access temporary cache files before files are created")
522
+
523
+ if not os.path.isfile(file_name):
524
+ raise Exception("Attempted to access cache file {}, however, it does not exist".format(file_name))
525
+
526
+ with (open(file_name, "rb")) as openfile:
527
+ cache_data = pickle.load(openfile)
528
+
529
+ return cache_data["data"]
530
+
531
+ def _delete_user_post_db_cache(self):
532
+ if os.path.isdir(self.MESSAGE_TMP_CACHE_LOCATION):
533
+ shutil.rmtree(self.MESSAGE_TMP_CACHE_LOCATION)
534
+
535
+ def _get_type_of_message_data(self, data_set_location, has_header=True, is_extremist=None):
536
+ # Ensure all temp files are deleted
537
+ self._delete_user_post_db_cache()
538
+
539
+ # Counts the total rows in the CSV. Used for progress reporting.
540
+ print("Starting entity count. Will count '{}'".format(self.MAX_RECORD_SIZE))
541
+
542
+ # Read one entry at a time
543
+ max_chunksize = 1
544
+ row_count = 0
545
+
546
+ for row in pd.read_csv(data_set_location, iterator=True,encoding='latin-1'):
547
+
548
+ row_count = row_count + 1
549
+
550
+ if row_count >= self.MAX_RECORD_SIZE:
551
+ break
552
+
553
+
554
+ print("Finished entity count. Count is: '{}'".format(row_count))
555
+ print("")
556
+ # Loops through all rows in the dataset CSV file.
557
+ current_processed_rows = 0
558
+ is_header = False
559
+
560
+ for row in pd.read_csv(data_set_location, iterator=True,encoding='latin-1'):
561
+ row = row.columns
562
+ # Makes sure same number for each dataset
563
+ if current_processed_rows > row_count:
564
+ break
565
+
566
+ # Skips the first entry, as it's the CSV header
567
+ if has_header and is_header:
568
+ is_header = False
569
+ continue
570
+
571
+ # Retrieve username
572
+ try:
573
+ username = row[self.DEFAULT_USERNAME_COLUMN_ID]
574
+ date = row[self.DEFAULT_DATE_COLUMN_ID]
575
+ user_unique_id = self._get_unique_id_from_username(username)
576
+ except:
577
+ # if empty entry
578
+ continue
579
+ # Attempt to get LIWC scores from csv, if not present return 0's
580
+ try:
581
+ # Summary variables
582
+ clout = float(row[self.DEFAULT_CLOUT_COLUMN_ID])
583
+ analytic = float(row[self.DEFAULT_ANALYTIC_COLUMN_ID])
584
+ tone = float(row[self.DEFAULT_TONE_COLUMN_ID])
585
+ authentic = float(row[self.DEFAULT_AUTHENTIC_COLUMN_ID])
586
+ # Emotional Analysis
587
+ anger = float(row[self.DEFAULT_ANGER_COLUMN_ID])
588
+ sadness = float(row[self.DEFAULT_SADNESS_COLUMN_ID])
589
+ anxiety = float(row[self.DEFAULT_ANXIETY_COLUMN_ID])
590
+ # Personal Drives:
591
+ power = float(row[self.DEFAULT_POWER_COLUMN_ID])
592
+ reward = float(row[self.DEFAULT_REWARD_COLUMN_ID])
593
+ risk = float(row[self.DEFAULT_RISK_COLUMN_ID])
594
+ achievement = float(row[self.DEFAULT_ACHIEVEMENT_COLUMN_ID])
595
+ affiliation = float(row[self.DEFAULT_AFFILIATION_COLUMN_ID])
596
+ # Personal pronouns
597
+ i_pronoun = float(row[self.DEFAULT_I_PRONOUN_COLUMN_ID])
598
+ p_pronoun = float(row[self.DEFAULT_P_PRONOUN_COLUMN_ID])
599
+
600
+ except:
601
+ # Summary variables
602
+ clout = 0
603
+ analytic = 0
604
+ tone = 0
605
+ authentic = 0
606
+ # Emotional Analysis
607
+ anger = 0
608
+ sadness = 0
609
+ anxiety = 0
610
+ # Personal Drives:
611
+ power = 0
612
+ reward = 0
613
+ risk = 0
614
+ achievement = 0
615
+ affiliation = 0
616
+ # Personal pronouns
617
+ i_pronoun = 0
618
+ p_pronoun = 0
619
+
620
+ liwc_dict = {
621
+ "clout": clout,
622
+ "analytic": analytic,
623
+ "tone": tone,
624
+ "authentic": authentic,
625
+ "anger": anger,
626
+ "sadness": sadness,
627
+ "anxiety": anxiety,
628
+ "power": power,
629
+ "reward": reward,
630
+ "risk": risk,
631
+ "achievement": achievement,
632
+ "affiliation": affiliation,
633
+ "i_pronoun": i_pronoun,
634
+ "p_pronoun": p_pronoun,
635
+ }
636
+
637
+ # Calculate minkowski distance
638
+ average_row = self._get_average_liwc_scores_for_baseline_data()
639
+
640
+ actual_row = [clout, analytic, tone, authentic,
641
+ anger, sadness, anxiety,
642
+ power, reward, risk, achievement, affiliation,
643
+ p_pronoun, i_pronoun
644
+ ]
645
+
646
+ try:
647
+ liwc_dict["minkowski"] = distance.minkowski(actual_row, average_row, 1)
648
+ except ValueError:
649
+ continue
650
+
651
+ # Retrieve Tweet for message
652
+ tweet = str(row[self.DEFAULT_MESSAGE_COLUMN_ID])
653
+
654
+ # clean/ remove markup in dataset
655
+ sanitised_message = sanitization().sanitize(tweet, self.outputs_location,
656
+ force_new_data_and_dont_persisit=True)
657
+
658
+ # If no message skip entry
659
+ if not len(tweet) > 0 or not len(sanitised_message) > 0 or sanitised_message == '' or not len(
660
+ sanitised_message.split(" ")) > 0:
661
+ continue
662
+
663
+ # Process Tweet and save as dict
664
+ tweet_dict = self._process_tweet(user_unique_id, tweet, row)
665
+
666
+ # If the message vector is not 200 skip (meaning that a blank message was processed)
667
+ if not len(tweet_dict["message_vector"]) == 200:
668
+ continue
669
+
670
+ if is_extremist is not None:
671
+ tweet_dict["is_extremist"] = is_extremist
672
+
673
+ tweet_dict["date"] = date
674
+
675
+ # Merge liwc dict with tweet dict
676
+ tweet_dict = {**tweet_dict, **liwc_dict}
677
+
678
+ #tweet_dict["user_unique_id"]= user_unique_id
679
+
680
+ self._add_user_post_db_cache(user_unique_id, {user_unique_id: tweet_dict})
681
+ #self.tweet_user_features.append()
682
+ # TODO here save to cache json instead of list and graph
683
+
684
+ logger().print_message("Added message from user: '{}', from dataset: '{}'. {} rows of {} completed."
685
+ .format(user_unique_id, data_set_location, current_processed_rows, row_count), 1)
686
+ current_processed_rows = current_processed_rows + 1
687
+ print("Finished reading row")
688
+
689
+ # Add the centrality (has to be done after all users are added to graph)
690
+ completed_tweet_user_features = []
691
+ # Loops through each item in the list which represents each message/ tweet
692
+
693
+ # Loop through all data in cache file
694
+ for cached_message_file in os.listdir(self.MESSAGE_TMP_CACHE_LOCATION):
695
+ cached_message_file = os.fsdecode(cached_message_file)
696
+ cached_message_file = os.path.join(self.MESSAGE_TMP_CACHE_LOCATION,cached_message_file)
697
+
698
+ # Only process pickle files
699
+ if not cached_message_file.endswith(".pickle"):
700
+ continue
701
+
702
+ print("Reading cache file: '{}'".format(cached_message_file))
703
+ cached_message_data = self._get_user_post_db_cache(cached_message_file)
704
+ # Loops through the data in that tweet (Should only be one entry per tweet).
705
+ for user_id in cached_message_data.keys():
706
+ updated_entry = {}
707
+ updated_entry[user_id] = cached_message_data[user_id]
708
+ # Adds centrality
709
+ updated_entry[user_id]["centrality"] = self.graph.get_degree_centrality_for_user(user_id)
710
+ logger().print_message(
711
+ "Added '{}' Centrality for user '{}'".format(updated_entry[user_id]["centrality"], user_id), 1)
712
+ completed_tweet_user_features.append(updated_entry)
713
+ gc.collect()
714
+ break # Only one entry per list
715
+
716
+
717
+ self._delete_user_post_db_cache()
718
+ self.completed_tweet_user_features = self.completed_tweet_user_features + completed_tweet_user_features
719
+ self.tweet_user_features = []
720
+ #self.archived_graphs.append(self.graph)
721
+ self.graph = grapher()
722
+ print("Finished messages")
723
+
724
+ def _get_extremist_data(self, dataset_location):
725
+ """
726
+ This function is responsible for aggregating tweets from the extremist dataset, extracting the features, and
727
+ saving them to a file for a model to be created.
728
+ """
729
+
730
+ self._get_type_of_message_data(data_set_location=dataset_location, is_extremist=True)
731
+
732
+ def _get_counterpoise_data(self, dataset_location):
733
+ """
734
+ This function is responsible for aggregating tweets from the counterpoise (related to the topic but from
735
+ legitimate sources, e.g. news outlets) dataset, extracting the features, and saving them to a file for a
736
+ model to be created.
737
+ """
738
+
739
+ self._get_type_of_message_data(data_set_location=dataset_location, is_extremist=False)
740
+
741
+ def _get_standard_tweets(self, dataset_location):
742
+ """
743
+ This function is responsible for aggregating tweets from the baseline (random sample of twitter posts)
744
+ dataset, extracting the features, and saving them to a file for a model to be created.
745
+ """
746
+
747
+ self._get_type_of_message_data(data_set_location=dataset_location, is_extremist=False)
748
+
749
+ def dump_features_for_list_of_datasets(self, feature_file_path_to_save_to, list_of_dataset_locations,
750
+ force_new_dataset=True):
751
+ """
752
+ Saves features representing a provided dataset to a json file. Designed to be used for testing after a
753
+ model has been created.
754
+ :param feature_file_path_to_save_to:
755
+ :param dataset_location:
756
+ :return:
757
+ """
758
+
759
+ self._reset_stored_feature_data()
760
+
761
+ if force_new_dataset or not os.path.isfile(feature_file_path_to_save_to):
762
+ for dataset in list_of_dataset_locations:
763
+ self._get_type_of_message_data(data_set_location=dataset, is_extremist=None)
764
+
765
+ with open(feature_file_path_to_save_to, 'w') as outfile:
766
+ json.dump(self.completed_tweet_user_features, outfile, indent=4)
767
+
768
+ else:
769
+ with open(feature_file_path_to_save_to, 'r') as file:
770
+ data = file.read()
771
+
772
+ # parse file
773
+ self.completed_tweet_user_features = json.loads(data)
774
+
775
+ def dump_training_data_features(self, feature_file_path_to_save_to, extremist_data_location,
776
+ baseline_data_location, force_new_dataset=True):
777
+ """
778
+ The entrypoint function, used to dump all features, for all users in the extremist, counterpoise, and baseline
779
+ datasets to a json file.
780
+ :param feature_file_path_to_save_to: The filepath to save the datasets to
781
+ """
782
+
783
+ self._reset_stored_feature_data()
784
+
785
+ if force_new_dataset or not os.path.isfile(feature_file_path_to_save_to):
786
+ print("Starting baseline messages")
787
+ self._get_standard_tweets(baseline_data_location)
788
+ print("Starting extremist messages")
789
+ self._get_extremist_data(extremist_data_location)
790
+
791
+
792
+ with open(feature_file_path_to_save_to, 'w') as outfile:
793
+ json.dump(self.completed_tweet_user_features, outfile, indent=4)
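FeatureExtraction.py above is the aggregation entry point: dump_training_data_features() walks the baseline and extremist CSVs, extracts LIWC, n-gram/word2vec, and graph-centrality features per message, and writes them to a JSON file. A hedged sketch of driving it; every path below is a placeholder, not a file in this commit:

from Pinpoint_Internal.FeatureExtraction import feature_extraction

extractor = feature_extraction(violent_words_dataset_location="datasets/violent-words",
                               baseline_training_dataset_location="datasets/baseline-liwc.csv")
extractor.dump_training_data_features("outputs/training-features.json",
                                      extremist_data_location="datasets/extremist-liwc.csv",
                                      baseline_data_location="datasets/baseline-tweets-liwc.csv")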
Pinpoint_Internal/Grapher.py ADDED
@@ -0,0 +1,60 @@
+ import networkx as nx
+
+
+ class grapher():
+     """
+     A wrapper class used for generating a graph for interactions between users
+     """
+     graph = None
+
+     def __init__(self):
+         """
+         Constructor.
+         """
+         self.graph = nx.DiGraph()
+
+     def add_edge_wrapper(self, node_1_name, node_2_name, weight, relationship):
+         """
+         A wrapper function used to add an edge connection or node.
+         :param node_1_name: from
+         :param node_2_name: to
+         :param weight:
+         :param relationship:
+         :return:
+         """
+         self.graph.add_edge(node_1_name, node_2_name, weight=weight, relation=relationship)
+
+     def add_node(self, node_name):
+         """
+         A wrapper function that adds a node with no edges to the graph
+         :param node_name:
+         """
+         self.graph.add_node(node_name)
+
+     def get_info(self):
+         """
+         Retrieves information about the graph
+         :return:
+         """
+         return nx.info(self.graph)
+
+     def show_graph(self):
+         """
+         Displays the graph
+         :return:
+         """
+         nx.spring_layout(self.graph)
+
+     def get_degree_centrality_for_user(self, user_name):
+         """
+         Returns the degree centrality for a given user present in the graph
+         :param user_name:
+         :return: the degree centrality for a given user present in the graph
+         """
+         centrality = nx.degree_centrality(self.graph)
+         return centrality[user_name]
+
+     # todo implement
+     # def get_eigenvector_centrality_for_user(self, user_name):
+     #     centrality = nx.eigenvector_centrality(self.graph)
+     #     return centrality[user_name]
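A small sketch of the grapher wrapper; degree centrality here is networkx's node degree divided by (number of nodes - 1):

from Pinpoint_Internal.Grapher import grapher

g = grapher()
g.add_edge_wrapper("alice", "bob", 1, "mention")  # alice mentioned bob
g.add_node("carol")  # an isolated user still gets a centrality score
print(g.get_degree_centrality_for_user("alice"))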
Pinpoint_Internal/Logger.py ADDED
@@ -0,0 +1,21 @@
+ from datetime import datetime
+
+
+ class logger():
+     """
+     A wrapper class around the Python print function used to only print when debugging is enabled
+     """
+     DEBUG = False
+
+     @staticmethod
+     def print_message(message, logging_level=0):
+         """
+         A wrapper function around the Python print function used to only print when appropriate
+         :param message: the message to print
+         :param logging_level: the level of the log message. If 1 or above the message will be printed
+         irrespective of whether DEBUG mode is enabled.
+         """
+         if logging_level >= 1 or logger.DEBUG:
+             now = datetime.now()
+             current_time = now.strftime("%H:%M:%S")
+             print("{} | {}".format(current_time, message))
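The logger only prints when DEBUG is set or the call passes a logging_level of 1 or more:

from Pinpoint_Internal.Logger import logger

logger.DEBUG = True                        # print everything while debugging
logger.print_message("loaded dataset")     # printed only because DEBUG is True
logger.print_message("10 rows added", 1)   # logging_level >= 1 always prints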
Pinpoint_Internal/RandomForest.py ADDED
@@ -0,0 +1,374 @@
1
+ import csv
2
+ import json
3
+ import os
4
+ import pickle
5
+ from datetime import datetime
6
+
7
+ import pandas
8
+ import pandas as pd
9
+ from sklearn import metrics
10
+ from sklearn.ensemble import RandomForestClassifier
11
+ from sklearn.model_selection import train_test_split
12
+
13
+ from Pinpoint_Internal import Logger
14
+
15
+
16
+ class random_forest():
17
+ """
18
+ A class used for creating a random forest binary classifier.
19
+ """
20
+
21
+ model = None
22
+ accuracy = None
23
+ precision = None
24
+ recall = None
25
+ f_measure = None
26
+
27
+ # Model variables populated on creation or reading of file
28
+
29
+ original_name = None
30
+ creation_date = None
31
+
32
+ _FRAMEWORK_VERSION = 0.2 # Used when creating a new model file
33
+ # v0.1 - versioning added.
34
+ # v0.2 - Added more LIWC scores and minkowski distance
35
+
36
+ model_version = _FRAMEWORK_VERSION # can be updated if reading and using a model file of a different version
37
+
38
+ _outputs_folder = None
39
+ _model_folder = None
40
+
41
+ # Categories of features used in the model
42
+ RADICAL_LANGUAGE_ENABLED = True # RF-IDF Scores, Word Embeddings
43
+ PSYCHOLOGICAL_SIGNALS_ENABLED = True # LIWC Dictionaries, Minkowski distance
44
+ BEHAVIOURAL_FEATURES_ENABLED = True # frequency of tweets, followers / following ratio, centrality
45
+
46
+ def __init__(self, outputs_folder="outputs", model_folder=None):
47
+ """
48
+ Constructor
49
+
50
+ The random_forest() class can be initialised with outputs_folder() and model_folder(). The outputs folder is
51
+ where output files are stored and the model folder is where the model will be created if not overwritten.
52
+ """
53
+
54
+ if model_folder is None:
55
+ model_folder = outputs_folder
56
+
57
+ self._outputs_folder = outputs_folder
58
+ self._model_folder = model_folder
59
+
60
+ def get_features_as_df(self, features_file, force_new_dataset=True):
61
+ """
62
+ Reads a JSON features file and converts it to a Pandas dataframe that can be used to train and test the classifier.
63
+ :param features_file: the location of the JSON features file to convert to a dataframe
64
+ :param force_new_dataset: if true a new CSV file will be created even if one already exists.
65
+ :return: a Pandas dataframe with the features.
66
+ """
67
+
68
+ with open(features_file) as json_features_file:
69
+ csv_file = "{}.csv".format(features_file)
70
+
71
+ if force_new_dataset or not os.path.isfile(csv_file):
72
+ features = json.load(json_features_file)
73
+
74
+ # todo remove the data for the features not being used.
75
+ filtered_list_after_filters_applied = []
76
+
77
+ # If any of the filters are not true remove the features not requested
78
+ column_names = []
79
+
80
+ if self.PSYCHOLOGICAL_SIGNALS_ENABLED:
81
+ column_names = column_names + ["clout", "analytic", "tone", "authentic",
82
+ "anger", "sadness", "anxiety",
83
+ "power", "reward", "risk", "achievement", "affiliation",
84
+ "i_pronoun", "p_pronoun",
85
+ "minkowski"]
86
+ if self.BEHAVIOURAL_FEATURES_ENABLED:
87
+ column_names = column_names + ['centrality']
88
+
89
+ if self.RADICAL_LANGUAGE_ENABLED:
90
+ # Add column names
91
+ column_names = column_names + ["cap_freq", "violent_freq"]
92
+ # Add the two hundred vectors columns
93
+ for iterator in range(1, 201):
94
+ column_names.append("message_vector_{}".format(iterator))
95
+
96
+ column_names = column_names + ['is_extremist']
97
+
98
+ if not self.BEHAVIOURAL_FEATURES_ENABLED or not self.PSYCHOLOGICAL_SIGNALS_ENABLED or self.RADICAL_LANGUAGE_ENABLED:
99
+
100
+ # Loops through list of dicts (messages)
101
+ number_of_processed_messages = 0
102
+ for message in features:
103
+ number_of_processed_messages = number_of_processed_messages + 1
104
+ Logger.logger.print_message(
105
+ "Extracting information from message {} of {} in file {}".format(
106
+ number_of_processed_messages,
107
+ len(features),
108
+ features_file),
109
+ logging_level=1)
110
+
111
+ # Loops through dict keys (usernames)
112
+ for user in message.keys():
113
+
114
+ message_features = message[user]
115
+
116
+ feature_dict = {}
117
+
118
+ if self.PSYCHOLOGICAL_SIGNALS_ENABLED:
119
+ # Summary variables
120
+ feature_dict["clout"] = message_features["clout"]
121
+ feature_dict["analytic"] = message_features["analytic"]
122
+ feature_dict["tone"] = message_features["tone"]
123
+ feature_dict["authentic"] = message_features["authentic"]
124
+
125
+ # Emotional Analysis
126
+ feature_dict["anger"] = message_features["anger"]
127
+ feature_dict["sadness"] = message_features["sadness"]
128
+ feature_dict["anxiety"] = message_features["anxiety"]
129
+
130
+ # Personal Drives
131
+ feature_dict["power"] = message_features["power"]
132
+ feature_dict["reward"] = message_features["reward"]
133
+ feature_dict["risk"] = message_features["risk"]
134
+ feature_dict["achievement"] = message_features["achievement"]
135
+ feature_dict["affiliation"] = message_features["affiliation"]
136
+
137
+ # Personal Pronouns
138
+ feature_dict["i_pronoun"] = message_features["i_pronoun"]
139
+ feature_dict["p_pronoun"] = message_features["p_pronoun"]
140
+
141
+ # Minkowski distance
142
+ feature_dict["minkowski"] = message_features["minkowski"]
143
+
144
+ if self.BEHAVIOURAL_FEATURES_ENABLED:
145
+ #feature_dict['post_freq'] = message_features['post_freq']
146
+ #feature_dict['follower_freq'] = message_features['follower_freq']
147
+ feature_dict['centrality'] = message_features['centrality']
148
+
149
+ if self.RADICAL_LANGUAGE_ENABLED:
150
+ feature_dict["message_vector"] = message_features["message_vector"]
151
+ feature_dict["violent_freq"] = message_features["violent_freq"]
152
+ feature_dict["cap_freq"] = message_features["cap_freq"]
153
+
154
+ feature_dict['is_extremist'] = message_features['is_extremist']
155
+
156
+ user = {user: feature_dict}
157
+ filtered_list_after_filters_applied.append(user)
158
+
159
+ number_of_features = len(filtered_list_after_filters_applied)
160
+
161
+ # Creates the columns for the data frame
162
+ df = pd.DataFrame(
163
+ columns=column_names)
164
+
165
+ completed_features = 0
166
+ iterator = 0
167
+ error_count = 0
168
+ for message in features:
169
+ # should only be one user per entry
170
+ for user_id in message:
171
+ feature_data = message[user_id]
172
+ # ID is not included as it's hexadecimal and not float
173
+
174
+ row = []
175
+
176
+ if self.PSYCHOLOGICAL_SIGNALS_ENABLED:
177
+ clout = feature_data['clout']
178
+ analytic = feature_data['analytic']
179
+ tone = feature_data['tone']
180
+ authentic = feature_data['authentic']
181
+
182
+ anger = feature_data["anger"]
183
+ sadness = feature_data["sadness"]
184
+ anxiety = feature_data["anxiety"]
185
+ power = feature_data["power"]
186
+ reward = feature_data["reward"]
187
+ risk = feature_data["risk"]
188
+ achievement = feature_data["achievement"]
189
+ affiliation = feature_data["affiliation"]
190
+ i_pronoun = feature_data["i_pronoun"]
191
+ p_pronoun = feature_data["p_pronoun"]
192
+ minkowski = feature_data["minkowski"]
193
+
194
+ row = row + [clout, analytic, tone, authentic, anger, sadness, anxiety, power,
195
+ reward, risk, achievement, affiliation, i_pronoun, p_pronoun, minkowski]
196
+
197
+ if self.BEHAVIOURAL_FEATURES_ENABLED:
198
+ #post_freq = feature_data['post_freq']
199
+ #follower_freq = feature_data['follower_freq']
200
+ centrality = feature_data['centrality']
201
+
202
+ row = row + [#post_freq, follower_freq,
203
+ centrality]
204
+
205
+ if self.RADICAL_LANGUAGE_ENABLED:
206
+ cap_freq = feature_data['cap_freq']
207
+ violent_freq = feature_data['violent_freq']
208
+ message_vector = feature_data['message_vector']
209
+
210
+ row = row + [cap_freq, violent_freq] + message_vector
211
+
212
+ is_extremist = feature_data['is_extremist']
213
+
214
+ row = row + [is_extremist]
215
+ try:
216
+ df.loc[iterator] = row
217
+ except ValueError as e:
218
+ print(e)
219
+ error_count = error_count + 1
220
+ pass # if error with value probably column mismatch which is down to taking a message with no data
221
+
222
+ iterator = iterator + 1
223
+ completed_features = completed_features + 1
224
+ user_name = list(message.keys())[0]
225
+ Logger.logger.print_message(
226
+ "Added a message from user {} to data frame - {} messages of {} completed".format(user_name,
227
+ completed_features,
228
+ number_of_features),
229
+ logging_level=1)
230
+
231
+ Logger.logger.print_message("Total errors when creating data frame: {}".format(error_count),
232
+ logging_level=1)
233
+
234
+ # Replace boolean with float
235
+ df.replace({False: 0, True: 1}, inplace=True)
236
+
237
+ # Sets ID field
238
+ df.index.name = "ID"
239
+ df.to_csv("{}.csv".format(features_file))
240
+
241
+ else:
242
+ df = pandas.read_csv(csv_file)
243
+
244
+ return df
245
+
246
+ def create_model_info_output_file(self, location_of_output_file = None, training_data_csv_location = None):
247
+ """
248
+ If the model has been loaded or trained this function will create a summary text file with information relating to
249
+ the model.
250
+ :param location_of_output_file: The location to save the output file to.
251
+ :param training_data_csv_location: The location of the training data csv. This is used to retrieve the name of the
252
+ feature columns.
253
+ """
254
+
255
+ # Check if model has been created
256
+ if not self.creation_date:
257
+ Logger.logger.print_message("Model has not been trained, created, or loaded. Cannot output model data in this state.",logging_level=1)
258
+ else:
259
+ Logger.logger.print_message("Creating model info text file")
260
+ output_text = ""
261
+
262
+ # Add summary information
263
+ output_text += "Model {}, version {}, created at {} \n".format(self.original_name, self.model_version, self.creation_date)
264
+ output_text += "\nAccuracy: {}\nRecall: {} \nPrecision: {}\nF-Measure: {}\n".format(self.accuracy, self.recall,
265
+ self.precision, self.f_measure)
266
+
267
+ # Retrieve the header names if available
268
+ if training_data_csv_location:
269
+ with open(training_data_csv_location, "r") as csv_file:
270
+ reader = csv.reader(csv_file)
271
+ headers = next(reader)
272
+
273
+ # Loop through all feature importance scores
274
+ for iterator in range(len(self.model.feature_importances_)):
275
+ if training_data_csv_location:
276
+ # Plus one to ignore ID field
277
+ output_text += "\n{}: {}".format(headers[iterator+1], self.model.feature_importances_[iterator])
278
+ else:
279
+ output_text += "\nFeature {}: {}".format(iterator,self.model.feature_importances_[iterator])
280
+
281
+ # If no name has been set write to outputs folder
282
+ if location_of_output_file:
283
+ file_name = location_of_output_file
284
+ else:
285
+ file_name = os.path.join(self._outputs_folder,"model-output-{}.txt".format(datetime.today().strftime('%Y-%m-%d-%H%M%S')))
286
+
287
+ # Write to file
288
+ with open(file_name, "w") as output_file:
289
+ output_file.write(output_text)
290
+
291
+ def train_model(self, features_file, force_new_dataset=True, model_location=None):
292
+ """
293
+ Trains the model on the provided data, unless a model file already exists and the force new dataset flag is False.
294
+ :param features_file: the location of the feature file to be used to train the model
295
+ :param force_new_dataset: if True, a new dataset and a new model are created even if a model file already exists.
296
+ :param model_location: the location to save the model file to
297
+ """
298
+
299
+ # Sets model location based on default folder location and placeholder name if none was given
300
+ if model_location is None:
301
+ model_location = os.path.join(self._model_folder, "predictor.model")
302
+
303
+ # Create a new model if forced to, or if no model file exists at the given location
304
+ if force_new_dataset or not os.path.isfile(model_location):
305
+
306
+ # Load the features as a data frame
307
+ feature_data = self.get_features_as_df(features_file, force_new_dataset)
308
+
309
+ # Removes index column
310
+ if "ID" in feature_data.keys():
311
+ feature_data.drop(feature_data.columns[0], axis=1, inplace=True)
312
+ feature_data.reset_index(drop=True, inplace=True)
313
+
314
+ y = feature_data[['is_extremist']] # Labels
315
+ X = feature_data.drop(axis=1, labels=['is_extremist']) # Features
316
+
317
+ # Split dataset into training set and test set
318
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) # 80% training and 20% test
319
+
320
+ # Create a random forest classifier
321
+ random_forest = RandomForestClassifier(n_estimators=100, max_depth=50, oob_score=True
322
+ ) # class_weight={0:1,1:5} # a higher weight for the minority class (is_extremist)
323
+
324
+ # Train the model using the training set
325
+ random_forest.fit(X_train, y_train.values.ravel())
326
+
327
+ y_pred = random_forest.predict(X_test)
328
+
329
+ # Model Accuracy, how often is the classifier correct?
330
+ self.accuracy = metrics.accuracy_score(y_test, y_pred)
331
+ self.recall = metrics.recall_score(y_test, y_pred)
332
+ self.precision = metrics.precision_score(y_test, y_pred)
333
+ self.f_measure = metrics.f1_score(y_test, y_pred)
334
+
335
+ Logger.logger.print_message("Accuracy: {}".format(self.accuracy), logging_level=1)
336
+ Logger.logger.print_message("Recall: {}".format(self.recall), logging_level=1)
337
+ Logger.logger.print_message("Precision: {}".format(self.precision), logging_level=1)
338
+ Logger.logger.print_message("F-Measure: {}".format(self.f_measure), logging_level=1)
339
+
340
+ self.model = random_forest
341
+ self.original_name = model_location
342
+ self.creation_date = datetime.today().strftime('%Y-%m-%d')
343
+
344
+ # Write the model and its accuracy metrics to file
345
+ model_data = {"model": self.model,
346
+ "original_name": self.original_name,
347
+ "creation_date": self.creation_date,
348
+ "accuracy": self.accuracy,
349
+ "recall": self.recall,
350
+ "precision": self.precision,
351
+ "f1": self.f_measure,
352
+ "version": self._FRAMEWORK_VERSION
353
+ }
354
+
355
+ pickle.dump(model_data, open(model_location, "wb"))
356
+
357
+ else:
358
+ # Read model and accuracy from file
359
+ saved_file = pickle.load(open(model_location, "rb"))
360
+
361
+ self.accuracy = saved_file["accuracy"]
362
+ self.recall = saved_file["recall"]
363
+ self.precision = saved_file["precision"]
364
+ self.f_measure = saved_file["f1"]
365
+ self.model = saved_file["model"]
366
+ self.model_version = saved_file["version"]
367
+ self.original_name = saved_file["original_name"]
368
+ self.creation_date = saved_file["creation_date"]
369
+
370
+ # A check to identify if the loaded model is of the same version as the tooling
371
+ if self.model_version != self._FRAMEWORK_VERSION:
372
+ Logger.logger.print_message("Model provided is of version {}, tooling is of "
373
+ "version {}. Using the model may not work as expected."
374
+ .format(self.model_version, self._FRAMEWORK_VERSION))
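The metadata written by train_model() is a plain pickled dictionary, so a saved model can also be inspected outside the framework. A minimal sketch (not part of this commit; the model path is illustrative only), assuming the keys written above:

import pickle

with open("model/predictor.model", "rb") as model_file:  # hypothetical path
    model_data = pickle.load(model_file)

print(model_data["original_name"], model_data["creation_date"], model_data["version"])
print("accuracy:", model_data["accuracy"], "f1:", model_data["f1"])
classifier = model_data["model"]  # the trained RandomForestClassifier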
Pinpoint_Internal/Sanitizer.py ADDED
@@ -0,0 +1,131 @@
1
+ import os.path
2
+
3
+ from nltk import *
4
+ from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
5
+
6
+ from Pinpoint_Internal.Logger import *
7
+
8
+ # If NLTK data doesn't exist, downloads it
9
+ try:
10
+ tagged = pos_tag(["test"])
11
+ except LookupError:
12
+ download()
13
+
14
+
15
+ # nltk.download() #todo how to get this to run once?
16
+
17
+ class sanitization():
18
+ """
19
+ This class is used to sanitize a given corpus of data: removing stop words, stemming words, removing short
20
+ words, removing non-alphabetic characters, and lower-casing words. To save on repeat runs, a local copy of the
21
+ sanitized corpus is saved and reused unless this behaviour is overridden.
22
+ """
23
+
24
+ def sanitize(self, text, output_folder, force_new_data_and_dont_persisit=False):
25
+ """
26
+ Entry function for sanitizing text
27
+ :param text:
28
+ :param force_new_data_and_dont_persisit:
29
+ :return: sanitized text
30
+ """
31
+ sanitize_file_name = os.path.join(output_folder, "sanitized_text.txt")
32
+ final_text = ""
33
+
34
+ # If a file exists don't sanitize given text
35
+ if os.path.isfile(sanitize_file_name) and not force_new_data_and_dont_persisit:
36
+ logger.print_message("Sanitized file exists. Using data")
37
+
38
+ with open(sanitize_file_name, 'r', encoding="utf8") as file_to_read:
39
+ final_text = file_to_read.read()
40
+
41
+ else:
42
+ total_words = len(text.split(" "))
43
+ number = 0
44
+ logger.print_message("Starting sanitization... {} words to go".format(total_words))
45
+ for word in text.split(" "):
46
+ number = number + 1
47
+ word = self.remove_non_alpha(word)
48
+ word = self.lower(word)
49
+ word = self.stemmer(word)
50
+ word = self.remove_stop_words(word)
51
+ word = self.remove_small_words(word)
52
+
53
+ if word is None:
54
+ continue
55
+
56
+ final_text = final_text + word + " "
57
+ logger.print_message("Completed {} of {} sanitized words".format(number, total_words))
58
+
59
+ final_text = final_text.replace("  ", " ")
60
+
61
+ if not force_new_data_and_dont_persisit:
62
+ with open(sanitize_file_name, 'w', encoding="utf8") as file_to_write:
63
+ file_to_write.write(final_text)
64
+
65
+ final_text = final_text.strip()
66
+ return final_text
67
+
68
+ def stemmer(self, word):
69
+ """
70
+ Get the stem of a word
71
+ :param word:
72
+ :return: the stemmed word, using the Porter stemmer
73
+ """
74
+
75
+ porter = PorterStemmer()
76
+
77
+ # todo: should another stemmer be assessed?
78
+ # lancaster = LancasterStemmer()
79
+ # stemmed_word = lancaster.stem(word)
80
+ stemmed_word = porter.stem(word)
81
+
82
+ return stemmed_word
83
+
84
+ def lower(self, word):
85
+ """
86
+ Get the lower-case representation of a word
87
+ :param word:
88
+ :return: the lowercase representation of the word
89
+ """
90
+ return word.lower()
91
+
92
+ def remove_stop_words(self, text):
93
+ """
94
+ Remove stop words
95
+ :param text:
96
+ :return: the text without stop words
97
+ """
98
+
99
+ text_without_stopwords = [word for word in text.split() if word not in ENGLISH_STOP_WORDS]
100
+
101
+ final_string = ""
102
+
103
+ for word in text_without_stopwords:
104
+ final_string = final_string + word + " "
105
+
106
+ return final_string
107
+
108
+ def remove_non_alpha(self, word):
109
+ """
110
+ Removes non-alphabetic characters (excluding spaces)
111
+ :param word:
112
+ :return: the word with non-alpha characters removed
113
+ """
114
+ word = word.replace("\n", " ").replace("\t", " ").replace("  ", " ")
115
+ regex = re.compile('[^a-zA-Z ]')
116
+
117
+ return regex.sub('', word)
118
+
119
+ def remove_small_words(self, word, length_to_remove_if_not_equal=4):
120
+ """
121
+ Removes words that are too short; by default, words of 3 characters or fewer are removed.
122
+ :param word:
123
+ :param length_to_remove_if_not_equal:
124
+ :return: "" if the word is shorter than the threshold, otherwise the word unchanged
125
+ """
126
+
127
+ new_word = ""
128
+ if len(word) >= length_to_remove_if_not_equal:
129
+ new_word = word
130
+
131
+ return new_word
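A minimal usage sketch for the sanitizer (not part of this commit; the input string and output folder are illustrative). With force_new_data_and_dont_persisit=True no cache file is read or written:

from Pinpoint_Internal.Sanitizer import sanitization

cleaner = sanitization()
clean_text = cleaner.sanitize("The quick brown foxes were running!", "outputs",
                              force_new_data_and_dont_persisit=True)
print(clean_text)  # lower-cased, stemmed text with stop words and short words removed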
Pinpoint_Internal/Serializer.py ADDED
@@ -0,0 +1,20 @@
1
+ # todo: this file should store the common serialisations used when aggregating data
2
+
3
+ def createPostDict(date, post_text, likes, comments, shares, source="self"):
4
+ '''
5
+ Creates a dictionary containing the pertinent information from a social media post. This should later be added to a list
6
+ of other posts from that account and then added to a master dictionary.
7
+ :param date:
8
+ :param post_text:
9
+ :param likes:
10
+ :param comments:
11
+ :param shares:
12
+ :param source:
13
+ :return: a dictionary containing pertinent post information
14
+ '''
15
+ return {"text": post_text, "likes": likes, "comments": comments, "shares": shares, "source": source, "date": date}
16
+
17
+
18
+ def createWholeUserDict(unique_id, reddit_list, instagram_list, twitter_list, survey_data):
19
+ return {"id": unique_id, "reddit": reddit_list, "instagram": instagram_list, "twitter": twitter_list,
20
+ "survey": survey_data}
Pinpoint_Internal/Twitter_api.py ADDED
@@ -0,0 +1,215 @@
1
+ import datetime
2
+ import re
3
+ import sys
4
+ import time
5
+
6
+ import tweepy
7
+
8
+ from Pinpoint_Internal import Serializer
+ from Pinpoint_Internal.ConfigManager import ConfigManager
9
+
10
+
11
+ class Twitter:
12
+ '''
13
+ Twitter aggregator class
14
+ '''
15
+ tweepy_api = None
16
+
17
+ def __init__(self):
18
+ '''
19
+ Constructor
20
+ '''
21
+
22
+ twitter_config = ConfigManager.getTwitterConfig()
23
+ consumer_key = twitter_config["consumer_key"]
24
+ consumer_secret = twitter_config["consumer_secret"]
25
+ access_token = twitter_config["access_token"]
26
+ access_token_secret = twitter_config["access_token_secret"]
27
+
28
+ auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
29
+ auth.set_access_token(access_token, access_token_secret)
30
+ self.tweepy_api = tweepy.API(auth)
31
+
32
+ def get_tweet(self, tweet_info, attempts=1):
33
+ '''
34
+ returns a list of up to two tweets. This is because the provided tweet could be a quoted tweet. If this is the case
35
+ we take that as two separate tweets. Otherwise one tweet is returned with the necessary data extracted.
36
+ :param tweet_info:
37
+ :return: a list of up to two tweets with the necessary data extracted as defined in the serializer.
38
+ '''
39
+
40
+ # If we've received several errors in a row then it's probably not going to fix itself.
41
+ if attempts > 5:
42
+ return []
43
+
44
+ list_of_tweets = []
45
+ tweet = None
46
+
47
+ try:
48
+
49
+ retweets = tweet_info.retweet_count
50
+ likes = tweet_info.favorite_count
51
+ date = tweet_info.created_at.timestamp()
52
+
53
+ # Gets full tweet if normal tweet or re-tweet
54
+ if tweet_info.retweeted:
55
+ try:
56
+ tweet = tweet_info.retweeted_status.full_text
57
+ retweets = tweet_info.retweeted_status.retweet_count
58
+ likes = tweet_info.retweeted_status.favorite_count
59
+ tweet_info = self.tweepy_api.get_status(id=tweet_info.id, tweet_mode='extended')
60
+
61
+ # Gets author of tweet
62
+ source = tweet_info.full_text.split(":", 1)[0]
63
+ regex = r"RT @(.+)"
64
+ matchObj = re.match(regex, source)
65
+
66
+ if matchObj:
67
+ source = matchObj.group(1)
68
+ else:
69
+ source = "self"
70
+ except AttributeError as e:
71
+ print(e)
72
+ pass
73
+
74
+ else:
75
+ # Gets full tweet and sets author to self
76
+ tweet = tweet_info.full_text
77
+ source = "self"
78
+
79
+ # For quote retweets we take the quoted tweet and the parent tweet as two separate tweets.
80
+
81
+ if tweet_info.is_quote_status:
82
+ try:
83
+ quoted_id = tweet_info.quoted_status_id
84
+ quoted_tweet_info = self.tweepy_api.get_status(id=quoted_id, tweet_mode='extended')
85
+
86
+ quoted_tweet_text = quoted_tweet_info.full_text
87
+ quoted_source = quoted_tweet_info.user.name
88
+ quoted_retweets = quoted_tweet_info.retweet_count
89
+ quoted_likes = quoted_tweet_info.favorite_count
90
+ quoted_date = quoted_tweet_info.created_at.timestamp()
91
+
92
+ # As this function can return two tweets (i.e. a quoted tweet and normal tweet) the tweets are added to a list
93
+ list_of_tweets.append(
94
+ Serializer.createPostDict(date=quoted_date, post_text=quoted_tweet_text, likes=quoted_likes,
95
+ comments='', shares=quoted_retweets, source=quoted_source))
96
+ except AttributeError as e:
97
+ print("Tweepy Twitter api error. On attempt {} \n {}".format(attempts, e))
98
+ pass
99
+
100
+ # As this function can return two tweets (i.e. a quoted tweet and normal tweet) the tweets are added to a list
101
+
102
+ if tweet is not None:
103
+ list_of_tweets.append(
104
+ Serializer.createPostDict(date=date, post_text=tweet, likes=likes, comments='', shares=retweets,
105
+ source=source))
106
+
107
+ except tweepy.RateLimitError as e:
108
+ print("Tweepy Twitter api rate limit reached. On attempt {} \n {}".format(attempts, e))
109
+ time.sleep(300)
110
+ return self.get_tweet(tweet_info, attempts + 1) # if error, try again.
111
+
112
+ except tweepy.TweepError as e:
113
+ print("Tweepy Twitter api error. On attempt {} \n {}".format(attempts, e))
114
+ pass
115
+
116
+ return list_of_tweets
117
+
118
+ def get_posts(self, username, attempts=1):
119
+ '''
120
+ Loops through all tweets for the provided user
121
+ :param username:
122
+ :return: a list of serialised tweets
123
+ '''
124
+
125
+ # If a participant has entered their username with spaces in error, this removes them.
126
+ username = username.replace(" ", "")
127
+
128
+ # Checks attempts. If exceeded return empty list.
129
+ if attempts > 3:
130
+ return []
131
+
132
+ list_of_tweets = []
133
+
134
+ # If an @ symbol has been added to the string then it's removed.
135
+ if str(username).startswith("@"):
136
+ username = username[1:]
137
+
138
+ try:
139
+ for tweet_info in tweepy.Cursor(self.tweepy_api.user_timeline, id=username, tweet_mode='extended').items():
140
+ # As this function can return two tweets (i.e. a quoted tweet and normal tweet) the tweets are added to a list
141
+ list_of_tweets = list_of_tweets + self.get_tweet(tweet_info)
142
+
143
+ except tweepy.error.TweepError as e:
144
+ print("Tweepy Twitter api error on user {}. On Attempt {} .\n {}".format(username, attempts, e))
145
+ time.sleep(300)
146
+ return self.get_posts(username, sys.maxsize) # Unlikely to be an error that can be fixed by waiting
147
+
148
+ return list_of_tweets
149
+
150
+ def get_user(self, user_name):
151
+ """
152
+ Gets a Tweepy user object for a given user name
153
+ :param user_name: a string representation of a Twitter username
154
+ :return: a Tweepy user object, None if no user found
155
+ """
156
+
157
+ user = None
158
+
159
+ try:
160
+ user = self.tweepy_api.get_user(user_name)
161
+ except:
162
+ pass
163
+
164
+ return user
165
+
166
+ def is_valid_user(self, user_name):
167
+
168
+ """
169
+ Gets a Tweepy user object for a given user name
170
+ :param user_name: a string representation of a Twitter username
171
+ :return: None if doesn't exist or suspended, user object if valid.
172
+ """
173
+
174
+ user = None
175
+
176
+ try:
177
+ user = self.tweepy_api.get_user(user_name)
178
+ if user.suspended:
179
+ user = None
180
+ except:
181
+ pass
182
+
183
+ return user
184
+
185
+ def get_user_post_frequency(self, user_name):
186
+ """
187
+ A utility function used to retrieve a user's post frequency (posts per day since account creation)
188
+ :param user_name:
189
+ :return:
190
+ """
191
+ user = self.tweepy_api.get_user(user_name)
192
+
193
+ created_at_time = user.created_at
194
+ number_of_posts = user.statuses_count
195
+
196
+ current_date = datetime.datetime.now()
197
+ elapse_time = current_date - created_at_time
198
+
199
+ frequency = number_of_posts / elapse_time.days
200
+
201
+ return frequency
202
+
203
+ def get_follower_following_frequency(self, user_name):
204
+ """
205
+ A utility function used to retrieve a user's following/follower ratio
206
+ :param user_name:
207
+ :return:
208
+ """
209
+ user = self.tweepy_api.get_user(user_name)
210
+ followers_count = user.followers_count
211
+ following_count = user.friends_count
212
+
213
+ ratio = following_count / followers_count
214
+
215
+ return ratio
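A minimal usage sketch for the Twitter aggregator (not part of this commit). It assumes valid API credentials are returned by ConfigManager.getTwitterConfig(); the username is illustrative:

from Pinpoint_Internal.Twitter_api import Twitter

twitter = Twitter()
if twitter.is_valid_user("example_user"):
    tweets = twitter.get_posts("example_user")  # list of serialised post dictionaries
    print("Retrieved {} tweets".format(len(tweets)))
    print("Posts per day:", twitter.get_user_post_frequency("example_user"))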
Pinpoint_Internal/__pycache__/Aggregator_NGram.cpython-38.pyc ADDED
Binary file (3.11 kB). View file
 
Pinpoint_Internal/__pycache__/Aggregator_TfIdf.cpython-38.pyc ADDED
Binary file (1.72 kB). View file
 
Pinpoint_Internal/__pycache__/Aggregator_Word2Vec.cpython-38.pyc ADDED
Binary file (998 Bytes). View file
 
Pinpoint_Internal/__pycache__/Aggregator_WordingChoice.cpython-38.pyc ADDED
Binary file (1.81 kB). View file
 
Pinpoint_Internal/__pycache__/ConfigManager.cpython-38.pyc ADDED
Binary file (946 Bytes). View file
 
Pinpoint_Internal/__pycache__/FeatureExtraction.cpython-38.pyc ADDED
Binary file (19.5 kB). View file
 
Pinpoint_Internal/__pycache__/Grapher.cpython-38.pyc ADDED
Binary file (2.14 kB). View file
 
Pinpoint_Internal/__pycache__/Logger.cpython-38.pyc ADDED
Binary file (1.04 kB). View file
 
Pinpoint_Internal/__pycache__/RandomForest.cpython-38.pyc ADDED
Binary file (8.01 kB). View file
 
Pinpoint_Internal/__pycache__/Sanitizer.cpython-38.pyc ADDED
Binary file (3.95 kB). View file
 
Pinpoint_Internal/__pycache__/Twitter_api.cpython-38.pyc ADDED
Binary file (5.21 kB). View file
 
Pinpoint_Internal/centrality-v2.py ADDED
@@ -0,0 +1,325 @@
1
+ import itertools
2
+ import os
3
+ import pickle
4
+ import re
5
+ from operator import itemgetter
6
+
7
+ import easy_db
8
+ from pprint import pprint
9
+ import json
10
+ import networkx as nx
11
+ from Pinpoint_Internal.RandomForest import *
12
+ import Pinpoint_Internal.FeatureExtraction
13
+ import csv
+ from igraph import Graph # the grapher class below uses the python-igraph Graph API
14
+
15
+ db_path = "../new-new-just-posts-and-clean-dates-parler-messages.db"
16
+
17
+ log_file = open("community_logs.txt", 'w')
18
+ log_file.write("")
19
+ log_file.close()
20
+
21
+ used_names = []
22
+
23
+ SHOULD_WRITE_CSVS = False
24
+
25
+ class grapher():
26
+ """
27
+ A wrapper class used for generating a graph for interactions between users
28
+ """
29
+ graph = None
30
+
31
+ def __init__(self):
32
+ """
33
+ Constructor.
34
+ """
35
+ self.graph = Graph()
36
+
37
+ def add_edge_wrapper(self, node_1_name, node_2_name, weight=1, relationship=None):
38
+ """
39
+ A wrapper function used to add an edge connection or node.
40
+ :param node_1_name: from
41
+ :param node_2_name: to
42
+ :param weight:
43
+ :param relationship:
44
+ :return:
45
+ """
46
+
47
+ # get node one ID
48
+
49
+ node_1 = None
50
+ for node in self.graph.vs:
51
+ if node["label"] == node_1_name.capitalize():
52
+ node_1 = node
53
+
54
+ if node_1 == None:
55
+ self.graph.add_vertices(1)
56
+ node_count = self.graph.vcount()
57
+ self.graph.vs[node_count-1]["id"] = node_count-1
58
+ self.graph.vs[node_count-1]["label"] = node_1_name.capitalize()
59
+ node_1 = self.graph.vs[node_count-1]
60
+
61
+ # get node two id
62
+ node_2 = None
63
+ for node in self.graph.vs:
64
+ if node["label"] == node_2_name.capitalize():
65
+ node_2 = node
66
+
67
+ if node_2 == None:
68
+ self.graph.add_vertices(1)
69
+ node_count = self.graph.vcount()
70
+ self.graph.vs[node_count - 1]["id"] = node_count - 1
71
+ self.graph.vs[node_count - 1]["label"] = node_2_name.capitalize()
72
+ node_2 = self.graph.vs[node_count - 1]
73
+
74
+
75
+
76
+ #print("User one {} - {}, user two {} - {}".format(node_1["label"], str(node_1["id"]),
77
+ # node_2["label"], str(node_2["id"])))
78
+ self.graph.add_edges([(node_1["id"], node_2["id"])])
79
+ #self.graph.add_edge(node_1_name, node_2_name, weight=weight, relation=relationship) # , attr={""}
80
+
81
+ def add_node(self, node_name):
82
+ """
83
+ A wrapper function that adds a node with no edges to the graph
84
+ :param node_name:
85
+ """
86
+
87
+ node_1 = None
88
+ for node in self.graph.vs:
89
+ if node["label"] == node_name.capitalize():
90
+ node_1 = node["id"]
91
+
92
+ if node_1 == None:
93
+ self.graph.add_vertices(1)
94
+ node_count = self.graph.vcount()
95
+ self.graph.vs[node_count-1]["id"] = node_count-1
96
+ self.graph.vs[node_count-1]["label"] = node_name.capitalize()
97
+ node_1 = self.graph.vs[node_count-1]
98
+
99
+
100
+ def get_database(where=None):
101
+ #print(where)
102
+ message_db = easy_db.DataBase(db_path)
103
+ if where is None:
104
+ return message_db.pull("parler_messages")
105
+ else:
106
+ return message_db.pull_where("parler_messages", where)
107
+
108
+ def get_mentioned_usernames_from_post(post):
109
+ # Process mentions
110
+ mentions = re.findall("\@([a-zA-Z\-\_]+)", post)
111
+
112
+ sanitised_list = []
113
+
114
+ for mention in mentions:
115
+ mention = mention.replace("@", "")
116
+ sanitised_list.append(mention)
117
+
118
+ return sanitised_list
119
+
120
+ def get_rows_from_csv_where_field_is(csv_name, username, month):
121
+ rows = []
122
+ with open(csv_name, 'rt', encoding="utf8") as f:
123
+ for row in csv.DictReader(f, fieldnames=["A","B","C","WC","Analytic","Clout","Authentic","Tone","WPS","Sixltr",
124
+ "Dic","function","pronoun","ppron","i","we","you","shehe","they","ipron",
125
+ "article","prep","auxverb","adverb","conj","negate","verb","adj","compare",
126
+ "interrog","number","quant","affect","posemo","negemo","anx","anger","sad",
127
+ "social","family","friend","female","male","cogproc","insight","cause","discrep",
128
+ "tentat","certain","differ","percept","see","hear","feel","bio","body","health",
129
+ "sexual","ingest","drives","affiliation","achieve","power","reward","risk",
130
+ "focuspast","focuspresent","focusfuture","relativ","motion","space","time","work",
131
+ "leisure","home","money","relig","death","informal","swear","netspeak","assent",
132
+ "nonflu","filler","AllPunc","Period","Comma","Colon","SemiC","QMark","Exclam",
133
+ "Dash","Quote","Apostro","Parenth","OtherP"]):
134
+
135
+ if username.strip().lower() in row["A"].strip().lower() \
136
+ and month.strip().lower() in row["B"].strip().lower():
137
+ rows.append(row)
138
+
139
+ return rows
140
+
141
+
142
+ month_graphs = {}
143
+
144
+ year_range = list(range(2017, 2022))
145
+ month_range = list(range(1, 13))
146
+
147
+ INITIAL_COMMUNITIES_FILE_NAME = "phase_one_communities_file.pickle"
148
+ SECOND_COMMUNITIES_FILE_NAME = "phase_two_communities_file.pickle"
149
+
150
+
151
+ print("Loading old {} file".format(INITIAL_COMMUNITIES_FILE_NAME))
152
+ pickle_file = open(INITIAL_COMMUNITIES_FILE_NAME, "rb")
153
+ month_graphs = pickle.load(pickle_file)
154
+ pickle_file.close()
155
+ print("loaded...")
156
+ # Get communities
157
+ month_graph_keys = list(month_graphs.keys())
158
+ month_graph_keys.sort()
159
+
160
+ list_of_community_objects = []
161
+
162
+ # get top 10 centrality users per month of parler
163
+ if not os.path.isfile(SECOND_COMMUNITIES_FILE_NAME):
164
+
165
+
166
+ dict_of_centrality_per_month = {}
167
+ dict_of_user_count_per_month = {}
168
+ dict_of_shrinkage = {}
169
+
170
+ total_unique_user_list = []
171
+ total_users = []
172
+
173
+ highest_centrality = 0
174
+ highest_centrality_user = None
175
+ date_of_highest_centrality = None
176
+
177
+ dict_of_messages = {}
178
+ number_of_users_dict = {}
179
+ highest_number_of_users = 0
180
+ highest_number_of_users_month = None
181
+
182
+ shrinkage_per_month = {}
183
+ last_month = None
184
+
185
+ all_months_centality = {}
186
+ all_centralities = {}
187
+ for month_key in month_graph_keys:
188
+ print("Reviewing graph for date '{}'".format(month_key))
189
+ graph = month_graphs[month_key].graph
190
+
191
+ user_nodes = graph.nodes.keys()
192
+ print("users {}".format(len(user_nodes)))
193
+ centrality_for_month = {}
194
+ iterator = 0
195
+
196
+ centrality_for_month = nx.degree_centrality(graph)
197
+ all_centralities[month_key] = centrality_for_month
198
+ # sort
199
+ if len(centrality_for_month) > 0:
200
+ sorted_list = sorted(centrality_for_month, key=centrality_for_month.get, reverse=True)[:10]
201
+ all_months_centality[month_key] = sorted_list
202
+
203
+ unique_users = {}
204
+ for month in all_months_centality:
205
+ for user in all_months_centality[month]:
206
+ if user not in unique_users.keys():
207
+ unique_users[user] = [{"month":month, "centrality":all_centralities[month][user]}]
208
+ else:
209
+ unique_users[user].append({"month":month, "centrality":all_centralities[month][user]})
210
+ pprint(unique_users)
211
+
212
+ # write to csv
213
+ if SHOULD_WRITE_CSVS:
214
+ seen_users = []
215
+ with open('all-messages.json.csv', 'w', encoding='utf8', newline='') as output_file:
216
+ writer = csv.DictWriter(output_file,fieldnames=["username","timestamp","message"])
217
+
218
+ for month in all_months_centality:
219
+ graph = month_graphs[month]
220
+ for user in all_months_centality[month]:
221
+ if user not in seen_users:
222
+ seen_users.append(user)
223
+ # get from database where username == user and month == month
224
+ # loop through messages.
225
+ # if above threshold is extremist.
226
+
227
+ if user != "-":
228
+ print("getting posts for user '{}'".format(user))
229
+ posts = get_database("username='{}' COLLATE NOCASE".format(user))
230
+ print("Posts found: {}".format(len(posts)))
231
+ if posts == None:
232
+ raise Exception("no posts, 'where' failed")
233
+ for post in posts:
234
+ #users_mentioned = get_mentioned_usernames_from_post(post["body"])
235
+ writer.writerow({"username": post["username"], "timestamp": post["Time"], "message": post["body"]})
236
+
237
+ model = random_forest()
238
+ model.train_model(features_file = None, force_new_dataset=False, model_location=r"far-right-baseline.model")
239
+ dict_of_users_all = {}
240
+ feature_extractor = Pinpoint_Internal.FeatureExtraction.feature_extraction(violent_words_dataset_location="swears", baseline_training_dataset_location="data/LIWC2015 Results (Storm_Front_Posts).csv")
241
+
242
+
243
+
244
+
245
+ # Get the is-extremist score for users for the month they were in the highest centrality
246
+ for month in all_months_centality:
247
+ for user in all_months_centality[month]:
248
+ print("Getting data for user {} and month {}".format(user, month))
249
+
250
+ # Get rows for this user and month
251
+ rows = get_rows_from_csv_where_field_is("data/LIWC2015 Results (all-messages.csv).csv", user, month)
252
+ # write these to a new (temp) csv
253
+
254
+ pprint(rows)
255
+
256
+ if len(rows) <= 1:
257
+ print("Not enough rows for {} {}".format(user, month))
258
+ continue
259
+
260
+ keys = rows[0].keys()
261
+
262
+ with open('temp.csv', 'w', newline='', encoding='utf8') as output_file:
263
+ dict_writer = csv.DictWriter(output_file, keys)
264
+ dict_writer.writeheader()
265
+ dict_writer.writerows(rows)
266
+
267
+ feature_extractor._reset_stored_feature_data()
268
+ feature_extractor._get_type_of_message_data(data_set_location="temp.csv")
269
+ with open("messages.json", 'w') as outfile:
270
+ json.dump(feature_extractor.completed_tweet_user_features, outfile, indent=4)
271
+ rows = model.get_features_as_df("messages.json", True)
272
+
273
+ print("Length of rows returned: {}".format(len(rows)))
274
+
275
+ number_of_connections = 0
276
+ number_of_connections_extremist = 0
277
+
278
+ is_extemist_count = 0
279
+ for row in rows:
280
+ post = row["C"]
281
+
282
+ is_extremist = model.model.predict(post)
283
+ print("Post '{}...' is extremist: {}".format(post[:20], is_extremist))
284
+ if is_extremist:
285
+ is_extemist_count = is_extemist_count+1
286
+
287
+ # If we were to include mentioned users we'd need to mark them up with LIWC again. Could the less reliable version without LIWC be used?
288
+ if is_extemist_count != 0:
289
+ percentage_extremist = is_extemist_count / len(rows)
290
+ else:
291
+ percentage_extremist = 0
292
+
293
+ if user not in dict_of_users_all:
294
+ dict_of_users_all[user] = {"months":{}}
295
+
296
+ if "months" in dict_of_users_all[user].keys():
297
+ dict_of_users_all[user]["months"][month] = percentage_extremist
298
+
299
+
300
+
301
+ with open('data.json', 'w') as fp:
302
+ json.dump(dict_of_users_all, fp)
303
+
304
+ # mark up csv with LIWC scores.
305
+
306
+ # number of unique users. manual 100 max (less users), otherwise doesn't really matter.
307
+ # classed as radicalised? Look at the accounts and posts, what are they up to over time.
308
+ # are any posts far right, mostly extremist material,
309
+ # when looking at connections - apply the same above. at time period on mention and overall.
310
+
311
+ # create the csv writer
312
+
313
+ # when have they been active, what monts are they extremist, how often, common words or phrases, etc
314
+
315
+
316
+ '''users_of_interest[user] = {
317
+ "centrality": month[user],
318
+ "is_extremist":,
319
+ "is_connections_extremist":,
320
+ }
321
+ '''
322
+
323
+ # radicalisation window?
324
+ # use high centrality users that are extremist
325
+ # look at the work.
Pinpoint_Internal/far-right-core.py ADDED
@@ -0,0 +1,65 @@
1
+ """
2
+ Example of training a model using this package.
3
+ """
4
+
5
+ from Pinpoint.FeatureExtraction import *
6
+ from Pinpoint.RandomForest import *
7
+
8
+ # Performs feature extraction from the provided Extremist, Counterpoise, and Baseline datasets.
9
+ extractor = feature_extraction(violent_words_dataset_location=r"datasets/swears",
10
+ baseline_training_dataset_location=r"datasets/far-right/LIWC2015 Results (Storm_Front_Posts).csv")
11
+
12
+ extractor.MAX_RECORD_SIZE = 50000
13
+
14
+ extractor.dump_training_data_features(
15
+ feature_file_path_to_save_to=r"outputs/training_features.json",
16
+ extremist_data_location=r"datasets/far-right/LIWC2015 Results (extreamist-messages.csv).csv",
17
+ baseline_data_location=r"datasets/far-right/LIWC2015 Results (non-extreamist-messages.csv).csv")
18
+
19
+ # Trains a model off the features file created in the previous stage
20
+ model = random_forest()
21
+
22
+ model.RADICAL_LANGUAGE_ENABLED = True
23
+ model.BEHAVIOURAL_FEATURES_ENABLED = True
24
+ model.PSYCHOLOGICAL_SIGNALS_ENABLED = True
25
+
26
+ model.train_model(features_file= r"outputs/training_features.json",
27
+ force_new_dataset=True, model_location=r"outputs/far-right-radical-language.model") # , model_location=r"Pinpoint/model/my.model"
28
+
29
+ model.create_model_info_output_file(location_of_output_file="outputs/far-right-radical-language-output.txt",
30
+ training_data_csv_location=r"outputs/training_features.json.csv")
31
+
32
+ #############################################################################################
33
+ model.RADICAL_LANGUAGE_ENABLED = False
34
+ model.BEHAVIOURAL_FEATURES_ENABLED = True
35
+ model.PSYCHOLOGICAL_SIGNALS_ENABLED = False
36
+
37
+ model.train_model(features_file= r"outputs/training_features.json",
38
+ force_new_dataset=True, model_location=r"outputs/far-right-behavioural.model") # , model_location=r"Pinpoint/model/my.model"
39
+
40
+ model.create_model_info_output_file(location_of_output_file="outputs/far-right-behavioural-output.txt",
41
+ training_data_csv_location=r"outputs/training_features.json.csv")
42
+
43
+ ############################################################################
44
+ model.RADICAL_LANGUAGE_ENABLED = False
45
+ model.BEHAVIOURAL_FEATURES_ENABLED = False
46
+ model.PSYCHOLOGICAL_SIGNALS_ENABLED = True
47
+
48
+ model.train_model(features_file= r"outputs/training_features.json",
49
+ force_new_dataset=True, model_location=r"outputs/far-right-psychological.model") # , model_location=r"Pinpoint/model/my.model"
50
+
51
+ model.create_model_info_output_file(location_of_output_file="outputs/far-right-psychological-output.txt",
52
+ training_data_csv_location=r"outputs/training_features.json.csv")
53
+
54
+ ##############################################################################################
55
+ model.RADICAL_LANGUAGE_ENABLED = True
56
+ model.BEHAVIOURAL_FEATURES_ENABLED = False
57
+ model.PSYCHOLOGICAL_SIGNALS_ENABLED = False
58
+
59
+ model.train_model(features_file= r"outputs/training_features.json",
60
+ force_new_dataset=True, model_location=r"outputs/far-right-baseline.model") # , model_location=r"Pinpoint/model/my.model"
61
+
62
+ model.create_model_info_output_file(location_of_output_file="outputs/far-right-baseline-output.txt",
63
+ training_data_csv_location=r"outputs/training_features.json.csv")
64
+
65
+ print("Finished")
README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Pinpoint Web
3
- emoji: 🐢
4
- colorFrom: red
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.0.20
8
- app_file: app.py
9
- pinned: false
10
- license: gpl-3.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py DELETED
@@ -1,22 +0,0 @@
1
- import gradio as gr
2
- import predictor
3
-
4
- def check_string(string_to_predict):
5
- try:
6
- is_extremist = predictor.predictor().predict(string_to_predict)
7
-
8
-
9
- if is_extremist:
10
- return "The message has been identified as potentially containing violent far-right extremist content."
11
- else:
12
- return "The message has been identified as not containing violent far-right extremist content."
13
- except FileNotFoundError as e:
14
- return "The message was not feature rich enough to identify, try something else. {}".format(e)
15
-
16
- demo = gr.Interface(
17
- fn=check_string,
18
- inputs=gr.Textbox(lines=2, placeholder="Text to predict here..."),
19
- outputs="text",
20
- )
21
-
22
- demo.launch()
far-right-radical-language.model DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:06e237fb2ff6e8e9eac7bd42273266e22c97d5c5b9ecf251adb37942ace4f6bb
3
- size 564085480
predictor.py DELETED
@@ -1,96 +0,0 @@
1
- import csv
2
- import time
3
- from pprint import pprint
4
- import Pinpoint_Internal.FeatureExtraction
5
- from Pinpoint_Internal.RandomForest import *
6
-
7
- class predictor():
8
-
9
- def __init__(self):
10
- self.model = random_forest()
11
- self.model.PSYCHOLOGICAL_SIGNALS_ENABLED = False # Needs LIWC markup
12
- self.model.BEHAVIOURAL_FEATURES_ENABLED = False
13
- self.model.train_model(features_file=None, force_new_dataset=False,
14
- model_location=r"far-right-radical-language.model")
15
- self.dict_of_users_all = {}
16
- self.feature_extractor = Pinpoint_Internal.FeatureExtraction.feature_extraction(
17
- violent_words_dataset_location="swears",
18
- baseline_training_dataset_location="LIWC2015 Results (Storm_Front_Posts).csv")
19
-
20
- def predict(self, string_to_predict):
21
- self.__init__()
22
- try:
23
- os.remove("./messages.json")
24
- except:
25
- pass
26
- try:
27
- os.remove("messages.json")
28
- except:
29
- pass
30
-
31
- try:
32
- os.remove("./all-messages.csv")
33
- except:
34
- pass
35
-
36
- users_posts = [{"username": "tmp", "timestamp": "tmp", "message": "{}".format(string_to_predict)}]
37
-
38
- with open('all-messages.csv', 'w', encoding='utf8', newline='') as output_file:
39
- writer = csv.DictWriter(output_file, fieldnames=["username", "timestamp", "message"])
40
- for users_post in users_posts:
41
- writer.writerow(users_post)
42
-
43
- self.feature_extractor._get_standard_tweets("all-messages.csv")
44
-
45
-
46
- with open("./messages.json", 'w') as outfile:
47
- features = self.feature_extractor.completed_tweet_user_features
48
-
49
- json.dump(features, outfile, indent=4)
50
-
51
- rows = self.model.get_features_as_df("./messages.json", True)
52
- rows.pop("is_extremist")
53
-
54
- iter = 0
55
-
56
- message_vector_list = []
57
-
58
- for user_iter in range(0, len(users_posts)):
59
- rows_as_json = json.loads(rows.iloc[iter].to_json())
60
-
61
- tmp = []
62
- for i in range(1, 201):
63
- vect_str = "message_vector_{}".format(str(i))
64
- vector = rows_as_json[vect_str]
65
- tmp.append(vector)
66
- message_vector_list.append(tmp)
67
-
68
- iter = iter + 1
69
-
70
- for row in users_posts:
71
- user = row["username"]
72
- timestamp = row["timestamp"]
73
- message = row["message"]
74
- user_unique_id = str(self.feature_extractor._get_unique_id_from_username(user))
75
-
76
- iter = 0
77
- user_found = False
78
- while not user_found:
79
- try:
80
- user_features = self.feature_extractor.completed_tweet_user_features[iter][user_unique_id]
81
- user_found = True
82
- break
83
- except KeyError as e:
84
- iter = iter + 1
85
-
86
- formated_vectors = [float('%.10f' % elem) for elem in user_features["message_vector"]]
87
- iter = 0
88
- for vector_list in message_vector_list:
89
-
90
- if message_vector_list[iter] == formated_vectors:
91
- is_extremist = self.model.model.predict([rows.iloc[iter]])
92
-
93
- if is_extremist == 1:
94
- return True
95
- else:
96
- return False
requirements.txt DELETED
@@ -1,8 +0,0 @@
1
- gensim
2
- networkx
3
- nltk
4
- numpy
5
- pandas
6
- scikit-learn
7
- scipy
8
- gradio