topic_model.py
# -*- coding: utf-8 -*-
"""Topic Model

Topic modeling with Gensim and scikit-learn.

This code is inspired by code written by:
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
# https://scikit-learn.org/

In addition, this code uses the changes made by:
Lucas Félix - lucasgsfelix
Vinícius Vieira - vfvieira
"""
import uuid
import gensim
import matplotlib.pyplot as plt
import nltk
from gensim import corpora
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sara.core.utils import create_path

# pylint:disable=too-many-locals
def plot_top_words(model, feature_names, number_topics, title, color):
    """Plot the top words of each topic as horizontal bar charts."""
    # Maps the number of topics to a (rows, columns) subplot grid.
    figure_distribution = {1: (1, 1), 2: (1, 2),
                           3: (1, 3), 4: (1, 4),
                           5: (1, 5), 6: (2, 3),
                           7: (2, 4), 8: (2, 4),
                           9: (2, 5), 10: (2, 5)}
    try:
        rows, columns = figure_distribution[number_topics]
    except KeyError as error:
        print(f"Error: {error}. The maximum number of topics to plot is 10; "
              f"the value passed was {number_topics}.")
        return
    fig, axes = plt.subplots(rows, columns, figsize=(30, 15), sharex=True)
    axes = axes.flatten()
    for topic_idx, topic in enumerate(model.components_):
        # Select the `number_topics` highest-weighted words, descending.
        top_features_ind = topic.argsort()[:-number_topics - 1:-1]
        top_features = [feature_names[i] for i in top_features_ind]
        weights = topic[top_features_ind]
        position = axes[topic_idx]
        position.barh(top_features, weights, height=0.7, color=color)
        position.set_title(f'Topic {topic_idx + 1}',
                           fontdict={'fontsize': 15})
        position.invert_yaxis()
        position.tick_params(axis='both', which='major', labelsize=12)
        for i in 'top right left'.split():
            position.spines[i].set_visible(False)
    fig.suptitle(title, fontsize=25)
    plt.subplots_adjust(top=0.90, bottom=0.05, wspace=0.90, hspace=0.3)
    # plt.show()
    create_path("resultados/topic_model/")
    save_name = f"resultados/topic_model/{title}_{uuid.uuid4().hex}.pdf"
    print(f"Saving file in the path {save_name}")
    fig.savefig(save_name)
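
# A usage sketch (hypothetical values): `model` is a fitted scikit-learn
# topic model (LatentDirichletAllocation or NMF) and `feature_names` comes
# from the matching vectorizer, e.g.:
#     lda, names = lda_scikit(corpus, n_components=4)
#     plot_top_words(lda, names, 4, "LDA topics", "steelblue")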

def _unpack_tupla(tupla):
    """Join a bigram tuple into a single space-separated string."""
    (word1, word2) = tupla
    return word1 + " " + word2
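
# For example, _unpack_tupla(("social", "media")) returns "social media".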

# pylint:disable=too-many-locals
def get_lda_topics(tweets, n_topics):
    """Extract LDA topics from a list of documents using Gensim."""
    docfinal = []
    # Tokenize each document and extract its bigrams.
    for doc in tweets:
        nltk_tokens = nltk.word_tokenize(doc)
        docfinal.append(list(nltk.bigrams(nltk_tokens)))
    l_final = []
    # Join each bigram tuple into a single token.
    for i in docfinal:
        lista = [_unpack_tupla(elementos) for elementos in i]
        l_final.append(lista)
    docfinal = l_final
    # Creating the term dictionary of our corpus.
    dictionary = corpora.Dictionary(docfinal)
    # Converting the list of documents (corpus) into a document-term matrix
    # using the dictionary prepared above.
    doc_term_matrix = [dictionary.doc2bow(doc) for doc in docfinal]
    # Creating and training the LDA model on the document-term matrix.
    ldamodel = gensim.models.ldamodel.LdaModel(
        corpus=doc_term_matrix, id2word=dictionary, num_topics=n_topics,
        random_state=1000, update_every=1, chunksize=1000,
        passes=100, alpha='auto', per_word_topics=True)
    topics = ldamodel.top_topics(corpus=doc_term_matrix,
                                 dictionary=dictionary, coherence='u_mass',
                                 topn=10, processes=-1)
    # Each entry of `topics` is a (word_distribution, coherence) pair;
    # keep only the word distributions and flatten them into one list.
    top_topics = [top[0] for top in topics]
    topics = []
    for l_topic in top_topics:
        for topic in l_topic:
            topics.append(topic)
    return topics, n_topics
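
# A usage sketch with a hypothetical toy corpus; nltk.word_tokenize may
# first require the 'punkt' tokenizer models (nltk.download('punkt')):
#     topics, n_topics = get_lda_topics(["good morning world",
#                                        "good morning twitter"], 2)
#     # `topics` is a flat list of (weight, word) pairs.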

def generate_tf_idf(corpus, n_features=1000, bigram=False):
    """Generate a TF-IDF bag-of-words representation."""
    ngram_interval = (1, 1)
    if bigram:
        # To use only bigrams, change this to (2, 2).
        ngram_interval = (1, 2)
    tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=1,
                                       max_features=n_features,
                                       ngram_range=ngram_interval)
    tfidf = tfidf_vectorizer.fit_transform(corpus)
    return tfidf_vectorizer, tfidf

def generate_tf(corpus, n_features=1000, bigram=False):
    """Generate a term-frequency (TF) vectorization."""
    # Set the n-gram range: unigrams only, or unigrams plus bigrams.
    ngram_interval = (1, 1)
    if bigram:
        ngram_interval = (1, 2)
    # Generate the term-frequency representation.
    tf_vectorizer = CountVectorizer(max_df=0.95, min_df=1,
                                    max_features=n_features,
                                    ngram_range=ngram_interval)
    term_frequency = tf_vectorizer.fit_transform(corpus)
    return tf_vectorizer, term_frequency
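
# Both vectorizer helpers follow the same pattern; a hypothetical call:
#     vectorizer, matrix = generate_tf(["a sample doc", "another doc"],
#                                      n_features=500, bigram=True)
# `matrix` is a sparse document-term matrix (n_documents x n_terms).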

def lda_scikit(corpus, n_components=10, bigram=False):
    """Run an LDA topic model using scikit-learn."""
    tf_vectorizer, term_frequency = generate_tf(corpus, bigram=bigram)
    lda = LatentDirichletAllocation(n_components=n_components, max_iter=10,
                                    learning_method='online',
                                    learning_offset=50,
                                    random_state=0)
    lda.fit(term_frequency)
    # get_feature_names() was removed in scikit-learn 1.2;
    # get_feature_names_out() is the current equivalent.
    tf_feature_names = tf_vectorizer.get_feature_names_out()
    return lda, tf_feature_names

def nmf_scikit(corpus, n_components=10, bigram=False):
    """Run an NMF topic model analysis using scikit-learn."""
    tfidf_vectorizer, tfidf = generate_tf_idf(corpus, bigram=bigram)
    # `alpha` was removed in scikit-learn 1.2; `alpha_W` (together with
    # l1_ratio) applies a roughly equivalent regularization.
    nmf = NMF(n_components=n_components, random_state=1, max_iter=1600,
              alpha_W=0.1, l1_ratio=0.5, init='nndsvd').fit(tfidf)
    tfidf_feature_names = tfidf_vectorizer.get_feature_names_out()
    return nmf, tfidf_feature_names
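
# A minimal usage sketch (not part of the original module): runs both
# scikit-learn models on a hypothetical toy corpus and plots their topics.
# The corpus, topic count, and colors below are illustrative assumptions;
# the plots are saved under resultados/topic_model/ by plot_top_words.
if __name__ == "__main__":
    toy_corpus = [
        "the stock market fell sharply today",
        "investors worry about the stock market",
        "the team won the football match",
        "a great football match with two goals",
    ]
    lda_model, lda_features = lda_scikit(toy_corpus, n_components=2)
    plot_top_words(lda_model, lda_features, 2, "LDA topics", "steelblue")
    nmf_model, nmf_features = nmf_scikit(toy_corpus, n_components=2)
    plot_top_words(nmf_model, nmf_features, 2, "NMF topics", "darkorange")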