import glob
from txtutil import txtnoun
# Collect tokens from the 2015 ~ 2018 sustainability management reports
filelist = glob.glob('./data/kr-Report_201?.txt')
print(filelist)
# Extract only the noun tokens from the loaded documents
# skiplist: token replacement map passed to txtnoun (e.g. the typo '갤러시' -> '갤럭시')
skiplist = {'갤러시':'갤럭시', '가치창출':'가치창출'}
texts = [txtnoun(file, skip=skiplist) for file in filelist]
texts = " ".join(texts)
texts[:300]
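# txtnoun comes from the local txtutil module, which is not shown here.
# The sketch below is a hypothetical reimplementation (named txtnoun_sketch
# to avoid shadowing the real helper) assuming KoNLPy's Okt tagger; it only
# illustrates the expected behaviour, not the actual txtutil code.
from konlpy.tag import Okt

def txtnoun_sketch(filename, skip=None, minlen=2):
    """Read a Korean text file and return its nouns as one space-joined string."""
    skip = skip or {}
    with open(filename, 'r', encoding='utf-8') as f:
        raw = f.read()
    for wrong, right in skip.items():      # apply the replacement map first
        raw = raw.replace(wrong, right)
    nouns = Okt().nouns(raw)               # morphological noun extraction
    return " ".join(n for n in nouns if len(n) >= minlen)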
# Save the noun-tokenized text to ssResport.txt
texts_file = './data/ssResport.txt'
with open(texts_file, 'w', encoding='utf-8') as file:
    file.write(texts)
# ! cat ./data/ssResport.txt | head -n 10
# ! pip3 install gensim
%%time
texts_file = './data/ssResport.txt'
from gensim.models import word2vec
data = word2vec.LineSentence(texts_file)
# Train a skip-gram model (sg=1) with hierarchical softmax (hs=1):
# 200-dimensional vectors, window of 2, ignoring tokens seen fewer than 20 times.
model = word2vec.Word2Vec(data, size=200, window=2, min_count=20, hs=1,
                          workers=4, iter=100, sg=1)
model.save("./data/ssReport.model")
print("model saved.")
%reset
%who
from gensim.models import word2vec
model = word2vec.Word2Vec.load('./data/ssReport.model')
len(model.wv.vocab.keys())
list(model.wv.index2word)[:10]
model.wv.most_similar(positive=['삼성전자'])
model.wv.most_similar(negative=['삼성전자'])
model.wv.most_similar(positive=['글로벌'])
model.wv.most_similar(negative=['글로벌'])
model.wv.most_similar(positive=['삼성전자', '경영활동'],
                      negative=['근무환경'])  # e.g. 담당자 (person in charge), 직원 (employee)
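# Beyond ranked neighbour lists, the same KeyedVectors object also exposes a
# pairwise cosine similarity score; the word pair below is illustrative.
model.wv.similarity('삼성전자', '글로벌')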
list(model.wv.vocab.keys())[:10]
# model.wv.vocab : { word: Vocab object with corpus statistics (count, index), not the vector itself }
vocab = list(model.wv.vocab)
X = model.wv[vocab]   # matrix of all word vectors, shape (len(vocab), 200)
X.shape
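# Quick inspection of one vocabulary entry: the Vocab object carries the
# corpus count, while the vector itself is looked up via model.wv[word].
sample = vocab[0]
print(sample, model.wv.vocab[sample].count, model.wv[sample].shape)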
%%time
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2)   # 2-D embedding for the x/y scatter plot below
X_tsne = tsne.fit_transform(X)
import pandas as pd
df = pd.DataFrame(X_tsne, index=vocab, columns=['x', 'y'])
df.head()
%matplotlib inline
from matplotlib import rc
rc('font', family=['NanumGothic', 'Malgun Gothic'])  # Korean-capable font so Hangul labels render
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(1, 1, 1)
ax.scatter(df['x'], df['y'])
for word, pos in df.iterrows():
    ax.annotate(word, pos)
plt.grid(True)