目的
通过分析豆瓣用户电影评论数据,来对不同国家在不同时间内的电影进行情感分析,并通过云图及直方图进行效果展示。
效果
云图
直方图
全代码+数据地址
资源链接 :https://download.csdn.net/download/zhuqiuhui/85100293
核心代码片断
data_analysis.py
分析随着时间增长,不同国家拍摄的电影类型的变化
# Aggregate ratings by country and by year from the k-means output file,
# then replace each rating list with its (rating, count) frequency pairs.
with open("data/kmeans.csv", 'r') as outfile:
    data = csv.reader(outfile)
    li = []
    years = []
    movie_dict = {}
    country = []
    for item in data:
        # Skip records whose rating field (column 11) is missing.
        if item[11] == 'nan':
            continue
        li.append(item[11])
        years.append(item[3])
        country.append(item[4])
        # movie_dict maps country (col 4) -> year (col 3) -> list of ratings.
        movie_dict.setdefault(item[4], {}).setdefault(item[3], []).append(item[11])
    # Collapse each per-year rating list into (rating, occurrence-count) pairs.
    for k, v in movie_dict.items():
        for k1, v1 in v.items():
            v[k1] = Counter(v1).items()
data_tfidf.py
为由用户电影评论构建 tf-idf 模型抽取的关键短语。分为正向关键短语和负向关键短语。
def data_clean(SetPath):
    """Tokenize labelled review rows into space-joined word corpora.

    Each line of the file at *SetPath* is tab-separated; column 3 holds the
    review text and column 4 the sentiment label ('pos', anything else is
    treated as negative). The header row and rows with fewer than 5 fields
    are skipped. Tokens go into corpus_pos / corpus_neg by label and into
    corpus overall.
    """
    corpus_pos = []
    corpus_neg = []
    corpus = []
    result = []        # NOTE(review): unused in this excerpt
    feature_list = []  # NOTE(review): unused in this excerpt
    with open(SetPath) as file:
        data = file.readlines()
    for i, item in enumerate(data):
        row = item.strip().split("\t")
        # Guard clause: skip the header line and malformed rows.
        if i == 0 or len(row) < 5:
            continue
        # Precise-mode segmentation with HMM for unknown words (jieba defaults).
        tokens = jieba.lcut(row[3], cut_all=False, HMM=True)
        joined = " ".join(tokens)
        if row[4] == 'pos':
            corpus_pos.append(joined)
        else:
            corpus_neg.append(joined)
        corpus.append(joined)
data_apriori.py
为由电影风格标签抽取的关联规则
def runApriori(data_iter, minSupport, minConfidence):
    """Run the Apriori frequent-itemset algorithm over *data_iter*.

    NOTE(review): this excerpt is truncated — only the initialisation of the
    level-1 candidate set is visible; the loop over larger itemsets and the
    rule generation using *minConfidence* presumably follow in the full file.
    """
    # All distinct 1-item candidates plus the list of transactions.
    itemSet, transactionList = getItemSetTransactionList(data_iter)
    # Support counts shared across levels (filled in by the helper below).
    freqSet = defaultdict(int)
    largeSet = dict()    # presumably level -> frequent itemsets at that level
    assocRules = dict()
    # Candidates meeting minSupport — the frequent 1-itemsets.
    oneCSet = returnItemsWithMinSupport(itemSet, transactionList, minSupport, freqSet)
    currentLSet = oneCSet
data_kmeans.py
通过 kmeans 算法聚类电影。样本通过 one-hot 编码为特征,然后再使用 kmeans 算法聚类。共聚4类,通过云图展示(见效果)
def data_analysis(Setpath):
    """Cluster movies into 4 groups with k-means over one-hot features.

    Reads a GBK-encoded CSV, one-hot encodes every named (non-'Unnamed')
    column, concatenates the encodings into one feature matrix, runs
    KMeans(n_clusters=4), and appends each row's cluster id as a new
    'cluster' column.
    """
    data = pd.read_csv(Setpath, encoding='gbk')  # read the whole table
    X_value = []
    for cname in data.columns.values:
        # pandas names index/blank columns 'Unnamed: N'; skip those.
        if 'Unnamed' not in cname:
            X_data = np.array(data[cname])
            # Fix: the original fitted a LabelEncoder here and discarded the
            # result (dead code with no effect on the features) — removed.
            X_reshape = X_data.reshape(len(X_data), 1)
            X_value.append(preprocessing.OneHotEncoder().fit_transform(X_reshape).toarray())
    value = np.concatenate(X_value, axis=1)
    # Fixed random_state keeps the cluster assignment reproducible.
    y_pred = KMeans(n_clusters=4, random_state=10).fit_predict(value)
    data['cluster'] = y_pred.tolist()
    data = np.array(data).tolist()
    # NOTE(review): excerpt ends here; 'data' is presumably returned or
    # consumed by code not shown in this snippet.
公众号:方辰的博客