from __future__ import print_function
import requests
import json
import re      # regular-expression matching
import time    # time utilities
import jieba   # Chinese word segmentation
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from PIL import Image
from wordcloud import WordCloud  # word-cloud rendering
import paddlehub as hub
# Strip special characters from the text
def clear_special_char(content):
    '''
    Remove special characters with regular expressions.
    Parameter content: the raw text
    return: the cleaned text
    '''
    s = re.sub(r"</?(.+?)>| |\t|\r", "", content)   # drop HTML tags, odd spaces, tabs, CRs
    s = re.sub(r"\n", " ", s)                        # newlines become spaces
    s = re.sub(r"\*", "\\*", s)                      # escape asterisks (kept from the original pipeline)
    s = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9]', '', s)  # keep only CJK characters, letters and digits
    s = re.sub(r'[\001\002\003\004\005\006\007\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a]+', '', s)  # drop control characters
    s = re.sub(r'[a-zA-Z]', '', s)                   # drop Latin letters
    s = re.sub(r'^\d+(\.\d+)?$', '', s)              # drop strings that are purely numeric
    return s
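# A hypothetical example of what the pipeline above does to one comment:
#   clear_special_char('<b>好看!</b>\n123') -> '好看123'
# (tags, punctuation and whitespace are stripped; CJK characters and digits survive)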
def fenci(text):
    '''
    Segment text with jieba.
    Parameter text: the sentence or text to segment
    return: the list of tokens
    '''
    jieba.load_userdict("add_words.txt")    # load the custom dictionary
    seg = jieba.lcut(text, cut_all=False)   # precise-mode segmentation
    return seg
def stopwordslist(file_path):
    '''
    Build the stop-word list.
    Parameter file_path: path to the stop-word file
    return: list of stop words
    '''
    with open(file_path, encoding='UTF-8') as f:
        stopwords = [line.strip() for line in f.readlines()]
    return stopwords
def movestopwords(sentence, stopwords, counts):
    '''
    Drop stop words and count word frequencies.
    Parameters sentence: segmented tokens  stopwords: stop-word list  counts: dict accumulating word counts
    return: None (counts is updated in place)
    '''
    for word in sentence:
        if word not in stopwords and len(word) != 1:  # skip stop words and single characters
            counts[word] = counts.get(word, 0) + 1
    return None
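# drawcounts and drawcloud are called from __main__ below but are not defined in this
# section. The following is a minimal sketch of both, assuming a local CJK-capable font
# file ('simhei.ttf', a hypothetical path) so matplotlib and wordcloud can render Chinese
# labels, and assuming drawcloud writes its image to 'pic.png', which __main__ displays.
def drawcounts(counts, num):
    '''
    Bar chart of the num most frequent words.
    Parameters counts: word-frequency dict  num: how many top words to plot
    '''
    top = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)[:num]
    words = [kv[0] for kv in top]
    freqs = [kv[1] for kv in top]
    font = font_manager.FontProperties(fname='simhei.ttf')  # assumed local font file
    plt.bar(range(len(words)), freqs)
    plt.xticks(range(len(words)), words, fontproperties=font, rotation=45)
    plt.title('Top word frequencies', fontproperties=font)
    plt.tight_layout()
    plt.show()

def drawcloud(counts):
    '''
    Render a word cloud from the frequency dict and save it as pic.png.
    '''
    cloud = WordCloud(font_path='simhei.ttf',   # assumed local font file
                      background_color='white',
                      max_words=200,
                      width=800,
                      height=600)
    cloud.generate_from_frequencies(counts)
    cloud.to_file('pic.png')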
def text_detection(text, file_path):
    '''
    Run PaddleHub's porn-detection model over the comments.
    Parameters text: list collecting the lines to score  file_path: path to the comment file
    return: None (flagged comments are printed)
    '''
    porn_detection_lstm = hub.Module(name="porn_detection_lstm")
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            if len(line.strip()) <= 1:  # skip empty and single-character lines
                continue
            text.append(line)
    input_dict = {"text": text}
    # use_gpu=True requires a GPU build of PaddlePaddle; set it to False on CPU-only machines
    results = porn_detection_lstm.detection(data=input_dict, use_gpu=True, batch_size=1)
    for index, item in enumerate(results):
        if item['porn_detection_key'] == 'porn':
            print(item['text'], ':', item['porn_probs'])
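# saveMovieInfoToFile is called from __main__ below but is not defined in this section.
# A minimal sketch follows, assuming iQiyi's paginated comment API returns JSON with a
# data.comments list whose entries carry a 'content' field and an 'id' that serves as the
# cursor for the next page. The URL and its query parameters are placeholders; substitute
# the actual endpoint and the content_id of the show being crawled.
def saveMovieInfoToFile(lastId, arr):
    '''
    Fetch one page of comments, append their texts to arr,
    and return the last comment id as the cursor for the next page.
    '''
    url = ('https://sns-comment.iqiyi.com/v3/comment/get_comments.action'  # placeholder endpoint
           '?agent_type=118&business_type=17&content_id=CONTENT_ID'        # CONTENT_ID is a placeholder
           '&page_size=10&types=time&last_id=' + lastId)
    headers = {'User-Agent': 'Mozilla/5.0'}  # minimal UA header so the request is not rejected
    response = requests.get(url, headers=headers)
    responseJson = json.loads(response.text)
    comments = responseJson['data']['comments']
    for val in comments:
        if 'content' in val.keys():
            arr.append(val['content'])
        lastId = str(val['id'])
    return lastId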
# Comments are paginated, so the iQiyi comment API has to be requested repeatedly to
# collect multiple pages; some comments contain emoji and other special characters.
# num is the page count; one page holds 10 comments, so to crawl 1000 comments set num=100.
if __name__ == "__main__":
    num = 300        # pages to crawl; one page holds 10 comments
    lastId = '0'     # cursor for the paginated comment API
    arr = []         # raw comment texts
    with open('aqy.txt', 'a', encoding='utf-8') as f:
        for i in range(num):
            lastId = saveMovieInfoToFile(lastId, arr)
            time.sleep(0.5)  # throttle requests between pages
        for item in arr:
            Item = clear_special_char(item)
            if Item.strip() != '':
                try:
                    f.write(Item + '\n')
                except Exception as e:
                    print(e)  # a comment may still contain characters that fail to write
    print('Total comments crawled:', len(arr))

    # Segment the saved comments, drop stop words, and count word frequencies
    counts = {}
    stopwords = stopwordslist('cn_stopwords.txt')  # load the stop-word list once, outside the loop
    with open('aqy.txt', 'r', encoding='utf-8') as f:
        for line in f:
            words = fenci(line)
            movestopwords(words, stopwords, counts)
    drawcounts(counts, 10)  # bar chart of the 10 most frequent words
    drawcloud(counts)       # word cloud written to pic.png

    # Score the comments with the porn-detection model
    file_path = 'aqy.txt'
    test_text = []
    text_detection(test_text, file_path)

    display(Image.open('pic.png'))  # show the generated word cloud (display() is available in notebook environments)