
Feature Integration

2022-01-06



import requests
import time
import json

import pandas as pd
from snownlp import SnowNLP
from snownlp import sentiment  # only needed if you retrain the model (see note below)
import jieba      # Chinese word segmentation
import wordcloud  # word cloud generation

# Required libraries

print("开始爬取评论")
name='评论'
def get_html(url):
    # Headers that mimic a normal browser visit
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
    }

    r = requests.get(url, timeout=30, headers=headers)
    r.raise_for_status()
    r.encoding = 'utf-8'
    return r.text



def get_content(url):
    comments = []
    html = get_html(url)
    try:
        s = json.loads(html)
    except json.JSONDecodeError:
        print("json load error")
        raise  # let the main loop treat this as the end of the crawl

    # One entry per comment on this page
    for comment in s['data']['replies']:
        InfoDict = {}  # dict holding one comment's fields
        InfoDict['用户名'] = comment['member']['uname']
        InfoDict['uid号'] = comment['member']['mid']
        InfoDict['评论内容'] = comment['content']['message']
        InfoDict['性别'] = comment['member']['sex']
        InfoDict['点赞'] = comment['like']
        InfoDict['回复数量'] = comment['rcount']
        InfoDict['用户等级'] = comment['member']['level_info']['current_level']
        InfoDict['用户类别'] = comment['member']['vip']['vipType']
        comments.append(InfoDict)
    return comments

def Out2File(comments):
    # Append this page's comments to the txt file, one comma-separated line each
    with open('data\\txt\\' + name + '.txt', 'a+', encoding='utf-8') as f:
        for user in comments:
            try:
                f.write('{},{},{},{},{},{},{},{}\n'.format(
                    user['用户名'], user['uid号'], user['性别'], user['点赞'], user['回复数量'],
                    user['用户等级'], user['用户类别'], user['评论内容']))
            except Exception:
                print("Out2File error")
        print('Current page saved')





e = 0     # set to 1 once a page fails to parse, i.e. no more comments
page = 1

while e == 0:
    url = ("https://api.bilibili.com/x/v2/reply/main?&jsonp=jsonp&next=" + str(page)
           + "&type=1&oid=379315227&mode=3&plat=1&_=1641373150736")
    # The same endpoint for another video, for reference:
    # https://api.bilibili.com/x/v2/reply/main?&jsonp=jsonp&next=2&type=1&oid=892600469&mode=3&plat=1&_=1641279217907

    try:
        content = get_content(url)
        print("page:", page)
        Out2File(content)
        page = page + 1

        # To lower the risk of an IP ban, rest 5 seconds every 10 pages.
        if page % 10 == 0:
            time.sleep(5)
    except Exception:
        e = 1
print("Finished scraping comments")
print("Start converting the file format")

txt = pd.read_csv('data\\txt\\' + name + '.txt',
                  names=['用户名', 'uid号', '性别', '点赞数量', '回复数量', '用户等级', 'vip类别', '评论内容'],
                  error_bad_lines=False,  # skips malformed rows; on pandas >= 1.3 use on_bad_lines='skip' instead
                  delimiter=',', encoding='utf-8')
txt.to_csv('data\\beforecsv\\' + name + '.csv', index=False, encoding='utf-8')
print("File format conversion done")
print("开始情感分析")
df=pd.read_csv('data\\beforecsv\\'+name+'.csv',header=1,usecols=[7])

contents = df.values.tolist()
print(len(contents))
score = []

for row in contents:
    try:
        s = SnowNLP(row[0])
        score.append(s.sentiments)  # probability in [0, 1] that the comment is positive
    except Exception:
        print("something is wrong")
        score.append(0.5)  # fall back to neutral
print(len(score))
data2 = pd.DataFrame(score)
data2.to_csv('data\\emotion\\' + name + '.csv', index=False, mode='a+')
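
# Note: SnowNLP's default sentiment model is trained on e-commerce review
# data, so the scores for video comments are only approximate. Given
# labelled domain data (the file names below are hypothetical), the model
# could be retrained through the sentiment module imported above:
#     sentiment.train('neg_comments.txt', 'pos_comments.txt')
#     sentiment.save('sentiment.marshal')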
print("情感分析完成")
print("开始得出结果数据")
f1 = pd.read_csv('data\\beforecsv\\'+name+'.csv',header=1)
f2 = pd.read_csv('data\\emotion\\'+name+'.csv')
file = [f1,f2]
train = pd.concat(file,axis=1)
train.to_csv('data\\finalcsv\\'+name+'.csv',sep=',',index=False)
print("成功得出结果数据")

print("产生词云图")
#1.读取文件
f=open('data\\txt\\'+name+'.txt',encoding='utf-8')
#f=open('..\\paqushuju\\评论文件\\魔王勇者.txt',encoding='utf-8')
text=f.read()
#2.分词,把一句话分割成一个个词语
text_list=jieba.lcut(text)
#list转换为字符串
text_str=''.join(text_list)
print(text_list)
wc = wordcloud.WordCloud(
    width=500,#宽度
    height=500,#高度
    background_color='white',#背景颜色
    stopwords={'姓名','uid','性别','点赞数量','回复数量','用户等级','用户vip等级','评论内容',
               '男','女','保密','doge','藏狐'},
    font_path='msyh.ttc'#字体

)
wc.generate(text_str)
wc.to_file('data\\wordcloud\\'+name+'.png')
print("成功得出词云图")


This ties the whole pipeline together: scrape the comments, convert the txt file to csv, run the sentiment analysis, merge the results, and finally produce the target table and the word cloud.
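
Since every comment now carries a sentiment score between 0 and 1, a quick histogram makes the overall mood easy to read at a glance. A minimal sketch, assuming the merged csv produced above with the score as its last column:

import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('data\\finalcsv\\评论.csv')
scores = df.iloc[:, -1]  # the SnowNLP score is the last column after the merge

plt.hist(scores, bins=20, range=(0, 1), color='steelblue')
plt.xlabel('sentiment score (0 = negative, 1 = positive)')
plt.ylabel('number of comments')
plt.title('Sentiment distribution of the comments')
plt.show()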


The next step should be repeated crawling of the data, re-running the scraper without saving the same comments twice.
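
One simple way to support that is to remember which reply IDs have already been saved and skip them on later runs. A minimal sketch, assuming each reply object in the API response carries a unique rpid field:

import json
import os

SEEN_FILE = 'data\\seen_rpids.json'  # hypothetical bookkeeping file

def load_seen():
    if os.path.exists(SEEN_FILE):
        with open(SEEN_FILE, encoding='utf-8') as f:
            return set(json.load(f))
    return set()

def save_seen(seen):
    with open(SEEN_FILE, 'w', encoding='utf-8') as f:
        json.dump(sorted(seen), f)

# Inside get_content, each comment would then be filtered before saving:
#     if comment['rpid'] in seen:
#         continue
#     seen.add(comment['rpid'])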

Source: https://www.cnblogs.com/520520520zl/p/15772085.html
