短文本分类-朴素贝叶斯

背景

一个类似智能客服的机器人:当用户输入一段文字的时候,判断该文字的类型,根据类型给予回复。

前期准备知识

朴素贝叶斯(NBC)

朴素贝叶斯分类(NBC)是以贝叶斯定理为基础并且假设特征条件之间相互独立的方法:先经过已给定的训练集,以特征词之间相互独立作为前提假设,学习从输入到输出的联合几率分布;再基于学习到的模型,对给定输入求出使得后验几率最大的输出。

朴素贝叶斯的例子

有两个箱子,A箱子和B箱子。A箱子中有100个小球,20白80黑;B箱子里面有50个小球,40白10黑。求在已知小球出自 B 箱的条件下,取出白色小球的几率。

P(白色|B) = P(白色)*P(B|白色)/P(B)

P(白色) = (20+40)/(100+50) = 2/5 白色球的总数/全部球的总数

P(B) = 50/150 = 1/3 B箱球的个数/全部球的总数

P(B|白色) = 40/60 = 2/3 白色球中来自 B 箱的比例(B箱中有40个白球,白球共60个;原文误写为 40/50)

P(白色|B) = P(白色) * P(B|白色)/P(B) = (2/5 * 2/3)/(1/3) = 4/5

使用条件几率来分类

  • 若是 p1(x, y) > p2(x, y), 那么属于类别 1;
  • 若是 p2(x, y) > p1(x, y), 那么属于类别 2;

开始

数据准备(文本分类数据以及类别)

# Training texts: short user complaints collected from the app.
dataSet = [
    "自动闪退",
    "太卡,闪退",
    "上传视频老是闪退",
    "添加字幕时会闪退",
    "不能够下载高像素的视频。",
    "我高清导不出怎么办",
    "不能保存好像素的视屏",
]
# One class label per text above.
# BUG FIX: the original listed only 6 labels for 7 texts; the last three
# texts are all HD-export complaints, so a third "高清导出" is required
# (this also matches the numeric labels 1,1,1,1,2,2,2 used later).
classVec = [
    "闪退",
    "闪退",
    "闪退",
    "闪退",
    "高清导出",
    "高清导出",
    "高清导出",
]
复制代码

处理数据

  1. 对短文本进行分词,去掉无用词,形成每一个文本的特征词。方法:用 jieba 分词后用停用词表过滤(停用词表网上很容易找到)
import jieba


# 停用词表 stop_words.txt 停用词表
# Stop-word list loader (expects ./stop_words.txt, one word per line).
def get_stop_words():
    """Read ./stop_words.txt and return its lines, stripped, as a list.

    Blank lines become empty strings in the result, exactly as lines
    appear in the file. The utf-8-sig encoding drops a leading BOM.
    """
    with open('./stop_words.txt', 'r', encoding='utf-8-sig') as handle:
        return [entry.strip() for entry in handle]


# 分词 停用
# Tokenize a text and drop stop words / blanks.
def segment_and_stop_word(word):
    """Segment *word* with jieba (full mode) and filter out stop words.

    :param word: raw text string
    :return: list of kept tokens in segmentation order
    """
    # PERF: membership tests run once per token, so use a set (O(1))
    # instead of scanning the stop-word list (O(n)) for every token.
    stop_words = set(get_stop_words())
    tokens = jieba.cut(word, cut_all=True)
    return [t for t in tokens if t not in stop_words and t not in (' ', '')]


# Training corpus: short user-complaint texts from the app.
dataSet = [
    "自动闪退",
    "太卡,闪退",
    "上传视频老是闪退",
    "添加字幕时会闪退",
    "不能够下载高像素的视频。",
    "我高清导不出怎么办",
    "不能保存好像素的视屏",
]
# Numeric class id per text: 1 = crash ("闪退"), 2 = HD export ("高清导出")
listClasses = [
    1,
    1,
    1,
    1,
    2,
    2,
    2,
]


if __name__ == "__main__":
    # 1. 加载数据集
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
复制代码

newDataSet 的值为:

[
        ['自动', '闪', '退'],
        ['太卡', '闪', '退'],
        ['上传', '视频', '老是', '闪', '退'],
        ['添加', '字幕', '时会', '闪', '退'],
        ['不可', '如下', '下载', '高像素', '像素', '视频'],
        ['高清', '导', '不出'],
        ['不能', '保存', '好像', '像素', '视屏']
]
复制代码
  1. 建立单词集合
def createVocabList(docs=None):
    """Return the deduplicated vocabulary list of a tokenized corpus.

    BUG FIX: later snippets call ``createVocabList(newDataSet)`` but the
    original definition accepted no argument and always read the
    module-level ``dataSet`` (raw strings, so the union produced single
    characters). An optional parameter keeps the zero-argument call
    working while supporting the documented usage.

    :param docs: iterable of token lists; when None, falls back to the
        module-level ``dataSet`` for backward compatibility
    :return: list of unique tokens (order unspecified, as before)
    """
    if docs is None:
        docs = dataSet  # backward-compatible fallback to the global corpus
    vocabSet = set()
    for document in docs:
        vocabSet |= set(document)  # union in each document's tokens
    return list(vocabSet)

if __name__ == "__main__":
    # 1. 加载数据集
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
    # 2.获取单词集
    vocabList = createVocabList()
复制代码

vocabList 的值为:

vocabList = ['高像素', '视屏', '不能', '添加', '保存', '时会', '导', '不可', '像素', '不出', '下载', '老是', '太卡', '字幕', '上传', '退', '如下', '闪', '好像', '视频', '自动', '高清']

复制代码
  1. 计算单词在类型中出现几率
def setOfWords2Vec(classId, vocabList, dataSet, classVec):
    """Compute, for one class, P(class | word) for every vocabulary word.

    Uses Bayes' rule p(a|b) = p(a) * p(b|a) / p(b) with:
      pa  - fraction of training docs belonging to *classId*
      pb  - fraction of docs containing the word (token occurrences / docs)
      pba - fraction of the class's docs containing the word

    :param classId: target class id
    :param vocabList: list of unique words
    :param dataSet: tokenized training corpus (list of token lists)
    :param classVec: class id per training document
    :return: dict word -> {'pab', 'pa_num', 'pb_num', 'pb_in_num', 'pb', 'pa'}
             (words never seen in this class are omitted)
    """
    target = int(classId)
    # Number of training documents labeled with the target class.
    class_total = sum(1 for label in classVec if int(label) == target)
    class_ratio = class_total / len(classVec)  # pa

    # Per word: [occurrences overall, occurrences inside the target class].
    counts = {word: [0, 0] for word in vocabList}
    for doc_index, document in enumerate(dataSet):
        in_class = int(classVec[doc_index]) == target
        for token in document:
            counts[token][0] += 1
            if in_class:
                counts[token][1] += 1

    result = {}
    for word, (overall, inside) in counts.items():
        # Skip words absent from the corpus or from this class entirely.
        if overall and inside:
            pb = overall / len(classVec)
            pba = inside / class_total
            # p(a|b) = p(a) * p(b|a) / p(b)
            result[word] = {
                "pab": round(class_ratio * pba / pb, 4),
                "pa_num": class_total,
                "pb_num": overall,
                "pb_in_num": inside,
                "pb": pb,
                "pa": class_ratio,
            }
    return result
    
if __name__ == "__main__":
    # 1. 加载数据集
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
    # 2.获取单词集
    vocabList = createVocabList()
    # 3. 单词在类型中出现几率
    wordJson = {}
    classListSet = set(listClasses)
    for i in classListSet:
        wordJson[i] = setOfWords2Vec(i, vocabList, listOPosts, listClasses)
复制代码

wordJson 就是每一个特征词(vocabList)在各个类型中的几率

{
  1: {
    '时会': {
      'pab': 1.0
    },
    '添加': {
      'pab': 1.0
    },
    '闪': {
      'pab': 1.0
    },
    '视频': {
      'pab': 0.5
    },
    '退': {
      'pab': 1.0
    },
    '老是': {
      'pab': 1.0
    },
    '字幕': {
      'pab': 1.0
    },
    '上传': {
      'pab': 1.0
    },
    '太卡': {
      'pab': 1.0
    },
    '自动': {
      'pab': 1.0
    }
  },
  2: {
    '好像': {
      'pab': 1.0
    },
    '高清': {
      'pab': 1.0
    },
    '如下': {
      'pab': 1.0
    },
    '不可': {
      'pab': 1.0
    },
    '视频': {
      'pab': 0.5
    },
    '高像素': {
      'pab': 1.0
    },
    '不能': {
      'pab': 1.0
    },
    '下载': {
      'pab': 1.0
    },
    '视屏': {
      'pab': 1.0
    },
    '导': {
      'pab': 1.0
    },
    '像素': {
      'pab': 1.0
    },
    '保存': {
      'pab': 1.0
    },
    '不出': {
      'pab': 1.0
    }
  }
}

复制代码

接下来就是测试了!!!!!

def getTestWordClassId(wordJson, words, text):
    """Pick the class whose word probabilities best match the test tokens.

    Scores each class by summing `pab` over the test tokens present in
    that class's table; the highest-scoring class wins.

    :param wordJson: class id -> {word -> {'pab': ...}} probability tables
    :param words: tokenized test text
    :param text: original raw text (unused; kept for interface compatibility)
    :return: winning class id as int (defaults to 1 when no token matches)
    """
    best_score = 0
    best_class = 1  # default class when nothing matches
    for class_id, word_probs in wordJson.items():
        score = sum(word_probs[token]['pab'] for token in words if token in word_probs)
        if float(score) > float(best_score):
            best_score = score
            best_class = class_id
    return int(best_class)

if __name__ == "__main__":
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
    vocabList = createVocabList(newDataSet)
    # 3. 单词在类型中出现几率
    wordJson = {}
    classListSet = set(listClasses)
    for i in classListSet:
        wordJson[i] = setOfWords2Vec(i, vocabList, newDataSet, listClasses)

    testDataSet = newDataSet  # 测试集
    classResult = {}
    trainTextAndIds = []
    for i in range(len(dataSet)):
        trainTextAndIds.append({
            "id": i,
            "text": dataSet[i],
        })
    for i in range(len(testDataSet)):
        classResult[trainTextAndIds[i]['id']] = {
            "train_id": listClasses[i],
            "test_id": getTestWordClassId(wordJson, testDataSet[i], trainTextAndIds[i]['text']),
            "id": trainTextAndIds[i]['id'],
            "text": trainTextAndIds[i]['text']
        }
复制代码
  1. 计算召回率正确率
# Compute per-class accuracy and recall from classification results.
def getRate(dataSet, classVec):
    """Build a per-class confusion matrix and derive recall/accuracy.

    :param dataSet: mapping doc id -> {'train_id': true class,
        'test_id': predicted class, ...}
    :param classVec: iterable of all class ids
    :return: list of {'tag_id', 'recall', 'accuracy', 'row'} dicts,
        where 'row' holds the TP/FN/FP/TN counts
    """
    rates = {c: {'TP': 0, 'FN': 0, 'FP': 0, 'TN': 0} for c in classVec}
    for key in dataSet:
        truth = dataSet[key]['train_id']
        predicted = dataSet[key]['test_id']
        if truth == predicted:
            rates[truth]['TP'] += 1  # TP: positive predicted as positive
        else:
            rates[truth]['FN'] += 1      # FN: positive predicted as negative
            rates[predicted]['FP'] += 1  # FP: negative predicted as positive
    total = len(dataSet)
    for c in rates:
        # Everything not attributed to this class is a true negative.
        rates[c]['TN'] = total - rates[c]['TP'] - rates[c]['FP'] - rates[c]['FN']
    accuracy_recall_list = []
    for c, row in rates.items():
        denom = row['TP'] + row['FP'] + row['TN'] + row['FN']
        accuracy_recall_list.append({
            "tag_id": c,
            "recall": 0.0 if row['TP'] + row['FN'] == 0 else round(row['TP'] / (row['TP'] + row['FN']), 4),
            # BUG FIX: guard the denominator so an empty result set returns
            # 0.0 instead of raising ZeroDivisionError.
            "accuracy": 0.0 if denom == 0 else round((row['TP'] + row['TN']) / denom, 4),
            "row": row
        })
    return accuracy_recall_list
    
if __name__ == "__main__":
    newDataSet = []
    for i in dataSet:
        newDataSet.append(segment_and_stop_word(i))
    vocabList = createVocabList(newDataSet)
    # 3. 单词在类型中出现几率
    wordJson = {}
    classListSet = set(listClasses)
    for i in classListSet:
        wordJson[i] = setOfWords2Vec(i, vocabList, newDataSet, listClasses)

    testDataSet = newDataSet  # 测试集
    classResult = {}
    trainTextAndIds = []
    for i in range(len(dataSet)):
        trainTextAndIds.append({
            "id": i,
            "text": dataSet[i],
        })
    for i in range(len(testDataSet)):
        classResult[trainTextAndIds[i]['id']] = {
            "train_id": listClasses[i],
            "test_id": getTestWordClassId(wordJson, testDataSet[i], trainTextAndIds[i]['text']),
            "id": trainTextAndIds[i]['id'],
            "text": trainTextAndIds[i]['text']
        }

    # 4.计算召回率正确率
    accuracy_recall_list = getRate(classResult, set(listClasses))
复制代码

accuracy_recall_list

[
  {
    'tag_id': 1,
    'recall': 1.0,
    'accuracy': 1.0,
    'row': {
      'TP': 4,
      'FN': 0,
      'FP': 0,
      'TN': 3
    }
  },
  {
    'tag_id': 2,
    'recall': 1.0,
    'accuracy': 1.0,
    'row': {
      'TP': 3,
      'FN': 0,
      'FP': 0,
      'TN': 4
    }
  }
]
复制代码

结束

由于训练集的数据过少,分类比较明确,因此准确率和召回率很高,可是方法差很少,能够用大量的训练集来测试

相关文章
相关标签/搜索