The Apriori algorithm is highly influential in association rule mining. Its name comes from the fact that it exploits prior knowledge of the properties of frequent itemsets. In practice, Apriori splits the discovery of association rules into two steps. The first step iteratively retrieves all frequent itemsets from the transaction database, i.e., the itemsets whose support is no lower than a user-defined threshold. The second step uses these frequent itemsets to construct the rules that satisfy the user's minimum confidence. Mining or identifying all frequent itemsets is the core of the algorithm and accounts for most of the computation.
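As a concrete illustration of the two quantities these steps are built on, the short sketch below counts the support of an itemset and derives the confidence of one rule from it. The toy database and the helper names support/confidence are introduced here for illustration only; they are not part of the implementation that follows.

# Minimal sketch of support and confidence, the two thresholds Apriori tests.
transactions = [{'a', 'b', 'd'}, {'b', 'd'}, {'b', 'c'}, {'a', 'b', 'd'}]

def support(itemset, transactions):
    # fraction of transactions that contain every item of the itemset
    return sum(itemset <= t for t in transactions) / len(transactions)

def confidence(antecedent, consequent, transactions):
    # conf(X => Y) = support(X | Y) / support(X)
    return support(antecedent | consequent, transactions) / support(antecedent, transactions)

print(support({'b', 'd'}, transactions))       # 0.75: step 1 keeps {b, d} if 0.75 >= min_support
print(confidence({'b'}, {'d'}, transactions))  # 0.75: step 2 keeps b => d if 0.75 >= min_confidence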
In their work on depth-first data mining algorithms, Han et al. did not solve for frequent itemsets by generating candidate itemsets; instead they proposed the frequent pattern growth (FP-growth) algorithm. The algorithm scans the database to create the root of the FP-tree, labeled null. For each transaction Trans in database D, the frequent items in Trans are sorted according to the order in L; let the sorted frequent-item list of Trans be [p|P], where p is the first element and P is the remaining list. The function insert_tree([p|P], T) is then called: if the tree T has a child N with N.item_name = p.item_name, the count of N is incremented by 1; otherwise a new node N is created with count 1, its parent link is set to T, and its node link is spliced into the node-link structure of nodes with the same name. If P is non-empty, insert_tree(P, N) is called recursively. Because the database content is compressed, and because the links between itemsets are preserved when the frequent items are written into the FP-tree, the problem of finding frequent itemsets is transformed into one of recursively finding the shortest frequent patterns and concatenating their suffixes to form long frequent patterns.
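The insert_tree step described above can be summarized in a few lines. This is a minimal sketch of that pseudocode, using a simple recursive node class; the splicing of node links into the header table is omitted, and the full implementation further below instead stores nodes by id in a dict.

# Minimal sketch of insert_tree([p|P], T) as described in the paragraph above.
class TreeNode:
    def __init__(self, item, parent):
        self.item = item       # item_name of this node
        self.count = 1         # a new node starts with count 1
        self.parent = parent   # parent link to T
        self.children = {}     # item_name -> TreeNode

def insert_tree(items, T):
    # items is the transaction's frequent-item list [p|P], already sorted by L
    if not items:
        return
    p, P = items[0], items[1:]   # p: first element, P: remaining list
    if p in T.children:          # T has a child N with N.item_name = p.item_name
        T.children[p].count += 1
    else:                        # otherwise create a new node N with count 1
        T.children[p] = TreeNode(p, T)
    insert_tree(P, T.children[p])  # if P is non-empty, recurse: insert_tree(P, N)

Calling insert_tree for every sorted transaction, starting each time from the null root, yields the compressed FP-tree the paragraph describes.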
# -*- coding: utf-8 -*-
"""
Created on Fri May 20 20:25:24 2022

@author: zhenkai
"""

def load_data_set():
    data_set = [
        ['a', 'b', 'd', 'e'],
        ['b', 'c', 'd'],
        ['a', 'b', 'd', 'e'],
        ['a', 'b', 'd', 'e'],
        ['a', 'b', 'd', 'e'],
        ['b', 'd', 'e'],
        ['c', 'd'],
        ['a', 'b', 'c'],
        ['a', 'd', 'e'],
        ['b', 'd']
    ]
    return data_set

'''
Parameter: the transaction database
'''
def Create_C1(data_set):
    C1 = set()
    for t in data_set:
        for item in t:
            # frozenset is hashable and provides issubset() for the later
            # database scans that generate the frequent itemsets
            item_set = frozenset([item])
            C1.add(item_set)
    return C1

'''
Parameters: a candidate frequent k-itemset, the frequent (k-1)-itemsets
'''
def is_apriori(Ck_item, Lk_sub_1):
    for item in Ck_item:
        sub_item = Ck_item - frozenset([item])
        if sub_item not in Lk_sub_1:
            return False
    return True

'''
Parameters: the frequent (k-1)-itemsets, the size k of the candidate itemsets to generate
'''
def Create_Ck(Lk_sub_1, k):
    Ck = set()
    len_Lk_sub_1 = len(Lk_sub_1)
    list_Lk_sub_1 = list(Lk_sub_1)
    for i in range(len_Lk_sub_1):        # i: [0, len_Lk_sub_1)
        for j in range(i+1, len_Lk_sub_1):  # j: [i+1, len_Lk_sub_1)
            l1 = list(list_Lk_sub_1[i])
            l2 = list(list_Lk_sub_1[j])
            l1.sort()
            l2.sort()
            # list[s:t] copies the elements from s to t into a new list;
            # join two (k-1)-itemsets only if their first k-2 elements are
            # identical position by position
            if l1[0:k-2] == l2[0:k-2]:
                Ck_item = list_Lk_sub_1[i] | list_Lk_sub_1[j]
                if is_apriori(Ck_item, Lk_sub_1):
                    Ck.add(Ck_item)
    return Ck

'''
Parameters: transaction database, candidate frequent k-itemsets, minimum support,
itemset-to-support dict
'''
def Generate_Lk_By_Ck(data_set, Ck, min_support, support_data):
    Lk = set()
    # count in a dict how many transactions support each candidate k-itemset
    item_count = {}
    for t in data_set:
        for Ck_item in Ck:
            if Ck_item.issubset(t):
                if Ck_item not in item_count:
                    item_count[Ck_item] = 1
                else:
                    item_count[Ck_item] += 1
    data_num = float(len(data_set))
    for item in item_count:
        if (item_count[item] / data_num) >= min_support:
            Lk.add(item)
            support_data[item] = item_count[item] / data_num
    return Lk

'''
Parameters: transaction database, largest itemset size k to mine, minimum support
'''
def Generate_L(data_set, max_k, min_support):
    # dict whose keys are the frequent itemsets and whose values are their supports
    support_data = {}
    C1 = Create_C1(data_set)
    L1 = Generate_Lk_By_Ck(data_set, C1, min_support, support_data)
    Lk_sub_1 = L1.copy()  # shallow copy of L1
    L = []
    L.append(Lk_sub_1)  # append to the end of the list
    for k in range(2, max_k+1):
        Ck = Create_Ck(Lk_sub_1, k)
        Lk = Generate_Lk_By_Ck(data_set, Ck, min_support, support_data)
        Lk_sub_1 = Lk.copy()
        L.append(Lk_sub_1)
    return L, support_data

'''
Parameters: all frequent itemsets, itemset-to-support dict, minimum confidence
'''
def Generate_Rule(L, support_data, min_confidence):
    rule_list = []
    sub_set_list = []
    for i in range(len(L)):
        for frequent_set in L[i]:
            for sub_set in sub_set_list:
                if sub_set.issubset(frequent_set):
                    conf = support_data[frequent_set] / support_data[sub_set]
                    # declare each rule as a tuple (antecedent, consequent, confidence)
                    rule = (sub_set, frequent_set - sub_set, conf)
                    if conf >= min_confidence and rule not in rule_list:
                        rule_list.append(rule)
            sub_set_list.append(frequent_set)
    return rule_list

if __name__ == "__main__":
    data_set = load_data_set()
    L, support_data = Generate_L(data_set, 4, 0.3)  # minimum support is 30%
    rule_list = Generate_Rule(L, support_data, 0.7)
    for Lk in L:
        print("="*55)
        print("frequent " + str(len(list(Lk)[0])) + "-itemsets\t\tsupport")
        print("="*55)
        for frequent_set in Lk:
            print(frequent_set, "\t\t", support_data[frequent_set])
    print()
    print("Rules")
    for item in rule_list:
        print(item[0], "=>", item[1], "'s conf: ", item[2])
[Screenshot: run result with the rule printing commented out]
[Screenshot: part of the printed rules]
# 2. Finding frequent itemsets with the FP-growth algorithm
from collections import defaultdict, Counter, deque
import math
import copy

class node:
    def __init__(self, item, count, parent):
        # Node-to-node links are kept in the header table; traverse the header
        # table later if a per-node link attribute is needed
        self.item = item      # the item stored at this node
        self.count = count    # the item's count
        self.parent = parent  # id of this node's parent
        self.children = []    # list of ids of this node's children

class FP:
    def __init__(self, minsup=0.5):
        self.minsup = minsup
        self.minsup_num = None  # support count threshold
        self.N = None
        self.item_head = defaultdict(list)  # header table
        self.fre_one_itemset = defaultdict(lambda: 0)  # frequent 1-itemsets, value = support count
        self.sort_rules = None  # ordering of header-table items, by descending support
        self.tree = defaultdict()  # FP-tree: key = node id, value = node
        self.max_node_id = 0  # largest node id in the tree so far, used to create ids for new nodes
        self.fre_itemsets = []  # frequent itemsets
        self.fre_itemsets_sups = []  # support counts of the frequent itemsets

    def init_param(self, data):
        self.N = len(data)
        self.minsup_num = math.ceil(self.minsup * self.N)
        self.get_fre_one_itemset(data)
        self.build_tree(data)
        return

    def get_fre_one_itemset(self, data):
        # First scan of the data set: collect the frequent 1-items and sort them
        c = Counter()
        for t in data:
            c += Counter(t)
        for key, val in c.items():
            if val >= self.minsup_num:
                self.fre_one_itemset[key] = val
        sort_keys = sorted(self.fre_one_itemset, key=self.fre_one_itemset.get, reverse=True)
        # ordering rule: frequent 1-items in descending order of support
        self.sort_rules = {k: i for i, k in enumerate(sort_keys)}
        return

    def insert_item(self, parent, item):
        # Insert one item of a transaction into the FP-tree; return the id of the node it lands on
        children = self.tree[parent].children
        for child_id in children:
            child_node = self.tree[child_id]
            if child_node.item == item:
                self.tree[child_id].count += 1
                next_node_id = child_id
                break
        else:
            # The loop finished normally: no existing child matches the item,
            # so create a new child node and update the header table and the tree
            self.max_node_id += 1
            next_node_id = copy.copy(self.max_node_id)  # self.max_node_id is mutable, so copy it before referencing
            self.tree[next_node_id] = node(item=item, count=1, parent=parent)  # add the node to the tree
            self.tree[parent].children.append(next_node_id)  # update the parent's child list
            self.item_head[item].append(next_node_id)  # update the header table
        return next_node_id

    def build_tree(self, data):
        # Second scan of the data set: build the header table and the FP-tree
        one_itemset = set(self.fre_one_itemset.keys())
        self.tree[0] = node(item=None, count=0, parent=-1)
        for t in data:
            t = list(set(t) & one_itemset)  # drop the infrequent items from the transaction
            if len(t) > 0:
                t = sorted(t, key=lambda x: self.sort_rules[x])  # sort the items by descending support
                parent = 0  # every transaction is inserted starting from the root
                for item in t:
                    parent = self.insert_item(parent, item)  # insert the sorted items one by one
        return

    def get_path(self, pre_tree, condition_tree, node_id, suffix_items_count):
        # Starting from the parent of one suffix leaf, extract the path and update
        # the counts; suffix_items_count is the count of the suffix
        if node_id == 0:
            return
        else:
            if node_id not in condition_tree.keys():
                current_node = copy.deepcopy(pre_tree[node_id])
                current_node.count = suffix_items_count  # update the count
                condition_tree[node_id] = current_node
            else:
                # with several leaves the paths may overlap, so the counts accumulate
                condition_tree[node_id].count += suffix_items_count
            node_id = condition_tree[node_id].parent
            self.get_path(pre_tree, condition_tree, node_id, suffix_items_count)  # build the path recursively
            return

    def get_condition_tree(self, pre_tree, suffix_items_ids):
        # Build the conditional pattern base for a single-item suffix; the suffix may
        # occur at several leaves, so the paths of all its leaves are merged
        condition_tree = defaultdict()  # the conditional FP-tree as a dict; each node knows its parent
        for suffix_id in suffix_items_ids:  # merge the paths starting from each suffix leaf
            suffix_items_count = copy.copy(pre_tree[suffix_id].count)  # count at the leaf
            node_id = pre_tree[suffix_id].parent  # the conditional FP-tree does not include the suffix itself
            if node_id == 0:
                continue
            self.get_path(pre_tree, condition_tree, node_id, suffix_items_count)
        return condition_tree

    def extract_suffix_set(self, condition_tree, suffix_items):
        # Extract frequent itemsets from a conditional pattern base; suffix_items is
        # the suffix the base was built for. Returns the new suffixes together with
        # the ids of the newly added items (the leaves of the next round)
        new_suffix_items_list = []  # new items appended to the suffix
        new_item_head = defaultdict(list)  # header table updated for the current conditional FP-tree
        item_sup_dict = defaultdict(int)
        for key, val in condition_tree.items():
            item_sup_dict[val.item] += val.count  # count how often each item occurs
            new_item_head[val.item].append(key)
        for item, sup in item_sup_dict.items():
            if sup >= self.minsup_num:  # an item frequent in the conditional FP-tree extends the suffix
                current_item_set = [item] + suffix_items
                self.fre_itemsets.append(current_item_set)
                self.fre_itemsets_sups.append(sup)
                new_suffix_items_list.append(current_item_set)
            else:
                new_item_head.pop(item)
        return new_suffix_items_list, new_item_head.values()

    def get_fre_set(self, data):
        # Build the frequent itemsets ending in each frequent 1-item
        self.init_param(data)
        suffix_items_list = []
        suffix_items_id_list = []
        for key, val in self.fre_one_itemset.items():
            suffix_items = [key]
            suffix_items_list.append(suffix_items)
            suffix_items_id_list.append(self.item_head[key])
            self.fre_itemsets.append(suffix_items)
            self.fre_itemsets_sups.append(val)
        # pre_tree is the tree before any suffix has been removed; if its leaves carry
        # several distinct items, several conditional FP-trees can be derived from it
        pre_tree = copy.deepcopy(self.tree)
        self.dfs_search(pre_tree, suffix_items_list, suffix_items_id_list)
        return

    def bfs_search(self, pre_tree, suffix_items_list, suffix_items_id_list):
        # Breadth-first search: build the frequent k-itemsets level by level
        q = deque()
        q.appendleft((pre_tree, suffix_items_list, suffix_items_id_list))
        while len(q) > 0:
            param_tuple = q.pop()
            pre_tree = param_tuple[0]
            for suffix_items, suffix_items_ids in zip(param_tuple[1], param_tuple[2]):
                condition_tree = self.get_condition_tree(pre_tree, suffix_items_ids)
                new_suffix_items_list, new_suffix_items_id_list = self.extract_suffix_set(condition_tree, suffix_items)
                if new_suffix_items_list:
                    # store the predecessor tree together with the suffixes that produced it
                    q.appendleft((condition_tree, new_suffix_items_list, new_suffix_items_id_list))
        return

    def dfs_search(self, pre_tree, suffix_items_list, suffix_items_id_list):
        # Depth-first search: recursively build the frequent k-itemsets ending in a given suffix
        for suffix_items, suffix_items_ids in zip(suffix_items_list, suffix_items_id_list):
            condition_tree = self.get_condition_tree(pre_tree, suffix_items_ids)
            new_suffix_items_list, new_suffix_items_id_list = self.extract_suffix_set(condition_tree, suffix_items)
            if new_suffix_items_list:  # if new items were appended to the suffix, keep searching deeper
                self.dfs_search(condition_tree, new_suffix_items_list, new_suffix_items_id_list)
        return

if __name__ == '__main__':
    data2 = [list('abde'), list('bcd'), list('acde'), list('abde'),
             list('bcde'), list('bde'), list('cd')]
    fp = FP(minsup=3/7)
    fp.get_fre_set(data2)
    for itemset, sup in zip(fp.fre_itemsets, fp.fre_itemsets_sups):
        print(itemset, sup)
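A note on the search strategy: the class defines both dfs_search and bfs_search with identical signatures, but get_fre_set only ever calls the depth-first variant. The sketch below (FPBreadthFirst is a name introduced here for illustration, not part of the original program) shows that switching to level-wise mining is a one-line change; the trade-off is that the queue in bfs_search keeps every pending conditional FP-tree alive, whereas depth-first search can discard each conditional tree as soon as its branch is exhausted.

# Hypothetical subclass, assuming the FP class above is in scope; only the
# final call differs from FP.get_fre_set (bfs_search instead of dfs_search).
class FPBreadthFirst(FP):
    def get_fre_set(self, data):
        self.init_param(data)
        suffix_items_list, suffix_items_id_list = [], []
        for key, val in self.fre_one_itemset.items():
            suffix_items_list.append([key])
            suffix_items_id_list.append(self.item_head[key])
            self.fre_itemsets.append([key])
            self.fre_itemsets_sups.append(val)
        pre_tree = copy.deepcopy(self.tree)
        self.bfs_search(pre_tree, suffix_items_list, suffix_items_id_list)  # was: dfs_search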
[Screenshot: run result]
Through this experiment, I learned how frequent itemsets are handled. Implementing the Apriori algorithm made clear that it divides the discovery of association rules into two steps: the first iteratively retrieves all frequent itemsets from the transaction database; the second uses those frequent itemsets to construct the rules that satisfy the user's minimum confidence. The key task is to mine or identify all frequent itemsets.