python 去除nan inf_Python实现半自动评分卡建模(附代码)
生活随笔
收集整理的這篇文章主要介紹了
python 去除nan inf_Python实现半自动评分卡建模(附代码)
小編覺得挺不錯的,現在分享給大家,幫大家做個參考.
作者:Summer Memories
個人公眾號:風控汪的數據分析之路
知乎專欄:小鑫的數據分析筆記
個人公眾號:風控汪的數據分析之路
知乎專欄:小鑫的數據分析筆記
這次分享一個自己寫的python腳本,可以實現半自動化的評分卡建模。運行腳本時需要input已經預處理好的訓練集和測試集數據,所以建模前期的EDA,數據清洗,缺失值填充等需要人工完成。
Github鏈接:
Github鏈接:taenggu0309/Semi-auto-modeling(github.com)。使用方法:直接調用 get_scorecard_model 函數即可。
PS: 在做缺失值填充時建議當缺失率<=5%時,填充中位數或平均數,>5%時,填充一個映射值,例如-999(把缺失單獨分為一箱)。
整個腳本的大概流程是:
PSI預篩選 --> 特征分箱 --> IV篩選特征 --> 相關性/多重共線性篩選 --> woe單調調整 -->
顯著性篩選 --> 系數一致篩選 --> 建模 --> 模型評估 --> 標準評分轉換
自己拿一份數據(2w+條, 150+特征)測試了一下,腳本跑完大概需要20s,速度還是可以的。
目前這個腳本只是一個初版,后續各位小伙伴在使用過程中碰到什么問題,有改進的建議,都可以在評論區留言,或者直接私信我。
下面貼上各個模塊的代碼,注釋寫的比較少,見諒。。。
需要import的包import 計算PSIdef cal_psi(df1,df2,col,bin_num=5):"""計算psiparam:df1 -- 數據集A Dataframedf2 -- 數據集B Dataframecol -- 字段名 stringbin_num -- 連續型特征的分箱數 默認為5return:psi floatbin_df -- psi明細表 Dataframe"""# 對于離散型特征直接根據類別進行分箱,分箱邏輯以數據集A為準if df1[col].dtype == np.dtype('object') or df1[col].dtype == np.dtype('bool') or df1[col].nunique()<=bin_num:bin_df1 = df1[col].value_counts().to_frame().reset_index().rename(columns={'index':col,col:'total_A'})bin_df1['totalrate_A'] = bin_df1['total_A']/df1.shape[0]bin_df2 = df2[col].value_counts().to_frame().reset_index().rename(columns={'index':col,col:'total_B'})bin_df2['totalrate_B'] = bin_df2['total_B']/df2.shape[0]else:# 這里采用的是等頻分箱bin_series,bin_cut = pd.qcut(df1[col],q=bin_num,duplicates='drop',retbins=True)bin_cut[0] = float('-inf')bin_cut[-1] = float('inf')bucket1 = pd.cut(df1[col],bins=bin_cut)group1 = df1.groupby(bucket1)bin_df1=pd.DataFrame()bin_df1['total_A'] = group1[col].count()bin_df1['totalrate_A'] = bin_df1['total_A']/df1.shape[0]bin_df1 = bin_df1.reset_index()bucket2 = pd.cut(df2[col],bins=bin_cut)group2 = df2.groupby(bucket2)bin_df2=pd.DataFrame()bin_df2['total_B'] = group2[col].count()bin_df2['totalrate_B'] = bin_df2['total_B']/df2.shape[0]bin_df2 = bin_df2.reset_index()# 計算psibin_df = pd.merge(bin_df1,bin_df2,on=col)bin_df['a'] = bin_df['totalrate_B'] - bin_df['totalrate_A']bin_df['b'] = np.log(bin_df['totalrate_B']/bin_df['totalrate_A'])bin_df['Index'] = bin_df['a']*bin_df['b']bin_df['PSI'] = bin_df['Index'].sum()bin_df = bin_df.drop(['a','b'],axis=1)psi =bin_df.PSI.iloc[0]return psi,bin_df決策樹分箱def tree_split(df,col,target,max_bin,min_binpct,nan_value):"""決策樹分箱param:df -- 數據集 Dataframecol -- 分箱的字段名 stringtarget -- 標簽的字段名 stringmax_bin -- 最大分箱數 intmin_binpct -- 箱體的最小占比 floatnan_value -- 缺失的映射值 int/floatreturn:split_list -- 分割點 list"""miss_value_rate = df[df[col]==nan_value].shape[0]/df.shape[0]# 如果缺失占比小于5%,則直接對特征進行分箱if miss_value_rate<0.05:x = np.array(df[col]).reshape(-1,1)y = np.array(df[target])tree = 
DecisionTreeClassifier(max_leaf_nodes=max_bin,min_samples_leaf = min_binpct)tree.fit(x,y)thresholds = tree.tree_.thresholdthresholds = thresholds[thresholds!=_tree.TREE_UNDEFINED]split_list = sorted(thresholds.tolist())# 如果缺失占比大于5%,則把缺失單獨分為一箱,剩余部分再進行決策樹分箱else:max_bin2 = max_bin-1x = np.array(df[~(df[col]==nan_value)][col]).reshape(-1,1)y = np.array(df[~(df[col]==nan_value)][target])tree = DecisionTreeClassifier(max_leaf_nodes=max_bin2,min_samples_leaf = min_binpct)tree.fit(x,y)thresholds = tree.tree_.thresholdthresholds = thresholds[thresholds!=_tree.TREE_UNDEFINED]split_list = sorted(thresholds.tolist())split_list.insert(0,nan_value)return split_list等頻分箱def quantile_split(df,col,target,max_bin,nan_value):"""等頻分箱param:df -- 數據集 Dataframecol -- 分箱的字段名 stringtarget -- 標簽的字段名 stringmax_bin -- 最大分箱數 intnan_value -- 缺失的映射值 int/floatreturn:split_list -- 分割點 list"""miss_value_rate = df[df[col]==nan_value].shape[0]/df.shape[0]# 如果缺失占比小于5%,則直接對特征進行分箱if miss_value_rate<0.05:bin_series,bin_cut = pd.qcut(df[col],q=max_bin,duplicates='drop',retbins=True)split_list = bin_cut.tolist()split_list.remove(split_list[0])# 如果缺失占比大于5%,則把缺失單獨分為一箱,剩余部分再進行等頻分箱else:df2 = df[~(df[col]==nan_value)]max_bin2 = max_bin-1bin_series,bin_cut = pd.qcut(df2[col],q=max_bin2,duplicates='drop',retbins=True)split_list = bin_cut.tolist()split_list[0] = nan_valuesplit_list.remove(split_list[-1])# 當出現某個箱體只有好用戶或只有壞用戶時,進行前向合并箱體var_arr = np.array(df[col])target_arr = np.array(df[target])bin_trans = np.digitize(var_arr,split_list,right=True)var_tuple = [(x,y) for x,y in zip(bin_trans,target_arr)]delete_cut_list = []for i in set(bin_trans):target_list = [y for x,y in var_tuple if x==i]if target_list.count(1)==0 or target_list.count(0)==0:if i ==min(bin_trans):index=ielse:index = i-1delete_cut_list.append(split_list[index])split_list = [x for x in split_list if x not in delete_cut_list]return split_list計算woedef cal_woe(df,col,target,nan_value,cut=None):"""計算woeparam:df -- 數據集 Dataframecol -- 分箱的字段名 stringtarget 
-- 標簽的字段名 stringnan_value -- 缺失的映射值 int/floatcut -- 箱體分割點 listreturn:woe_list -- 每個箱體的woe list"""total = df[target].count()bad = df[target].sum()good = total-badbucket = pd.cut(df[col],cut)group = df.groupby(bucket)bin_df = pd.DataFrame()bin_df['total'] = group[target].count()bin_df['bad'] = group[target].sum()bin_df['good'] = bin_df['total'] - bin_df['bad']bin_df['badattr'] = bin_df['bad']/badbin_df['goodattr'] = bin_df['good']/goodbin_df['woe'] = np.log(bin_df['badattr']/bin_df['goodattr'])# 當cut里有缺失映射值時,說明是把缺失單獨分為一箱的,后續在進行調成單調分箱時# 不考慮缺失的箱,故將缺失映射值剔除if nan_value in cut:woe_list = bin_df['woe'].tolist()[1:]else:woe_list = bin_df['woe'].tolist()return woe_listwoe調成單調遞減或單調遞增def monot_trim(df,col,target,nan_value,cut=None):"""woe調成單調遞減或單調遞增param:df -- 數據集 Dataframecol -- 分箱的字段名 stringtarget -- 標簽的字段名 stringnan_value -- 缺失的映射值 int/floatcut -- 箱體分割點 listreturn:new_cut -- 調整后的分割點 list"""woe_lst = cal_woe(df,col,target,nan_value,cut = cut)# 若第一個箱體大于0,說明特征整體上服從單調遞減if woe_lst[0]>0:while not judge_decreasing(woe_lst):# 找出哪幾個箱不服從單調遞減的趨勢judge_list = [x>y for x, y in zip(woe_lst, woe_lst[1:])]# 用前向合并箱體的方式,找出需要剔除的分割點的索引,如果有缺失映射值,則索引+1if nan_value in cut:index_list = [i+2 for i,j in enumerate(judge_list) if j==False]else:index_list = [i+1 for i,j in enumerate(judge_list) if j==False]new_cut = [j for i,j in enumerate(cut) if i not in index_list]woe_lst = cal_woe(df,col,target,nan_value,cut = new_cut)# 若第一個箱體小于0,說明特征整體上服從單調遞增elif woe_lst[0]<0:while not judge_increasing(woe_lst):# 找出哪幾個箱不服從單調遞增的趨勢judge_list = [x<y for x, y in zip(woe_lst, woe_lst[1:])]# 用前向合并箱體的方式,找出需要剔除的分割點的索引,如果有缺失映射值,則索引+1if nan_value in cut:index_list = [i+2 for i,j in enumerate(judge_list) if j==False]else:index_list = [i+1 for i,j in enumerate(judge_list) if j==False]new_cut = [j for i,j in enumerate(cut) if i not in index_list]woe_lst = cal_woe(df,col,target,nan_value,cut = new_cut)return new_cut判斷一個list是否是單調變化def judge_increasing(L):"""判斷一個list是否單調遞增"""return all(x<y for x, y in zip(L, L[1:]))def 
judge_decreasing(L):"""判斷一個list是否單調遞減"""return all(x>y for x, y in zip(L, L[1:]))特征分箱,計算ivdef binning_var(df,col,target,bin_type='dt',max_bin=5,min_binpct=0.05,nan_value=-999):"""特征分箱,計算ivparam:df -- 數據集 Dataframecol -- 分箱的字段名 stringtarget -- 標簽的字段名 stringbin_type -- 分箱方式 默認是'dt',還有'quantile'(等頻分箱)max_bin -- 最大分箱數 intmin_binpct -- 箱體的最小占比 floatnan_value -- 缺失映射值 int/floatreturn:bin_df -- 特征的分箱明細表 Dataframecut -- 分割點 list"""total = df[target].count()bad = df[target].sum()good = total-bad# 離散型特征分箱,直接根據類別進行groupbyif df[col].dtype == np.dtype('object') or df[col].dtype == np.dtype('bool') or df[col].nunique()<=max_bin:group = df.groupby([col],as_index=True)bin_df = pd.DataFrame()bin_df['total'] = group[target].count()bin_df['totalrate'] = bin_df['total']/totalbin_df['bad'] = group[target].sum()bin_df['badrate'] = bin_df['bad']/bin_df['total']bin_df['good'] = bin_df['total'] - bin_df['bad']bin_df['goodrate'] = bin_df['good']/bin_df['total']bin_df['badattr'] = bin_df['bad']/badbin_df['goodattr'] = (bin_df['total']-bin_df['bad'])/goodbin_df['woe'] = np.log(bin_df['badattr']/bin_df['goodattr'])bin_df['bin_iv'] = (bin_df['badattr']-bin_df['goodattr'])*bin_df['woe']bin_df['IV'] = bin_df['bin_iv'].sum()cut = df[col].unique().tolist()# 連續型特征的分箱else:if bin_type=='dt':cut = tree_split(df,col,target,max_bin=max_bin,min_binpct=min_binpct,nan_value=nan_value)elif bin_type=='quantile':cut = quantile_split(df,col,target,max_bin=max_bin,nan_value=nan_value)cut.insert(0,float('-inf'))cut.append(float('inf'))bucket = pd.cut(df[col],cut)group = df.groupby(bucket)bin_df = pd.DataFrame()bin_df['total'] = group[target].count()bin_df['totalrate'] = bin_df['total']/totalbin_df['bad'] = group[target].sum()bin_df['badrate'] = bin_df['bad']/bin_df['total']bin_df['good'] = bin_df['total'] - bin_df['bad']bin_df['goodrate'] = bin_df['good']/bin_df['total']bin_df['badattr'] = bin_df['bad']/badbin_df['goodattr'] = (bin_df['total']-bin_df['bad'])/goodbin_df['woe'] = 
np.log(bin_df['badattr']/bin_df['goodattr'])bin_df['bin_iv'] = (bin_df['badattr']-bin_df['goodattr'])*bin_df['woe']bin_df['IV'] = bin_df['bin_iv'].sum()return bin_df,cut調整單調后的分箱,計算IVdef binning_trim(df,col,target,cut=None,right_border=True):"""調整單調后的分箱,計算IVparam:df -- 數據集 Dataframecol -- 分箱的字段名 stringtarget -- 標簽的字段名 stringcut -- 分割點 listright_border -- 箱體的右邊界是否閉合 boolreturn:bin_df -- 特征的分箱明細表 Dataframe"""total = df[target].count()bad = df[target].sum()good = total - badbucket = pd.cut(df[col],cut,right=right_border)group = df.groupby(bucket)bin_df = pd.DataFrame()bin_df['total'] = group[target].count()bin_df['totalrate'] = bin_df['total']/totalbin_df['bad'] = group[target].sum()bin_df['badrate'] = bin_df['bad']/bin_df['total']bin_df['good'] = bin_df['total'] - bin_df['bad']bin_df['goodrate'] = bin_df['good']/bin_df['total']bin_df['badattr'] = bin_df['bad']/badbin_df['goodattr'] = (bin_df['total']-bin_df['bad'])/goodbin_df['woe'] = np.log(bin_df['badattr']/bin_df['goodattr'])bin_df['bin_iv'] = (bin_df['badattr']-bin_df['goodattr'])*bin_df['woe']bin_df['IV'] = bin_df['bin_iv'].sum()return bin_df 相關性篩選def forward_corr_delete(df,col_list):"""相關性篩選,設定的閾值為0.65param:df -- 數據集 Dataframecol_list -- 需要篩選的特征集合,需要提前按IV值從大到小排序好 listreturn:select_corr_col -- 篩選后的特征集合 list"""corr_list=[]corr_list.append(col_list[0])delete_col = []# 根據IV值的大小進行遍歷for col in col_list[1:]:corr_list.append(col)corr = df.loc[:,corr_list].corr()corr_tup = [(x,y) for x,y in zip(corr[col].index,corr[col].values)]corr_value = [y for x,y in corr_tup if x!=col]# 若出現相關系數大于0.65,則將該特征剔除if len([x for x in corr_value if abs(x)>=0.65])>0:delete_col.append(col)select_corr_col = [x for x in col_list if x not in delete_col]return select_corr_col多重共線性篩選def vif_delete(df,list_corr):"""多重共線性篩選param:df -- 數據集 Dataframelist_corr -- 相關性篩選后的特征集合,按IV值從大到小排序 listreturn:col_list -- 篩選后的特征集合 list"""col_list = list_corr.copy()# 計算各個特征的方差膨脹因子vif_matrix=np.matrix(df[col_list])vifs_list=[variance_inflation_factor(vif_matrix,i) for 
i in range(vif_matrix.shape[1])]# 篩選出系數>10的特征vif_high = [x for x,y in zip(col_list,vifs_list) if y>10]# 根據IV從小到大的順序進行遍歷if len(vif_high)>0:for col in reversed(vif_high):col_list.remove(col)vif_matrix=np.matrix(df[col_list])vifs=[variance_inflation_factor(vif_matrix,i) for i in range(vif_matrix.shape[1])]# 當系數矩陣里沒有>10的特征時,循環停止if len([x for x in vifs if x>10])==0:breakreturn col_list顯著性篩選(前向/后向逐步回歸)def forward_pvalue_delete(x,y):"""顯著性篩選,前向逐步回歸param:x -- 特征數據集,woe轉化后,且字段順序按IV值從大到小排列 Dataframey -- 標簽列 Seriesreturn:pvalues_col -- 篩選后的特征集合 list"""col_list = x.columns.tolist()pvalues_col=[]# 按IV值逐個引入模型for col in col_list:pvalues_col.append(col)# 每引入一個特征就做一次顯著性檢驗x_const = sm.add_constant(x.loc[:,pvalues_col])sm_lr = sm.Logit(y,x_const)sm_lr = sm_lr.fit()pvalue = sm_lr.pvalues[col]# 當引入的特征P值>=0.05時,則剔除,原先滿足顯著性檢驗的則保留,不再剔除if pvalue>=0.05:pvalues_col.remove(col)return pvalues_coldef backward_pvalue_delete(x,y):"""顯著性篩選,后向逐步回歸param:x -- 特征數據集,woe轉化后,且字段順序按IV值從大到小排列 Dataframey -- 標簽列 Seriesreturn:pvalues_col -- 篩選后的特征集合 list"""x_c = x.copy()# 所有特征引入模型,做顯著性檢驗x_const = sm.add_constant(x_c)sm_lr = sm.Logit(y,x_const).fit()pvalue_tup = [(i,j) for i,j in zip(sm_lr.pvalues.index,sm_lr.pvalues.values)][1:]delete_count = len([i for i,j in pvalue_tup if j>=0.05])# 當有P值>=0.05的特征時,執行循環while delete_count>0:# 按IV值從小到大的順序依次逐個剔除remove_col = [i for i,j in pvalue_tup if j>=0.05][-1]del x_c[remove_col]# 每次剔除特征后都要重新做顯著性檢驗,直到入模的特征P值都小于0.05x2_const = sm.add_constant(x_c)sm_lr2 = sm.Logit(y,x2_const).fit()pvalue_tup2 = [(i,j) for i,j in zip(sm_lr2.pvalues.index,sm_lr2.pvalues.values)][1:]delete_count = len([i for i,j in pvalue_tup2 if j>=0.05])pvalues_col = x_c.columns.tolist()return pvalues_col 系數一致篩選def forward_delete_coef(x,y):"""系數一致篩選param:x -- 特征數據集,woe轉化后,且字段順序按IV值從大到小排列 Dataframey -- 標簽列 Seriesreturn:coef_col -- 篩選后的特征集合 list"""col_list = list(x.columns)coef_col = []# 按IV值逐個引入模型,輸出系數for col in col_list:coef_col.append(col)x2 = x.loc[:,coef_col]sk_lr = 
LogisticRegression(random_state=0).fit(x2,y)coef_dict = {k:v for k,v in zip(coef_col,sk_lr.coef_[0])}# 當引入特征的系數為負,則將其剔除if coef_dict[col]<0:coef_col.remove(col)return coef_col得到特征woe映射集合表def get_map_df(bin_df_list):"""得到特征woe映射集合表param:bin_df_list -- 每個特征的woe映射表 listreturn:map_merge_df -- 特征woe映射集合表 Dataframe"""map_df_list=[]for dd in bin_df_list:# 添加特征名列map_df = dd.reset_index().assign(col=dd.index.name).rename(columns={dd.index.name:'bin'})# 將特征名列移到第一列,便于查看temp1 = map_df['col']temp2 = map_df.iloc[:,:-1]map_df2 = pd.concat([temp1,temp2],axis=1)map_df_list.append(map_df2)map_merge_df = pd.concat(map_df_list,axis=0)return map_merge_df特征映射def var_mapping(df,map_df,var_map,target):"""特征映射param:df -- 原始數據集 Dataframemap_df -- 特征映射集合表 Dataframevar_map -- map_df里映射的字段名,如"woe","score" stringtarget -- 標簽字段名 stringreturn:df2 -- 映射后的數據集 Dataframe"""df2 = df.copy()# 去掉標簽字段,遍歷特征for col in df2.drop([target],axis=1).columns:x = df2[col]# 找到特征的映射表bin_map = map_df[map_df.col==col]# 新建一個映射array,填充0bin_res = np.array([0]*x.shape[0],dtype=float)for i in bin_map.index:# 每個箱的最小值和最大值lower = bin_map['min_bin'][i]upper = bin_map['max_bin'][i]# 對于類別型特征,每個箱的lower和upper時一樣的if lower == upper:x1 = x[np.where(x == lower)[0]]# 連續型特征,左開右閉else:x1 = x[np.where((x>lower)&(x<=upper))[0]]mask = np.in1d(x,x1)# 映射array里填充對應的映射值bin_res[mask] = bin_map[var_map][i]bin_res = pd.Series(bin_res,index=x.index)bin_res.name = x.name# 將原始值替換為映射值df2[col] = bin_resreturn df2繪制ROC曲線和KS曲線def plot_roc(y_label,y_pred):"""繪制roc曲線param:y_label -- 真實的y值 list/arrayy_pred -- 預測的y值 list/arrayreturn:roc曲線"""tpr,fpr,threshold = metrics.roc_curve(y_label,y_pred) AUC = metrics.roc_auc_score(y_label,y_pred) fig = plt.figure(figsize=(6,4))ax = fig.add_subplot(1,1,1)ax.plot(tpr,fpr,color='blue',label='AUC=%.3f'%AUC) ax.plot([0,1],[0,1],'r--')ax.set_ylim(0,1)ax.set_xlim(0,1)ax.set_title('ROC')ax.legend(loc='best')return plt.show(ax)def plot_model_ks(y_label,y_pred):"""繪制ks曲線param:y_label -- 真實的y值 list/arrayy_pred -- 預測的y值 
list/arrayreturn:ks曲線"""pred_list = list(y_pred) label_list = list(y_label)total_bad = sum(label_list)total_good = len(label_list)-total_bad items = sorted(zip(pred_list,label_list),key=lambda x:x[0]) step = (max(pred_list)-min(pred_list))/200 pred_bin=[]good_rate=[] bad_rate=[] ks_list = [] for i in range(1,201): idx = min(pred_list)+i*step pred_bin.append(idx) label_bin = [x[1] for x in items if x[0]<idx] bad_num = sum(label_bin)good_num = len(label_bin)-bad_num goodrate = good_num/total_good badrate = bad_num/total_badks = abs(goodrate-badrate) good_rate.append(goodrate)bad_rate.append(badrate)ks_list.append(ks)fig = plt.figure(figsize=(6,4))ax = fig.add_subplot(1,1,1)ax.plot(pred_bin,good_rate,color='green',label='good_rate')ax.plot(pred_bin,bad_rate,color='red',label='bad_rate')ax.plot(pred_bin,ks_list,color='blue',label='good-bad')ax.set_title('KS:{:.3f}'.format(max(ks_list)))ax.legend(loc='best')return plt.show(ax)計算分數校準的A,B值,基礎分def cal_scale(score,odds,PDO,model):"""計算分數校準的A,B值,基礎分param:odds:設定的壞好比 floatscore: 在這個odds下的分數 intPDO: 好壞翻倍比 intmodel:模型return:A,B,base_score(基礎分)"""B = 20/(np.log(odds)-np.log(2*odds))A = score-B*np.log(odds)base_score = A+B*model.intercept_[0]return A,B,base_score得到特征score的映射集合表def get_score_map(woe_df,coe_dict,B):"""得到特征score的映射集合表param:woe_df -- woe映射集合表 Dataframecoe_dict -- 系數對應的字典return:score_df -- score的映射集合表 Dataframe"""scores=[]for cc in woe_df.col.unique():woe_list = woe_df[woe_df.col==cc]['woe'].tolist()coe = coe_dict[cc]score = [round(coe*B*w,0) for w in woe_list]scores.extend(score)woe_df['score'] = scoresscore_df = woe_df.copy()return score_df繪制好壞用戶得分分布圖def plot_score_hist(df,target,score_col,title,plt_size=None):"""繪制好壞用戶得分分布圖param:df -- 數據集 Dataframetarget -- 標簽字段名 stringscore_col -- 模型分的字段名 stringplt_size -- 繪圖尺寸 tupletitle -- 圖表標題 stringreturn:好壞用戶得分分布圖""" plt.figure(figsize=plt_size)plt.title(title)x1 = df[df[target]==1][score_col]x2 = 
df[df[target]==0][score_col]sns.kdeplot(x1,shade=True,label='bad',color='hotpink')sns.kdeplot(x2,shade=True,label='good',color ='seagreen')plt.legend()return plt.show()評分卡建模def get_scorecard_model(train_data,test_data,target,nan_value=-999,score=400,odds=999/1,pdo=20):"""評分卡建模param:train_data -- 訓練數據集,預處理好的 Dataframetest_data -- 測試數據集,預處理好的 Dataframetarget -- 標簽字段名 stringnan_value -- 缺失的映射值 int 默認-999odds -- 設定的壞好比 float 默認999/1score -- 在這個odds下的分數 int 默認400PDO -- 好壞翻倍比 int 默認20return:lr_model -- lr模型score_map_df -- woe,score映射集合表 Dataframevalid_score -- 驗證集模型分表 Dataframetest_score -- 測試集模型分表 Dataframe"""# psi篩選,剔除psi大于0.25以上的特征all_col = [x for x in train_data.columns if x!=target]psi_tup = []for col in all_col:psi,psi_bin_df = cal_psi(train_data,test_data,col)psi_tup.append((col,psi))psi_delete = [x for x,y in psi_tup if y>=0.25]train = train_data.drop(psi_delete,axis=1)print('psi篩選特征完成')print('-------------')# 特征分箱,默認用的是決策樹分箱train_col = [x for x in train.columns if x!=target]bin_df_list=[]cut_list=[]for col in train_col:try:bin_df,cut = binning_var(train,col,target)bin_df_list.append(bin_df)cut_list.append(cut)except:passprint('特征分箱完成')print('-------------')# 剔除iv無限大的特征bin_df_list = [x for x in bin_df_list if x.IV.iloc[0]!=float('inf')]# 保存每個特征的分割點listcut_dict={}for dd,cc in zip(bin_df_list,cut_list):col = dd.index.namecut_dict[col] = cc# 將IV從大到小進行排序iv_col = [x.index.name for x in bin_df_list]iv_value = [x.IV.iloc[0] for x in bin_df_list]iv_sort = sorted(zip(iv_col,iv_value),key=lambda x:x[1],reverse=True)# iv篩選,篩選iv大于0.02的特征iv_select_col = [x for x,y in iv_sort if y>=0.02]print('iv篩選特征完成')print('-------------')# 特征分類cate_col = []num_col = []for col in iv_select_col:if train[col].dtype==np.dtype('object') or train[col].dtype==np.dtype('bool') or train[col].nunique()<=5:cate_col.append(col)else:num_col.append(col)#相關性篩選,相關系數閾值0.65corr_select_col = forward_corr_delete(train,num_col)print('相關性篩選完成')print('-------------')# 多重共線性篩選,系數閾值10vif_select_col = 
vif_delete(train,corr_select_col)print('多重共線性篩選完成')print('-------------')# 自動調整單調分箱trim_var_dict = {k:v for k,v in cut_dict.items() if k in vif_select_col}trim_bin_list=[]for col in trim_var_dict.keys():bin_cut = trim_var_dict[col]df_bin = [x for x in bin_df_list if x.index.name==col][0]woe_lst = df_bin['woe'].tolist()if not judge_decreasing(woe_lst) and not judge_increasing(woe_lst):monot_cut = monot_trim(train, col, target, nan_value=nan_value, cut=bin_cut)monot_bin_df = binning_trim(train, col, target, cut=monot_cut, right_border=True)trim_bin_list.append(monot_bin_df)else:trim_bin_list.append(df_bin)# 調整后的分箱再根據iv篩選一遍select_num_df = []for dd in trim_bin_list:if dd.IV.iloc[0]>=0.02:select_num_df.append(dd)print('自動調整單調分箱完成')print('-------------')# 連續型特征的woe映射集合表woe_map_num = get_map_df(select_num_df)woe_map_num['bin'] = woe_map_num['bin'].map(lambda x:str(x))woe_map_num['min_bin'] = woe_map_num['bin'].map(lambda x:x.split(',')[0][1:])woe_map_num['max_bin'] = woe_map_num['bin'].map(lambda x:x.split(',')[1][:-1])woe_map_num['min_bin'] = woe_map_num['min_bin'].map(lambda x:float(x))woe_map_num['max_bin'] = woe_map_num['max_bin'].map(lambda x:float(x))if len(cate_col)>0:bin_cate_list = [x for x in bin_df_list if x.index.name in cate_col]# 剔除woe不單調的離散形特征select_cate_df=[]for i,dd in enumerate(bin_cate_list):woe_lst = dd['woe'].tolist()if judge_decreasing(woe_lst) or judge_increasing(woe_lst):select_cate_df.append(dd)# 離散型特征的woe映射集合表if len(select_cate_df)>0:woe_map_cate = get_map_df(select_cate_df)woe_map_cate['min_bin'] = list(woe_map_cate['bin'])woe_map_cate['max_bin'] = list(woe_map_cate['bin'])woe_map_df = pd.concat([woe_map_cate,woe_map_num],axis=0).reset_index(drop=True)else:woe_map_df = woe_map_num.reset_index(drop=True)# 顯著性篩選,前向逐步回歸select_all_col = woe_map_df['col'].unique().tolist()select_sort_col = [x for x,y in iv_sort if x in select_all_col]train2 = train.loc[:,select_sort_col+[target]].reset_index(drop=True)# woe映射train_woe = 
var_mapping(train2,woe_map_df,'woe',target)X = train_woe.loc[:,select_sort_col]y = train_woe[target]pvalue_select_col = forward_pvalue_delete(X,y)print('顯著性篩選完成')print('-------------')# 剔除系數為負數的特征X2 = X.loc[:,pvalue_select_col]coef_select_col = forward_delete_coef(X2,y)# LR建模X3 = X2.loc[:,coef_select_col]x_train,x_valid,y_train,y_valid = train_test_split(X3,y,test_size=0.2,random_state=0)# 保存驗證集的indexvalid_index = x_valid.index.tolist()lr_model = LogisticRegression(C=1.0).fit(x_train,y_train)print('建模完成')print('-------------')# 繪制驗證集的auc,ksvalid_pre = lr_model.predict_proba(x_valid)[:,1]print('驗證集的AUC,KS:')plot_roc(y_valid,valid_pre)plot_model_ks(y_valid,valid_pre)woe_map_df2 = woe_map_df[woe_map_df.col.isin(coef_select_col)].reset_index(drop=True)# 繪制測試集的auc,kstest = test_data.loc[:,coef_select_col+[target]].reset_index(drop=True)test_woe = var_mapping(test,woe_map_df2,'woe',target)x_test = test_woe.drop([target],axis=1)y_test = test_woe[target]test_pre = lr_model.predict_proba(x_test)[:,1]print('測試集的AUC,KS:')plot_roc(y_test,test_pre)plot_model_ks(y_test,test_pre)# 評分轉換A,B,base_score = cal_scale(score,odds,pdo,lr_model)score_map_df = get_score_map(woe_map_df2,lr_model,B)# 分數映射valid_data = train2.iloc[valid_index,:].loc[:,coef_select_col+[target]].reset_index(drop=True)valid_score = var_mapping(valid_data,score_map_df,'score',target)valid_score['final_score'] = base_scorefor col in coef_select_col:valid_score['final_score']+=valid_score[col]valid_score['final_score'] = valid_score['final_score'].map(lambda x:int(x))test_score = var_mapping(test,score_map_df,'score',target)test_score['final_score'] = base_scorefor col in coef_select_col:test_score['final_score']+=test_score[col]test_score['final_score'] = test_score['final_score'].map(lambda x:int(x))print('評分轉換完成')print('-------------')# 驗證集的評分分布plot_score_hist(valid_score, target, 'final_score','vaild_score',plt_size=(6,4))# 測試集的評分分布plot_score_hist(test_score, target, 
'final_score','test_score',plt_size=(6,4))return lr_model,score_map_df,valid_score,test_score關于作者:
本人就職于某金融科技公司從事風控建模工作,歡迎交流。
對于風控和機器學習感興趣的童鞋可以關注下我的公眾號:風控汪的數據分析之路。
總結
以上是生活随笔為你收集整理的python 去除nan inf_Python实现半自动评分卡建模(附代码)的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: JAVA入门级教学之(if语句)
- 下一篇: 浙江嘉兴计算机学校排名,嘉兴计算机考研线