Study notes (work in progress)

https://zenn.dev/s1ok69oo/articles/4a36fee0297234  
https://www.lifull.blog/entry/2020/04/20/114200  
!pip install econml
!pip install japanize_matplotlib

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import japanize_matplotlib
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from econml.metalearners import TLearner
from econml.dr import DRLearner
%matplotlib inline
size = 1000
np.random.seed(0)
x = np.random.uniform(-1, 1, size)
noise = np.random.randn(size)
_T = x + noise
T = np.where(_T>0, 1, 0)
t = np.zeros(size)
for i in range(size):
  if x[i] < 0:
    t[i] = 1
  elif x[i] < 0.5:
    t[i] = 2
  else:
    t[i] = 3
noise = np.random.uniform(-1, 1, size)  # line was left incomplete in the notes; assumed to redraw noise from a uniform distribution
from sklearn.datasets import load_diabetes
diabetes = load_diabetes(as_frame=True)
diabetes.data['bmi_flg'] = diabetes.data['bmi'].apply(lambda x:1 if x>=0 else 0)
diabetes.data.head()
X = diabetes.data.drop(['bmi', 'bmi_flg'], axis=1)
df = pd.concat([diabetes.data, diabetes.target], axis=1)
df.head()
# Step 1: fit a regression model on the untreated group (bmi_flg == 0)
df_t0 = df[df["bmi_flg"] == 0]
reg_0 = RandomForestRegressor(max_depth=3, random_state=0)
reg_0.fit(df_t0.drop(['bmi', 'bmi_flg', 'target'], axis=1), df_t0['target'])

# Step 2: fit a regression model on the treated group (bmi_flg == 1)
df_t1 = df[df["bmi_flg"] == 1]
reg_1 = RandomForestRegressor(max_depth=3, random_state=0)
reg_1.fit(df_t1.drop(['bmi', 'bmi_flg', 'target'], axis=1), df_t1['target'])

# Step 3: estimate the treatment effect
mu_0 = reg_0.predict(df.drop(['bmi', 'bmi_flg', 'target'], axis=1))
mu_1 = reg_1.predict(df.drop(['bmi', 'bmi_flg', 'target'], axis=1))
tau = mu_1 - mu_0
plt.scatter(df['bmi_flg'], tau, alpha=0.3)
from sklearn.ensemble import RandomForestRegressor
from econml.metalearners import TLearner
models = RandomForestRegressor(max_depth=3, random_state=0)
T_learner = TLearner(models=models)
T_learner.fit(df['target'], df['bmi_flg'], X=df.drop(['bmi', 'bmi_flg', 'target'], axis=1))
tau = T_learner.effect(df.drop(['bmi', 'bmi_flg', 'target'], axis=1))
plt.scatter(df['bmi'], tau, alpha=0.3)
# plt.hlines(1000, -1, 0, linestyles='--', color="red")
# plt.hlines(1000, -1, 1, linestyles='--', color="red", alpha=0.3)
# plt.hlines(2000, 0, 0.5, linestyles='--', color="red")
# plt.hlines(2000, -1, 1, linestyles='--', color="red", alpha=0.3)
# plt.hlines(3000, 0.5, 1.0, linestyles='--', color="red")
# plt.hlines(3000, -1, 1, linestyles='--', color="red", alpha=0.3)
# plt.xlabel("diet awareness")
# plt.ylabel("ad effect")
# plt.ylim(0, 4000)
# plt.show()
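# Sketch (an assumption, not taken from the referenced articles): the DRLearner and
# LogisticRegression imported above are otherwise unused; on the same diabetes setup a
# doubly robust estimate could look like this.
dr_learner = DRLearner(
    model_propensity=LogisticRegression(max_iter=1000),                   # models P(bmi_flg=1 | X)
    model_regression=RandomForestRegressor(max_depth=3, random_state=0),  # models E[target | X, bmi_flg]
)
dr_learner.fit(df['target'], df['bmi_flg'], X=df.drop(['bmi', 'bmi_flg', 'target'], axis=1))
tau_dr = dr_learner.effect(df.drop(['bmi', 'bmi_flg', 'target'], axis=1))
plt.scatter(df['bmi'], tau_dr, alpha=0.3)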

!pip install pycausalimpact
# Import the required libraries
import pandas as pd
import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess
from matplotlib import pyplot as plt
from causalimpact import CausalImpact  # the CausalImpact library

%matplotlib inline
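# Note: the daily KPI DataFrame used below is never built in these notes. A minimal sketch
# (an assumption, with made-up numbers) that simulates one with the ArmaProcess imported above:
np.random.seed(0)
dates = pd.date_range('2019-10-01', '2020-02-29', freq='D')
ar1_noise = ArmaProcess(ar=np.r_[1, -0.9]).generate_sample(nsample=len(dates))  # AR(1) background series
kpi = 100 + 5 * ar1_noise
kpi[dates >= '2020-01-08'] += 10  # hypothetical lift after the TVCM start
df = pd.DataFrame({'date': dates, 'y': kpi})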
# Set the date column as the index so the CausalImpact output is easier to read
df = df.set_index('date')
df.head()
# Specify the periods before and after the TVCM airing
pre_period = ['2019-10-01', '2020-01-07']  # before the TVCM started
post_period = ['2020-01-08', '2020-02-29']  # after the TVCM started

# Run CausalImpact
ci = CausalImpact(df, pre_period, post_period)
ci.plot(figsize=(22, 20))
ci.trained_model.summary()
# Run CausalImpact again, this time accounting for a 7-day (weekly) seasonality
ci = CausalImpact(df, pre_period, post_period, nseasons=[{'period': 7}])
ci.plot(figsize=(22, 20))
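# The estimated effect can also be read as text (a small addition to the notes):
print(ci.summary())                 # point estimate, interval, and posterior probability
print(ci.summary(output='report'))  # longer natural-language report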

========================================================================

 
 
import pandas as pd

def compare_dataframes(df1, df2, key_col):
    """Report row-count, column, and cell-level differences between two DataFrames keyed on key_col."""

    # Check row counts
    if len(df1) != len(df2):
        print(f"Row counts are different: {len(df1)} in df1, {len(df2)} in df2")

    # Check column names
    for col in df1.columns:
        if col not in df2.columns:
            print('self: ', col)

    for col in df2.columns:
        if col not in df1.columns:
            print('other: ', col)

    # Compare cell values on the shared keys and shared columns
    check_id_list = pd.merge(df1, df2, on=key_col, how='inner')[key_col].unique().tolist()
    check_col_list = [col for col in df1.columns if col in df2.columns]
    df1_prepared = df1[df1[key_col].isin(check_id_list)][check_col_list].sort_values(key_col).reset_index(drop=True)
    df2_prepared = df2[df2[key_col].isin(check_id_list)][check_col_list].sort_values(key_col).reset_index(drop=True)
    diff = df1_prepared.compare(df2_prepared)  # compare() needs identically labeled frames, hence the sort/reset above

    # Print differences
    print(diff)

# Example usage
data1 = {'A': [1, 2, 3], 'B': [4, 5, 6]}
data2 = {'A': [1, 2, 3], 'B': [4, 5, 7]}
df1 = pd.DataFrame(data1)
df2 = pd.DataFrame(data2)

compare_dataframes(df1, df2, 'A')
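# For this toy example, only column 'B' should differ, at row index 2 (self=6, other=7)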

 
 
 
 
 
 
 

label_key = 'brand'
# Minimum number of rows among the labels
minimum_num = train_data[label_key].value_counts().min()
# Sample that minimum number of rows from each label
dfs = [d.sample(minimum_num, random_state=0) for name, d in train_data.groupby(label_key)]
# Concatenate; the result is ordered by label, so shuffle it
under_resampled_df = pd.concat(dfs).sample(frac=1, random_state=0)

X_resampled = under_resampled_df.drop(labels=label_key, axis=1)
y_resampled = under_resampled_df[label_key]
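# Quick check (added): every label should now appear exactly minimum_num times
print(y_resampled.value_counts())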

 

### TPOT characteristics: no categorical-dtype support + tuning options seem weak
!pip install TPOT
from tpot import TPOTClassifier
#model = TPOTClassifier(generations=5, population_size=50, scoring='accuracy', verbosity=2, random_state=42, n_jobs=-1)
model = TPOTClassifier(generations=5, population_size=25, scoring='accuracy', verbosity=2, random_state=42, n_jobs=-1)
#model = TPOTClassifier(generations=5, population_size=25, scoring='roc_auc', verbosity=2, random_state=42, n_jobs=-1)
model.fit(train_data_encoded.drop('purchase', axis=1), train_data_encoded['purchase'])
model.fitted_pipeline_
model.evaluated_individuals_
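# Sketch (addition to the notes): classic TPOT can export the best pipeline as a standalone sklearn script
model.export('tpot_best_pipeline.py')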

predict_y = model.predict(test_data_encoded.drop('purchase', axis=1))
###generations=5, population_size=50, scoring='roc_auc', verbosity=2, random_state=42, n_jobs=-1
from sklearn.metrics import roc_auc_score
roc_auc_score(test_data_encoded['purchase'], predict_y)
###generations=5, population_size=25, scoring='roc_auc', verbosity=2, random_state=42, n_jobs=-1
from sklearn.metrics import roc_auc_score
roc_auc_score(test_data_encoded['purchase'], predict_y)
###generations=5, population_size=25, scoring='accuracy', verbosity=2, random_state=42, n_jobs=-1
from sklearn.metrics import roc_auc_score
roc_auc_score(test_data_encoded['purchase'], predict_y)
# Reference 1: http://epistasislab.github.io/tpot/related/
# Reference 2: https://qiita.com/ksonoda/items/965bcb072984a1fb3cbb
# Reference 3: https://qiita.com/issakuss/items/f05d90cc5893ecce8b1a

### AutoGluon characteristics: handles categorical dtypes + has tuning support (some of its processing is debatable, but features such as model extraction are convenient)
dir_base_name = "autogluon"
eval_metric = "roc_auc"
dir_auc = f"{dir_base_name}_auc"

### Using presets
from autogluon.tabular import TabularPredictor
predictor = TabularPredictor(label='purchase', path=dir_auc, eval_metric=eval_metric, verbosity=3).fit(train_data=train_data_encoded, presets='best_quality')  # 'best_quality' is heavy; 'good_quality' or 'optimize_for_deployment' may be better for real use
#predictor = TabularPredictor(label='purchase').fit(train_data=train_data_encoded)
test_perf = predictor.leaderboard(test_data_encoded, silent=True)
display(test_perf)

### Using the hyperparameters option
from autogluon.common import space
nn_options = {  # specifies non-default hyperparameter values for neural network models
    'num_epochs': 10,  # number of training epochs (controls training time of NN models)
    'learning_rate': space.Real(1e-4, 1e-2, default=5e-4, log=True),  # learning rate used in training (real-valued hyperparameter searched on log-scale)
    'activation': space.Categorical('relu', 'softrelu', 'tanh'),  # activation function used in NN (categorical hyperparameter, default = first entry)
    'dropout_prob': space.Real(0.0, 0.5, default=0.1),  # dropout probability (real-valued hyperparameter)
}
gbm_options = {  # specifies non-default hyperparameter values for lightGBM gradient boosted trees
    'num_boost_round': 100,  # number of boosting rounds (controls training time of GBM models)
    'num_leaves': space.Int(lower=26, upper=66, default=36),  # number of leaves in trees (integer hyperparameter)
}
hyperparameters = {  # hyperparameters of each model type
                   'GBM': gbm_options,
                   'NN_TORCH': nn_options,  # NOTE: comment this line out if you get errors on Mac OSX
                  }  # When these keys are missing from hyperparameters dict, no models of that type are trained

time_limit = 2*60  # train various models for ~2 min
num_trials = 5  # try at most 5 different hyperparameter configurations for each type of model
search_strategy = 'auto'  # to tune hyperparameters using random search routine with a local scheduler

hyperparameter_tune_kwargs = {  # HPO is not performed unless hyperparameter_tune_kwargs is specified
    'num_trials': num_trials,
    'scheduler' : 'local',
    'searcher': search_strategy,
}  # Refer to TabularPredictor.fit docstring for all valid values

predictor = TabularPredictor(label='purchase', path=dir_auc, eval_metric=eval_metric, verbosity=3).fit(
    train_data=train_data_encoded,
    time_limit=time_limit,
    hyperparameters=hyperparameters,
    hyperparameter_tune_kwargs=hyperparameter_tune_kwargs,
)

results = predictor.fit_summary()
feature_importances = predictor.feature_importance(test_data_encoded)
print("特徴量の重要度:")
display(feature_importances)
print("AutoGluonが推察した問題のタイプ:", predictor.problem_type)
print("AutoGluonが各特徴量に対して推察したデータの型:")
print(predictor.feature_metadata.to_dict())

model_perf = predictor.leaderboard(test_data_encoded, silent=True)
display(model_perf)

loaded_model = predictor._trainer.load_model("WeightedEnsemble_L2")
y_pred = predictor.predict(test_data_encoded)
perf = predictor.evaluate_predictions(y_true=test_data_encoded['purchase'], y_pred=y_pred, auxiliary_metrics=True)
print("Predictions:\n", y_pred)
y_predprob = predictor.predict_proba(test_data_encoded)
display(pd.DataFrame(y_predprob,columns=predictor.class_labels))
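# Note (addition to the notes): the predictor is persisted under dir_auc during fit and can be reloaded later
predictor_loaded = TabularPredictor.load(dir_auc)
display(predictor_loaded.leaderboard(test_data_encoded, silent=True))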

# Reference 1: https://aws.amazon.com/jp/builders-flash/202201/autogluon-tabular-tutorials/?awsf.filter-name=*all
# Reference 2: https://auto.gluon.ai/stable/index.html

 
!pip install pycaret
#from pycaret.regression import *
from pycaret.classification import *
#exp1 = setup(train_data, target = 'purchase', ignore_features = ['key_col', 'product_id', 'category_id'])
exp1 = setup(data=train_data, target = 'purchase', session_id=123, ignore_features = ['key_col', 'product_id', 'category_id'])
compare_models()
gbc = create_model('gbc')
tuned_gbc = tune_model(gbc)
plot_model(tuned_gbc, plot = 'auc')
plot_model(tuned_gbc, plot = 'confusion_matrix')
evaluate_model(tuned_gbc)
final_gbc = finalize_model(tuned_gbc)
predictions_result = predict_model(final_gbc, data=test_data)  # test_data: held-out data (assumed; not defined in these notes)
predictions_result.head()
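# Sketch (addition to the notes) of the analysis helpers mentioned in the summary at the end:
interpret_model(tuned_gbc)   # SHAP-based feature importance summary (tree-based models)
dashboard(tuned_gbc)         # interactive explainer dashboard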

 
 
 
 
!pip install pycaret
from pycaret.classification import *
import pandas as pd
import numpy as np
import sklearn.datasets
iris = sklearn.datasets.load_iris()
data1 = pd.DataFrame(data=np.c_[iris['data'], iris['target']],
                     columns=iris['feature_names'] + ['target'])
data1.head()
len(data1)
exp_name = setup(data1, target = 'target')
best_model = compare_models()
evaluate_model(best_model)
!pip install TPOT
from tpot import TPOTClassifier
model = TPOTClassifier(generations=5, population_size=50, scoring='accuracy', verbosity=2, random_state=1, n_jobs=-1)
model.fit(data1.drop('target', axis=1), data1['target'])  # TPOT expects a 1-D target

!pip install autogluon
from autogluon.tabular import TabularDataset, TabularPredictor
train_data = TabularDataset(data1)
predictor = TabularPredictor(label='target').fit(train_data=train_data)
#predictions = predictor.predict(test_data)
from sklearn.model_selection import train_test_split
# Random seed
RANDOM_STATE = 10

# Proportion of data held out for evaluation
TEST_SIZE = 0.2

# Create the training and evaluation sets
x_train, x_test, y_train, y_test = train_test_split(
    data1.drop('target', axis=1),
    data1[['target']],
    test_size=TEST_SIZE,
    random_state=RANDOM_STATE,
)
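# Hedged follow-up (addition to the notes): refit on the train split and score on the held-out
# split created above, so the evaluation is out-of-sample
train_df = TabularDataset(pd.concat([x_train, y_train], axis=1))
test_df = TabularDataset(pd.concat([x_test, y_test], axis=1))
predictor_split = TabularPredictor(label='target').fit(train_data=train_df)
print(predictor_split.evaluate(test_df))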
 
pycaret - analysis features available (preprocessing included)
feature importance, PDP, and SHAP are all available → interpret_model
usability is also good → dashboard
 
tpot - no analysis features (preprocessing included)
 
autogluon - partial analysis features (preprocessing included)
feature importance only