from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from sklearn.model_selection import cross_val_score
# Regression models to compare. The single tree is depth-capped so it stays
# interpretable; the three linear models share the same strong penalty (alpha=10)
# so their regularizers are directly comparable.
dt_reg = DecisionTreeRegressor(random_state=0, max_depth=4)
ridge = Ridge(alpha=10)
lasso = Lasso(alpha=10)
ela = ElasticNet(alpha=10)
# Bagging/boosting baselines kept for reference but excluded from this run:
# rf_reg = RandomForestRegressor(random_state=0, n_estimators=1000)
# gb_reg = GradientBoostingRegressor(random_state=0, n_estimators=1000)
xgb_reg = XGBRegressor(n_estimators=1000)
lgb_reg = LGBMRegressor(n_estimators=1000)

# Quick sanity check on ridge: cross-validated scores (negated MSE, so values
# closer to 0 are better) before the full train/test comparison below.
score = cross_val_score(ridge, X, Y, scoring='neg_mean_squared_error')
print(score)
# Iterate over the regression models, fitting each one and reporting
# its hold-out error (MSE, RMSE) and R^2 on the test split.
models = [dt_reg, ridge, lasso, ela, xgb_reg, lgb_reg]
for model in models:
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    mse = mean_squared_error(y_test, y_pred)
    rmse = np.sqrt(mse)
    print(model.__class__.__name__)
    print('MSE: {:.3f}, RMSE: {:.3f}'.format(mse, rmse))
    print('R^2(Variance score):{:.3f}'.format(r2_score(y_test, y_pred)))
# Hyperparameter settings tried for the boosting models (notes, not code —
# the comma-separated assignments below are not valid Python statements):
#   n_estimators = 1000
#   n_estimators = 100
#   n_estimators = 500,  learning_rate = 0.05
#   n_estimators = 1000, learning_rate = 0.05
#   n_estimators = 3000, learning_rate = 0.01
# Scraped blog-footer navigation ('머신러닝 > 개념익히기' category — other posts):
#   ML 결정 트리 모델 시각화 Graphviz (2) | 2023.11.28
#   ML 데이터 전처리, Data preprocessing 정의 (0) | 2023.11.26
#   ML 앙상블 Ensemble- 랜덤포레스트, RandomForest (0) | 2023.11.02
#   ML F1 score, ROC 곡선과 AUC (0) | 2023.10.23
#   ML 정밀도Precision와 재현율Recall, 트레이드오프Trade-off (1) | 2023.10.22