본문 바로가기
머신러닝 & 딥러닝

11. 다양한 모델 적용

by 곽정우 2024. 6. 13.

1.  AirQualityUCI 데이터셋

# Load the AirQualityUCI dataset and inspect its structure (dtypes, nulls).
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# NOTE(review): Colab/Drive-specific path — adjust when running elsewhere.
air_df = pd.read_csv('/content/drive/MyDrive/KDT/6.머신러닝과 딥러닝/Data/AirQualityUCI.csv')
air_df.info()

AirQualityUCI.csv
0.74MB

Date: 측정 날짜
Time: 측정 시간
CO(GT): 일산화탄소 농도 (mg/m^3)
PT08.S1(CO): 일산화탄소에 대한 센서 응답
NMHC(GT): 비메탄 탄화수소 농도 (microg/m^3)
C6H6(GT): 벤젠 농도 (microg/m^3)
PT08.S2(NMHC): 탄화수소에 대한 센서 응답
NOx(GT): 산화 질소 농도 (ppb)
PT08.S3(NOx): 산화 질소에 대한 센서 응답
NO2(GT): 이산화질소 농도 (microg/m^3)
PT08.S4(NO2): 이산화질소에 대한 센서 응답
PT08.S5(O3): 오존에 대한 센서 응답
T: 온도 (°C)
RH: 상대 습도 (%)
AH: 절대 습도 (g/m^3)
# Drop the two empty trailing columns the CSV export adds, then remove rows
# with missing values and re-check the frame.
air_df = air_df.drop(columns=['Unnamed: 15', 'Unnamed: 16'])
air_df = air_df.dropna()
air_df.info()

# Convert the Date column from string to datetime (day-month-year format).
air_df['Date'] = pd.to_datetime(air_df['Date'], format='%d-%m-%Y')
air_df.head()

# Derive a Month feature from the parsed Date column.
air_df['Month'] = air_df['Date'].dt.month
air_df.head()

# Derive an Hour feature: take the part of Time before the first ':'.
# assumes Time strings look like 'HH:MM:SS' — TODO confirm against the CSV
hour_part = air_df['Time'].str.split(':').str[0]
air_df['Hour'] = hour_part.fillna(0).astype(int)
air_df.head()

# The raw Date/Time columns are no longer needed now that Month/Hour exist.
air_df = air_df.drop(columns=['Date', 'Time'])
air_df.head()

# Visualize pairwise correlations between all numeric columns.
corr = air_df.corr()
plt.figure(figsize=(12, 12))
sns.heatmap(corr, annot=True, vmin=-1, vmax=1, cmap='coolwarm')

# Standardize every feature except the target (RH), then split 80/20.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.metrics import mean_squared_error

scaler = StandardScaler()
X = air_df.drop('RH', axis=1)  # feature matrix
y = air_df['RH']               # target: relative humidity (%)
Xss = scaler.fit_transform(X)
Xss

X_train, X_test, y_train, y_test = train_test_split(
    Xss, y, test_size=0.2, random_state=2024)
X_train.shape, y_train.shape

X_test.shape, y_test.shape

2. 모델별 성능 확인하기

# 문제

# MSE로 확인하기
# Linear Regression
# Decision Tree Regression
# Random Forest Regression
# Support Vector (Machine) Regression
# lightGBM Regression

 

# Shared state for the evaluation helpers below:
# my_predictions maps a model name -> its test-set MSE (filled by mse_eval).
my_predictions = {}
# Palette the bar chart in mse_eval samples from at random.
colors = ['r', 'c', 'm', 'y', 'k', 'khaki', 'teal', 'orchid', 'sandybrown',
          'greenyellow', 'dodgerblue', 'deepskyblue', 'rosybrown', 'firebrick',
          'deeppink', 'crimson', 'salmon', 'darkred', 'olivedrab', 'olive',
          'forestgreen', 'royalblue', 'indigo', 'navy', 'mediumpurple', 'chocolate',
          'gold', 'darkorange', 'seagreen', 'turquoise', 'steelblue', 'slategray',
          'peru', 'midnightblue', 'slateblue', 'dimgray', 'cadetblue', 'tomato']
def plot_predictions(name_, pred, actual):
  """Scatter-plot predictions (red x) against ground truth (black o), sorted by the actual value.

  Args:
    name_: chart title (model name).
    pred: predicted values for the evaluation set.
    actual: ground-truth values for the evaluation set.
  """
  # BUG FIX: the original ignored the `actual` parameter and hard-coded the
  # global y_test, so the plot was wrong for any other target series.
  df = pd.DataFrame({'prediction': pred, 'actual': actual})
  df = df.sort_values(by='actual').reset_index(drop=True)
  plt.figure(figsize=(12, 9))
  plt.scatter(df.index, df['prediction'], marker='x', color='r')
  plt.scatter(df.index, df['actual'], alpha=0.7, marker='o', color='black')
  plt.title(name_, fontsize=15)
  plt.legend(['prediction', 'actual'], fontsize=12)
  plt.show()
def mse_eval(name_, pred, actual):
  """Record a model's test-set MSE and redraw the ranking chart of all models so far.

  Args:
    name_: model name used as the key in the shared my_predictions dict.
    pred: predicted values.
    actual: ground-truth values.
  """
  plot_predictions(name_, pred, actual)

  # Store this model's MSE in the shared registry.
  my_predictions[name_] = mean_squared_error(pred, actual)

  # Rank every model recorded so far, worst (largest MSE) first.
  ranked = sorted(my_predictions.items(), key=lambda item: item[1], reverse=True)
  score_df = pd.DataFrame(ranked, columns=['model', 'mse'])
  print(score_df)

  xmin = score_df['mse'].min() - 10
  xmax = score_df['mse'].max() + 10

  plt.figure(figsize=(10, len(score_df)))
  ax = plt.subplot()
  ax.set_yticks(np.arange(len(score_df)))
  ax.set_yticklabels(score_df['model'], fontsize=15)
  bars = ax.barh(np.arange(len(score_df)), score_df['mse'])

  # Paint each bar with a random pick from the shared palette and label it.
  for i, v in enumerate(score_df['mse']):
    idx = np.random.choice(len(colors))
    bars[i].set_color(colors[idx])
    ax.text(v + 2, i, str(round(v, 3)), color='k', fontsize=15, fontweight='bold')

  plt.title('MSE Error', fontsize=18)
  plt.xlim(xmin, xmax)

  plt.show()

 

2-1. Linear Regression

# Ordinary least-squares baseline.
from sklearn.linear_model import LinearRegression

model = LinearRegression().fit(X_train, y_train)
pred1 = model.predict(X_test)
pred1

# RMSE for the final model comparison at the end of the notebook.
rs1 = np.sqrt(mean_squared_error(y_test, pred1))
mse_eval('Linear Regression', pred1, y_test)

 

2-2. Decision Tree Regression

# Single decision tree with default hyperparameters (unseeded).
from sklearn.tree import DecisionTreeRegressor

model2 = DecisionTreeRegressor()
pred2 = model2.fit(X_train, y_train).predict(X_test)
pred2

rs2 = np.sqrt(mean_squared_error(y_test, pred2))
rs2

mse_eval('Decision Tree Regression', pred2, y_test)

 

2-3. Random Forest Regression

# Bagged tree ensemble with default hyperparameters (unseeded).
from sklearn.ensemble import RandomForestRegressor

model3 = RandomForestRegressor()
pred3 = model3.fit(X_train, y_train).predict(X_test)
pred3

rs3 = np.sqrt(mean_squared_error(y_test, pred3))
rs3

mse_eval('Random Forest Regression', pred3, y_test)
 

 

2-4. Support Vector Machine

# Support vector regression with default hyperparameters.
from sklearn.svm import SVR

model4 = SVR()
model4.fit(X_train, y_train)
pred4 = model4.predict(X_test)
pred4

rs4 = np.sqrt(mean_squared_error(y_test, pred4))
rs4

# FIX: corrected the misspelled chart/registry label 'Suppert Vector Machine'.
mse_eval('Support Vector Machine', pred4, y_test)

 

2-5. lightGBM

# Gradient-boosted trees (LightGBM), seeded for reproducibility.
from lightgbm import LGBMRegressor

model5 = LGBMRegressor(random_state=2024)
pred5 = model5.fit(X_train, y_train).predict(X_test)
pred5

rs5 = np.sqrt(mean_squared_error(y_test, pred5))
rs5

mse_eval('lightGBM', pred5, y_test)

# Gather every model's RMSE and report the best (lowest-RMSE) one(s).
dic = {'LinearRegression': rs1,
       'DecisionTreeRegressor': rs2,
       'RandomForestRegressor': rs3,
       'SVR': rs4,
       'lightGBM': rs5}

# FIX: the original scanned the dict with an O(n^2) all(...) comprehension and
# then bound the result to a variable named `min`, shadowing the builtin.
best_rmse = min(dic.values())
res = [key for key, value in dic.items() if value == best_rmse]  # ties kept
print(res)
best_models = {k: dic[k] for k in res}
print(best_models)

'머신러닝 & 딥러닝' 카테고리의 다른 글

13. 파이토치(Pytorch)  (0) 2024.06.13
12. KMeans  (0) 2024.06.13
10. lightGBM  (0) 2024.06.13
9. 랜덤 포레스트  (0) 2024.06.12
8. 서포트 벡터 머신  (0) 2024.06.12