# Binary classification on the scikit-learn breast-cancer dataset,
# using only the first 10 features.
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression

data = load_breast_cancer()
# Invert labels 0 and 1 so the original class 0 becomes the positive class.
y = 1 - data.target
X = data.data[:, :10]

# Fit on the full dataset and predict on the same data (no train/test split
# here — the article evaluates in-sample metrics).
model_lor = LogisticRegression(max_iter=1000)
model_lor.fit(X, y)
y_pred = model_lor.predict(X)
・ A 2 x 2 matrix is displayed ・ It cross-tabulates the actual labels against the predicted labels ・ The top-left cell is (0, 0) and the bottom-right cell is (1, 1)
from sklearn.metrics import confusion_matrix

# Rows = true classes, columns = predicted classes;
# top-left is (0, 0), bottom-right is (1, 1).
cm = confusion_matrix(y, y_pred)
print(cm)
・ Accuracy: the proportion of all predictions that are correct
from sklearn.metrics import accuracy_score

# Fraction of all samples that were classified correctly.
accuracy_score(y, y_pred)
・ Precision: of the samples predicted to be positive, the proportion that are actually positive (the right-hand column of the confusion matrix)
from sklearn.metrics import precision_score

# Of the samples predicted positive, the fraction that truly are positive.
precision_score(y, y_pred)
・ Recall: of the samples that are actually positive, the proportion that are correctly predicted as positive (the bottom row of the confusion matrix)
# Recall: of the actual positives, the fraction correctly predicted positive.
# BUGFIX: the surrounding text describes recall, but the original code
# imported and called f1_score here; recall_score is also needed later in
# the file (it is called but was never imported).
from sklearn.metrics import recall_score

recall_score(y, y_pred)
・ F1 score: the harmonic mean of precision and recall ・ There is a trade-off between precision and recall, so F1 balances the two.
from sklearn.metrics import f1_score

# Harmonic mean of precision and recall.
f1_score(y, y_pred)
・ predict_proba expresses class membership as continuous values in [0, 1] that sum to 1 across the classes ・ scikit-learn uses 0.5 as the default decision threshold.
# model_lor.predict_proba(X)  # per-class probabilities, rows sum to 1
import numpy as np
# BUGFIX: recall_score was called below without ever being imported.
from sklearn.metrics import recall_score

# Lower the decision threshold from the default 0.5 to 0.1: more samples
# are labelled positive, trading precision for higher recall.
# BUGFIX: np.int was removed in NumPy 1.24; use the builtin int instead.
y_pred2 = (model_lor.predict_proba(X)[:, 1] > 0.1).astype(int)
print(confusion_matrix(y, y_pred2))
print(accuracy_score(y, y_pred2))
print(recall_score(y, y_pred2))
・ AUC: Area Under the Curve ・ ROC: Receiver Operating Characteristic ・ The AUC is the area under the ROC curve ・ ROC curve: horizontal axis is the False Positive Rate (FPR), vertical axis is the True Positive Rate (TPR)
# ROC curve: plot TPR against FPR over all decision thresholds.
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt

# BUGFIX: "%matplotlib inline" is IPython/Jupyter magic and a SyntaxError
# in a plain .py file; re-enable it only when running inside a notebook.
# %matplotlib inline

probas = model_lor.predict_proba(X)
fpr, tpr, thresholds = roc_curve(y, probas[:, 1])

plt.style.use('fivethirtyeight')
fig, ax = plt.subplots()
fig.set_size_inches(4.8, 5)
ax.step(fpr, tpr, 'gray')
ax.fill_between(fpr, tpr, 0, color='skyblue', alpha=0.8)
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('True Positive Rate')
ax.set_facecolor('xkcd:white')
plt.show()
from sklearn.metrics import roc_auc_score

# Area under the ROC curve, computed from the positive-class probabilities.
roc_auc_score(y, probas[:, 1])
Recommended Posts