2
votes

I've a small decision tree code and I believe I convert everything to int, and I have checked my train/test data with isnan, max etc.

I genuinely have no idea why its giving that error.

So I'm trying to pass Mnist dataset from Decision Tree and then I'll attack using a class.

Here is the code:

 from AttackUtils import Attack
    from AttackUtils import calc_output_weighted_weights, targeted_gradient, non_targeted_gradient, non_targeted_sign_gradient
    (X_train_woae, y_train_woae), (X_test_woae, y_test_woae) = mnist.load_data()
    X_train_woae = X_train_woae.reshape((len(X_train_woae), np.prod(X_train_woae.shape[1:])))
    X_test_woae = X_test_woae.reshape((len(X_test_woae), np.prod(X_test_woae.shape[1:])))

    from sklearn import tree
    #model_woae = LogisticRegression(multi_class='multinomial', solver='lbfgs', fit_intercept=False)
    model_woae = tree.DecisionTreeClassifier(class_weight='balanced')
    model_woae.fit(X_train_woae, y_train_woae)
    #model_woae.coef_ = model_woae.feature_importances_
    coef_int = np.round(model_woae.tree_.compute_feature_importances(normalize=False) * X_train_woae.size).astype(int)
    attack_woae = Attack(model_woae)
    attack_woae.prepare(X_train_woae, y_train_woae, X_test_woae, y_test_woae)
    weights_woae = attack_woae.weights
    num_classes_woae = len(np.unique(y_train_woae))
    attack_woae.create_one_hot_targets(y_test_woae)
    attack_woae.attack_to_max_epsilon(non_targeted_gradient, 50)
    non_targeted_scores_woae = attack_woae.scores

So the attack class does perturbation and non-targeted gradient attack. And here is the attack class:

import numpy as np
from sklearn.metrics import accuracy_score


def calc_output_weighted_weights(output, w):
    for c in range(len(output)):
        if c == 0:
            weighted_weights = output[c] * w[c]
        else:
            weighted_weights += output[c] * w[c]
    return weighted_weights


def targeted_gradient(foolingtarget, output, w):
    ww = calc_output_weighted_weights(output, w)
    for k in range(len(output)):
        if k == 0:
            gradient = foolingtarget[k] * (w[k]-ww)
        else:
            gradient += foolingtarget[k] * (w[k]-ww)
    return gradient


def non_targeted_gradient(target, output, w):
    ww = calc_output_weighted_weights(output, w)
    for k in range(len(target)):
        if k == 0:
            gradient = (1-target[k]) * (w[k]-ww)
        else:
            gradient += (1-target[k]) * (w[k]-ww)
    return gradient


def non_targeted_sign_gradient(target, output, w):
    gradient = non_targeted_gradient(target, output, w)
    return np.sign(gradient)


class Attack:

    def __init__(self, model):
        self.fooling_targets = None
        self.model = model

    def prepare(self, X_train, y_train, X_test, y_test):
        self.images = X_test
        self.true_targets = y_test
        self.num_samples = X_test.shape[0]
        self.train(X_train, y_train)
        print("Model training finished.")
        self.test(X_test, y_test)
        print("Model testing finished. Initial accuracy score: " + str(self.initial_score))

    def set_fooling_targets(self, fooling_targets):
        self.fooling_targets = fooling_targets

    def train(self, X_train, y_train):
        self.model.fit(X_train, y_train)
        self.weights = self.model.coef_
        self.num_classes = self.weights.shape[0]

    def test(self, X_test, y_test):
        self.preds = self.model.predict(X_test)
        self.preds_proba = self.model.predict_proba(X_test)
        self.initial_score = accuracy_score(y_test, self.preds)

    def create_one_hot_targets(self, targets):
        self.one_hot_targets = np.zeros(self.preds_proba.shape)
        for n in range(targets.shape[0]):
            self.one_hot_targets[n, targets[n]] = 1

    def attack(self, attackmethod, epsilon):
        perturbed_images, highest_epsilon = self.perturb_images(epsilon, attackmethod)
        perturbed_preds = self.model.predict(perturbed_images)
        score = accuracy_score(self.true_targets, perturbed_preds)
        return perturbed_images, perturbed_preds, score, highest_epsilon

    def perturb_images(self, epsilon, gradient_method):
        perturbed = np.zeros(self.images.shape)
        max_perturbations = []
        for n in range(self.images.shape[0]):
            perturbation = self.get_perturbation(epsilon, gradient_method, self.one_hot_targets[n], self.preds_proba[n])
            perturbed[n] = self.images[n] + perturbation
            max_perturbations.append(np.max(perturbation))
        highest_epsilon = np.max(np.array(max_perturbations))
        return perturbed, highest_epsilon

    def get_perturbation(self, epsilon, gradient_method, target, pred_proba):
        gradient = gradient_method(target, pred_proba, self.weights)
        inf_norm = np.max(gradient)
        perturbation = epsilon / inf_norm * gradient
        return perturbation

    def attack_to_max_epsilon(self, attackmethod, max_epsilon):
        self.max_epsilon = max_epsilon
        self.scores = []
        self.epsilons = []
        self.perturbed_images_per_epsilon = []
        self.perturbed_outputs_per_epsilon = []
        for epsilon in range(0, self.max_epsilon):
            perturbed_images, perturbed_preds, score, highest_epsilon = self.attack(attackmethod, epsilon)
            self.epsilons.append(highest_epsilon)
            self.scores.append(score)
            self.perturbed_images_per_epsilon.append(perturbed_images)
            self.perturbed_outputs_per_epsilon.append(perturbed_preds)

And this is the traceback it gives:

ValueError

Traceback (most recent call last) in 4 num_classes_woae = len(np.unique(y_train_woae)) 5 attack_woae.create_one_hot_targets(y_test_woae) ----> 6 attack_woae.attack_to_max_epsilon(non_targeted_gradient, 50) 7 non_targeted_scores_woae = attack_woae.scores

~\MULTIATTACK\AttackUtils.py in attack_to_max_epsilon(self, attackmethod, max_epsilon) 106 self.perturbed_outputs_per_epsilon = [] 107 for epsilon in range(0, self.max_epsilon): --> 108 perturbed_images, perturbed_preds, score, highest_epsilon = self.attack(attackmethod, epsilon) 109 self.epsilons.append(highest_epsilon) 110 self.scores.append(score)

~\MULTIATTACK\AttackUtils.py in attack(self, attackmethod, epsilon) 79 def attack(self, attackmethod, epsilon): 80 perturbed_images, highest_epsilon = self.perturb_images(epsilon, attackmethod) ---> 81 perturbed_preds = self.model.predict(perturbed_images) 82 score = accuracy_score(self.true_targets, perturbed_preds) 83 return perturbed_images, perturbed_preds, score, highest_epsilon

...\appdata\local\programs\python\python35\lib\site-packages\sklearn\tree\tree.py in predict(self, X, check_input) 413 """ 414 check_is_fitted(self, 'tree_') --> 415 X = self._validate_X_predict(X, check_input) 416 proba = self.tree_.predict(X) 417 n_samples = X.shape[0]

...\appdata\local\programs\python\python35\lib\site-packages\sklearn\tree\tree.py in _validate_X_predict(self, X, check_input) 374 """Validate X whenever one tries to predict, apply, predict_proba""" 375 if check_input: --> 376 X = check_array(X, dtype=DTYPE, accept_sparse="csr") 377 if issparse(X) and (X.indices.dtype != np.intc or 378 X.indptr.dtype != np.intc):

...\appdata\local\programs\python\python35\lib\site-packages\sklearn\utils\validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator) 566 if force_all_finite: 567 _assert_all_finite(array, --> 568 allow_nan=force_all_finite == 'allow-nan') 569 570 shape_repr = _shape_repr(array.shape)

...\appdata\local\programs\python\python35\lib\site-packages\sklearn\utils\validation.py in _assert_all_finite(X, allow_nan) 54 not allow_nan and not np.isfinite(X).all()): 55 type_err = 'infinity' if allow_nan else 'NaN, infinity' ---> 56 raise ValueError(msg_err.format(type_err, X.dtype)) 57 58

ValueError: Input contains NaN, infinity or a value too large for dtype('float32').

EDIT:

I've added coefficient numbers as 0 and it now gives the same error just below the line, at attack.attack_to_max_epsilon(non_targeted_gradient, epsilon_number)

1
maybe just float32 overflow, but lol the digit is hugeuser8426627
@user8426627 I've made them smaller but still the same... You were talking about coef_int digits right?DevelopmentPeasant
try to apply one-hot encode to your targets or labels before to train the clf,.Freddy Daniel
@FreddyDaniel can you give more detail? I'm not sure I completely understoodDevelopmentPeasant
I think that you are new in machine learning, please see what is one-hot enconde machinelearningmastery.com/… and then try to search dataset normalization to train machine learning algorithms.Freddy Daniel

1 Answers

0
votes

Try to apply one-hot enconde to your labels before to train..

from sklearn.preprocessing import LabelEncoder

mylabels= ["label1", "label2", "label2"..."n.label"]
le = LabelEncoder()
labels = le.fit_transform(mylabels)

and then try to split your data:

from sklearn.model_selection import train_test_split
(x_train, x_test, y_train, y_test) = train_test_split(data,
                                                     labels,
                                                     test_size=0.25)

now probably your labels will be encoded with numbers which is good to train a machine learning algorithm.