# compare probability distributions vs entropy
from math import log2
from matplotlib import pyplot

# calculate entropy
def entropy(events, ets=1e-15):
    return -sum([p * log2(p + ets) for p in events])

# define probabilities
probs = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
# create probability distributions
dists = [[p, 1.0 - p] for p in probs]
# calculate entropy for each distribution
ents = [entropy(d) for d in dists]
# plot probability distribution vs entropy
pyplot.plot(probs, ents, marker='.')
pyplot.title('Probability Distribution vs Entropy')
pyplot.xticks(probs, [str(d) for d in dists])
pyplot.xlabel('Probability Distribution')
pyplot.ylabel('Entropy (bits)')
pyplot.show()
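As a quick sanity check of the entropy() helper (a small addition, not part of the original listing): a fair coin should report a full bit of entropy, while a certain outcome reports almost none.

print(entropy([0.5, 0.5]))  # ~1.0 bits: maximum surprise
print(entropy([1.0, 0.0]))  # ~0.0 bits: no surprise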
from math import log

# calculate cross entropy in nats
# p is the true label distribution, q is the predicted distribution
def cross_entropy(p, q, ets=1e-15):
    # ets guards against log(0) when a predicted probability is exactly zero
    return -sum([p[i] * log(q[i] + ets) for i in range(len(p))])
# example classification data (assumed placeholder values; the original
# listing does not define p and q): p holds true labels, q predictions
p = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
q = [0.8, 0.9, 0.9, 0.6, 0.8, 0.1, 0.4, 0.2, 0.1, 0.3]

results = []
for i in range(len(p)):
    # calculate cross entropy for the two events {y, 1 - y}
    ce = cross_entropy([p[i], 1.0 - p[i]], [q[i], 1.0 - q[i]])
    print('>[y=%.1f, yhat=%.1f] ce: %.3f nats' % (p[i], q[i], ce))
    results.append(ce)
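Averaging the per-example values gives the dataset-level cross entropy, which is the quantity a log loss metric would report; a minimal sketch using the results list above:

# average cross entropy across all examples, in nats
mean_ce = sum(results) / len(results)
print('Average Cross Entropy: %.3f nats' % mean_ce)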
# calculate the kl divergence in nats
# p is the true label distribution, q is the predicted distribution
def kl_divergence(p, q):
    # add a small constant (10e-5) to p so that log never receives zero
    return sum([p[i] * log((p[i] + 10e-5) / q[i]) for i in range(len(p))])
results = []
for i in range(len(p)):
    # calculate kl divergence for the two events {y, 1 - y}
    kl = kl_divergence([p[i], 1.0 - p[i]], [q[i], 1.0 - q[i]])
    print('>[y=%.1f, yhat=%.1f] kl: %.3f nats' % (p[i], q[i], kl))
    results.append(kl)
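The two quantities are tied together by the identity H(P, Q) = H(P) + KL(P || Q); because the labels above are one-hot, H(P) is nearly zero and the cross entropy and KL divergence values should almost coincide. A quick check reusing the functions defined above (entropy_nats is a helper added here, not in the original):

# entropy in nats, with ets guarding against log(0)
def entropy_nats(dist, ets=1e-15):
    return -sum([pi * log(pi + ets) for pi in dist])

for i in range(len(p)):
    expected = [p[i], 1.0 - p[i]]
    predicted = [q[i], 1.0 - q[i]]
    total = entropy_nats(expected) + kl_divergence(expected, predicted)
    ce = cross_entropy(expected, predicted)
    print('H(P,Q)=%.3f vs H(P)+KL(P||Q)=%.3f' % (ce, total))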
import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
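The listing below relies on several names (input_shape, no_classes, input_train, target_train, batch_size, no_epochs, verbosity, validation_split) that it never defines. This sketch fills them in with placeholder values and standard CIFAR-10 preprocessing; the original configuration may differ.

# model / training configuration (assumed placeholder values)
batch_size = 250
no_epochs = 25
no_classes = 10          # CIFAR-10 has 10 classes
validation_split = 0.2
verbosity = 1

# load CIFAR-10 and order the channel axis to match the backend
(input_train, target_train), (input_test, target_test) = cifar10.load_data()
if K.image_data_format() == 'channels_first':
    input_train = input_train.transpose(0, 3, 1, 2)
    input_test = input_test.transpose(0, 3, 1, 2)
    input_shape = (3, 32, 32)
else:
    input_shape = (32, 32, 3)

# scale pixels to [0, 1] and one-hot encode the targets so that they
# form proper probability distributions for the KL divergence loss
input_train = input_train.astype('float32') / 255
input_test = input_test.astype('float32') / 255
target_train = keras.utils.to_categorical(target_train, no_classes)
target_test = keras.utils.to_categorical(target_test, no_classes)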
# Create the model
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.50))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.50))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(no_classes, activation='softmax'))
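Printing the summary before training is a cheap way to confirm the layer stack, output shapes, and parameter counts:

model.summary()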
# KL divergence as the loss; with one-hot targets it differs from categorical
# crossentropy only by the constant target entropy, so both train the same way
loss = keras.losses.kullback_leibler_divergence
# loss = keras.losses.categorical_crossentropy
# Compile the model
model.compile(loss=loss,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])
# Fit data to model
model.fit(input_train, target_train,
          batch_size=batch_size,
          epochs=no_epochs,
          verbose=verbosity,
          validation_split=validation_split)
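Once training finishes, the held-out split prepared earlier can be used to check generalization; a minimal sketch:

# evaluate against the test set
score = model.evaluate(input_test, target_test, verbose=0)
print('Test loss: %.4f / Test accuracy: %.4f' % (score[0], score[1]))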