Bonjour,

Je travaille actuellement sur un projet dont le but est de me servir d'un CNN pré-entraîné (MobileNet) et de réaliser une classification incrémentale, c'est-à-dire que je souhaite, sur le même réseau, ajouter de nouvelles classes tout en gardant les 1000 classes déjà existantes d'ImageNet. Mon problème est que je ne parviens pas à entraîner et classifier mes nouvelles classes correctement.
En ce qui concerne la classification mon problème est qu'il n'affiche pas le bon label ni la bonne valeur.
Est-ce que quelqu'un pourrait m'aider ?
Voici le code que j'ai réalisé pour entraîner mon réseau avec mes nouvelles classes.
Code python : Sélectionner tout - Visualiser dans une fenêtre à part
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
def training():
    """Train a small classification head on top of a frozen MobileNet base.

    Bottleneck-features approach: the ImageNet-pretrained MobileNet
    convolutional base (``include_top=False``) is run once over the train
    and validation image directories, its outputs ("bottleneck features")
    are cached to ``.npy`` files, and a small softmax head is then trained
    on those cached features. The trained head's weights are saved to
    ``bottleneck_fc_model.h5``.

    Side effects: reads image directories from disk, writes
    ``bottleneck_features_train.npy``, ``bottleneck_features_validation.npy``
    and ``bottleneck_fc_model.h5`` in the current working directory.
    """
    train_dir = 'C:/Users/rasam/Documents/ING5/PFE/classes/train'
    validation_dir = 'C:/Users/rasam/Documents/ING5/PFE/classes/validation'
    batch_size = 16

    # One sub-directory per class. (Renamed from `list`, which shadowed
    # the Python builtin.)
    class_names = os.listdir(train_dir)
    number_class = len(class_names)

    # MobileNet convolutional base, pre-trained on ImageNet, without its
    # original 1000-way classifier (include_top=False).
    model = applications.mobilenet.MobileNet(
        input_shape=(224, 224, 3), alpha=1.0, depth_multiplier=1,
        dropout=1e-3, include_top=False, weights='imagenet',
        input_tensor=None, pooling=None)

    # Rescaling only — no augmentation, so every pass over the data is
    # deterministic and features stay aligned with labels.
    datagen = ImageDataGenerator(rescale=1. / 255)

    # class_mode=None: yield only image batches (no labels).
    # shuffle=False: batches come out in directory order, so the labels
    # can be reconstructed afterwards from generator.classes.
    train_generator = datagen.flow_from_directory(
        train_dir,
        target_size=(224, 224),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)

    # Ceiling division: predict_generator needs an *integer* number of
    # steps that covers every sample. The original float expression
    # (number_class * 1000) / batch_size is not an int in Python 3 and
    # also assumed exactly 1000 images per class.
    train_steps = (train_generator.samples + batch_size - 1) // batch_size
    bottleneck_features_train = model.predict_generator(train_generator,
                                                        train_steps)
    # Cache the features so the head can be (re)trained without re-running
    # the convolutional base.
    np.save('bottleneck_features_train.npy', bottleneck_features_train)

    validation_generator = datagen.flow_from_directory(
        validation_dir,
        target_size=(224, 224),
        batch_size=batch_size,
        class_mode=None,
        shuffle=False)

    print(validation_generator.class_indices)  # class name -> class index

    validation_steps = ((validation_generator.samples + batch_size - 1)
                        // batch_size)
    bottleneck_features_validation = model.predict_generator(
        validation_generator, validation_steps)
    np.save('bottleneck_features_validation.npy',
            bottleneck_features_validation)

    train_data = np.load('bottleneck_features_train.npy')
    validation_data = np.load('bottleneck_features_validation.npy')

    # Labels come straight from the generators (data was not shuffled).
    # This replaces the hand-built [0]*1000 + [1]*1000 + ... loop, which
    # raised UnboundLocalError for a single class and hard-coded 1000
    # train / 400 validation images per class.
    train_labels = to_categorical(train_generator.classes,
                                  num_classes=number_class)
    validation_labels = to_categorical(validation_generator.classes,
                                       num_classes=number_class)

    # Classification head mirroring MobileNet's own top: global average
    # pooling, 1x1 convolution acting as a fully-connected layer, softmax.
    top_model = Sequential()
    top_model.add(GlobalAveragePooling2D(input_shape=train_data.shape[1:]))
    top_model.add(Reshape((-1, 1, 1)))
    top_model.add(Permute((3, 2, 1)))
    top_model.add(Dropout(0.5))
    top_model.add(Conv2D(number_class, (1, 1)))
    top_model.add(Activation('softmax'))
    top_model.add(Reshape(target_shape=(number_class,)))

    top_model.summary()

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    top_model.compile(loss='categorical_crossentropy',
                      optimizer=sgd,
                      metrics=['accuracy'])

    top_model.fit(train_data, train_labels,
                  epochs=25,
                  batch_size=batch_size,
                  validation_data=(validation_data, validation_labels))
    # Save only the head's weights; the MobileNet base is unchanged.
    top_model.save_weights('bottleneck_fc_model.h5')