mirror of https://github.com/arnaucube/objectImageIdentifierAI.git (synced 2026-02-07 11:46:55 +01:00)
training neural network ok, needs too much ram. Started implementation of the server
1
nnTrain/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
trainBACKUP.py
24
nnTrain/README.md
Normal file
@@ -0,0 +1,24 @@
# serverImgPredictor

Requires the file `dataset.data`.

### Install Flask
http://flask.pocoo.org/docs/0.12/quickstart/#a-minimal-application

(sudo) pip install Flask

pip install flask_restful
pip install flask-jsonpify

### Install scikit-neuralnetwork
https://scikit-neuralnetwork.readthedocs.io/en/latest/guide_installation.html

pip install scikit-neuralnetwork

The Lasagne library also needs to be upgraded:

(sudo) pip install --upgrade https://github.com/Lasagne/Lasagne/archive/master.zip

## Run

python train.py

This will generate `nn.pkl`.

Copy `nn.pkl` to the serverPredictor directory.
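The Flask dependencies above belong to the serverPredictor side, which this commit only starts. A minimal sketch of how such an endpoint could load nn.pkl and serve predictions; the route, the query parameter, and the preprocessing are assumptions for illustration, not the repository's actual server code:

import pickle
import numpy as np
from flask import Flask, request
from flask_restful import Resource, Api
from flask_jsonpify import jsonify
from skimage import io

app = Flask(__name__)
api = Api(app)

#restore the network that train.py saved with pickle.dump
nn = pickle.load(open('nn.pkl', 'rb'))

class Predict(Resource):
    def get(self):
        #hypothetical query parameter holding a path to the image
        img = io.imread(request.args['img'])
        #mirror the [[pixels]] sample layout that train.py feeds to sknn
        pred = nn.predict(np.array([[img.flatten()]]))
        return jsonify({'label': int(pred[0])})

api.add_resource(Predict, '/predict')

if __name__ == '__main__':
    app.run()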
BIN
nnTrain/dataset.npy
Normal file
Binary file not shown.
265
nnTrain/nn.pkl
Normal file
File diff suppressed because one or more lines are too long
32
nnTrain/predict.py
Normal file
@@ -0,0 +1,32 @@
from sklearn.neural_network import MLPClassifier
from skimage import io

#three images of the target object and one counterexample
img1 = io.imread("imgs/25.png")
img2 = io.imread("imgs/24.png")
img3 = io.imread("imgs/104.png")

img4 = io.imread("otherimgs/image_0008.jpg")

#flatten each image to a 1D feature vector: MLPClassifier expects a
#2D array of shape (n_samples, n_features), so the images must all
#share the same dimensions
data_train = [img.flatten() for img in (img1, img2, img3, img4)]
data_labels = [1, 1, 1, 0]
data_test = [img4.flatten(), img3.flatten()]

clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
                    hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(data_train, data_labels)

print "predictions:", clf.predict(data_test)

print "MLPClassifier weight shapes:"
print [coef.shape for coef in clf.coefs_]

'''
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
    plt.subplot(2, 4, index + 5)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Prediction: %i' % prediction)
'''
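As a sanity check on the shapes printed above: clf.coefs_ holds one weight matrix per layer transition, and with hidden_layer_sizes=(5, 2) and binary labels the chain runs from the flattened image length through the two hidden layers to a single output unit. A small sketch that could be appended to the script, assuming every image flattens to the same length:

#check the weight-matrix chain: input -> 5 -> 2 -> 1 output unit
n_features = len(data_train[0])
expected = [(n_features, 5), (5, 2), (2, 1)]
assert [coef.shape for coef in clf.coefs_] == expected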
62
nnTrain/train.py
Normal file
@@ -0,0 +1,62 @@
import numpy as np
from random import randint
import pickle
from sknn.mlp import Classifier, Layer

def datasetToTrainAndTestData(dataset, numtest):
    np.random.shuffle(dataset)
    print "length total data: " + str(len(dataset))

    traindata = np.copy(dataset)
    testdata = []
    for i in range(numtest):
        #get a random index into the remaining training images
        #(randint is inclusive, so the upper bound is len - 1)
        n = randint(0, len(traindata) - 1)
        testdata.append(traindata[n])

        #delete image n from traindata so it only appears in testdata
        traindata = np.delete(traindata, n, axis=0)
    testdataNP = np.array(testdata)
    return traindata, testdataNP


#read the dataset made with the 'imagesToDataset' repository
dataset = np.load('dataset.npy')

traindata, testdata = datasetToTrainAndTestData(dataset, 10)
print "length traindata: " + str(len(traindata))
print "length testdata: " + str(len(testdata))

#traindataAttributes contains all the pixels of each image
traindataAttributes = traindata[:, 0]
traindataAttributes = np.array([[row] for row in traindataAttributes])

#traindataLabels contains the label of each image
traindataLabels = traindata[:, 1]
traindataLabels = traindataLabels.astype('int')

#testdataAttributes contains the pixels of the test images
testdataAttributes = testdata[:, 0]
testdataAttributes = np.array([[row] for row in testdataAttributes])

#testdataLabels contains the label of each test image
testdataLabels = testdata[:, 1]
testdataLabels = testdataLabels.astype('int')

#default: units=100, learning_rate=0.001, n_iter=25
nn = Classifier(
    layers=[
        Layer("Sigmoid", units=10),
        Layer("Softmax")],
    learning_rate=0.001,
    n_iter=20,
    verbose=True)

nn.fit(traindataAttributes, traindataLabels)

print '\nTRAIN SCORE', nn.score(traindataAttributes, traindataLabels)
print 'TEST SCORE', nn.score(testdataAttributes, testdataLabels)

#save the neural network configuration
pickle.dump(nn, open('nn.pkl', 'wb'))
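Once train.py has written nn.pkl, the serverPredictor side can restore the network with pickle. A minimal sketch of the round trip; the sample index is arbitrary, and the [[pixels]] layout matches what train.py feeds to sknn:

import pickle
import numpy as np

#restore the trained sknn Classifier saved above
nn = pickle.load(open('nn.pkl', 'rb'))

#predict one sample: each dataset row holds [pixels, label]
samplePixels = np.load('dataset.npy')[0][0]
print nn.predict(np.array([[samplePixels]]))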