geometricNormalization.js
require('@tensorflow/tfjs-node')
var glob = require('glob')
// implements nodejs wrappers for HTMLCanvasElement, HTMLImageElement, ImageData
var canvas = require('canvas');
var faceapi = require('face-api.js');
var Batch = require('batch')
var batch = new Batch()
// patch nodejs environment, we need to provide an implementation of
// HTMLCanvasElement and HTMLImageElement, additionally an implementation
// of ImageData is required, in case you want to use the MTCNN
const { Canvas, Image, ImageData } = canvas
faceapi.env.monkeyPatch({ Canvas, Image, ImageData })
var config = require('./recognition')
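// ./recognition is assumed to export at least INPUT_SIZE, MIN_CONFIDENCE and
// MODEL_URL; a plausible sketch (these values are assumptions, not the
// project's actual settings):
//   module.exports = { INPUT_SIZE: 416, MIN_CONFIDENCE: 0.5, MODEL_URL: './models' }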
var fs = require('fs')
var mathHelper = require('./mathHelper')
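// ./mathHelper is not shown here; it is assumed to export centroid, slope and
// distance. A minimal sketch consistent with the call sites below:
//   exports.centroid = (points) => ({
//     x: points.reduce((s, p) => s + p.x, 0) / points.length,
//     y: points.reduce((s, p) => s + p.y, 0) / points.length
//   })
//   exports.slope = (a, b) => (b.y - a.y) / (b.x - a.x)
//   exports.distance = (a, b) => Math.hypot(b.x - a.x, b.y - a.y)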
const desiredFaceWidth = 256
const desiredFaceHeight = desiredFaceWidth
const minFaceWidth = desiredFaceWidth*2
const minFaceHeight = minFaceWidth
async function geometricNormalization(image, name) {
  const options = new faceapi.TinyFaceDetectorOptions({
    inputSize: config.INPUT_SIZE,
    scoreThreshold: config.MIN_CONFIDENCE
  })
  console.log("processing image " + name)
  // pad the image onto a larger black canvas so the aligned crop
  // cannot run off the edge of the original image
  var imageCanvas = canvas.createCanvas(image.width + desiredFaceWidth, image.height + desiredFaceHeight)
  var ctx = imageCanvas.getContext('2d')
  ctx.fillStyle = "black";
  ctx.fillRect(0, 0, image.width + desiredFaceWidth, image.height + desiredFaceHeight);
  ctx.drawImage(image, desiredFaceWidth / 2, desiredFaceHeight / 2)
  console.log("detecting faces in " + name)
  var fullFaceDescriptions = await faceapi
    .detectSingleFace(imageCanvas, options)
    .withFaceLandmarks()
  console.log("detection complete for " + name)
  if (fullFaceDescriptions) {
    console.log("face found in " + name)
    var rightEyeCentroid = mathHelper.centroid(fullFaceDescriptions.landmarks.getRightEye())
    var leftEyeCentroid = mathHelper.centroid(fullFaceDescriptions.landmarks.getLeftEye())
    // angle of the line between the eyes relative to the horizontal axis
    var faceAngle = Math.atan(mathHelper.slope(leftEyeCentroid, rightEyeCentroid))
    extractFaces(image, imageCanvas, ctx, rightEyeCentroid, leftEyeCentroid, faceAngle, name)
  } else {
    console.log("no face in " + name)
  }
}
function extractFaces(image, imageCanvas, ctx, rightEyeCentroid, leftEyeCentroid, faceAngle, name) {
  // desired eye positions in the output crop, as fractions of its dimensions
  var desiredLeftEye = { x: 0.35, y: 0.2 }
  var desiredRightEyeX = 1.0 - desiredLeftEye.x
  // calculate the scale that maps the measured inter-eye distance to the desired one
  var dist = mathHelper.distance(rightEyeCentroid, leftEyeCentroid)
  var desiredDist = (desiredRightEyeX - desiredLeftEye.x)
  desiredDist *= desiredFaceWidth
  var scale = desiredDist / dist
  // generate the crop box around the midpoint between the eyes
  var eyesCentroid = mathHelper.centroid([leftEyeCentroid, rightEyeCentroid])
  const topLeftCorner = {
    x: eyesCentroid.x - desiredFaceWidth / 2,
    y: eyesCentroid.y - desiredFaceHeight * desiredLeftEye.y
  }
  // rotate and scale around the eyes' centroid so the eyes come out level
  // and at the desired distance, then redraw the image under that transform
  ctx.save();
  ctx.clearRect(0, 0, imageCanvas.width, imageCanvas.height);
  ctx.translate(eyesCentroid.x, eyesCentroid.y)
  ctx.rotate(-faceAngle)
  ctx.scale(scale, scale)
  ctx.translate(-eyesCentroid.x, -eyesCentroid.y)
  ctx.rect(topLeftCorner.x, topLeftCorner.y, desiredFaceWidth, desiredFaceHeight)
  ctx.fill()
  ctx.drawImage(image, desiredFaceWidth / 2, desiredFaceHeight / 2)
  ctx.restore();
  // copy the clipping area into a new canvas of the final face size
  var canvasFace = canvas.createCanvas(desiredFaceWidth, desiredFaceHeight)
  var ctxFace = canvasFace.getContext('2d')
  ctxFace.clearRect(0, 0, canvasFace.width, canvasFace.height)
  // draw the clipped region from the main canvas onto the new canvas
  ctxFace.drawImage(imageCanvas, topLeftCorner.x, topLeftCorner.y,
    desiredFaceWidth, desiredFaceHeight, 0, 0, desiredFaceWidth, desiredFaceHeight);
  saveCallback(canvasFace, name)
  return canvasFace
}
function saveCallback(canvasFace, name) {
  // get the data URL from the canvas ('image/jpeg' is the MIME type
  // node-canvas expects; 'image/jpg' is not valid)
  const url = canvasFace.toDataURL('image/jpeg', 1);
  // strip the data-URL prefix, keeping only the base64 payload
  const base64Data = url.replace(/^data:image\/jpeg;base64,/, "");
  // fs.writeFileSync is synchronous and takes no callback; errors are thrown
  try {
    fs.writeFileSync('./processedImages/' + name, base64Data, 'base64');
  } catch (err) {
    console.log(err);
  }
}
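// NOTE: saveCallback assumes the ./processedImages directory already exists;
// it can be created once up front with, for example,
// fs.mkdirSync('./processedImages', { recursive: true })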
async function runRecognition() {
  await loadModels()
  glob('./data/image/origin/*.jpg', async (er, files) => {
    // er is an error object or null, files is an array of matching filenames
    console.log(files)
    // queue the files in pairs; actual parallelism is capped by
    // batch.concurrency(6) below
    var i, j, filesBatch, chunk = 2;
    for (i = 0, j = files.length; i < j; i += chunk) {
      filesBatch = files.slice(i, i + chunk);
      filesBatch.forEach((file) => {
        batch.push(function (done) {
          console.log("processing " + file)
          canvas.loadImage(file).then(async (image) => {
            await geometricNormalization(image, file.substr(file.lastIndexOf('/') + 1))
            done()
          })
        });
      })
    }
    batch.end()
  })
}
async function loadModels() {
  await faceapi.nets.tinyFaceDetector.loadFromDisk(config.MODEL_URL)
  await faceapi.nets.faceLandmark68Net.loadFromDisk(config.MODEL_URL)
}
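// The pretrained weight files for tinyFaceDetector and faceLandmark68Net must
// be present under config.MODEL_URL (face-api.js ships these in its weights/
// directory)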
batch.concurrency(6);
runRecognition()