深度学习AI美颜系列----AI人像美妆算法初识

人像美妆是近几年来深受广大女孩儿群体喜欢的修图功能之一,目前市面中做的比较好的有美妆相机、玩美彩妆、天天P图等APP,当然还有一些PC专用的秀图软件,本文将给大家做个算法初识;

什么是人像美妆?通俗的看个样例图:

《深度学习AI美颜系列----AI人像美妆算法初识》

这个图中,由左边的原图,到右边的化妆效果图,就叫做人像美妆。

本人对AI美妆的一些看法如下:

1.妆容自然,逼真;

2.鲁棒性高,不受五官遮挡影响;

3.速度越快越好;

4.完全智能化,可以针对不同人像照片智能匹配最优妆容;

目前传统美妆的优缺点:

优点:

1.妆容种类丰富,可自由搭配,用户自主选择;

美妆相机和玩美彩妆两款App均提供了数十种不同的妆容效果,供用户自由选择;

2.上妆速度快,可以实时处理;

玩美彩妆、美妆相机、天天P图、无他相机、FaceU等APP均已支持实时上妆效果;

缺点:

1.妆容鲁棒性不高,被光线,五官遮挡影响较大;

2.逼真度不高;

3.无法完全智能化;

目前市面基于传统算法的美妆类APP均无法达到上述3点;

传统人像美妆算法流程:

1.妆容模版制作(Photoshop等编辑软件制作,由设计完成)

2.人脸检测,特征点识别;

这一步骤主要通过人脸检测+人脸对齐来获得N个特征点,目前开源的有Dlib,OpenCV等,商用版有商汤科技、旷视科技、虹软科技等,以及腾讯、美图等;

这里给出一个开源的人脸检测+对齐(68点位)的资源链接:github.com/XiuSdk/cnn-…

《深度学习AI美颜系列----AI人像美妆算法初识》

3.基于人脸特征点,将模版变形,对齐到人脸五官区域;

变形算法有很多,仿射变换,IDW变换,MLS变换,RMLS变换等;

相关代码链接:

IDW

RMLS

MLS(MLS代码在博客内容中)

如果大家懒得看博客,这里给出MLS变形代码:


  
  
  1. static void setSrcPoints(const vector<PointD> &qsrc, vector<PointD> &newDotL, int* nPoint) {
  2. *nPoint = qsrc.size();
  3. newDotL.clear();
  4. newDotL.reserve(*nPoint);
  5. for (size_t i = 0; i < qsrc.size(); i++)
  6. newDotL.push_back(qsrc[i]);
  7. }
  8. static void setDstPoints(const vector<PointD> &qdst,vector<PointD> &oldDotL, int* nPoint) {
  9. *nPoint = qdst.size();
  10. oldDotL.clear();
  11. oldDotL.reserve(*nPoint);
  12. for (size_t i = 0; i < qdst.size(); i++) oldDotL.push_back(qdst[i]);
  13. }
  14. static double bilinear_interp(double x, double y, double v11, double v12,
  15. double v21, double v22) {
  16. return (v11 * (1 - y) + v12 * y) * (1 - x) + (v21 * (1 - y) + v22 * y) * x;
  17. }
  18. static double calcArea(const vector<PointD> &V) {
  19. PointD lt, rb;
  20. lt.x = lt.y = 1e10;
  21. rb.x = rb.y = -1e10;
  22. for (vector<PointD >::const_iterator i = V.begin(); i != V.end();
  23. i++) {
  24. if (i->x < lt.x) lt.x = i->x;
  25. if (i->x > rb.x) rb.x = i->x;
  26. if (i->y < lt.y) lt.y = i->y;
  27. if (i->y > rb.y) rb.y = i->y;
  28. }
  29. return (rb.x - lt.x) * (rb.y - lt.y);
  30. }
  31. static void calcDelta_rigid(int srcW, int srcH, int tarW, int tarH, double alpha, int gridSize, int nPoint, int preScale, double *rDx, double *rDy, vector<PointD> &oldDotL, vector<PointD> &newDotL)
  32. {
  33. int i, j, k;
  34. PointD swq, qstar, newP, tmpP;
  35. double sw;
  36. double ratio;
  37. if (preScale) {
  38. ratio = sqrt(calcArea(newDotL) / calcArea(oldDotL));
  39. for (i = 0; i < nPoint; i++) {
  40. newDotL[i].x *= 1 / ratio;
  41. newDotL[i].y *= 1 / ratio;
  42. }
  43. }
  44. double *w = new double[nPoint];
  45. if (nPoint < 2) {
  46. //rDx.setTo(0);
  47. //rDy.setTo(0);
  48. return;
  49. }
  50. PointD swp, pstar, curV, curVJ, Pi, PiJ, Qi;
  51. double miu_r;
  52. for (i = 0;; i += gridSize) {
  53. if (i >= tarW && i < tarW + gridSize - 1)
  54. i = tarW - 1;
  55. else if (i >= tarW)
  56. break;
  57. for (j = 0;; j += gridSize) {
  58. if (j >= tarH && j < tarH + gridSize - 1)
  59. j = tarH - 1;
  60. else if (j >= tarH)
  61. break;
  62. sw = 0;
  63. swp.x = swp.y = 0;
  64. swq.x = swq.y = 0;
  65. newP.x = newP.y = 0;
  66. curV.x = i;
  67. curV.y = j;
  68. for (k = 0; k < nPoint; k++) {
  69. if ((i == oldDotL[k].x) && j == oldDotL[k].y) break;
  70. if (alpha == 1)
  71. w[k] = 1 / ((i - oldDotL[k].x) * (i - oldDotL[k].x) +
  72. (j - oldDotL[k].y) * (j - oldDotL[k].y));
  73. else
  74. w[k] = pow((i - oldDotL[k].x) * (i - oldDotL[k].x) +
  75. (j - oldDotL[k].y) * (j - oldDotL[k].y),
  76. -alpha);
  77. sw = sw + w[k];
  78. swp.x = swp.x + w[k] * oldDotL[k].x;
  79. swp.y = swp.y + w[k] * oldDotL[k].y;
  80. swq.x = swq.x + w[k] * newDotL[k].x;
  81. swq.y = swq.y + w[k] * newDotL[k].y;
  82. }
  83. if (k == nPoint) {
  84. pstar.x = (1 / sw) * swp.x;
  85. pstar.y = (1 / sw) * swp.y;
  86. qstar.x = 1 / sw * swq.x;
  87. qstar.y = 1 / sw * swq.y;
  88. // Calc miu_r
  89. double s1 = 0, s2 = 0;
  90. for (k = 0; k < nPoint; k++) {
  91. if (i == oldDotL[k].x && j == oldDotL[k].y) continue;
  92. Pi.x = oldDotL[k].x - pstar.x;
  93. Pi.y = oldDotL[k].y - pstar.y;
  94. PiJ.x = -Pi.y, PiJ.y = Pi.x;
  95. Qi.x = newDotL[k].x - qstar.x;
  96. Qi.y = newDotL[k].y - qstar.y;
  97. s1 += w[k] * (Qi.x*Pi.x+Qi.y*Pi.y);
  98. s2 += w[k] * (Qi.x*PiJ.x+Qi.y*PiJ.y);
  99. }
  100. miu_r = sqrt(s1 * s1 + s2 * s2);
  101. curV.x -= pstar.x;
  102. curV.y -= pstar.y;
  103. curVJ.x = -curV.y, curVJ.y = curV.x;
  104. for (k = 0; k < nPoint; k++) {
  105. if (i == oldDotL[k].x && j == oldDotL[k].y) continue;
  106. Pi.x = oldDotL[k].x - pstar.x;
  107. Pi.y = oldDotL[k].y - pstar.y;
  108. PiJ.x = -Pi.y, PiJ.y = Pi.x;
  109. tmpP.x = (Pi.x*curV.x+Pi.y*curV.y)* newDotL[k].x -
  110. (PiJ.x*curV.x+PiJ.y*curV.y)* newDotL[k].y;
  111. tmpP.y = -(Pi.x*curVJ.x+Pi.y*curVJ.y) * newDotL[k].x +
  112. (PiJ.x*curVJ.x+PiJ.y*curVJ.y) * newDotL[k].y;
  113. tmpP.x *= w[k] / miu_r;
  114. tmpP.y *= w[k] / miu_r;
  115. newP.x += tmpP.x;
  116. newP.y += tmpP.y;
  117. }
  118. newP.x += qstar.x;
  119. newP.y += qstar.y;
  120. } else {
  121. newP = newDotL[k];
  122. }
  123. if (preScale) {
  124. rDx[j * tarW + i] = newP.x * ratio - i;
  125. rDy[j * tarW + i] = newP.y * ratio - j;
  126. } else {
  127. rDx[j * tarW + i] = newP.x - i;
  128. rDy[j * tarW + i] = newP.y - j;
  129. }
  130. }
  131. }
  132. delete[] w;
  133. if (preScale!=0) {
  134. for (i = 0; i < nPoint; i++){
  135. newDotL[i].x *= ratio;
  136. newDotL[i].y *= ratio;
  137. }
  138. }
  139. }
  140. static void calcDelta_Similarity(int srcW, int srcH, int tarW, int tarH, double alpha, int gridSize, int nPoint, int preScale, double *rDx, double *rDy, vector<PointD> &oldDotL, vector<PointD> &newDotL)
  141. {
  142. int i, j, k;
  143. PointD swq, qstar, newP, tmpP;
  144. double sw;
  145. double ratio;
  146. if (preScale) {
  147. ratio = sqrt(calcArea(newDotL) / calcArea(oldDotL));
  148. for (i = 0; i < nPoint; i++) {
  149. newDotL[i].x *= 1 / ratio;
  150. newDotL[i].y *= 1 / ratio;
  151. }
  152. }
  153. double *w = new double[nPoint];
  154. if (nPoint < 2) {
  155. return;
  156. }
  157. PointD swp, pstar, curV, curVJ, Pi, PiJ;
  158. double miu_s;
  159. for (i = 0;; i += gridSize) {
  160. if (i >= tarW && i < tarW + gridSize - 1)
  161. i = tarW - 1;
  162. else if (i >= tarW)
  163. break;
  164. for (j = 0;; j += gridSize) {
  165. if (j >= tarH && j < tarH + gridSize - 1)
  166. j = tarH - 1;
  167. else if (j >= tarH)
  168. break;
  169. sw = 0;
  170. swp.x = swp.y = 0;
  171. swq.x = swq.y = 0;
  172. newP.x = newP.y = 0;
  173. curV.x = i;
  174. curV.y = j;
  175. for (k = 0; k < nPoint; k++) {
  176. if ((i == oldDotL[k].x) && j == oldDotL[k].y) break;
  177. w[k] = 1 / ((i - oldDotL[k].x) * (i - oldDotL[k].x) +
  178. (j - oldDotL[k].y) * (j - oldDotL[k].y));
  179. sw = sw + w[k];
  180. swp.x = swp.x + w[k] * oldDotL[k].x;
  181. swp.y = swp.y + w[k] * oldDotL[k].y;
  182. swq.x = swq.x + w[k] * newDotL[k].x;
  183. swq.y = swq.y + w[k] * newDotL[k].y;
  184. }
  185. if (k == nPoint) {
  186. pstar.x = (1 / sw) * swp.x;
  187. pstar.y = (1 / sw) * swp.y;
  188. qstar.x = 1 / sw * swq.x;
  189. qstar.y = 1 / sw * swq.y;
  190. // Calc miu_s
  191. miu_s = 0;
  192. for (k = 0; k < nPoint; k++) {
  193. if (i == oldDotL[k].x && j == oldDotL[k].y) continue;
  194. Pi.x = oldDotL[k].x - pstar.x;
  195. Pi.y = oldDotL[k].y - pstar.y;
  196. miu_s += w[k] * (Pi.x*Pi.x+Pi.y*Pi.y);
  197. }
  198. curV.x -= pstar.x;
  199. curV.y -= pstar.y;
  200. curVJ.x = -curV.y, curVJ.y = curV.x;
  201. for (k = 0; k < nPoint; k++) {
  202. if (i == oldDotL[k].x && j == oldDotL[k].y) continue;
  203. Pi.x = oldDotL[k].x - pstar.x;
  204. Pi.y = oldDotL[k].y - pstar.y;
  205. PiJ.x = -Pi.y, PiJ.y = Pi.x;
  206. tmpP.x = (Pi.x*curV.x+Pi.y*curV.y) * newDotL[k].x -
  207. (PiJ.x*curV.x+PiJ.y*curV.y) * newDotL[k].y;
  208. tmpP.y = -(Pi.x*curVJ.x+Pi.y*curVJ.y) * newDotL[k].x +
  209. (PiJ.x*curVJ.x+PiJ.y*curVJ.y) * newDotL[k].y;
  210. tmpP.x *= w[k] / miu_s;
  211. tmpP.y *= w[k] / miu_s;
  212. newP.x += tmpP.x;
  213. newP.y += tmpP.y;
  214. }
  215. newP.x += qstar.x;
  216. newP.y += qstar.y;
  217. } else {
  218. newP = newDotL[k];
  219. }
  220. rDx[j * tarW + i] = newP.x - i;
  221. rDy[j * tarW + i] = newP.y - j;
  222. }
  223. }
  224. delete[] w;
  225. if (preScale!=0) {
  226. for (i = 0; i < nPoint; i++){
  227. newDotL[i].x *= ratio;
  228. newDotL[i].y *= ratio;
  229. }
  230. }
  231. }
// Apply the precomputed grid displacement field to the source image and write
// the warped result into tarImg. For each pixel, the displacement at the four
// surrounding grid nodes is bilinearly interpolated, the displaced source
// coordinate is clamped to the image, and the output color is bilinearly
// sampled from the 4-byte-per-pixel source buffer (stride-addressed, so
// presumably RGBA/BGRA — TODO confirm against callers).
//
// oriImg/width/height/stride       : source image buffer and layout
// tarImg/tarW/tarH/tarStride       : destination image buffer and layout
// gridSize                         : node spacing used by calcDelta_*
// rDx/rDy                          : tarW*tarH displacement arrays [row*tarW+col]
// transRatio                       : blend factor scaling the displacement
// Returns 0 (no failure paths).
static int GetNewImg(unsigned char* oriImg, int width, int height, int stride, unsigned char* tarImg, int tarW, int tarH, int tarStride, int gridSize, double* rDx, double* rDy, double transRatio)
{
    int i, j;
    double di, dj;          // fractional offsets inside the current grid cell
    double nx, ny;          // displaced (source) coordinates
    int nxi, nyi, nxi1, nyi1;  // floor/ceil integer neighbors of (nx, ny)
    double deltaX, deltaY;
    double w, h;            // effective cell width/height (smaller at borders)
    int ni, nj;             // far corner of the current grid cell
    int pos, posa, posb, posc, posd;  // byte offsets: dest + 4 source taps
    for (i = 0; i < tarH; i += gridSize)
        for (j = 0; j < tarW; j += gridSize) {
            ni = i + gridSize, nj = j + gridSize;
            w = h = gridSize;
            // Clamp the cell's far corner to the last row/column and shrink
            // the interpolation span accordingly.
            if (ni >= tarH) ni = tarH - 1, h = ni - i + 1;
            if (nj >= tarW) nj = tarW - 1, w = nj - j + 1;
            for (di = 0; di < h; di++)
                for (dj = 0; dj < w; dj++) {
                    // Interpolate the displacement from the 4 cell corners.
                    deltaX =
                        bilinear_interp(di / h, dj / w, rDx[i * tarW + j], rDx[i * tarW + nj],
                                        rDx[ni * tarW + j], rDx[ni * tarW + nj]);
                    deltaY =
                        bilinear_interp(di / h, dj / w, rDy[i * tarW + j], rDy[i * tarW + nj],
                                        rDy[ni * tarW + j], rDy[ni * tarW + nj]);
                    nx = j + dj + deltaX * transRatio;
                    ny = i + di + deltaY * transRatio;
                    // Clamp the sample position inside the source image.
                    if (nx > width - 1) nx = width - 1;
                    if (ny > height - 1) ny = height - 1;
                    if (nx < 0) nx = 0;
                    if (ny < 0) ny = 0;
                    nxi = int(nx);
                    nyi = int(ny);
                    nxi1 = ceil(nx);
                    nyi1 = ceil(ny);
                    // Byte offsets (<< 2 == *4 bytes per pixel).
                    pos = (int)(i + di) * tarStride + ((int)(j + dj) << 2);
                    posa = nyi * stride + (nxi << 2);
                    posb = nyi * stride + (nxi1 << 2);
                    posc = nyi1 * stride + (nxi << 2);
                    posd = nyi1 * stride + (nxi1 << 2);
                    // Bilinearly blend the four source pixels, channel by channel.
                    tarImg[pos] = (unsigned char)bilinear_interp(ny - nyi, nx - nxi, oriImg[posa], oriImg[posb], oriImg[posc], oriImg[posd]);
                    tarImg[pos + 1] = (unsigned char)bilinear_interp(ny - nyi, nx - nxi, oriImg[posa + 1], oriImg[posb + 1], oriImg[posc + 1], oriImg[posd + 1]);
                    tarImg[pos + 2] = (unsigned char)bilinear_interp(ny - nyi, nx - nxi, oriImg[posa + 2], oriImg[posb + 2], oriImg[posc + 2], oriImg[posd + 2]);
                    tarImg[pos + 3] = (unsigned char)bilinear_interp(ny - nyi, nx - nxi, oriImg[posa + 3], oriImg[posb + 3], oriImg[posc + 3], oriImg[posd + 3]);
                }
        }
    return 0;
};
  279. static void MLSImageWrapping(unsigned char* oriImg,int width, int height, int stride,const vector<PointD > &qsrc, const vector<PointD > &qdst, unsigned char* tarImg, int outW, int outH, int outStride, double transRatio, int preScale, int gridSize, int method)
  280. {
  281. int srcW = width;
  282. int srcH = height;
  283. int tarW = outW;
  284. int tarH = outH;
  285. double alpha = 1;
  286. int nPoint;
  287. int len = tarH * tarW;
  288. vector<PointD> oldDotL, newDotL;
  289. double *rDx = NULL,*rDy = NULL;
  290. setSrcPoints(qsrc,newDotL,&nPoint);
  291. setDstPoints(qdst,oldDotL,&nPoint);
  292. rDx = (double*)malloc(sizeof(double) * len);
  293. rDy = (double*)malloc(sizeof(double) * len);
  294. memset(rDx, 0, sizeof(double) * len);
  295. memset(rDy, 0, sizeof(double) * len);
  296. if(method!=0)
  297. calcDelta_Similarity(srcW, srcH, tarW, tarH, alpha, gridSize, nPoint, preScale, rDx, rDy, oldDotL, newDotL);
  298. else
  299. calcDelta_rigid(srcW, srcH, tarW, tarH, alpha, gridSize, nPoint, preScale, rDx, rDy, oldDotL, newDotL);
  300. GetNewImg(oriImg, srcW, srcH, stride, tarImg, tarW, tarH, outStride, gridSize, rDx, rDy, transRatio);
  301. if(rDx != NULL)
  302. free(rDx);
  303. if(rDy != NULL)
  304. free(rDy);
  305. };
  306. int f_TMLSImagewarpping(unsigned char* srcData, int width ,int height, int stride, unsigned char* dstData, int outW, int outH, int outStride, int srcPoint[], int dragPoint[], int pointNum, double intensity, int preScale, int gridSize, int method)
  307. {
  308. int res = 0;
  309. vector<PointD> qDst;
  310. vector<PointD> qSrc;
  311. PointD point = {0};
  312. int len = 0;
  313. for(int i = 0; i < pointNum; i++)
  314. {
  315. len = (i << 1);
  316. point.x = srcPoint[len];
  317. point.y = srcPoint[len + 1];
  318. qSrc.push_back(point);
  319. point.x = dragPoint[len];
  320. point.y = dragPoint[len + 1];
  321. qDst.push_back(point);
  322. }
  323. MLSImageWrapping(srcData, width, height, stride, qSrc, qDst, dstData, outW, outH, outStride, intensity, preScale,gridSize, method);
  324. return res;
  325. };

4.将模版与人脸五官图像进行融合;

融合算法主要有alpha融合,Photoshop图层混合,泊松融合等;

alpha融合: S * alpha + D*(1-alpha)

图层混合公式如下:

《深度学习AI美颜系列----AI人像美妆算法初识》

泊松融合:算法详解

上述过程即传统算法流程,其中对美妆效果起决定性的是人脸特征点识别,如果没有准确的特征点,再好的妆容模版,上妆效果也出不来;

比如下面的例子:

《深度学习AI美颜系列----AI人像美妆算法初识》

《深度学习AI美颜系列----AI人像美妆算法初识》

《深度学习AI美颜系列----AI人像美妆算法初识》

图1中,由于眼睛特征点位置不准确,睫毛妆容已经偏离了眼睛区域;

图2中,由于拍照光线较暗,腮红明显,逼真度过低;

图3中,由于人眼和眉毛被部分遮挡,因此,传统算法的睫毛和眉毛效果悬浮在了头发之上;

目前传统算法相关的论文资料如下:

Rule-Based Facial Makeup Recommendation System.

Example-Based Cosmetic Transfer.

Region-based Face Makeup using example face images.

Simulating Makeup through Physics-based Manipulation of Intrinsic Image Layers.

A Visual Representation for editing face images.

Digital Face Makeup By Example.

在传统算法中,有一种妆容迁移算法,该算法可以直接将一张妆容效果图中的妆容特征,迁移到任意一张人像照片中去,实际上也是与人脸特征点密不可分,具体链接可参考:blog.csdn.net/trent1985/a…

目前AI美妆相关的论文资料如下:

Makeup Like a Superstar Deep Localized Makeup Transfer Network.

Examples-Rules Guided Deep Neural Network for Makeup Recommendation.

上述两篇基于深度学习的美妆算法论文主要思想有两个:

1,对于第一篇论文主要是对人像进行五官分析,获取肤色,眉毛颜色,唇色等等信息,然后进行不同妆容的最佳匹配,最后上妆;

框架如下:

《深度学习AI美颜系列----AI人像美妆算法初识》

2,对五官进行分别提取分类成不同的style,依据样例数据的特征style,进行最优匹配并上妆;

框架如下:

《深度学习AI美颜系列----AI人像美妆算法初识》

上述两篇算法论文,依旧是建立在人脸特征点的基础上研究的。

本人针对传统美妆算法,结合深度学习,做了如下改进:

1.只需要人脸检测框,不依赖于人脸特征点;

2.不受五官遮挡和光线影响;

3.妆容效果逼真度提高;

本人算法框架:

1.人脸检测,得到正方形人脸框,包含五官区域;

2.基于全卷积网络,以人脸框图像作为输入,上妆之后的人脸五官效果图作为输出,进行学习训练;

妆容模版使用如下模版:

《深度学习AI美颜系列----AI人像美妆算法初识》

在Fig.4模板妆容中,分别进行了眉毛处理,眼影、睫毛和唇彩的上妆,整体肤色以及其他内容均无调整;

训练中迭代10次,训练集和验证集准确率均达到了94%-95%,本人训练样本选取了500张,数据比较少,这里仅仅探讨可行性与方法分析;

3.使用2中的训练模型,对测试图进行上妆;

效果图如下:

《深度学习AI美颜系列----AI人像美妆算法初识》

《深度学习AI美颜系列----AI人像美妆算法初识》

上述效果图中我们可以看到,基于深度学习的美妆效果,避免了五官遮挡的影响,同时上妆效果更加自然,对环境光的鲁棒性也较高,本文这里未给出具体的网络模型与参数,不过思路大家已经可以借鉴!目前算法处于研究测试阶段,后续本人将公布完整的DEMO!

本人QQ:1358009172

    原文作者:算法小白
    原文地址: https://juejin.im/entry/5b580f85f265da0fab4020d0
    本文转自网络文章,转载此文章仅为分享知识,如有侵权,请联系博主进行删除。
点赞