darknet.js 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117
  1. var darknet = {};
  2. var text = require('./text');
  3. darknet.ModelFactory = class {
  4. match(context) {
  5. const identifier = context.identifier;
  6. const extension = identifier.split('.').pop().toLowerCase();
  7. switch (extension) {
  8. case 'weights':
  9. if (darknet.Weights.open(context.stream)) {
  10. return 'darknet.weights';
  11. }
  12. break;
  13. default:
  14. try {
  15. const reader = text.Reader.open(context.stream, 65536);
  16. for (;;) {
  17. const line = reader.read();
  18. if (line === undefined) {
  19. break;
  20. }
  21. const content = line.trim();
  22. if (content.length === 0 || content.startsWith('#')) {
  23. continue;
  24. }
  25. if (content.startsWith('[') && content.endsWith(']')) {
  26. return 'darknet.model';
  27. }
  28. return undefined;
  29. }
  30. }
  31. catch (err) {
  32. // continue regardless of error
  33. }
  34. break;
  35. }
  36. return undefined;
  37. }
  38. open(context, match) {
  39. return context.metadata('darknet-metadata.json').then((metadata) => {
  40. const openModel = (metadata, cfg, weights) => {
  41. return new darknet.Model(metadata, cfg, darknet.Weights.open(weights));
  42. };
  43. const identifier = context.identifier;
  44. const parts = identifier.split('.');
  45. parts.pop();
  46. const basename = parts.join('.');
  47. switch (match) {
  48. case 'darknet.weights':
  49. return context.request(basename + '.cfg', null).then((stream) => {
  50. const buffer = stream.read();
  51. return openModel(metadata, buffer, context.stream);
  52. });
  53. case 'darknet.model':
  54. return context.request(basename + '.weights', null).then((stream) => {
  55. return openModel(metadata, context.stream.peek(), stream);
  56. }).catch(() => {
  57. return openModel(metadata, context.stream.peek(), null);
  58. });
  59. default: {
  60. throw new darknet.Error("Unsupported Darknet format '" + match + "'.");
  61. }
  62. }
  63. });
  64. }
  65. };
  66. darknet.Model = class {
  67. constructor(metadata, cfg, weights) {
  68. this._graphs = [ new darknet.Graph(metadata, cfg, weights) ];
  69. }
  70. get format() {
  71. return 'Darknet';
  72. }
  73. get graphs() {
  74. return this._graphs;
  75. }
  76. };
  77. darknet.Graph = class {
  78. constructor(metadata, cfg, weights) {
  79. this._inputs = [];
  80. this._outputs = [];
  81. this._nodes = [];
  82. // read_cfg
  83. const sections = [];
  84. let section = null;
  85. const reader = text.Reader.open(cfg);
  86. let lineNumber = 0;
  87. for (;;) {
  88. lineNumber++;
  89. const content = reader.read();
  90. if (content === undefined) {
  91. break;
  92. }
  93. const line = content.replace(/\s/g, '');
  94. if (line.length > 0) {
  95. switch (line[0]) {
  96. case '#':
  97. case ';':
  98. break;
  99. case '[': {
  100. const type = line[line.length - 1] === ']' ? line.substring(1, line.length - 1) : line.substring(1);
  101. section = {
  102. line: lineNumber,
  103. type: type,
  104. options: {}
  105. };
  106. sections.push(section);
  107. break;
  108. }
  109. default: {
  110. if (!section || line[0] < 0x20 || line[0] > 0x7E) {
  111. throw new darknet.Error("Invalid cfg '" + content.replace(/[^\x20-\x7E]+/g, '?').trim() + "' at line " + lineNumber.toString() + ".");
  112. }
  113. const index = line.indexOf('=');
  114. if (index < 0) {
  115. throw new darknet.Error("Invalid cfg '" + content.replace(/[^\x20-\x7E]+/g, '?').trim() + "' at line " + lineNumber.toString() + ".");
  116. }
  117. const key = line.substring(0, index);
  118. const value = line.substring(index + 1);
  119. section.options[key] = value;
  120. break;
  121. }
  122. }
  123. }
  124. }
  125. const option_find_int = (options, key, defaultValue) => {
  126. let value = options[key];
  127. if (typeof value === 'string' && value.startsWith('$')) {
  128. const key = value.substring(1);
  129. value = globals.has(key) ? globals.get(key) : value;
  130. }
  131. if (value !== undefined) {
  132. const number = parseInt(value, 10);
  133. if (!Number.isInteger(number)) {
  134. throw new darknet.Error("Invalid int option '" + JSON.stringify(options[key]) + "'.");
  135. }
  136. return number;
  137. }
  138. return defaultValue;
  139. };
  140. const option_find_str = (options, key, defaultValue) => {
  141. const value = options[key];
  142. return value !== undefined ? value : defaultValue;
  143. };
  144. const make_shape = (dimensions, source) => {
  145. if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) {
  146. throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "' in '" + source + "'.");
  147. }
  148. return new darknet.TensorShape(dimensions);
  149. };
  150. const load_weights = (name, shape, visible) => {
  151. const data = weights ? weights.read(4 * shape.reduce((a, b) => a * b, 1)) : null;
  152. const type = new darknet.TensorType('float32', make_shape(shape, 'load_weights'));
  153. const initializer = new darknet.Tensor(type, data);
  154. const argument = new darknet.Argument('', null, initializer);
  155. return new darknet.Parameter(name, visible === false ? false : true, [ argument ]);
  156. };
  157. const load_batch_normalize_weights = (layer, prefix, size) => {
  158. layer.weights.push(load_weights(prefix + 'scale', [ size ], prefix === ''));
  159. layer.weights.push(load_weights(prefix + 'mean', [ size ], prefix === ''));
  160. layer.weights.push(load_weights(prefix + 'variance', [ size ], prefix === ''));
  161. };
  162. const make_convolutional_layer = (layer, prefix, w, h, c, n, groups, size, stride_x, stride_y, padding, batch_normalize) => {
  163. layer.out_w = Math.floor((w + 2 * padding - size) / stride_x) + 1;
  164. layer.out_h = Math.floor((h + 2 * padding - size) / stride_y) + 1;
  165. layer.out_c = n;
  166. layer.out = layer.out_w * layer.out_h * layer.out_c;
  167. layer.weights.push(load_weights(prefix + 'biases', [ n ], prefix === ''));
  168. if (batch_normalize) {
  169. if (prefix) {
  170. load_batch_normalize_weights(layer, prefix, n);
  171. }
  172. else {
  173. const batchnorm_layer = { weights: [] };
  174. load_batch_normalize_weights(batchnorm_layer, prefix, n);
  175. layer.chain.push({ type: 'batchnorm', layer: batchnorm_layer });
  176. }
  177. }
  178. layer.weights.push(load_weights(prefix + 'weights', [ Math.floor(c / groups), n, size, size ], prefix === ''));
  179. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'make_convolutional_layer'));
  180. };
  181. const make_connected_layer = (layer, prefix, inputs, outputs, batch_normalize) => {
  182. layer.out_h = 1;
  183. layer.out_w = 1;
  184. layer.out_c = outputs;
  185. layer.out = outputs;
  186. layer.weights.push(load_weights(prefix + 'biases', [ outputs ], prefix === ''));
  187. if (batch_normalize) {
  188. if (prefix) {
  189. load_batch_normalize_weights(layer, prefix, outputs);
  190. }
  191. else {
  192. const batchnorm_layer = { weights: [] };
  193. load_batch_normalize_weights(batchnorm_layer, prefix, outputs);
  194. layer.chain.push({ type: 'batchnorm', layer: batchnorm_layer });
  195. }
  196. }
  197. layer.weights.push(load_weights(prefix + 'weights', [ inputs, outputs ], prefix === ''));
  198. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'make_connected_layer'));
  199. };
  200. if (sections.length === 0) {
  201. throw new darknet.Error('Config file has no sections.');
  202. }
  203. const params = {};
  204. const globals = new Map();
  205. const net = sections.shift();
  206. switch (net.type) {
  207. case 'net':
  208. case 'network': {
  209. params.h = option_find_int(net.options, 'height', 0);
  210. params.w = option_find_int(net.options, 'width', 0);
  211. params.c = option_find_int(net.options, 'channels', 0);
  212. params.inputs = option_find_int(net.options, 'inputs', params.h * params.w * params.c);
  213. for (const key of Object.keys(net.options)) {
  214. globals.set(key, net.options[key]);
  215. }
  216. break;
  217. }
  218. default: {
  219. throw new darknet.Error("Unexpected '[" + net.type + "]' section. First section must be [net] or [network].");
  220. }
  221. }
  222. const inputType = params.w && params.h && params.c ?
  223. new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'params-if')) :
  224. new darknet.TensorType('float32', make_shape([ params.inputs ], 'params-else'));
  225. const inputName = 'input';
  226. params.arguments = [ new darknet.Argument(inputName, inputType, null) ];
  227. this._inputs.push(new darknet.Parameter(inputName, true, params.arguments));
  228. for (let i = 0; i < sections.length; i++) {
  229. const section = sections[i];
  230. section.name = i.toString();
  231. section.layer = {
  232. inputs: [],
  233. weights: [],
  234. outputs: [ new darknet.Argument(section.name, null, null) ],
  235. chain: []
  236. };
  237. }
  238. let infer = true;
  239. for (let i = 0; i < sections.length; i++) {
  240. const section = sections[i];
  241. const options = section.options;
  242. const layer = section.layer;
  243. layer.inputs.push(...params.arguments);
  244. switch (section.type) {
  245. case 'shortcut': {
  246. let remove = true;
  247. const from = options.from ? options.from.split(',').map((item) => Number.parseInt(item.trim(), 10)) : [];
  248. for (const route of from) {
  249. const index = route < 0 ? i + route : route;
  250. const exists = index >= 0 && index < sections.length;
  251. remove = exists && remove;
  252. if (exists) {
  253. const source = sections[index].layer;
  254. layer.inputs.push(source.outputs[0]);
  255. }
  256. }
  257. if (remove) {
  258. delete options.from;
  259. }
  260. break;
  261. }
  262. case 'sam':
  263. case 'scale_channels': {
  264. const from = option_find_int(options, 'from', 0);
  265. const index = from < 0 ? i + from : from;
  266. if (index >= 0 && index < sections.length) {
  267. const source = sections[index].layer;
  268. layer.from = source;
  269. layer.inputs.push(source.outputs[0]);
  270. delete options.from;
  271. }
  272. break;
  273. }
  274. case 'route': {
  275. layer.inputs = [];
  276. layer.layers = [];
  277. let remove = true;
  278. const routes = options.layers ? options.layers.split(',').map((route) => Number.parseInt(route.trim(), 10)) : [];
  279. for (const route of routes) {
  280. const index = route < 0 ? i + route : route;
  281. const exists = index >= 0 && index < sections.length;
  282. remove = exists && remove;
  283. if (exists) {
  284. const source = sections[index].layer;
  285. layer.inputs.push(source.outputs[0]);
  286. layer.layers.push(source);
  287. }
  288. }
  289. if (remove) {
  290. delete options.layers;
  291. }
  292. break;
  293. }
  294. default:
  295. break;
  296. }
  297. if (infer) {
  298. switch (section.type) {
  299. case 'conv':
  300. case 'convolutional':
  301. case 'deconvolutional': {
  302. const shape = layer.inputs[0].type.shape.dimensions;
  303. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  304. throw new darknet.Error('Layer before convolutional layer must output image.');
  305. }
  306. const size = option_find_int(options, 'size', 1);
  307. const n = option_find_int(options, 'filters', 1);
  308. const pad = option_find_int(options, 'pad', 0);
  309. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  310. let stride_x = option_find_int(options, 'stride_x', -1);
  311. let stride_y = option_find_int(options, 'stride_y', -1);
  312. if (stride_x < 1 || stride_y < 1) {
  313. const stride = option_find_int(options, 'stride', 1);
  314. stride_x = stride_x < 1 ? stride : stride_x;
  315. stride_y = stride_y < 1 ? stride : stride_y;
  316. }
  317. const groups = option_find_int(options, 'groups', 1);
  318. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  319. const activation = option_find_str(options, 'activation', 'logistic');
  320. make_convolutional_layer(layer, '', params.w, params.h, params.c, n, groups, size, stride_x, stride_y, padding, batch_normalize);
  321. if (activation !== 'logistic' && activation !== 'none') {
  322. layer.chain.push({ type: activation });
  323. }
  324. break;
  325. }
  326. case 'connected': {
  327. const outputs = option_find_int(options, 'output', 1);
  328. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  329. const activation = option_find_str(options, 'activation', 'logistic');
  330. make_connected_layer(layer, '', params.inputs, outputs, batch_normalize);
  331. if (activation !== 'logistic' && activation !== 'none') {
  332. layer.chain.push({ type: activation });
  333. }
  334. break;
  335. }
  336. case 'local': {
  337. const shape = layer.inputs[0].type.shape.dimensions;
  338. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  339. throw new darknet.Error('Layer before avgpool layer must output image.');
  340. }
  341. const n = option_find_int(options, 'filters' , 1);
  342. const size = option_find_int(options, 'size', 1);
  343. const stride = option_find_int(options, 'stride', 1);
  344. const pad = option_find_int(options, 'pad', 0);
  345. const activation = option_find_str(options, 'activation', 'logistic');
  346. layer.out_h = Math.floor((params.h - (pad ? 1 : size)) / stride) + 1;
  347. layer.out_w = Math.floor((params.w - (pad ? 1 : size)) / stride) + 1;
  348. layer.out_c = n;
  349. layer.out = layer.out_w * layer.out_h * layer.out_c;
  350. layer.weights.push(load_weights('weights', [ params.c, n, size, size, layer.out_h * layer.out_w ]));
  351. layer.weights.push(load_weights('biases',[ layer.out_w * layer.out_h * layer.out_c ]));
  352. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'local'));
  353. if (activation !== 'logistic' && activation !== 'none') {
  354. layer.chain.push({ type: activation });
  355. }
  356. break;
  357. }
  358. case 'batchnorm': {
  359. layer.out_h = params.h;
  360. layer.out_w = params.w;
  361. layer.out_c = params.c;
  362. layer.out = layer.in;
  363. load_batch_normalize_weights(layer, '', layer.out_c);
  364. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'batchnorm'));
  365. break;
  366. }
  367. case 'activation': {
  368. layer.out_h = params.h;
  369. layer.out_w = params.w;
  370. layer.out_c = params.c;
  371. layer.out = layer.in;
  372. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'activation'));
  373. break;
  374. }
  375. case 'max':
  376. case 'maxpool': {
  377. const shape = layer.inputs[0].type.shape.dimensions;
  378. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  379. throw new darknet.Error('Layer before maxpool layer must output image.');
  380. }
  381. const antialiasing = option_find_int(options, 'antialiasing', 0);
  382. const stride = option_find_int(options, 'stride', 1);
  383. const blur_stride_x = option_find_int(options, 'stride_x', stride);
  384. const blur_stride_y = option_find_int(options, 'stride_y', stride);
  385. const stride_x = antialiasing ? 1 : blur_stride_x;
  386. const stride_y = antialiasing ? 1 : blur_stride_y;
  387. const size = option_find_int(options, 'size', stride);
  388. const padding = option_find_int(options, 'padding', size - 1);
  389. const out_channels = option_find_int(options, 'out_channels', 1);
  390. const maxpool_depth = option_find_int(options, 'maxpool_depth', 0);
  391. if (maxpool_depth) {
  392. layer.out_c = out_channels;
  393. layer.out_w = params.w;
  394. layer.out_h = params.h;
  395. }
  396. else {
  397. layer.out_w = Math.floor((params.w + padding - size) / stride_x) + 1;
  398. layer.out_h = Math.floor((params.h + padding - size) / stride_y) + 1;
  399. layer.out_c = params.c;
  400. }
  401. if (antialiasing) {
  402. const blur_size = antialiasing === 2 ? 2 : 3;
  403. const blur_pad = antialiasing === 2 ? 0 : Math.floor(blur_size / 3);
  404. layer.input_layer = { weights: [], outputs: layer.outputs, chain: [] };
  405. make_convolutional_layer(layer.input_layer, '', layer.out_h, layer.out_w, layer.out_c, layer.out_c, layer.out_c, blur_size, blur_stride_x, blur_stride_y, blur_pad, 0);
  406. layer.out_w = layer.input_layer.out_w;
  407. layer.out_h = layer.input_layer.out_h;
  408. layer.out_c = layer.input_layer.out_c;
  409. }
  410. else {
  411. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'maxpool'));
  412. }
  413. layer.out = layer.out_w * layer.out_h * layer.out_c;
  414. break;
  415. }
  416. case 'avgpool': {
  417. const shape = layer.inputs[0].type.shape.dimensions;
  418. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  419. throw new darknet.Error('Layer before avgpool layer must output image.');
  420. }
  421. layer.out_w = 1;
  422. layer.out_h = 1;
  423. layer.out_c = params.c;
  424. layer.out = layer.out_c;
  425. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'avgpool'));
  426. break;
  427. }
  428. case 'crnn': {
  429. const size = option_find_int(options, 'size', 3);
  430. const stride = option_find_int(options, 'stride', 1);
  431. const output_filters = option_find_int(options, 'output', 1);
  432. const hidden_filters = option_find_int(options, 'hidden', 1);
  433. const groups = option_find_int(options, 'groups', 1);
  434. const pad = option_find_int(options, 'pad', 0);
  435. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  436. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  437. layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  438. make_convolutional_layer(layer.input_layer, 'input_', params.h, params.w, params.c, hidden_filters, groups, size, stride, stride, padding, batch_normalize);
  439. layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  440. make_convolutional_layer(layer.self_layer, 'self_', params.h, params.w, hidden_filters, hidden_filters, groups, size, stride, stride, padding, batch_normalize);
  441. layer.output_layer = { weights: [], outputs: layer.outputs, chain: [] };
  442. make_convolutional_layer(layer.output_layer, 'output_', params.h, params.w, hidden_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  443. layer.weights = layer.weights.concat(layer.input_layer.weights);
  444. layer.weights = layer.weights.concat(layer.self_layer.weights);
  445. layer.weights = layer.weights.concat(layer.output_layer.weights);
  446. layer.out_h = layer.output_layer.out_h;
  447. layer.out_w = layer.output_layer.out_w;
  448. layer.out_c = output_filters;
  449. layer.out = layer.output_layer.out;
  450. break;
  451. }
  452. case 'rnn': {
  453. const outputs = option_find_int(options, 'output', 1);
  454. const hidden = option_find_int(options, 'hidden', 1);
  455. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  456. const inputs = params.inputs;
  457. layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  458. make_connected_layer(layer.input_layer, 'input_', inputs, hidden, batch_normalize);
  459. layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  460. make_connected_layer(layer.self_layer, 'self_', hidden, hidden, batch_normalize);
  461. layer.output_layer = { weights: [], outputs: layer.outputs, chain: [] };
  462. make_connected_layer(layer.output_layer, 'output_', hidden, outputs, batch_normalize);
  463. layer.weights = layer.weights.concat(layer.input_layer.weights);
  464. layer.weights = layer.weights.concat(layer.self_layer.weights);
  465. layer.weights = layer.weights.concat(layer.output_layer.weights);
  466. layer.out_w = 1;
  467. layer.out_h = 1;
  468. layer.out_c = outputs;
  469. layer.out = outputs;
  470. break;
  471. }
  472. case 'gru': {
  473. const inputs = params.inputs;
  474. const outputs = option_find_int(options, 'output', 1);
  475. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  476. layer.input_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  477. make_connected_layer(layer.input_z_layer, 'input_z', inputs, outputs, batch_normalize);
  478. layer.state_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  479. make_connected_layer(layer.state_z_layer, 'state_z', outputs, outputs, batch_normalize);
  480. layer.input_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  481. make_connected_layer(layer.input_r_layer, 'input_r', inputs, outputs, batch_normalize);
  482. layer.state_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  483. make_connected_layer(layer.state_r_layer, 'state_r', outputs, outputs, batch_normalize);
  484. layer.input_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  485. make_connected_layer(layer.input_h_layer, 'input_h', inputs, outputs, batch_normalize);
  486. layer.state_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  487. make_connected_layer(layer.state_h_layer, 'state_h', outputs, outputs, batch_normalize);
  488. layer.weights = layer.weights.concat(layer.input_z_layer.weights);
  489. layer.weights = layer.weights.concat(layer.state_z_layer.weights);
  490. layer.weights = layer.weights.concat(layer.input_r_layer.weights);
  491. layer.weights = layer.weights.concat(layer.state_r_layer.weights);
  492. layer.weights = layer.weights.concat(layer.input_h_layer.weights);
  493. layer.weights = layer.weights.concat(layer.state_h_layer.weights);
  494. layer.out = outputs;
  495. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'gru'));
  496. break;
  497. }
  498. case 'lstm': {
  499. const inputs = params.inputs;
  500. const outputs = option_find_int(options, 'output', 1);
  501. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  502. layer.uf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  503. make_connected_layer(layer.uf, 'uf_', inputs, outputs, batch_normalize);
  504. layer.ui = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  505. make_connected_layer(layer.ui, 'ui_', inputs, outputs, batch_normalize);
  506. layer.ug = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  507. make_connected_layer(layer.ug, 'ug_', inputs, outputs, batch_normalize);
  508. layer.uo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  509. make_connected_layer(layer.uo, 'uo_', inputs, outputs, batch_normalize);
  510. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  511. make_connected_layer(layer.wf, 'wf_', outputs, outputs, batch_normalize);
  512. layer.wi = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  513. make_connected_layer(layer.wi, 'wi_', outputs, outputs, batch_normalize);
  514. layer.wg = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  515. make_connected_layer(layer.wg, 'wg_', outputs, outputs, batch_normalize);
  516. layer.wo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  517. make_connected_layer(layer.wo, 'wo_', outputs, outputs, batch_normalize);
  518. layer.weights = layer.weights.concat(layer.uf.weights);
  519. layer.weights = layer.weights.concat(layer.ui.weights);
  520. layer.weights = layer.weights.concat(layer.ug.weights);
  521. layer.weights = layer.weights.concat(layer.uo.weights);
  522. layer.weights = layer.weights.concat(layer.wf.weights);
  523. layer.weights = layer.weights.concat(layer.wi.weights);
  524. layer.weights = layer.weights.concat(layer.wg.weights);
  525. layer.weights = layer.weights.concat(layer.wo.weights);
  526. layer.out_w = 1;
  527. layer.out_h = 1;
  528. layer.out_c = outputs;
  529. layer.out = outputs;
  530. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'lstm'));
  531. weights = null;
  532. break;
  533. }
  534. case 'conv_lstm': {
  535. const size = option_find_int(options, "size", 3);
  536. const stride = option_find_int(options, "stride", 1);
  537. const output_filters = option_find_int(options, "output", 1);
  538. const groups = option_find_int(options, "groups", 1);
  539. const pad = option_find_int(options, "pad", 0);
  540. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  541. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  542. const bottleneck = option_find_int(options, "bottleneck", 0);
  543. const peephole = option_find_int(options, "peephole", 0);
  544. layer.uf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  545. make_convolutional_layer(layer.uf, 'uf_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  546. layer.ui = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  547. make_convolutional_layer(layer.ui, 'ui_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  548. layer.ug = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  549. make_convolutional_layer(layer.ug, 'ug_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  550. layer.uo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  551. make_convolutional_layer(layer.uo, 'uo_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  552. layer.weights = layer.weights.concat(layer.uf.weights);
  553. layer.weights = layer.weights.concat(layer.ui.weights);
  554. layer.weights = layer.weights.concat(layer.ug.weights);
  555. layer.weights = layer.weights.concat(layer.uo.weights);
  556. if (bottleneck) {
  557. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  558. make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters * 2, output_filters, groups, size, stride, stride, padding, batch_normalize);
  559. layer.weights = layer.weights.concat(layer.wf.weights);
  560. }
  561. else {
  562. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  563. make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  564. layer.wi = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  565. make_convolutional_layer(layer.wi, 'wi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  566. layer.wg = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  567. make_convolutional_layer(layer.wg, 'wg_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  568. layer.wo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  569. make_convolutional_layer(layer.wo, 'wo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  570. layer.weights = layer.weights.concat(layer.wf.weights);
  571. layer.weights = layer.weights.concat(layer.wi.weights);
  572. layer.weights = layer.weights.concat(layer.wg.weights);
  573. layer.weights = layer.weights.concat(layer.wo.weights);
  574. }
  575. if (peephole) {
  576. layer.vf = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  577. make_convolutional_layer(layer.vf, 'vf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  578. layer.vi = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  579. make_convolutional_layer(layer.vi, 'vi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  580. layer.vo = { weights: [], outputs: [ new darknet.Argument('', null, null) ], chain: [] };
  581. make_convolutional_layer(layer.vo, 'vo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  582. layer.weights = layer.weights.concat(layer.vf.weights);
  583. layer.weights = layer.weights.concat(layer.vi.weights);
  584. layer.weights = layer.weights.concat(layer.vo.weights);
  585. }
  586. layer.out_h = layer.uo.out_h;
  587. layer.out_w = layer.uo.out_w;
  588. layer.out_c = output_filters;
  589. layer.out = layer.out_h * layer.out_w * layer.out_c;
  590. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'conv_lstm'));
  591. break;
  592. }
  593. case 'softmax': {
  594. layer.out_w = params.w;
  595. layer.out_h = params.h;
  596. layer.out_c = params.c;
  597. layer.out = params.inputs;
  598. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'softmax'));
  599. break;
  600. }
  601. case 'dropout': {
  602. layer.out_w = params.w;
  603. layer.out_h = params.h;
  604. layer.out_c = params.c;
  605. layer.out = params.inputs;
  606. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'dropout'));
  607. break;
  608. }
  609. case 'upsample': {
  610. const stride = option_find_int(options, 'stride', 2);
  611. layer.out_w = params.w * stride;
  612. layer.out_h = params.h * stride;
  613. layer.out_c = params.c;
  614. layer.out = layer.out_w * layer.out_h * layer.out_c;
  615. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'upsample'));
  616. break;
  617. }
  618. case 'crop': {
  619. const shape = layer.inputs[0].type.shape.dimensions;
  620. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  621. throw new darknet.Error('Layer before crop layer must output image.');
  622. }
  623. const crop_height = option_find_int(options, 'crop_height', 1);
  624. const crop_width = option_find_int(options, 'crop_width', 1);
  625. layer.out_w = crop_width;
  626. layer.out_h = crop_height;
  627. layer.out_c = params.c;
  628. layer.out = layer.out_w * layer.out_h * layer.out_c;
  629. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'crop'));
  630. break;
  631. }
  632. case 'yolo': {
  633. const classes = option_find_int(options, 'classes', 20);
  634. const n = option_find_int(options, 'num', 1);
  635. layer.out_h = params.h;
  636. layer.out_w = params.w;
  637. layer.out_c = n * (classes + 4 + 1);
  638. layer.out = layer.out_h * layer.out_w * layer.out_c;
  639. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'yolo'));
  640. break;
  641. }
  642. case 'Gaussian_yolo': {
  643. const classes = option_find_int(options, 'classes', 20);
  644. const n = option_find_int(options, 'num', 1);
  645. layer.out_h = params.h;
  646. layer.out_w = params.w;
  647. layer.out_c = n * (classes + 8 + 1);
  648. layer.out = layer.out_h * layer.out_w * layer.out_c;
  649. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'Gaussian_yolo'));
  650. break;
  651. }
  652. case 'region': {
  653. const coords = option_find_int(options, 'coords', 4);
  654. const classes = option_find_int(options, 'classes', 20);
  655. const num = option_find_int(options, 'num', 1);
  656. layer.out = params.h * params.w * num * (classes + coords + 1);
  657. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.h, params.w, num, (classes + coords + 1) ], 'region'));
  658. break;
  659. }
  660. case 'cost': {
  661. layer.out = params.inputs;
  662. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'cost'));
  663. break;
  664. }
  665. case 'reorg': {
  666. const stride = option_find_int(options, 'stride', 1);
  667. const reverse = option_find_int(options, 'reverse', 0);
  668. const extra = option_find_int(options, 'extra', 0);
  669. if (reverse) {
  670. layer.out_w = params.w * stride;
  671. layer.out_h = params.h * stride;
  672. layer.out_c = Math.floor(params.c / (stride * stride));
  673. layer.out = layer.out_h * layer.out_w * layer.out_c;
  674. }
  675. else {
  676. layer.out_w = Math.floor(params.w / stride);
  677. layer.out_h = Math.floor(params.h / stride);
  678. layer.out_c = params.c * (stride * stride);
  679. layer.out = layer.out_h * layer.out_w * layer.out_c;
  680. }
  681. if (extra) {
  682. layer.out_w = 0;
  683. layer.out_h = 0;
  684. layer.out_c = 0;
  685. layer.out = (params.h * params.w * params.c) + extra;
  686. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'reorg'));
  687. }
  688. else {
  689. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'reorg'));
  690. }
  691. break;
  692. }
  693. case 'route': {
  694. const layers = [].concat(layer.layers);
  695. const groups = option_find_int(options, 'groups', 1);
  696. layer.out = 0;
  697. for (const next of layers) {
  698. layer.out += next.outputs / groups;
  699. }
  700. if (layers.length > 0) {
  701. const first = layers.shift();
  702. layer.out_w = first.out_w;
  703. layer.out_h = first.out_h;
  704. layer.out_c = first.out_c / groups;
  705. while (layers.length > 0) {
  706. const next = layers.shift();
  707. if (next.out_w === first.out_w && next.out_h === first.out_h) {
  708. layer.out_c += next.out_c;
  709. continue;
  710. }
  711. infer = false;
  712. break;
  713. }
  714. if (infer) {
  715. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'route'));
  716. }
  717. }
  718. else {
  719. infer = false;
  720. }
  721. if (!infer) {
  722. layer.out_h = 0;
  723. layer.out_w = 0;
  724. layer.out_c = 0;
  725. }
  726. break;
  727. }
  728. case 'sam':
  729. case 'scale_channels': {
  730. const activation = option_find_str(options, 'activation', 'linear');
  731. const from = layer.from;
  732. if (from) {
  733. layer.out_w = from.out_w;
  734. layer.out_h = from.out_h;
  735. layer.out_c = from.out_c;
  736. layer.out = layer.out_w * layer.out_h * layer.out_c;
  737. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'shortcut|scale_channels|sam'));
  738. }
  739. if (activation !== 'linear' && activation !== 'none') {
  740. layer.chain.push({ type: activation });
  741. }
  742. break;
  743. }
  744. case 'shortcut': {
  745. const activation = option_find_str(options, 'activation', 'linear');
  746. layer.out_w = params.w;
  747. layer.out_h = params.h;
  748. layer.out_c = params.c;
  749. layer.out = params.w * params.h * params.c;
  750. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'shortcut|scale_channels|sam'));
  751. if (activation !== 'linear' && activation !== 'none') {
  752. layer.chain.push({ type: activation });
  753. }
  754. break;
  755. }
  756. case 'detection': {
  757. layer.out_w = params.w;
  758. layer.out_h = params.h;
  759. layer.out_c = params.c;
  760. layer.out = params.inputs;
  761. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'detection'));
  762. break;
  763. }
  764. default: {
  765. infer = false;
  766. break;
  767. }
  768. }
  769. params.h = layer.out_h;
  770. params.w = layer.out_w;
  771. params.c = layer.out_c;
  772. params.inputs = layer.out;
  773. params.last = section;
  774. }
  775. params.arguments = layer.outputs;
  776. }
  777. for (let i = 0; i < sections.length; i++) {
  778. this._nodes.push(new darknet.Node(metadata, net, sections[i]));
  779. }
  780. if (weights) {
  781. weights.validate();
  782. }
  783. }
// Model-level input arguments collected while building the graph.
get inputs() {
    return this._inputs;
}
// Model-level output arguments collected while building the graph.
get outputs() {
    return this._outputs;
}
// One darknet.Node per configuration section, in file order.
get nodes() {
    return this._nodes;
}
  793. };
  794. darknet.Parameter = class {
  795. constructor(name, visible, args) {
  796. this._name = name;
  797. this._visible = visible;
  798. this._arguments = args;
  799. }
  800. get name() {
  801. return this._name;
  802. }
  803. get visible() {
  804. return this._visible;
  805. }
  806. get arguments() {
  807. return this._arguments;
  808. }
  809. };
  810. darknet.Argument = class {
  811. constructor(name, type, initializer) {
  812. if (typeof name !== 'string') {
  813. throw new darknet.Error("Invalid argument identifier '" + JSON.stringify(name) + "'.");
  814. }
  815. this._name = name;
  816. this._type = type;
  817. this._initializer = initializer;
  818. }
  819. get name() {
  820. return this._name;
  821. }
  822. get type() {
  823. if (this._initializer) {
  824. return this._initializer.type;
  825. }
  826. return this._type;
  827. }
  828. set type(value) {
  829. if (this._type) {
  830. throw new darknet.Error('Invalid argument type set operation.');
  831. }
  832. this._type = value;
  833. }
  834. get initializer() {
  835. return this._initializer;
  836. }
  837. };
  838. darknet.Node = class {
  839. constructor(metadata, net, section) {
  840. this._name = section.name || '';
  841. this._location = section.line !== undefined ? section.line.toString() : undefined;
  842. this._attributes = [];
  843. this._inputs = [];
  844. this._outputs = [];
  845. this._chain = [];
  846. const type = section.type;
  847. this._type = metadata.type(type) || { name: type };
  848. const layer = section.layer;
  849. if (layer && layer.inputs && layer.inputs.length > 0) {
  850. this._inputs.push(new darknet.Parameter(layer.inputs.length <= 1 ? 'input' : 'inputs', true, layer.inputs));
  851. }
  852. if (layer && layer.weights && layer.weights.length > 0) {
  853. this._inputs = this._inputs.concat(layer.weights);
  854. }
  855. if (layer && layer.outputs && layer.outputs.length > 0) {
  856. this._outputs.push(new darknet.Parameter(layer.outputs.length <= 1 ? 'output' : 'outputs', true, layer.outputs));
  857. }
  858. if (layer && layer.chain) {
  859. for (const chain of layer.chain) {
  860. this._chain.push(new darknet.Node(metadata, net, chain, ''));
  861. }
  862. }
  863. const options = section.options;
  864. if (options) {
  865. for (const key of Object.keys(options)) {
  866. this._attributes.push(new darknet.Attribute(metadata.attribute(type, key), key, options[key]));
  867. }
  868. }
  869. }
  870. get name() {
  871. return this._name;
  872. }
  873. get location() {
  874. return this._location;
  875. }
  876. get type() {
  877. return this._type;
  878. }
  879. get attributes() {
  880. return this._attributes;
  881. }
  882. get inputs() {
  883. return this._inputs;
  884. }
  885. get outputs() {
  886. return this._outputs;
  887. }
  888. get chain() {
  889. return this._chain;
  890. }
  891. };
  892. darknet.Attribute = class {
  893. constructor(schema, name, value) {
  894. this._name = name;
  895. this._value = value;
  896. if (schema) {
  897. this._type = schema.type || '';
  898. switch (this._type) {
  899. case '':
  900. case 'string': {
  901. break;
  902. }
  903. case 'int32': {
  904. const number = parseInt(this._value, 10);
  905. if (Number.isInteger(number)) {
  906. this._value = number;
  907. }
  908. break;
  909. }
  910. case 'float32': {
  911. const number = parseFloat(this._value);
  912. if (!isNaN(number)) {
  913. this._value = number;
  914. }
  915. break;
  916. }
  917. case 'int32[]': {
  918. const numbers = this._value.split(',').map((item) => parseInt(item.trim(), 10));
  919. if (numbers.every((number) => Number.isInteger(number))) {
  920. this._value = numbers;
  921. }
  922. break;
  923. }
  924. default: {
  925. throw new darknet.Error("Unsupported attribute type '" + this._type + "'.");
  926. }
  927. }
  928. if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) {
  929. this._visible = false;
  930. }
  931. else if (Object.prototype.hasOwnProperty.call(schema, 'default')) {
  932. if (this._value == schema.default) {
  933. this._visible = false;
  934. }
  935. }
  936. }
  937. }
  938. get name() {
  939. return this._name;
  940. }
  941. get type() {
  942. return this._type;
  943. }
  944. get value() {
  945. return this._value;
  946. }
  947. get visible() {
  948. return this._visible == false ? false : true;
  949. }
  950. };
  951. darknet.Tensor = class {
  952. constructor(type, data) {
  953. this._type = type;
  954. this._values = data;
  955. }
  956. get category() {
  957. return 'Weights';
  958. }
  959. get name() {
  960. return '';
  961. }
  962. get type() {
  963. return this._type;
  964. }
  965. get values() {
  966. return this._values;
  967. }
  968. };
  969. darknet.TensorType = class {
  970. constructor(dataType, shape) {
  971. this._dataType = dataType;
  972. this._shape = shape;
  973. }
  974. get dataType() {
  975. return this._dataType;
  976. }
  977. get shape() {
  978. return this._shape;
  979. }
  980. toString() {
  981. return (this._dataType || '?') + this._shape.toString();
  982. }
  983. };
  984. darknet.TensorShape = class {
  985. constructor(dimensions) {
  986. if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) {
  987. throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "'.");
  988. }
  989. this._dimensions = dimensions;
  990. }
  991. get dimensions() {
  992. return this._dimensions;
  993. }
  994. toString() {
  995. if (this._dimensions) {
  996. if (this._dimensions.length == 0) {
  997. return '';
  998. }
  999. return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']';
  1000. }
  1001. return '';
  1002. }
  1003. };
  1004. darknet.Weights = class {
  1005. static open(stream) {
  1006. if (stream && stream.length >= 20) {
  1007. const buffer = stream.peek(12);
  1008. const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
  1009. const major = view.getInt32(0, true);
  1010. const minor = view.getInt32(4, true);
  1011. view.getInt32(8, true); // revision
  1012. const transpose = (major > 1000) || (minor > 1000);
  1013. if (!transpose) {
  1014. stream.skip(12 + (((major * 10 + minor) >= 2) ? 8 : 4));
  1015. return new darknet.Weights(stream);
  1016. }
  1017. }
  1018. return null;
  1019. }
  1020. constructor(stream) {
  1021. this._stream = stream;
  1022. }
  1023. read(size) {
  1024. return this._stream.read(size);
  1025. }
  1026. validate() {
  1027. if (this._stream.position != this._stream.length) {
  1028. throw new darknet.Error('Invalid weights size.');
  1029. }
  1030. }
  1031. };
// Error type for all darknet model-loading failures; the name is shown to the user.
darknet.Error = class extends Error {
    constructor(message) {
        super(message);
        this.name = 'Error loading Darknet model.';
    }
};
// CommonJS export guard: 'module' is undefined when loaded directly in a browser.
if (typeof module !== 'undefined' && typeof module.exports === 'object') {
    module.exports.ModelFactory = darknet.ModelFactory;
}