darknet.js 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207
  1. /* jshint esversion: 6 */
  2. var darknet = darknet || {};
  3. var base = base || require('./base');
  4. darknet.ModelFactory = class {
  5. match(context) {
  6. const identifier = context.identifier;
  7. const extension = identifier.split('.').pop().toLowerCase();
  8. switch (extension) {
  9. case 'weights':
  10. if (darknet.Weights.open(context.stream)) {
  11. return true;
  12. }
  13. break;
  14. default:
  15. try {
  16. const reader = base.TextReader.create(context.stream.peek(), 65536);
  17. for (;;) {
  18. const line = reader.read();
  19. if (line === undefined) {
  20. break;
  21. }
  22. const text = line.trim();
  23. if (text.length === 0 || text.startsWith('#')) {
  24. continue;
  25. }
  26. if (text.startsWith('[') && text.endsWith(']')) {
  27. return true;
  28. }
  29. return false;
  30. }
  31. }
  32. catch (err) {
  33. // continue regardless of error
  34. }
  35. break;
  36. }
  37. return false;
  38. }
  39. open(context) {
  40. return darknet.Metadata.open(context).then((metadata) => {
  41. const open = (metadata, cfg, weights) => {
  42. return new darknet.Model(metadata, cfg, darknet.Weights.open(weights));
  43. };
  44. const identifier = context.identifier;
  45. const parts = identifier.split('.');
  46. const extension = parts.pop().toLowerCase();
  47. const basename = parts.join('.');
  48. switch (extension) {
  49. case 'weights':
  50. return context.request(basename + '.cfg', null).then((stream) => {
  51. const buffer = stream.read();
  52. return open(metadata, buffer, context.stream);
  53. });
  54. default:
  55. return context.request(basename + '.weights', null).then((stream) => {
  56. return open(metadata, context.stream.peek(), stream);
  57. }).catch(() => {
  58. return open(metadata, context.stream.peek(), null);
  59. });
  60. }
  61. });
  62. }
  63. };
  64. darknet.Model = class {
  65. constructor(metadata, cfg, weights) {
  66. this._graphs = [ new darknet.Graph(metadata, cfg, weights) ];
  67. }
  68. get format() {
  69. return 'Darknet';
  70. }
  71. get graphs() {
  72. return this._graphs;
  73. }
  74. };
darknet.Graph = class {

    constructor(metadata, cfg, weights) {
        this._inputs = [];
        this._outputs = [];
        this._nodes = [];
        // read_cfg: parse the .cfg text into an ordered list of
        // { line, type, options } sections. A section is a '[type]'
        // header followed by 'key=value' pairs; '#' and ';' start comments.
        const sections = [];
        let section = null;
        const reader = base.TextReader.create(cfg);
        let lineNumber = 0;
        for (;;) {
            lineNumber++;
            const text = reader.read();
            if (text === undefined) {
                break;
            }
            // All whitespace is stripped, so keys/values never contain spaces.
            const line = text.replace(/\s/g, '');
            if (line.length > 0) {
                switch (line[0]) {
                    case '#':
                    case ';':
                        // Comment line - ignored.
                        break;
                    case '[': {
                        // Section header '[type]' (a missing closing ']' is tolerated).
                        const type = line[line.length - 1] === ']' ? line.substring(1, line.length - 1) : line.substring(1);
                        section = {
                            line: lineNumber,
                            type: type,
                            options: {}
                        };
                        sections.push(section);
                        break;
                    }
                    default: {
                        // 'key=value' option line; must follow a section header.
                        // NOTE(review): line[0] is a one-character string, so these
                        // numeric comparisons coerce via Number() - for alphabetic
                        // keys they are always false; was charCodeAt(0) intended? Verify.
                        if (!section || line[0] < 0x20 || line[0] > 0x7E) {
                            throw new darknet.Error("Invalid cfg '" + text.replace(/[^\x20-\x7E]+/g, '?').trim() + "' at line " + lineNumber.toString() + ".");
                        }
                        const index = line.indexOf('=');
                        if (index < 0) {
                            throw new darknet.Error("Invalid cfg '" + text.replace(/[^\x20-\x7E]+/g, '?').trim() + "' at line " + lineNumber.toString() + ".");
                        }
                        const key = line.substring(0, index);
                        const value = line.substring(index + 1);
                        section.options[key] = value;
                        break;
                    }
                }
            }
        }
  123. const option_find_int = (options, key, defaultValue) => {
  124. let value = options[key];
  125. if (typeof value === 'string' && value.startsWith('$')) {
  126. const key = value.substring(1);
  127. value = globals.has(key) ? globals.get(key) : value;
  128. }
  129. if (value !== undefined) {
  130. const number = parseInt(value, 10);
  131. if (!Number.isInteger(number)) {
  132. throw new darknet.Error("Invalid int option '" + JSON.stringify(options[key]) + "'.");
  133. }
  134. return number;
  135. }
  136. return defaultValue;
  137. };
  138. const option_find_str = (options, key, defaultValue) => {
  139. const value = options[key];
  140. return value !== undefined ? value : defaultValue;
  141. };
  142. const make_shape = (dimensions, source) => {
  143. if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) {
  144. throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "' in '" + source + "'.");
  145. }
  146. return new darknet.TensorShape(dimensions);
  147. };
  148. const load_weights = (name, shape, visible) => {
  149. const data = weights ? weights.read(4 * shape.reduce((a, b) => a * b)) : null;
  150. const type = new darknet.TensorType('float32', make_shape(shape, 'load_weights'));
  151. const initializer = new darknet.Tensor(type, data);
  152. const argument = new darknet.Argument('', null, initializer);
  153. return new darknet.Parameter(name, visible === false ? false : true, [ argument ]);
  154. };
  155. const load_batch_normalize_weights = (layer, prefix, size) => {
  156. layer.weights.push(load_weights(prefix + 'scale', [ size ], prefix === ''));
  157. layer.weights.push(load_weights(prefix + 'mean', [ size ], prefix === ''));
  158. layer.weights.push(load_weights(prefix + 'variance', [ size ], prefix === ''));
  159. };
  160. const make_convolutional_layer = (layer, prefix, w, h, c, n, groups, size, stride_x, stride_y, padding, batch_normalize) => {
  161. layer.out_w = Math.floor((w + 2 * padding - size) / stride_x) + 1;
  162. layer.out_h = Math.floor((h + 2 * padding - size) / stride_y) + 1;
  163. layer.out_c = n;
  164. layer.out = layer.out_w * layer.out_h * layer.out_c;
  165. layer.weights.push(load_weights(prefix + 'biases', [ n ], prefix === ''));
  166. if (batch_normalize) {
  167. load_batch_normalize_weights(layer, prefix, n);
  168. }
  169. layer.weights.push(load_weights(prefix + 'weights', [ Math.floor(c / groups), n, size, size ], prefix === ''));
  170. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'make_convolutional_layer'));
  171. };
  172. const make_connected_layer = (layer, prefix, inputs, outputs, batch_normalize) => {
  173. layer.out_h = 1;
  174. layer.out_w = 1;
  175. layer.out_c = outputs;
  176. layer.out = outputs;
  177. layer.weights.push(load_weights(prefix + 'biases', [ outputs ], prefix === ''));
  178. if (batch_normalize) {
  179. load_batch_normalize_weights(layer, prefix, outputs);
  180. }
  181. layer.weights.push(load_weights(prefix + 'weights', [ inputs, outputs ], prefix === ''));
  182. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'make_connected_layer'));
  183. };
        if (sections.length === 0) {
            throw new darknet.Error('Config file has no sections.');
        }
        // The first section must be [net]/[network]; it seeds the running input
        // geometry (params.w/h/c/inputs) and the globals map that backs the
        // '$name' references resolved by option_find_int.
        const params = {};
        const globals = new Map();
        const net = sections.shift();
        switch (net.type) {
            case 'net':
            case 'network': {
                params.h = option_find_int(net.options, 'height', 0);
                params.w = option_find_int(net.options, 'width', 0);
                params.c = option_find_int(net.options, 'channels', 0);
                // Default flat input size is h*w*c when 'inputs' is not given.
                params.inputs = option_find_int(net.options, 'inputs', params.h * params.w * params.c);
                for (const key of Object.keys(net.options)) {
                    globals.set(key, net.options[key]);
                }
                break;
            }
            default: {
                throw new darknet.Error("Unexpected '[" + net.type + "]' section. First section must be [net] or [network].");
            }
        }
        // Model input: an image [w, h, c] when the full geometry is known,
        // otherwise a flat vector of params.inputs elements.
        const inputType = params.w && params.h && params.c ?
            new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'params-if')) :
            new darknet.TensorType('float32', make_shape([ params.inputs ], 'params-else'));
        const inputName = 'input';
        params.arguments = [ new darknet.Argument(inputName, inputType, null) ];
        this._inputs.push(new darknet.Parameter(inputName, true, params.arguments));
        // Pre-create one layer record (with a named output argument) per
        // remaining section, so later sections can reference earlier outputs
        // by index before those layers are fully built.
        for (let i = 0; i < sections.length; i++) {
            const section = sections[i];
            section.name = i.toString();
            section.chain = [];
            section.layer = {
                inputs: [],
                weights: [],
                outputs: [ new darknet.Argument(section.name, null, null) ]
            };
        }
        let infer = true;
        for (let i = 0; i < sections.length; i++) {
            const section = sections[i];
            const options = section.options;
            const layer = section.layer;
            // Every layer consumes the current running input argument(s).
            layer.inputs.push(...params.arguments);
            // Wire cross-layer references: 'from'/'layers' indices are
            // absolute, or relative to the current layer when negative.
            switch (section.type) {
                case 'shortcut': {
                    let remove = true;
                    const from = options.from ? options.from.split(',').map((item) => Number.parseInt(item.trim(), 10)) : [];
                    for (const route of from) {
                        const index = route < 0 ? i + route : route;
                        const exists = index >= 0 && index < sections.length;
                        remove = exists && remove;
                        if (exists) {
                            const source = sections[index].layer;
                            layer.inputs.push(source.outputs[0]);
                        }
                    }
                    // Drop the raw option only when every reference resolved.
                    if (remove) {
                        delete options.from;
                    }
                    break;
                }
                case 'sam':
                case 'scale_channels': {
                    const from = option_find_int(options, 'from', 0);
                    const index = from < 0 ? i + from : from;
                    if (index >= 0 && index < sections.length) {
                        const source = sections[index].layer;
                        layer.from = source;
                        layer.inputs.push(source.outputs[0]);
                        delete options.from;
                    }
                    break;
                }
                case 'route': {
                    // A route layer replaces its inputs entirely with the
                    // outputs of the referenced layers.
                    layer.inputs = [];
                    layer.layers = [];
                    let remove = true;
                    const routes = options.layers ? options.layers.split(',').map((route) => Number.parseInt(route.trim(), 10)) : [];
                    for (const route of routes) {
                        const index = route < 0 ? i + route : route;
                        const exists = index >= 0 && index < sections.length;
                        remove = exists && remove;
                        if (exists) {
                            const source = sections[index].layer;
                            layer.inputs.push(source.outputs[0]);
                            layer.layers.push(source);
                        }
                    }
                    if (remove) {
                        delete options.layers;
                    }
                    break;
                }
            }
            // Shape-inference pass: while 'infer' holds, compute each layer's
            // output geometry from the running params and load its weights in
            // the exact order Darknet's C reader consumes them.
            if (infer) {
                switch (section.type) {
                    case 'conv':
                    case 'convolutional':
                    case 'deconvolutional': {
                        // Requires the running input to be an image matching params.
                        const shape = layer.inputs[0].type.shape.dimensions;
                        if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
                            throw new darknet.Error('Layer before convolutional layer must output image.');
                        }
                        const size = option_find_int(options, 'size', 1);
                        const n = option_find_int(options, 'filters', 1);
                        const pad = option_find_int(options, 'pad', 0);
                        // 'pad=1' means "same"-style padding of size/2; otherwise use explicit 'padding'.
                        const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
                        let stride_x = option_find_int(options, 'stride_x', -1);
                        let stride_y = option_find_int(options, 'stride_y', -1);
                        // Per-axis strides fall back to the common 'stride'.
                        if (stride_x < 1 || stride_y < 1) {
                            const stride = option_find_int(options, 'stride', 1);
                            stride_x = stride_x < 1 ? stride : stride_x;
                            stride_y = stride_y < 1 ? stride : stride_y;
                        }
                        const groups = option_find_int(options, 'groups', 1);
                        const batch_normalize = option_find_int(options, 'batch_normalize', 0);
                        const activation = option_find_str(options, 'activation', 'logistic');
                        make_convolutional_layer(layer, '', params.w, params.h, params.c, n, groups, size, stride_x, stride_y, padding, batch_normalize);
                        // Non-default activations become a chained follow-up node.
                        if (activation !== 'logistic' && activation !== 'none') {
                            section.chain.push({ type: activation });
                        }
                        break;
                    }
                    case 'connected': {
                        const outputs = option_find_int(options, 'output', 1);
                        const batch_normalize = option_find_int(options, 'batch_normalize', 0);
                        const activation = option_find_str(options, 'activation', 'logistic');
                        make_connected_layer(layer, '', params.inputs, outputs, batch_normalize);
                        if (activation !== 'logistic' && activation !== 'none') {
                            section.chain.push({ type: activation });
                        }
                        break;
                    }
                    case 'local': {
                        const shape = layer.inputs[0].type.shape.dimensions;
                        // NOTE(review): error text says 'avgpool' inside the local
                        // layer case - looks like a copy-paste; confirm intended.
                        if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
                            throw new darknet.Error('Layer before avgpool layer must output image.');
                        }
                        const n = option_find_int(options, 'filters', 1);
                        const size = option_find_int(options, 'size', 1);
                        const stride = option_find_int(options, 'stride', 1);
                        const pad = option_find_int(options, 'pad', 0);
                        const activation = option_find_str(options, 'activation', 'logistic');
                        layer.out_h = Math.floor((params.h - (pad ? 1 : size)) / stride) + 1;
                        layer.out_w = Math.floor((params.w - (pad ? 1 : size)) / stride) + 1;
                        layer.out_c = n;
                        layer.out = layer.out_w * layer.out_h * layer.out_c;
                        // Locally-connected weights have one kernel per output position.
                        layer.weights.push(load_weights('weights', [ params.c, n, size, size, layer.out_h * layer.out_w ]));
                        layer.weights.push(load_weights('biases', [ layer.out_w * layer.out_h * layer.out_c ]));
                        layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'local'));
                        if (activation !== 'logistic' && activation !== 'none') {
                            section.chain.push({ type: activation });
                        }
                        break;
                    }
                    case 'batchnorm': {
                        // Shape-preserving.
                        layer.out_h = params.h;
                        layer.out_w = params.w;
                        layer.out_c = params.c;
                        // NOTE(review): layer.in is never assigned in this file's
                        // visible code, so layer.out becomes undefined here - verify.
                        layer.out = layer.in;
                        load_batch_normalize_weights(layer, '', layer.out_c);
                        layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'batchnorm'));
                        break;
                    }
                    case 'activation': {
                        // Shape-preserving.
                        layer.out_h = params.h;
                        layer.out_w = params.w;
                        layer.out_c = params.c;
                        // NOTE(review): same unassigned layer.in as in 'batchnorm' - verify.
                        layer.out = layer.in;
                        layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'activation'));
                        break;
                    }
                    case 'max':
                    case 'maxpool': {
                        const shape = layer.inputs[0].type.shape.dimensions;
                        if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
                            throw new darknet.Error('Layer before maxpool layer must output image.');
                        }
                        // With antialiasing, the pool itself runs at stride 1 and a
                        // follow-up blur convolution applies the requested stride.
                        const antialiasing = option_find_int(options, 'antialiasing', 0);
                        const stride = option_find_int(options, 'stride', 1);
                        const blur_stride_x = option_find_int(options, 'stride_x', stride);
                        const blur_stride_y = option_find_int(options, 'stride_y', stride);
                        const stride_x = antialiasing ? 1 : blur_stride_x;
                        const stride_y = antialiasing ? 1 : blur_stride_y;
                        const size = option_find_int(options, 'size', stride);
                        const padding = option_find_int(options, 'padding', size - 1);
                        const out_channels = option_find_int(options, 'out_channels', 1);
                        const maxpool_depth = option_find_int(options, 'maxpool_depth', 0);
                        if (maxpool_depth) {
                            // Depth pooling keeps the spatial size, reduces channels.
                            layer.out_c = out_channels;
                            layer.out_w = params.w;
                            layer.out_h = params.h;
                        }
                        else {
                            layer.out_w = Math.floor((params.w + padding - size) / stride_x) + 1;
                            layer.out_h = Math.floor((params.h + padding - size) / stride_y) + 1;
                            layer.out_c = params.c;
                        }
                        if (antialiasing) {
                            const blur_size = antialiasing === 2 ? 2 : 3;
                            const blur_pad = antialiasing === 2 ? 0 : Math.floor(blur_size / 3);
                            // Depthwise blur convolution (groups == channels) that owns the pool's output.
                            // NOTE(review): out_h/out_w are passed into the (w, h) parameters
                            // of make_convolutional_layer - confirm the swap is intended.
                            layer.input_layer = { weights: [], outputs: layer.outputs };
                            make_convolutional_layer(layer.input_layer, '', layer.out_h, layer.out_w, layer.out_c, layer.out_c, layer.out_c, blur_size, blur_stride_x, blur_stride_y, blur_pad, 0);
                            layer.out_w = layer.input_layer.out_w;
                            layer.out_h = layer.input_layer.out_h;
                            layer.out_c = layer.input_layer.out_c;
                        }
                        else {
                            layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'maxpool'));
                        }
                        layer.out = layer.out_w * layer.out_h * layer.out_c;
                        break;
                    }
                    case 'avgpool': {
                        const shape = layer.inputs[0].type.shape.dimensions;
                        if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
                            throw new darknet.Error('Layer before avgpool layer must output image.');
                        }
                        // Global average pool: 1x1 spatial output, channels preserved.
                        layer.out_w = 1;
                        layer.out_h = 1;
                        layer.out_c = params.c;
                        layer.out = layer.out_c;
                        layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'avgpool'));
                        break;
                    }
                    case 'crnn': {
                        // Convolutional RNN: input, recurrent (self) and output
                        // convolutions; their weights are concatenated in that order.
                        const size = option_find_int(options, 'size', 3);
                        const stride = option_find_int(options, 'stride', 1);
                        const output_filters = option_find_int(options, 'output', 1);
                        const hidden_filters = option_find_int(options, 'hidden', 1);
                        const groups = option_find_int(options, 'groups', 1);
                        const pad = option_find_int(options, 'pad', 0);
                        const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
                        const batch_normalize = option_find_int(options, 'batch_normalize', 0);
                        // NOTE(review): params.h is passed into the 'w' parameter and
                        // params.w into 'h' for all three sub-layers - confirm intended.
                        layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_convolutional_layer(layer.input_layer, 'input_', params.h, params.w, params.c, hidden_filters, groups, size, stride, stride, padding, batch_normalize);
                        layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_convolutional_layer(layer.self_layer, 'self_', params.h, params.w, hidden_filters, hidden_filters, groups, size, stride, stride, padding, batch_normalize);
                        // The output sub-layer owns this layer's output argument.
                        layer.output_layer = { weights: [], outputs: layer.outputs };
                        make_convolutional_layer(layer.output_layer, 'output_', params.h, params.w, hidden_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
                        layer.weights = layer.weights.concat(layer.input_layer.weights);
                        layer.weights = layer.weights.concat(layer.self_layer.weights);
                        layer.weights = layer.weights.concat(layer.output_layer.weights);
                        layer.out_h = layer.output_layer.out_h;
                        layer.out_w = layer.output_layer.out_w;
                        layer.out_c = output_filters;
                        layer.out = layer.output_layer.out;
                        break;
                    }
                    case 'rnn': {
                        // Vanilla RNN built from three connected sub-layers.
                        const outputs = option_find_int(options, 'output', 1);
                        const hidden = option_find_int(options, 'hidden', 1);
                        const batch_normalize = option_find_int(options, 'batch_normalize', 0);
                        const inputs = params.inputs;
                        layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.input_layer, 'input_', inputs, hidden, batch_normalize);
                        layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.self_layer, 'self_', hidden, hidden, batch_normalize);
                        layer.output_layer = { weights: [], outputs: layer.outputs };
                        make_connected_layer(layer.output_layer, 'output_', hidden, outputs, batch_normalize);
                        layer.weights = layer.weights.concat(layer.input_layer.weights);
                        layer.weights = layer.weights.concat(layer.self_layer.weights);
                        layer.weights = layer.weights.concat(layer.output_layer.weights);
                        layer.out_w = 1;
                        layer.out_h = 1;
                        layer.out_c = outputs;
                        layer.out = outputs;
                        break;
                    }
                    case 'gru': {
                        // GRU: z/r/h gates, each with an input and a state projection.
                        const inputs = params.inputs;
                        const outputs = option_find_int(options, 'output', 1);
                        const batch_normalize = option_find_int(options, 'batch_normalize', 0);
                        layer.input_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.input_z_layer, 'input_z', inputs, outputs, batch_normalize);
                        layer.state_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.state_z_layer, 'state_z', outputs, outputs, batch_normalize);
                        layer.input_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.input_r_layer, 'input_r', inputs, outputs, batch_normalize);
                        layer.state_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.state_r_layer, 'state_r', outputs, outputs, batch_normalize);
                        layer.input_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.input_h_layer, 'input_h', inputs, outputs, batch_normalize);
                        layer.state_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.state_h_layer, 'state_h', outputs, outputs, batch_normalize);
                        layer.weights = layer.weights.concat(layer.input_z_layer.weights);
                        layer.weights = layer.weights.concat(layer.state_z_layer.weights);
                        layer.weights = layer.weights.concat(layer.input_r_layer.weights);
                        layer.weights = layer.weights.concat(layer.state_r_layer.weights);
                        layer.weights = layer.weights.concat(layer.input_h_layer.weights);
                        layer.weights = layer.weights.concat(layer.state_h_layer.weights);
                        layer.out = outputs;
                        layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'gru'));
                        break;
                    }
                    case 'lstm': {
                        // LSTM: four input projections (uf/ui/ug/uo) and four state
                        // projections (wf/wi/wg/wo), concatenated in that order.
                        const inputs = params.inputs;
                        const outputs = option_find_int(options, 'output', 1);
                        const batch_normalize = option_find_int(options, 'batch_normalize', 0);
                        layer.uf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.uf, 'uf_', inputs, outputs, batch_normalize);
                        layer.ui = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.ui, 'ui_', inputs, outputs, batch_normalize);
                        layer.ug = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.ug, 'ug_', inputs, outputs, batch_normalize);
                        layer.uo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.uo, 'uo_', inputs, outputs, batch_normalize);
                        layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.wf, 'wf_', outputs, outputs, batch_normalize);
                        layer.wi = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.wi, 'wi_', outputs, outputs, batch_normalize);
                        layer.wg = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.wg, 'wg_', outputs, outputs, batch_normalize);
                        layer.wo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
                        make_connected_layer(layer.wo, 'wo_', outputs, outputs, batch_normalize);
                        layer.weights = layer.weights.concat(layer.uf.weights);
                        layer.weights = layer.weights.concat(layer.ui.weights);
                        layer.weights = layer.weights.concat(layer.ug.weights);
                        layer.weights = layer.weights.concat(layer.uo.weights);
                        layer.weights = layer.weights.concat(layer.wf.weights);
                        layer.weights = layer.weights.concat(layer.wi.weights);
                        layer.weights = layer.weights.concat(layer.wg.weights);
                        layer.weights = layer.weights.concat(layer.wo.weights);
                        layer.out_w = 1;
                        layer.out_h = 1;
                        layer.out_c = outputs;
                        layer.out = outputs;
                        layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'lstm'));
                        // Nulling the stream stops weight loading for all later layers.
                        // NOTE(review): verify this is intentional and not debug residue.
                        weights = null;
                        break;
                    }
  516. case 'conv_lstm': {
  517. const size = option_find_int(options, "size", 3);
  518. const stride = option_find_int(options, "stride", 1);
  519. const output_filters = option_find_int(options, "output", 1);
  520. const groups = option_find_int(options, "groups", 1);
  521. const pad = option_find_int(options, "pad", 0);
  522. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  523. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  524. const bottleneck = option_find_int(options, "bottleneck", 0);
  525. const peephole = option_find_int(options, "peephole", 0);
  526. layer.uf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  527. make_convolutional_layer(layer.uf, 'uf_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  528. layer.ui = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  529. make_convolutional_layer(layer.ui, 'ui_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  530. layer.ug = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  531. make_convolutional_layer(layer.ug, 'ug_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  532. layer.uo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  533. make_convolutional_layer(layer.uo, 'uo_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  534. layer.weights = layer.weights.concat(layer.uf.weights);
  535. layer.weights = layer.weights.concat(layer.ui.weights);
  536. layer.weights = layer.weights.concat(layer.ug.weights);
  537. layer.weights = layer.weights.concat(layer.uo.weights);
  538. if (bottleneck) {
  539. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  540. make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters * 2, output_filters, groups, size, stride, stride, padding, batch_normalize);
  541. layer.weights = layer.weights.concat(layer.wf.weights);
  542. }
  543. else {
  544. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  545. make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  546. layer.wi = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  547. make_convolutional_layer(layer.wi, 'wi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  548. layer.wg = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  549. make_convolutional_layer(layer.wg, 'wg_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  550. layer.wo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  551. make_convolutional_layer(layer.wo, 'wo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  552. layer.weights = layer.weights.concat(layer.wf.weights);
  553. layer.weights = layer.weights.concat(layer.wi.weights);
  554. layer.weights = layer.weights.concat(layer.wg.weights);
  555. layer.weights = layer.weights.concat(layer.wo.weights);
  556. }
  557. if (peephole) {
  558. layer.vf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  559. make_convolutional_layer(layer.vf, 'vf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  560. layer.vi = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  561. make_convolutional_layer(layer.vi, 'vi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  562. layer.vo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  563. make_convolutional_layer(layer.wo, 'vo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  564. layer.weights = layer.weights.concat(layer.vf.weights);
  565. layer.weights = layer.weights.concat(layer.vi.weights);
  566. layer.weights = layer.weights.concat(layer.vo.weights);
  567. }
  568. layer.out_h = layer.uo.out_h;
  569. layer.out_w = layer.uo.out_w;
  570. layer.out_c = output_filters;
  571. layer.out = layer.out_h * layer.out_w * layer.out_c;
  572. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'conv_lstm'));
  573. break;
  574. }
  575. case 'softmax': {
  576. layer.out_w = params.w;
  577. layer.out_h = params.h;
  578. layer.out_c = params.c;
  579. layer.out = params.inputs;
  580. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'softmax'));
  581. break;
  582. }
  583. case 'dropout': {
  584. layer.out_w = params.w;
  585. layer.out_h = params.h;
  586. layer.out_c = params.c;
  587. layer.out = params.inputs;
  588. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'dropout'));
  589. break;
  590. }
  591. case 'upsample': {
  592. const stride = option_find_int(options, 'stride', 2);
  593. layer.out_w = params.w * stride;
  594. layer.out_h = params.h * stride;
  595. layer.out_c = params.c;
  596. layer.out = layer.out_w * layer.out_h * layer.out_c;
  597. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'upsample'));
  598. break;
  599. }
  600. case 'crop': {
  601. const shape = layer.inputs[0].type.shape.dimensions;
  602. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  603. throw new darknet.Error('Layer before crop layer must output image.');
  604. }
  605. const crop_height = option_find_int(options, 'crop_height', 1);
  606. const crop_width = option_find_int(options, 'crop_width', 1);
  607. layer.out_w = crop_width;
  608. layer.out_h = crop_height;
  609. layer.out_c = params.c;
  610. layer.out = layer.out_w * layer.out_h * layer.out_c;
  611. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'crop'));
  612. break;
  613. }
  614. case 'yolo': {
  615. const classes = option_find_int(options, 'classes', 20);
  616. const n = option_find_int(options, 'num', 1);
  617. layer.out_h = params.h;
  618. layer.out_w = params.w;
  619. layer.out_c = n * (classes + 4 + 1);
  620. layer.out = layer.out_h * layer.out_w * layer.out_c;
  621. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'yolo'));
  622. break;
  623. }
  624. case 'Gaussian_yolo': {
  625. const classes = option_find_int(options, 'classes', 20);
  626. const n = option_find_int(options, 'num', 1);
  627. layer.out_h = params.h;
  628. layer.out_w = params.w;
  629. layer.out_c = n * (classes + 8 + 1);
  630. layer.out = layer.out_h * layer.out_w * layer.out_c;
  631. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'Gaussian_yolo'));
  632. break;
  633. }
  634. case 'region': {
  635. const coords = option_find_int(options, 'coords', 4);
  636. const classes = option_find_int(options, 'classes', 20);
  637. const num = option_find_int(options, 'num', 1);
  638. layer.out = params.h * params.w * num * (classes + coords + 1);
  639. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.h, params.w, num, (classes + coords + 1) ], 'region'));
  640. break;
  641. }
  642. case 'cost': {
  643. layer.out = params.inputs;
  644. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'cost'));
  645. break;
  646. }
  647. case 'reorg': {
  648. const stride = option_find_int(options, 'stride', 1);
  649. const reverse = option_find_int(options, 'reverse', 0);
  650. const extra = option_find_int(options, 'extra', 0);
  651. if (reverse) {
  652. layer.out_w = params.w * stride;
  653. layer.out_h = params.h * stride;
  654. layer.out_c = Math.floor(params.c / (stride * stride));
  655. }
  656. else {
  657. layer.out_w = Math.floor(params.w / stride);
  658. layer.out_h = Math.floor(params.h / stride);
  659. layer.out_c = params.c * (stride * stride);
  660. }
  661. layer.out = layer.out_h * layer.out_w * layer.out_c;
  662. if (extra) {
  663. layer.out_w = 0;
  664. layer.out_h = 0;
  665. layer.out_c = 0;
  666. layer.out = (params.h * params.w * params.c) + extra;
  667. }
  668. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'reorg'));
  669. break;
  670. }
  671. case 'route': {
  672. const layers = [].concat(layer.layers);
  673. const groups = option_find_int(options, 'groups', 1);
  674. layer.out = 0;
  675. for (const next of layers) {
  676. layer.out += next.outputs / groups;
  677. }
  678. if (layers.length > 0) {
  679. const first = layers.shift();
  680. layer.out_w = first.out_w;
  681. layer.out_h = first.out_h;
  682. layer.out_c = first.out_c / groups;
  683. while (layers.length > 0) {
  684. const next = layers.shift();
  685. if (next.out_w === first.out_w && next.out_h === first.out_h) {
  686. layer.out_c += next.out_c;
  687. continue;
  688. }
  689. infer = false;
  690. break;
  691. }
  692. if (infer) {
  693. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'route'));
  694. }
  695. }
  696. else {
  697. infer = false;
  698. }
  699. if (!infer) {
  700. layer.out_h = 0;
  701. layer.out_w = 0;
  702. layer.out_c = 0;
  703. }
  704. break;
  705. }
  706. case 'sam':
  707. case 'scale_channels': {
  708. const activation = option_find_str(options, 'activation', 'linear');
  709. const from = layer.from;
  710. if (from) {
  711. layer.out_w = from.out_w;
  712. layer.out_h = from.out_h;
  713. layer.out_c = from.out_c;
  714. layer.out = layer.out_w * layer.out_h * layer.out_c;
  715. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'shortcut|scale_channels|sam'));
  716. }
  717. if (activation !== 'linear' && activation !== 'none') {
  718. section.chain.push({ type: activation });
  719. }
  720. break;
  721. }
  722. case 'shortcut': {
  723. const activation = option_find_str(options, 'activation', 'linear');
  724. layer.out_w = params.w;
  725. layer.out_h = params.h;
  726. layer.out_c = params.c;
  727. layer.out = params.w * params.h * params.c;
  728. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'shortcut|scale_channels|sam'));
  729. if (activation !== 'linear' && activation !== 'none') {
  730. section.chain.push({ type: activation });
  731. }
  732. break;
  733. }
  734. case 'detection': {
  735. layer.out_w = params.w;
  736. layer.out_h = params.h;
  737. layer.out_c = params.c;
  738. layer.out = params.inputs;
  739. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'detection'));
  740. break;
  741. }
  742. default: {
  743. infer = false;
  744. break;
  745. }
  746. }
  747. params.h = layer.out_h;
  748. params.w = layer.out_w;
  749. params.c = layer.out_c;
  750. params.inputs = layer.out;
  751. params.last = section;
  752. }
  753. params.arguments = layer.outputs;
  754. }
  755. for (let i = 0; i < sections.length; i++) {
  756. this._nodes.push(new darknet.Node(metadata, net, sections[i]));
  757. }
  758. if (weights) {
  759. weights.validate();
  760. }
  761. }
get inputs() {
    // Graph-level input parameters collected during construction.
    return this._inputs;
}
get outputs() {
    // Graph-level output parameters collected during construction.
    return this._outputs;
}
get nodes() {
    // One darknet.Node per configuration section, in file order.
    return this._nodes;
}
  771. };
  772. darknet.Parameter = class {
  773. constructor(name, visible, args) {
  774. this._name = name;
  775. this._visible = visible;
  776. this._arguments = args;
  777. }
  778. get name() {
  779. return this._name;
  780. }
  781. get visible() {
  782. return this._visible;
  783. }
  784. get arguments() {
  785. return this._arguments;
  786. }
  787. };
  788. darknet.Argument = class {
  789. constructor(name, type, initializer) {
  790. if (typeof name !== 'string') {
  791. throw new darknet.Error("Invalid argument identifier '" + JSON.stringify(name) + "'.");
  792. }
  793. this._name = name;
  794. this._type = type;
  795. this._initializer = initializer;
  796. }
  797. get name() {
  798. return this._name;
  799. }
  800. get type() {
  801. if (this._initializer) {
  802. return this._initializer.type;
  803. }
  804. return this._type;
  805. }
  806. set type(value) {
  807. if (this._type) {
  808. throw new darknet.Error('Invalid argument type set operation.');
  809. }
  810. this._type = value;
  811. }
  812. get initializer() {
  813. return this._initializer;
  814. }
  815. };
  816. darknet.Node = class {
  817. constructor(metadata, net, section) {
  818. this._name = section.name || '';
  819. this._location = section.line !== undefined ? section.line.toString() : undefined;
  820. this._metadata = metadata;
  821. this._type = section.type;
  822. this._attributes = [];
  823. this._inputs = [];
  824. this._outputs = [];
  825. this._chain = [];
  826. const layer = section.layer;
  827. if (layer && layer.inputs && layer.inputs.length > 0) {
  828. this._inputs.push(new darknet.Parameter(layer.inputs.length <= 1 ? 'input' : 'inputs', true, layer.inputs));
  829. }
  830. if (layer && layer.weights && layer.weights.length > 0) {
  831. this._inputs = this._inputs.concat(layer.weights);
  832. }
  833. if (layer && layer.outputs && layer.outputs.length > 0) {
  834. this._outputs.push(new darknet.Parameter(layer.outputs.length <= 1 ? 'output' : 'outputs', true, layer.outputs));
  835. }
  836. if (section.chain) {
  837. for (const chain of section.chain) {
  838. this._chain.push(new darknet.Node(metadata, net, chain, ''));
  839. }
  840. }
  841. const options = section.options;
  842. if (options) {
  843. for (const key of Object.keys(options)) {
  844. this._attributes.push(new darknet.Attribute(metadata.attribute(this._type, key), key, options[key]));
  845. }
  846. }
  847. }
  848. get name() {
  849. return this._name;
  850. }
  851. get location() {
  852. return this._location;
  853. }
  854. get type() {
  855. return this._type;
  856. }
  857. get metadata() {
  858. return this._metadata.type(this._type);
  859. }
  860. get attributes() {
  861. return this._attributes;
  862. }
  863. get inputs() {
  864. return this._inputs;
  865. }
  866. get outputs() {
  867. return this._outputs;
  868. }
  869. get chain() {
  870. return this._chain;
  871. }
  872. };
  873. darknet.Attribute = class {
  874. constructor(schema, name, value) {
  875. this._name = name;
  876. this._value = value;
  877. if (schema) {
  878. this._type = schema.type || '';
  879. switch (this._type) {
  880. case 'int32': {
  881. const number = parseInt(this._value, 10);
  882. if (Number.isInteger(number)) {
  883. this._value = number;
  884. }
  885. break;
  886. }
  887. case 'float32': {
  888. const number = parseFloat(this._value);
  889. if (!isNaN(number)) {
  890. this._value = number;
  891. }
  892. break;
  893. }
  894. case 'int32[]': {
  895. const numbers = this._value.split(',').map((item) => parseInt(item.trim(), 10));
  896. if (numbers.every((number) => Number.isInteger(number))) {
  897. this._value = numbers;
  898. }
  899. break;
  900. }
  901. }
  902. if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) {
  903. this._visible = false;
  904. }
  905. else if (Object.prototype.hasOwnProperty.call(schema, 'default')) {
  906. if (this._value == schema.default) {
  907. this._visible = false;
  908. }
  909. }
  910. }
  911. }
  912. get name() {
  913. return this._name;
  914. }
  915. get type() {
  916. return this._type;
  917. }
  918. get value() {
  919. return this._value;
  920. }
  921. get visible() {
  922. return this._visible == false ? false : true;
  923. }
  924. };
  925. darknet.Tensor = class {
  926. constructor(type, data) {
  927. this._type = type;
  928. this._data = data;
  929. }
  930. get kind() {
  931. return 'Tensor';
  932. }
  933. get name() {
  934. return '';
  935. }
  936. get type() {
  937. return this._type;
  938. }
  939. get state() {
  940. return this._context().state;
  941. }
  942. get value() {
  943. const context = this._context();
  944. if (context.state) {
  945. return null;
  946. }
  947. context.limit = Number.MAX_SAFE_INTEGER;
  948. return this._decode(context, 0);
  949. }
  950. toString() {
  951. const context = this._context();
  952. if (context.state) {
  953. return '';
  954. }
  955. context.limit = 10000;
  956. const value = this._decode(context, 0);
  957. return JSON.stringify(value, null, 4);
  958. }
  959. _context() {
  960. const context = {};
  961. if (!this._data) {
  962. context.state = 'Tensor data is empty.';
  963. return context;
  964. }
  965. context.state = null;
  966. context.position = 0;
  967. context.count = 0;
  968. context.dataView = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength);
  969. context.dimensions = this.type.shape.dimensions;
  970. return context;
  971. }
  972. _decode(context, dimension) {
  973. const results = [];
  974. const size = context.dimensions[dimension];
  975. if (dimension == context.dimensions.length - 1) {
  976. for (let i = 0; i < size; i++) {
  977. if (context.count > context.limit) {
  978. results.push('...');
  979. return results;
  980. }
  981. results.push(context.dataView.getFloat32(context.position, true));
  982. context.position += 4;
  983. context.count++;
  984. }
  985. }
  986. else {
  987. for (let j = 0; j < size; j++) {
  988. if (context.count > context.limit) {
  989. results.push('...');
  990. return results;
  991. }
  992. results.push(this._decode(context, dimension + 1));
  993. }
  994. }
  995. return results;
  996. }
  997. };
  998. darknet.TensorType = class {
  999. constructor(dataType, shape) {
  1000. this._dataType = dataType;
  1001. this._shape = shape;
  1002. }
  1003. get dataType() {
  1004. return this._dataType;
  1005. }
  1006. get shape() {
  1007. return this._shape;
  1008. }
  1009. toString() {
  1010. return (this._dataType || '?') + this._shape.toString();
  1011. }
  1012. };
  1013. darknet.TensorShape = class {
  1014. constructor(dimensions) {
  1015. if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) {
  1016. throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "'.");
  1017. }
  1018. this._dimensions = dimensions;
  1019. }
  1020. get dimensions() {
  1021. return this._dimensions;
  1022. }
  1023. toString() {
  1024. if (this._dimensions) {
  1025. if (this._dimensions.length == 0) {
  1026. return '';
  1027. }
  1028. return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']';
  1029. }
  1030. return '';
  1031. }
  1032. };
  1033. darknet.Weights = class {
  1034. static open(stream) {
  1035. if (stream && stream.length >= 20) {
  1036. const buffer = stream.peek(12);
  1037. const view = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
  1038. const major = view.getInt32(0, true);
  1039. const minor = view.getInt32(4, true);
  1040. view.getInt32(8, true); // revision
  1041. const transpose = (major > 1000) || (minor > 1000);
  1042. if (!transpose) {
  1043. stream.skip(12 + (((major * 10 + minor) >= 2) ? 8 : 4));
  1044. return new darknet.Weights(stream);
  1045. }
  1046. // else {
  1047. // throw new darknet.Error("Unsupported transpose weights file version '" + [ major, minor, revision ].join('.') + "'.");
  1048. // }
  1049. }
  1050. return null;
  1051. }
  1052. constructor(stream) {
  1053. this._stream = stream;
  1054. }
  1055. read(size) {
  1056. return this._stream.read(size);
  1057. }
  1058. validate() {
  1059. if (this._stream.position != this._stream.length) {
  1060. throw new darknet.Error('Invalid weights size.');
  1061. }
  1062. }
  1063. };
  1064. darknet.Metadata = class {
  1065. static open(context) {
  1066. if (darknet.Metadata._metadata) {
  1067. return Promise.resolve(darknet.Metadata._metadata);
  1068. }
  1069. return context.request('darknet-metadata.json', 'utf-8', null).then((data) => {
  1070. darknet.Metadata._metadata = new darknet.Metadata(data);
  1071. return darknet.Metadata._metadata;
  1072. }).catch(() => {
  1073. darknet.Metadata._metadata = new darknet.Metadata(null);
  1074. return darknet.Metadata._metadata;
  1075. });
  1076. }
  1077. constructor(data) {
  1078. this._map = new Map();
  1079. this._attributeMap = new Map();
  1080. if (data) {
  1081. const items = JSON.parse(data);
  1082. if (items) {
  1083. for (const item of items) {
  1084. if (item && item.name && item.schema) {
  1085. if (this._map.has(item.name)) {
  1086. throw new darknet.Error("Duplicate metadata key '" + item.name + "'.");
  1087. }
  1088. item.schema.name = item.name;
  1089. this._map.set(item.name, item.schema);
  1090. }
  1091. }
  1092. }
  1093. }
  1094. }
  1095. type(name) {
  1096. return this._map.get(name) || null;
  1097. }
  1098. attribute(type, name) {
  1099. const key = type + ':' + name;
  1100. if (!this._attributeMap.has(key)) {
  1101. this._attributeMap.set(key, null);
  1102. const schema = this.type(type);
  1103. if (schema && schema.attributes) {
  1104. for (const attribute of schema.attributes) {
  1105. this._attributeMap.set(type + ':' + attribute.name, attribute);
  1106. }
  1107. }
  1108. }
  1109. return this._attributeMap.get(key);
  1110. }
  1111. };
// Loader-specific error type; the name is shown as the error title by the host.
darknet.Error = class extends Error {
    constructor(message) {
        super(message);
        this.name = 'Error loading Darknet model.';
    }
};
// CommonJS export guard: register the factory under Node/Electron; in the
// browser the 'darknet' object is consumed directly.
if (typeof module !== 'undefined' && typeof module.exports === 'object') {
    module.exports.ModelFactory = darknet.ModelFactory;
}