// darknet.js
  1. /* jshint esversion: 6 */
  2. var darknet = darknet || {};
  3. var base = base || require('./base');
  4. darknet.ModelFactory = class {
  5. match(context) {
  6. try {
  7. const reader = base.TextReader.create(context.buffer);
  8. for (;;) {
  9. const line = reader.read();
  10. if (line === undefined) {
  11. break;
  12. }
  13. const text = line.trim();
  14. if (text.length === 0 || text.startsWith('#')) {
  15. continue;
  16. }
  17. if (text.startsWith('[') && text.endsWith(']')) {
  18. return true;
  19. }
  20. }
  21. }
  22. catch (err) {
  23. // continue regardless of error
  24. }
  25. return false;
  26. }
  27. open(context, host) {
  28. return darknet.Metadata.open(host).then((metadata) => {
  29. const identifier = context.identifier;
  30. const parts = identifier.split('.');
  31. parts.pop();
  32. const basename = parts.join('.');
  33. return context.request(basename + '.weights', null).then((weights) => {
  34. return this._openModel(metadata, identifier, context.buffer, weights);
  35. }).catch(() => {
  36. return this._openModel(metadata, identifier, context.buffer, null);
  37. });
  38. });
  39. }
  40. _openModel( metadata, identifier, cfg, weights) {
  41. return new darknet.Model(metadata, cfg, weights ? new darknet.Weights(weights) : null);
  42. }
  43. };
  44. darknet.Model = class {
  45. constructor(metadata, cfg, weights) {
  46. this._graphs = [ new darknet.Graph(metadata, cfg, weights) ];
  47. }
  48. get format() {
  49. return 'Darknet';
  50. }
  51. get graphs() {
  52. return this._graphs;
  53. }
  54. };
  55. darknet.Graph = class {
  56. constructor(metadata, cfg, weights) {
  57. this._inputs = [];
  58. this._outputs = [];
  59. this._nodes = [];
  60. // read_cfg
  61. const sections = [];
  62. let section = null;
  63. const reader = base.TextReader.create(cfg);
  64. let lineNumber = 0;
  65. for (;;) {
  66. lineNumber++;
  67. const text = reader.read();
  68. if (text === undefined) {
  69. break;
  70. }
  71. const line = text.replace(/\s/g, '');
  72. if (line.length > 0) {
  73. switch (line[0]) {
  74. case '#':
  75. case ';':
  76. break;
  77. case '[': {
  78. const type = line[line.length - 1] === ']' ? line.substring(1, line.length - 1) : line.substring(1);
  79. section = {
  80. line: lineNumber,
  81. type: type,
  82. options: {}
  83. };
  84. sections.push(section);
  85. break;
  86. }
  87. default: {
  88. if (!section || line[0] < 0x20 || line[0] > 0x7E) {
  89. throw new darknet.Error("Invalid cfg '" + text.replace(/[^\x20-\x7E]+/g, '?').trim() + "' at line " + lineNumber.toString() + ".");
  90. }
  91. const index = line.indexOf('=');
  92. if (index < 0) {
  93. throw new darknet.Error("Invalid cfg '" + text.replace(/[^\x20-\x7E]+/g, '?').trim() + "' at line " + lineNumber.toString() + ".");
  94. }
  95. const key = line.substring(0, index);
  96. const value = line.substring(index + 1);
  97. section.options[key] = value;
  98. break;
  99. }
  100. }
  101. }
  102. }
  103. const option_find_int = (options, key, defaultValue) => {
  104. let value = options[key];
  105. if (typeof value === 'string' && value.startsWith('$')) {
  106. const key = value.substring(1);
  107. value = globals.has(key) ? globals.get(key) : value;
  108. }
  109. if (value !== undefined) {
  110. const number = parseInt(value, 10);
  111. if (!Number.isInteger(number)) {
  112. throw new darknet.Error("Invalid int option '" + JSON.stringify(options[key]) + "'.");
  113. }
  114. return number;
  115. }
  116. return defaultValue;
  117. };
  118. const option_find_str = (options, key, defaultValue) => {
  119. const value = options[key];
  120. return value !== undefined ? value : defaultValue;
  121. };
  122. const make_shape = (dimensions, source) => {
  123. if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) {
  124. throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "' in '" + source + "'.");
  125. }
  126. return new darknet.TensorShape(dimensions);
  127. };
  128. const load_weights = (name, shape, visible) => {
  129. const data = weights ? weights.bytes(4 * shape.reduce((a, b) => a * b)) : null;
  130. const type = new darknet.TensorType('float32', make_shape(shape, 'load_weights'));
  131. const initializer = new darknet.Tensor(type, data);
  132. const argument = new darknet.Argument('', null, initializer);
  133. return new darknet.Parameter(name, visible === false ? false : true, [ argument ]);
  134. };
  135. const load_batch_normalize_weights = (layer, prefix, size) => {
  136. layer.weights.push(load_weights(prefix + 'scale', [ size ], prefix === ''));
  137. layer.weights.push(load_weights(prefix + 'mean', [ size ], prefix === ''));
  138. layer.weights.push(load_weights(prefix + 'variance', [ size ], prefix === ''));
  139. };
  140. const make_convolutional_layer = (layer, prefix, w, h, c, n, groups, size, stride_x, stride_y, padding, batch_normalize) => {
  141. layer.out_w = Math.floor((w + 2 * padding - size) / stride_x) + 1;
  142. layer.out_h = Math.floor((h + 2 * padding - size) / stride_y) + 1;
  143. layer.out_c = n;
  144. layer.out = layer.out_w * layer.out_h * layer.out_c;
  145. layer.weights.push(load_weights(prefix + 'biases', [ n ], prefix === ''));
  146. if (batch_normalize) {
  147. load_batch_normalize_weights(layer, prefix, n);
  148. }
  149. layer.weights.push(load_weights(prefix + 'weights', [ Math.floor(c / groups), n, size, size ], prefix === ''));
  150. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'make_convolutional_layer'));
  151. };
  152. const make_connected_layer = (layer, prefix, inputs, outputs, batch_normalize) => {
  153. layer.out_h = 1;
  154. layer.out_w = 1;
  155. layer.out_c = outputs;
  156. layer.out = outputs;
  157. layer.weights.push(load_weights(prefix + 'biases', [ outputs ], prefix === ''));
  158. if (batch_normalize) {
  159. load_batch_normalize_weights(layer, prefix, outputs);
  160. }
  161. layer.weights.push(load_weights(prefix + 'weights', [ inputs, outputs ], prefix === ''));
  162. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'make_connected_layer'));
  163. };
  164. if (sections.length === 0) {
  165. throw new darknet.Error('Config file has no sections.');
  166. }
  167. const params = {};
  168. const globals = new Map();
  169. const net = sections.shift();
  170. switch (net.type) {
  171. case 'net':
  172. case 'network': {
  173. params.h = option_find_int(net.options, 'height', 0);
  174. params.w = option_find_int(net.options, 'width', 0);
  175. params.c = option_find_int(net.options, 'channels', 0);
  176. params.inputs = option_find_int(net.options, 'inputs', params.h * params.w * params.c);
  177. for (const key of Object.keys(net.options)) {
  178. globals.set(key, net.options[key]);
  179. }
  180. break;
  181. }
  182. default: {
  183. throw new darknet.Error("First section must be [net] or [network].");
  184. }
  185. }
  186. const inputType = params.w && params.h && params.c ?
  187. new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'params-if')) :
  188. new darknet.TensorType('float32', make_shape([ params.inputs ], 'params-else'));
  189. const inputName = 'input';
  190. params.arguments = [ new darknet.Argument(inputName, inputType, null) ];
  191. this._inputs.push(new darknet.Parameter(inputName, true, params.arguments));
  192. for (let i = 0; i < sections.length; i++) {
  193. const section = sections[i];
  194. section.name = i.toString();
  195. section.chain = [];
  196. section.layer = {
  197. inputs: [],
  198. weights: [],
  199. outputs: [ new darknet.Argument(section.name, null, null) ]
  200. };
  201. }
  202. let infer = true;
  203. for (let i = 0; i < sections.length; i++) {
  204. const section = sections[i];
  205. const options = section.options;
  206. const layer = section.layer;
  207. layer.inputs.push(...params.arguments);
  208. switch (section.type) {
  209. case 'shortcut': {
  210. let remove = true;
  211. const from = options.from ? options.from.split(',').map((item) => Number.parseInt(item.trim(), 10)) : [];
  212. for (const route of from) {
  213. const index = route < 0 ? i + route : route;
  214. const exists = index >= 0 && index < sections.length;
  215. remove = exists && remove;
  216. if (exists) {
  217. const source = sections[index].layer;
  218. layer.inputs.push(source.outputs[0]);
  219. }
  220. }
  221. if (remove) {
  222. delete options.from;
  223. }
  224. break;
  225. }
  226. case 'sam':
  227. case 'scale_channels': {
  228. const from = option_find_int(options, 'from', 0);
  229. const index = from < 0 ? i + from : from;
  230. if (index >= 0 && index < sections.length) {
  231. const source = sections[index].layer;
  232. layer.from = source;
  233. layer.inputs.push(source.outputs[0]);
  234. delete options.from;
  235. }
  236. break;
  237. }
  238. case 'route': {
  239. layer.inputs = [];
  240. layer.layers = [];
  241. let remove = true;
  242. const routes = options.layers ? options.layers.split(',').map((route) => Number.parseInt(route.trim(), 10)) : [];
  243. for (const route of routes) {
  244. const index = route < 0 ? i + route : route;
  245. const exists = index >= 0 && index < sections.length;
  246. remove = exists && remove;
  247. if (exists) {
  248. const source = sections[index].layer;
  249. layer.inputs.push(source.outputs[0]);
  250. layer.layers.push(source);
  251. }
  252. }
  253. if (remove) {
  254. delete options.layers;
  255. }
  256. break;
  257. }
  258. }
  259. if (infer) {
  260. switch (section.type) {
  261. case 'conv':
  262. case 'convolutional':
  263. case 'deconvolutional': {
  264. const shape = layer.inputs[0].type.shape.dimensions;
  265. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  266. throw new darknet.Error('Layer before convolutional layer must output image.');
  267. }
  268. const size = option_find_int(options, 'size', 1);
  269. const n = option_find_int(options, 'filters', 1);
  270. const pad = option_find_int(options, 'pad', 0);
  271. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  272. let stride_x = option_find_int(options, 'stride_x', -1);
  273. let stride_y = option_find_int(options, 'stride_y', -1);
  274. if (stride_x < 1 || stride_y < 1) {
  275. const stride = option_find_int(options, 'stride', 1);
  276. stride_x = stride_x < 1 ? stride : stride_x;
  277. stride_y = stride_y < 1 ? stride : stride_y;
  278. }
  279. const groups = option_find_int(options, 'groups', 1);
  280. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  281. const activation = option_find_str(options, 'activation', 'logistic');
  282. make_convolutional_layer(layer, '', params.w, params.h, params.c, n, groups, size, stride_x, stride_y, padding, batch_normalize);
  283. if (activation !== 'logistic' && activation !== 'none') {
  284. section.chain.push({ type: activation });
  285. }
  286. break;
  287. }
  288. case 'connected': {
  289. const outputs = option_find_int(options, 'output', 1);
  290. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  291. const activation = option_find_str(options, 'activation', 'logistic');
  292. make_connected_layer(layer, '', params.inputs, outputs, batch_normalize);
  293. if (activation !== 'logistic' && activation !== 'none') {
  294. section.chain.push({ type: activation });
  295. }
  296. break;
  297. }
  298. case 'local': {
  299. const shape = layer.inputs[0].type.shape.dimensions;
  300. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  301. throw new darknet.Error('Layer before avgpool layer must output image.');
  302. }
  303. const n = option_find_int(options, 'filters' , 1);
  304. const size = option_find_int(options, 'size', 1);
  305. const stride = option_find_int(options, 'stride', 1);
  306. const pad = option_find_int(options, 'pad', 0);
  307. const activation = option_find_str(options, 'activation', 'logistic');
  308. layer.out_h = Math.floor((params.h - (pad ? 1 : size)) / stride) + 1;
  309. layer.out_w = Math.floor((params.w - (pad ? 1 : size)) / stride) + 1;
  310. layer.out_c = n;
  311. layer.out = layer.out_w * layer.out_h * layer.out_c;
  312. layer.weights.push(load_weights('weights', [ params.c, n, size, size, layer.out_h * layer.out_w ]));
  313. layer.weights.push(load_weights('biases',[ layer.out_w * layer.out_h * layer.out_c ]));
  314. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'local'));
  315. if (activation !== 'logistic' && activation !== 'none') {
  316. section.chain.push({ type: activation });
  317. }
  318. break;
  319. }
  320. case 'batchnorm': {
  321. layer.out_h = params.h;
  322. layer.out_w = params.w;
  323. layer.out_c = params.c;
  324. layer.out = layer.in;
  325. load_batch_normalize_weights(weights, section, '', layer.out);
  326. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.ouputs ], 'batchnorm'));
  327. break;
  328. }
  329. case 'activation': {
  330. layer.out_h = params.h;
  331. layer.out_w = params.w;
  332. layer.out_c = params.c;
  333. layer.out = layer.in;
  334. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.ouputs ], 'activation'));
  335. break;
  336. }
  337. case 'max':
  338. case 'maxpool': {
  339. const shape = layer.inputs[0].type.shape.dimensions;
  340. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  341. throw new darknet.Error('Layer before maxpool layer must output image.');
  342. }
  343. const antialiasing = option_find_int(options, 'antialiasing', 0);
  344. const stride = option_find_int(options, 'stride', 1);
  345. const blur_stride_x = option_find_int(options, 'stride_x', stride);
  346. const blur_stride_y = option_find_int(options, 'stride_y', stride);
  347. const stride_x = antialiasing ? 1 : blur_stride_x;
  348. const stride_y = antialiasing ? 1 : blur_stride_y;
  349. const size = option_find_int(options, 'size', stride);
  350. const padding = option_find_int(options, 'padding', size - 1);
  351. const out_channels = option_find_int(options, 'out_channels', 1);
  352. const maxpool_depth = option_find_int(options, 'maxpool_depth', 0);
  353. if (maxpool_depth) {
  354. layer.out_c = out_channels;
  355. layer.out_w = params.w;
  356. layer.out_h = params.h;
  357. }
  358. else {
  359. layer.out_w = Math.floor((params.w + padding - size) / stride_x) + 1;
  360. layer.out_h = Math.floor((params.h + padding - size) / stride_y) + 1;
  361. layer.out_c = params.c;
  362. }
  363. if (antialiasing) {
  364. const blur_size = antialiasing === 2 ? 2 : 3;
  365. const blur_pad = antialiasing === 2 ? 0 : Math.floor(blur_size / 3);
  366. layer.input_layer = { weights: [], outputs: layer.outputs };
  367. make_convolutional_layer(layer.input_layer, '', layer.out_h, layer.out_w, layer.out_c, layer.out_c, layer.out_c, blur_size, blur_stride_x, blur_stride_y, blur_pad, 0);
  368. layer.out_w = layer.input_layer.out_w;
  369. layer.out_h = layer.input_layer.out_h;
  370. layer.out_c = layer.input_layer.out_c;
  371. }
  372. else {
  373. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'maxpool'));
  374. }
  375. layer.out = layer.out_w * layer.out_h * layer.out_c;
  376. break;
  377. }
  378. case 'avgpool': {
  379. const shape = layer.inputs[0].type.shape.dimensions;
  380. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  381. throw new darknet.Error('Layer before avgpool layer must output image.');
  382. }
  383. layer.out_w = 1;
  384. layer.out_h = 1;
  385. layer.out_c = params.c;
  386. layer.out = layer.out_c;
  387. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'avgpool'));
  388. break;
  389. }
  390. case 'crnn': {
  391. const size = option_find_int(options, 'size', 3);
  392. const stride = option_find_int(options, 'stride', 1);
  393. const output_filters = option_find_int(options, 'output', 1);
  394. const hidden_filters = option_find_int(options, 'hidden', 1);
  395. const groups = option_find_int(options, 'groups', 1);
  396. const pad = option_find_int(options, 'pad', 0);
  397. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  398. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  399. layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  400. make_convolutional_layer(layer.input_layer, 'input_', params.h, params.w, params.c, hidden_filters, groups, size, stride, stride, padding, batch_normalize);
  401. layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  402. make_convolutional_layer(layer.self_layer, 'self_', params.h, params.w, hidden_filters, hidden_filters, groups, size, stride, stride, padding, batch_normalize);
  403. layer.output_layer = { weights: [], outputs: layer.outputs };
  404. make_convolutional_layer(layer.output_layer, 'output_', params.h, params.w, hidden_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  405. layer.weights = layer.weights.concat(layer.input_layer.weights);
  406. layer.weights = layer.weights.concat(layer.self_layer.weights);
  407. layer.weights = layer.weights.concat(layer.output_layer.weights);
  408. layer.out_h = layer.output_layer.out_h;
  409. layer.out_w = layer.output_layer.out_w;
  410. layer.out_c = output_filters;
  411. layer.out = layer.output_layer.out;
  412. break;
  413. }
  414. case 'rnn': {
  415. const outputs = option_find_int(options, 'output', 1);
  416. const hidden = option_find_int(options, 'hidden', 1);
  417. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  418. const inputs = params.inputs;
  419. layer.input_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  420. make_connected_layer(layer.input_layer, 'input_', inputs, hidden, batch_normalize);
  421. layer.self_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  422. make_connected_layer(layer.self_layer, 'self_', hidden, hidden, batch_normalize);
  423. layer.output_layer = { weights: [], outputs: layer.outputs };
  424. make_connected_layer(layer.output_layer, 'output_', hidden, outputs, batch_normalize);
  425. layer.weights = layer.weights.concat(layer.input_layer.weights);
  426. layer.weights = layer.weights.concat(layer.self_layer.weights);
  427. layer.weights = layer.weights.concat(layer.output_layer.weights);
  428. layer.out_w = 1;
  429. layer.out_h = 1;
  430. layer.out_c = outputs;
  431. layer.out = outputs;
  432. break;
  433. }
  434. case 'gru': {
  435. const inputs = params.inputs;
  436. const outputs = option_find_int(options, 'output', 1);
  437. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  438. layer.input_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  439. make_connected_layer(layer.input_z_layer, 'input_z', inputs, outputs, batch_normalize);
  440. layer.state_z_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  441. make_connected_layer(layer.state_z_layer, 'state_z', outputs, outputs, batch_normalize);
  442. layer.input_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  443. make_connected_layer(layer.input_r_layer, 'input_r', inputs, outputs, batch_normalize);
  444. layer.state_r_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  445. make_connected_layer(layer.state_r_layer, 'state_r', outputs, outputs, batch_normalize);
  446. layer.input_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  447. make_connected_layer(layer.input_h_layer, 'input_h', inputs, outputs, batch_normalize);
  448. layer.state_h_layer = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  449. make_connected_layer(layer.state_h_layer, 'state_h', outputs, outputs, batch_normalize);
  450. layer.weights = layer.weights.concat(layer.input_z_layer.weights);
  451. layer.weights = layer.weights.concat(layer.state_z_layer.weights);
  452. layer.weights = layer.weights.concat(layer.input_r_layer.weights);
  453. layer.weights = layer.weights.concat(layer.state_r_layer.weights);
  454. layer.weights = layer.weights.concat(layer.input_h_layer.weights);
  455. layer.weights = layer.weights.concat(layer.state_h_layer.weights);
  456. layer.out = outputs;
  457. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'gru'));
  458. break;
  459. }
  460. case 'lstm': {
  461. const inputs = params.inputs;
  462. const outputs = option_find_int(options, 'output', 1);
  463. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  464. layer.uf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  465. make_connected_layer(layer.uf, 'uf_', inputs, outputs, batch_normalize);
  466. layer.ui = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  467. make_connected_layer(layer.ui, 'ui_', inputs, outputs, batch_normalize);
  468. layer.ug = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  469. make_connected_layer(layer.ug, 'ug_', inputs, outputs, batch_normalize);
  470. layer.uo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  471. make_connected_layer(layer.uo, 'uo_', inputs, outputs, batch_normalize);
  472. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  473. make_connected_layer(layer.wf, 'wf_', outputs, outputs, batch_normalize);
  474. layer.wi = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  475. make_connected_layer(layer.wi, 'wi_', outputs, outputs, batch_normalize);
  476. layer.wg = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  477. make_connected_layer(layer.wg, 'wg_', outputs, outputs, batch_normalize);
  478. layer.wo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  479. make_connected_layer(layer.wo, 'wo_', outputs, outputs, batch_normalize);
  480. layer.weights = layer.weights.concat(layer.uf.weights);
  481. layer.weights = layer.weights.concat(layer.ui.weights);
  482. layer.weights = layer.weights.concat(layer.ug.weights);
  483. layer.weights = layer.weights.concat(layer.uo.weights);
  484. layer.weights = layer.weights.concat(layer.wf.weights);
  485. layer.weights = layer.weights.concat(layer.wi.weights);
  486. layer.weights = layer.weights.concat(layer.wg.weights);
  487. layer.weights = layer.weights.concat(layer.wo.weights);
  488. layer.out_w = 1;
  489. layer.out_h = 1;
  490. layer.out_c = outputs;
  491. layer.out = outputs;
  492. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ outputs ], 'lstm'));
  493. weights = null;
  494. break;
  495. }
  496. case 'conv_lstm': {
  497. const size = option_find_int(options, "size", 3);
  498. const stride = option_find_int(options, "stride", 1);
  499. const output_filters = option_find_int(options, "output", 1);
  500. const groups = option_find_int(options, "groups", 1);
  501. const pad = option_find_int(options, "pad", 0);
  502. const padding = pad ? (size >> 1) : option_find_int(options, 'padding', 0);
  503. const batch_normalize = option_find_int(options, 'batch_normalize', 0);
  504. const bottleneck = option_find_int(options, "bottleneck", 0);
  505. const peephole = option_find_int(options, "peephole", 0);
  506. layer.uf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  507. make_convolutional_layer(layer.uf, 'uf_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  508. layer.ui = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  509. make_convolutional_layer(layer.ui, 'ui_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  510. layer.ug = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  511. make_convolutional_layer(layer.ug, 'ug_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  512. layer.uo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  513. make_convolutional_layer(layer.uo, 'uo_', params.h, params.w, params.c, output_filters, groups, size, stride, stride, padding, batch_normalize);
  514. layer.weights = layer.weights.concat(layer.uf.weights);
  515. layer.weights = layer.weights.concat(layer.ui.weights);
  516. layer.weights = layer.weights.concat(layer.ug.weights);
  517. layer.weights = layer.weights.concat(layer.uo.weights);
  518. if (bottleneck) {
  519. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  520. make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters * 2, output_filters, groups, size, stride, stride, padding, batch_normalize);
  521. layer.weights = layer.weights.concat(layer.wf.weights);
  522. }
  523. else {
  524. layer.wf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  525. make_convolutional_layer(layer.wf, 'wf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  526. layer.wi = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  527. make_convolutional_layer(layer.wi, 'wi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  528. layer.wg = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  529. make_convolutional_layer(layer.wg, 'wg_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  530. layer.wo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  531. make_convolutional_layer(layer.wo, 'wo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  532. layer.weights = layer.weights.concat(layer.wf.weights);
  533. layer.weights = layer.weights.concat(layer.wi.weights);
  534. layer.weights = layer.weights.concat(layer.wg.weights);
  535. layer.weights = layer.weights.concat(layer.wo.weights);
  536. }
  537. if (peephole) {
  538. layer.vf = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  539. make_convolutional_layer(layer.vf, 'vf_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  540. layer.vi = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  541. make_convolutional_layer(layer.vi, 'vi_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  542. layer.vo = { weights: [], outputs: [ new darknet.Argument('', null, null) ] };
  543. make_convolutional_layer(layer.wo, 'vo_', params.h, params.w, output_filters, output_filters, groups, size, stride, stride, padding, batch_normalize);
  544. layer.weights = layer.weights.concat(layer.vf.weights);
  545. layer.weights = layer.weights.concat(layer.vi.weights);
  546. layer.weights = layer.weights.concat(layer.vo.weights);
  547. }
  548. layer.out_h = layer.uo.out_h;
  549. layer.out_w = layer.uo.out_w;
  550. layer.out_c = output_filters;
  551. layer.out = layer.out_h * layer.out_w * layer.out_c;
  552. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'conv_lstm'));
  553. break;
  554. }
  555. case 'softmax': {
  556. layer.out_w = params.w;
  557. layer.out_h = params.h;
  558. layer.out_c = params.c;
  559. layer.out = params.inputs;
  560. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'softmax'));
  561. break;
  562. }
  563. case 'dropout': {
  564. layer.out_w = params.w;
  565. layer.out_h = params.h;
  566. layer.out_c = params.c;
  567. layer.out = params.inputs;
  568. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'dropout'));
  569. break;
  570. }
  571. case 'upsample': {
  572. const stride = option_find_int(options, 'stride', 2);
  573. layer.out_w = params.w * stride;
  574. layer.out_h = params.h * stride;
  575. layer.out_c = params.c;
  576. layer.out = layer.out_w * layer.out_h * layer.out_c;
  577. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'upsample'));
  578. break;
  579. }
  580. case 'crop': {
  581. const shape = layer.inputs[0].type.shape.dimensions;
  582. if (shape[0] !== params.w || shape[1] !== params.h || shape[2] !== params.c) {
  583. throw new darknet.Error('Layer before crop layer must output image.');
  584. }
  585. const crop_height = option_find_int(options, 'crop_height', 1);
  586. const crop_width = option_find_int(options, 'crop_width', 1);
  587. layer.out_w = crop_width;
  588. layer.out_h = crop_height;
  589. layer.out_c = params.c;
  590. layer.out = layer.out_w * layer.out_h * layer.out_c;
  591. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'crop'));
  592. break;
  593. }
  594. case 'yolo': {
  595. const classes = option_find_int(options, 'classes', 20);
  596. const n = option_find_int(options, 'num', 1);
  597. layer.out_h = params.h;
  598. layer.out_w = params.w;
  599. layer.out_c = n * (classes + 4 + 1);
  600. layer.out = layer.out_h * layer.out_w * layer.out_c;
  601. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'yolo'));
  602. break;
  603. }
  604. case 'Gaussian_yolo': {
  605. const classes = option_find_int(options, 'classes', 20);
  606. const n = option_find_int(options, 'num', 1);
  607. layer.out_h = params.h;
  608. layer.out_w = params.w;
  609. layer.out_c = n * (classes + 8 + 1);
  610. layer.out = layer.out_h * layer.out_w * layer.out_c;
  611. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'Gaussian_yolo'));
  612. break;
  613. }
  614. case 'region': {
  615. const coords = option_find_int(options, 'coords', 4);
  616. const classes = option_find_int(options, 'classes', 20);
  617. const num = option_find_int(options, 'num', 1);
  618. layer.out = params.h * params.w * num * (classes + coords + 1);
  619. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.h, params.w, num, (classes + coords + 1) ], 'region'));
  620. break;
  621. }
  622. case 'cost': {
  623. layer.out = params.inputs;
  624. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'cost'));
  625. break;
  626. }
  627. case 'reorg': {
  628. const stride = option_find_int(options, 'stride', 1);
  629. const reverse = option_find_int(options, 'reverse', 0);
  630. const extra = option_find_int(options, 'extra', 0);
  631. if (reverse) {
  632. layer.out_w = params.w * stride;
  633. layer.out_h = params.h * stride;
  634. layer.out_c = Math.floor(params.c / (stride * stride));
  635. }
  636. else {
  637. layer.out_w = Math.floor(params.w / stride);
  638. layer.out_h = Math.floor(params.h / stride);
  639. layer.out_c = params.c * (stride * stride);
  640. }
  641. layer.out = layer.out_h * layer.out_w * layer.out_c;
  642. if (extra) {
  643. layer.out_w = 0;
  644. layer.out_h = 0;
  645. layer.out_c = 0;
  646. layer.out = (params.h * params.w * params.c) + extra;
  647. }
  648. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'reorg'));
  649. break;
  650. }
  651. case 'route': {
  652. const layers = [].concat(layer.layers);
  653. const groups = option_find_int(options, 'groups', 1);
  654. layer.out = 0;
  655. for (const next of layers) {
  656. layer.out += next.outputs / groups;
  657. }
  658. if (layers.length > 0) {
  659. const first = layers.shift();
  660. layer.out_w = first.out_w;
  661. layer.out_h = first.out_h;
  662. layer.out_c = first.out_c / groups;
  663. while (layers.length > 0) {
  664. const next = layers.shift();
  665. if (next.out_w === first.out_w && next.out_h === first.out_h) {
  666. layer.out_c += next.out_c;
  667. continue;
  668. }
  669. infer = false;
  670. break;
  671. }
  672. if (infer) {
  673. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'route'));
  674. }
  675. }
  676. else {
  677. infer = false;
  678. }
  679. if (!infer) {
  680. layer.out_h = 0;
  681. layer.out_w = 0;
  682. layer.out_c = 0;
  683. }
  684. break;
  685. }
  686. case 'sam':
  687. case 'scale_channels': {
  688. const activation = option_find_str(options, 'activation', 'linear');
  689. const from = layer.from;
  690. if (from) {
  691. layer.out_w = from.out_w;
  692. layer.out_h = from.out_h;
  693. layer.out_c = from.out_c;
  694. layer.out = layer.out_w * layer.out_h * layer.out_c;
  695. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out_w, layer.out_h, layer.out_c ], 'shortcut|scale_channels|sam'));
  696. }
  697. if (activation !== 'linear' && activation !== 'none') {
  698. section.chain.push({ type: activation });
  699. }
  700. break;
  701. }
  702. case 'shortcut': {
  703. const activation = option_find_str(options, 'activation', 'linear');
  704. layer.out_w = params.w;
  705. layer.out_h = params.h;
  706. layer.out_c = params.c;
  707. layer.out = params.w * params.h * params.c;
  708. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ params.w, params.h, params.c ], 'shortcut|scale_channels|sam'));
  709. if (activation !== 'linear' && activation !== 'none') {
  710. section.chain.push({ type: activation });
  711. }
  712. break;
  713. }
  714. case 'detection': {
  715. layer.out_w = params.w;
  716. layer.out_h = params.h;
  717. layer.out_c = params.c;
  718. layer.out = params.inputs;
  719. layer.outputs[0].type = new darknet.TensorType('float32', make_shape([ layer.out ], 'detection'));
  720. break;
  721. }
  722. default: {
  723. infer = false;
  724. break;
  725. }
  726. }
  727. params.h = layer.out_h;
  728. params.w = layer.out_w;
  729. params.c = layer.out_c;
  730. params.inputs = layer.out;
  731. params.last = section;
  732. }
  733. params.arguments = layer.outputs;
  734. }
  735. for (let i = 0; i < sections.length; i++) {
  736. this._nodes.push(new darknet.Node(metadata, net, sections[i]));
  737. }
  738. if (weights) {
  739. weights.validate();
  740. }
  741. }
// Graph-level input parameters collected while parsing the [net] section.
get inputs() {
    return this._inputs;
}
// Graph-level output parameters (arguments left unconsumed by later layers).
get outputs() {
    return this._outputs;
}
// One darknet.Node per configuration section, in file order.
get nodes() {
    return this._nodes;
}
  751. };
  752. darknet.Parameter = class {
  753. constructor(name, visible, args) {
  754. this._name = name;
  755. this._visible = visible;
  756. this._arguments = args;
  757. }
  758. get name() {
  759. return this._name;
  760. }
  761. get visible() {
  762. return this._visible;
  763. }
  764. get arguments() {
  765. return this._arguments;
  766. }
  767. };
  768. darknet.Argument = class {
  769. constructor(name, type, initializer) {
  770. if (typeof name !== 'string') {
  771. throw new darknet.Error("Invalid argument identifier '" + JSON.stringify(name) + "'.");
  772. }
  773. this._name = name;
  774. this._type = type;
  775. this._initializer = initializer;
  776. }
  777. get name() {
  778. return this._name;
  779. }
  780. get type() {
  781. if (this._initializer) {
  782. return this._initializer.type;
  783. }
  784. return this._type;
  785. }
  786. set type(value) {
  787. if (this._type) {
  788. throw new darknet.Error('Invalid argument type set operation.');
  789. }
  790. this._type = value;
  791. }
  792. get initializer() {
  793. return this._initializer;
  794. }
  795. };
  796. darknet.Node = class {
  797. constructor(metadata, net, section) {
  798. this._name = section.name || '';
  799. this._location = section.line !== undefined ? section.line.toString() : undefined;
  800. this._metadata = metadata;
  801. this._type = section.type;
  802. this._attributes = [];
  803. this._inputs = [];
  804. this._outputs = [];
  805. this._chain = [];
  806. const layer = section.layer;
  807. if (layer && layer.inputs && layer.inputs.length > 0) {
  808. this._inputs.push(new darknet.Parameter(layer.inputs.length <= 1 ? 'input' : 'inputs', true, layer.inputs));
  809. }
  810. if (layer && layer.weights && layer.weights.length > 0) {
  811. this._inputs = this._inputs.concat(layer.weights);
  812. }
  813. if (layer && layer.outputs && layer.outputs.length > 0) {
  814. this._outputs.push(new darknet.Parameter(layer.outputs.length <= 1 ? 'output' : 'outputs', true, layer.outputs));
  815. }
  816. if (section.chain) {
  817. for (const chain of section.chain) {
  818. this._chain.push(new darknet.Node(metadata, net, chain, ''));
  819. }
  820. }
  821. const options = section.options;
  822. if (options) {
  823. for (const key of Object.keys(options)) {
  824. this._attributes.push(new darknet.Attribute(metadata.attribute(this._type, key), key, options[key]));
  825. }
  826. }
  827. }
  828. get name() {
  829. return this._name;
  830. }
  831. get location() {
  832. return this._location;
  833. }
  834. get type() {
  835. return this._type;
  836. }
  837. get metadata() {
  838. return this._metadata.type(this._type);
  839. }
  840. get attributes() {
  841. return this._attributes;
  842. }
  843. get inputs() {
  844. return this._inputs;
  845. }
  846. get outputs() {
  847. return this._outputs;
  848. }
  849. get chain() {
  850. return this._chain;
  851. }
  852. };
  853. darknet.Attribute = class {
  854. constructor(schema, name, value) {
  855. this._name = name;
  856. this._value = value;
  857. if (schema) {
  858. this._type = schema.type || '';
  859. switch (this._type) {
  860. case 'int32': {
  861. const number = parseInt(this._value, 10);
  862. if (Number.isInteger(number)) {
  863. this._value = number;
  864. }
  865. break;
  866. }
  867. case 'float32': {
  868. const number = parseFloat(this._value);
  869. if (!isNaN(number)) {
  870. this._value = number;
  871. }
  872. break;
  873. }
  874. case 'int32[]': {
  875. const numbers = this._value.split(',').map((item) => parseInt(item.trim(), 10));
  876. if (numbers.every((number) => Number.isInteger(number))) {
  877. this._value = numbers;
  878. }
  879. break;
  880. }
  881. }
  882. if (Object.prototype.hasOwnProperty.call(schema, 'visible') && !schema.visible) {
  883. this._visible = false;
  884. }
  885. else if (Object.prototype.hasOwnProperty.call(schema, 'default')) {
  886. if (this._value == schema.default) {
  887. this._visible = false;
  888. }
  889. }
  890. }
  891. }
  892. get name() {
  893. return this._name;
  894. }
  895. get type() {
  896. return this._type;
  897. }
  898. get value() {
  899. return this._value;
  900. }
  901. get visible() {
  902. return this._visible == false ? false : true;
  903. }
  904. };
  905. darknet.Tensor = class {
  906. constructor(type, data) {
  907. this._type = type;
  908. this._data = data;
  909. }
  910. get kind() {
  911. return 'Tensor';
  912. }
  913. get name() {
  914. return '';
  915. }
  916. get type() {
  917. return this._type;
  918. }
  919. get state() {
  920. return this._context().state;
  921. }
  922. get value() {
  923. const context = this._context();
  924. if (context.state) {
  925. return null;
  926. }
  927. context.limit = Number.MAX_SAFE_INTEGER;
  928. return this._decode(context, 0);
  929. }
  930. toString() {
  931. const context = this._context();
  932. if (context.state) {
  933. return '';
  934. }
  935. context.limit = 10000;
  936. const value = this._decode(context, 0);
  937. return JSON.stringify(value, null, 4);
  938. }
  939. _context() {
  940. const context = {};
  941. if (!this._data) {
  942. context.state = 'Tensor data is empty.';
  943. return context;
  944. }
  945. context.state = null;
  946. context.position = 0;
  947. context.count = 0;
  948. context.dataView = new DataView(this._data.buffer, this._data.byteOffset, this._data.byteLength);
  949. context.dimensions = this.type.shape.dimensions;
  950. return context;
  951. }
  952. _decode(context, dimension) {
  953. const results = [];
  954. const size = context.dimensions[dimension];
  955. if (dimension == context.dimensions.length - 1) {
  956. for (let i = 0; i < size; i++) {
  957. if (context.count > context.limit) {
  958. results.push('...');
  959. return results;
  960. }
  961. results.push(context.dataView.getFloat32(context.position, true));
  962. context.position += 4;
  963. context.count++;
  964. }
  965. }
  966. else {
  967. for (let j = 0; j < size; j++) {
  968. if (context.count > context.limit) {
  969. results.push('...');
  970. return results;
  971. }
  972. results.push(this._decode(context, dimension + 1));
  973. }
  974. }
  975. return results;
  976. }
  977. };
  978. darknet.TensorType = class {
  979. constructor(dataType, shape) {
  980. this._dataType = dataType;
  981. this._shape = shape;
  982. }
  983. get dataType() {
  984. return this._dataType;
  985. }
  986. get shape() {
  987. return this._shape;
  988. }
  989. toString() {
  990. return (this._dataType || '?') + this._shape.toString();
  991. }
  992. };
  993. darknet.TensorShape = class {
  994. constructor(dimensions) {
  995. if (dimensions.some((dimension) => dimension === 0 || dimension === undefined || isNaN(dimension))) {
  996. throw new darknet.Error("Invalid tensor shape '" + JSON.stringify(dimensions) + "'.");
  997. }
  998. this._dimensions = dimensions;
  999. }
  1000. get dimensions() {
  1001. return this._dimensions;
  1002. }
  1003. toString() {
  1004. if (this._dimensions) {
  1005. if (this._dimensions.length == 0) {
  1006. return '';
  1007. }
  1008. return '[' + this._dimensions.map((dimension) => dimension.toString()).join(',') + ']';
  1009. }
  1010. return '';
  1011. }
  1012. };
  1013. darknet.Weights = class {
  1014. constructor(buffer) {
  1015. this._buffer = buffer;
  1016. this._dataView = new DataView(buffer.buffer, buffer.byteOffset, buffer.byteLength);
  1017. this._position = 0;
  1018. const major = this.int32();
  1019. const minor = this.int32();
  1020. const revision = this.int32();
  1021. this._seen = ((major * 10 + minor) >= 2) ? this.int64() : this.int32();
  1022. const transpose = (major > 1000) || (minor > 1000);
  1023. if (transpose) {
  1024. throw new darknet.Error("Unsupported transpose weights file version '" + [ major, minor, revision ].join('.') + "'.");
  1025. }
  1026. }
  1027. int32() {
  1028. const position = this._position;
  1029. this.skip(4);
  1030. return this._dataView.getInt32(position, true);
  1031. }
  1032. int64() {
  1033. const position = this._position;
  1034. this.skip(8);
  1035. return this._dataView.getInt64(position, true);
  1036. }
  1037. bytes(length) {
  1038. const position = this._position;
  1039. this.skip(length);
  1040. return this._buffer.subarray(position, this._position);
  1041. }
  1042. skip(offset) {
  1043. this._position += offset;
  1044. if (this._position > this._buffer.length) {
  1045. throw new darknet.Error('Expected ' + (this._position - this._buffer.length) + ' more bytes. The file might be corrupted. Unexpected end of file.');
  1046. }
  1047. }
  1048. validate() {
  1049. if (this._position !== this._buffer.length) {
  1050. throw new darknet.Error('Invalid weights size.');
  1051. }
  1052. }
  1053. };
  1054. darknet.Metadata = class {
  1055. static open(host) {
  1056. if (darknet.Metadata._metadata) {
  1057. return Promise.resolve(darknet.Metadata._metadata);
  1058. }
  1059. return host.request(null, 'darknet-metadata.json', 'utf-8').then((data) => {
  1060. darknet.Metadata._metadata = new darknet.Metadata(data);
  1061. return darknet.Metadata._metadata;
  1062. }).catch(() => {
  1063. darknet.Metadata._metadata = new darknet.Metadata(null);
  1064. return darknet.Metadata._metadata;
  1065. });
  1066. }
  1067. constructor(data) {
  1068. this._map = new Map();
  1069. this._attributeMap = new Map();
  1070. if (data) {
  1071. const items = JSON.parse(data);
  1072. if (items) {
  1073. for (const item of items) {
  1074. if (item && item.name && item.schema) {
  1075. if (this._map.has(item.name)) {
  1076. throw new darknet.Error("Duplicate metadata key '" + item.name + "'.");
  1077. }
  1078. item.schema.name = item.name;
  1079. this._map.set(item.name, item.schema);
  1080. }
  1081. }
  1082. }
  1083. }
  1084. }
  1085. type(name) {
  1086. return this._map.get(name) || null;
  1087. }
  1088. attribute(type, name) {
  1089. const key = type + ':' + name;
  1090. if (!this._attributeMap.has(key)) {
  1091. this._attributeMap.set(key, null);
  1092. const schema = this.type(type);
  1093. if (schema && schema.attributes) {
  1094. for (const attribute of schema.attributes) {
  1095. this._attributeMap.set(type + ':' + attribute.name, attribute);
  1096. }
  1097. }
  1098. }
  1099. return this._attributeMap.get(key);
  1100. }
  1101. };
// Error type thrown for all Darknet loader failures.
darknet.Error = class extends Error {
    constructor(message) {
        super(message);
        this.name = 'Error loading Darknet model.';
    }
};
// Export the factory when running under CommonJS (e.g. Node.js); the guard
// keeps the file loadable in environments without a module object.
if (typeof module !== 'undefined' && typeof module.exports === 'object') {
    module.exports.ModelFactory = darknet.ModelFactory;
}