# backend.py
  1. #!/usr/bin/env python
  2. """ Expermiental Python Server backend test """
  3. import logging
  4. import os
  5. import sys
  6. root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
  7. sys.path.append(root_dir)
  8. sys.pycache_prefix = os.path.join(root_dir, "dist", "pycache", "backend")
  9. netron = __import__("source")
  10. third_party_dir = os.path.join(root_dir, "third_party")
  11. test_data_dir = os.path.join(third_party_dir, "test")
  12. logger = logging.getLogger(__name__)
  13. logging.basicConfig(level=logging.INFO, format="%(message)s")
  14. def _test_onnx():
  15. file = os.path.join(test_data_dir, "onnx", "candy.onnx")
  16. onnx = __import__("onnx")
  17. model = onnx.load(file)
  18. netron.serve(None, model)
  19. def _test_onnx_iterate():
  20. logging.getLogger(netron.__name__).setLevel(logging.WARNING)
  21. folder = os.path.join(test_data_dir, "onnx")
  22. for item in os.listdir(folder):
  23. file = os.path.join(folder, item)
  24. skip = (
  25. "super_resolution.onnx",
  26. "arcface-resnet100.onnx",
  27. "aten_sum_dim_onnx_inlined.onnx",
  28. "phi3-mini-128k-instruct-cuda-fp16.onnx",
  29. "if_k1.onnx"
  30. )
  31. if file.endswith(".onnx") and item not in skip:
  32. logger.info(item)
  33. onnx = __import__("onnx")
  34. model = onnx.load(file)
  35. address = netron.serve(file, model)
  36. netron.stop(address)
  37. def _test_torchscript(file):
  38. torch = __import__("torch")
  39. path = os.path.join(test_data_dir, "pytorch", file)
  40. model = torch.load(path, weights_only=False)
  41. torch._C._jit_pass_inline(model.graph)
  42. netron.serve(file, model)
  43. def _test_torchscript_transformer():
  44. torch = __import__("torch")
  45. model = torch.nn.Transformer(nhead=16, num_encoder_layers=12)
  46. module = torch.jit.trace(model, (torch.rand(10, 32, 512), torch.rand(20, 32, 512)))
  47. # module = torch.jit.script(model)
  48. torch._C._jit_pass_inline(module.graph)
  49. netron.serve("transformer", module)
  50. def _test_torchscript_resnet34():
  51. torch = __import__("torch")
  52. torchvision = __import__("torchvision")
  53. model = torchvision.models.resnet34()
  54. file = os.path.join(test_data_dir, "pytorch", "resnet34-333f7ec4.pth")
  55. state_dict = torch.load(file)
  56. model.load_state_dict(state_dict)
  57. trace = torch.jit.trace(model, torch.zeros([1, 3, 224, 224]), strict=True)
  58. torch._C._jit_pass_inline(trace.graph)
  59. netron.serve("resnet34", trace)
  60. def _test_torchscript_quantized():
  61. torch = __import__("torch")
  62. __import__("torchvision")
  63. torch.backends.quantized.engine = "qnnpack"
  64. trace = torch.jit.load(os.path.join(test_data_dir, "pytorch", "d2go.pt"))
  65. torch._C._jit_pass_inline(trace.graph)
  66. netron.serve("d2go", trace)
  67. # _test_onnx()
  68. # _test_onnx_iterate()
  69. # _test_torchscript('alexnet.pt')
  70. _test_torchscript("gpt2.pt")
  71. # _test_torchscript('inception_v3_traced.pt')
  72. # _test_torchscript('netron_issue_920.pt') # scalar
  73. # _test_torchscript('fasterrcnn_resnet50_fpn.pt') # tuple
  74. # _test_torchscript('mobilenetv2-quant_full-nnapi.pt') # nnapi
  75. # _test_torchscript_quantized()
  76. # _test_torchscript_resnet34()
  77. # _test_torchscript_transformer()