Jelajahi Sumber

swig support

JasonWang 7 tahun lalu
induk
melakukan
f2dc4cd726

+ 71 - 1
.vscode/settings.json

@@ -1,3 +1,73 @@
 {
-    "python.pythonPath": "C:\\Users\\jstzw\\AppData\\Local\\Programs\\Python\\Python36\\python.exe"
+    "python.pythonPath": "C:\\Users\\jstzw\\AppData\\Local\\Programs\\Python\\Python36\\python.exe",
+    "files.associations": {
+        "algorithm": "cpp",
+        "array": "cpp",
+        "atomic": "cpp",
+        "bitset": "cpp",
+        "chrono": "cpp",
+        "cmath": "cpp",
+        "complex": "cpp",
+        "condition_variable": "cpp",
+        "cstddef": "cpp",
+        "cstdint": "cpp",
+        "cstdio": "cpp",
+        "cstdlib": "cpp",
+        "cstring": "cpp",
+        "ctime": "cpp",
+        "cwchar": "cpp",
+        "deque": "cpp",
+        "exception": "cpp",
+        "fstream": "cpp",
+        "functional": "cpp",
+        "initializer_list": "cpp",
+        "iomanip": "cpp",
+        "ios": "cpp",
+        "iosfwd": "cpp",
+        "iostream": "cpp",
+        "istream": "cpp",
+        "iterator": "cpp",
+        "limits": "cpp",
+        "list": "cpp",
+        "locale": "cpp",
+        "map": "cpp",
+        "memory": "cpp",
+        "mutex": "cpp",
+        "new": "cpp",
+        "numeric": "cpp",
+        "ostream": "cpp",
+        "random": "cpp",
+        "ratio": "cpp",
+        "regex": "cpp",
+        "sstream": "cpp",
+        "stdexcept": "cpp",
+        "streambuf": "cpp",
+        "string": "cpp",
+        "system_error": "cpp",
+        "xthread": "cpp",
+        "thread": "cpp",
+        "tuple": "cpp",
+        "type_traits": "cpp",
+        "typeinfo": "cpp",
+        "unordered_map": "cpp",
+        "utility": "cpp",
+        "vector": "cpp",
+        "xfacet": "cpp",
+        "xhash": "cpp",
+        "xiosbase": "cpp",
+        "xlocale": "cpp",
+        "xlocbuf": "cpp",
+        "xlocinfo": "cpp",
+        "xlocmes": "cpp",
+        "xlocmon": "cpp",
+        "xlocnum": "cpp",
+        "xloctime": "cpp",
+        "xmemory": "cpp",
+        "xmemory0": "cpp",
+        "xstddef": "cpp",
+        "xstring": "cpp",
+        "xtr1common": "cpp",
+        "xtree": "cpp",
+        "xutility": "cpp"
+    }
 }

+ 10 - 1
build.py

@@ -5,13 +5,22 @@ import sys
 import subprocess
 
 def main(argv):
+    system_type = platform.system()
+
     traph_build = argv[0]
     traph_root, build_file = os.path.split(traph_build)
-    if not os.path.exists(traph_root + "/build"):
+    if not os.path.exists(os.path.join(traph_root, "build")):
         os.mkdir("build")
     os.chdir(traph_root + "/build")
     subprocess.run("cmake ../")
     subprocess.run("cmake --build .")
+
+    os.chdir("../")
+
+    if system_type == 'windows':
+        shutil.copyfile(os.path.join('build/traph/source/interface/Release/_swig-tensor.pyd'), 'python/pytraph')
+    else:
+        print('unsupport system')
     
 if __name__ == '__main__':
 	main(sys.argv)

+ 1 - 0
traph/CMakeLists.txt

@@ -1,3 +1,4 @@
 
 ADD_SUBDIRECTORY(contrib)
 ADD_SUBDIRECTORY(source)
+

+ 0 - 0
traph/include/traph/core/array.h


+ 159 - 0
traph/include/traph/core/index.h

@@ -0,0 +1,159 @@
+#ifndef TRAPH_INDEX_H
+#define TRAPH_INDEX_H
+
+#include <cstdint>
+#include <cstring>
+#include <cassert>
+#include <stdexcept>
+#include <memory>
+#include <utility>
+#include <traph/core/type.h>
+
+#define DIMVECTOR_SMALL_VECTOR_OPTIMIZATION 4
+
+namespace traph
+{
+    class DimVector
+    {
+    private:
+        std::unique_ptr<idx_type[]> data;
+        idx_type stack_data[DIMVECTOR_SMALL_VECTOR_OPTIMIZATION];
+        idx_type dim_num;
+    public:
+        DimVector()
+            :data(nullptr), dim_num(0)
+        {
+        }
+
+        DimVector(idx_type size)
+        {
+            if(size < 0)
+                return;
+            if(size > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+            {
+                data = std::make_unique<idx_type[]>(size);
+            }
+            
+            dim_num = size;
+        }
+
+        DimVector(const DimVector& other)
+        {
+            if(other.dim_num > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+            {
+                data = std::make_unique<idx_type[]>(other.dim_num);
+                std::memcpy(data.get(), other.data.get(), other.dim_num * sizeof(idx_type));
+            }
+            else
+            {
+                std::memcpy(stack_data, other.stack_data, other.dim_num * sizeof(idx_type));
+            }
+            dim_num = other.dim_num;
+        }
+
+        DimVector(DimVector&& other)
+        {
+            if(other.dim_num > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+            {
+                data = std::move(other.data);
+            }
+            else
+            {
+                std::memcpy(stack_data, other.stack_data, other.dim_num * sizeof(idx_type));
+            }
+            dim_num = other.dim_num;
+        }
+
+        DimVector& operator=(const DimVector& other) noexcept
+        {
+            if(other.dim_num > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+            {
+                data = std::make_unique<idx_type[]>(other.dim_num);
+                std::memcpy(data.get(), other.data.get(), other.dim_num * sizeof(idx_type));
+            }
+            else
+            {
+                std::memcpy(stack_data, other.stack_data, other.dim_num * sizeof(idx_type));
+            }
+            dim_num = other.dim_num;
+            return *this;
+        }
+
+        DimVector& operator=(DimVector&& other) noexcept
+        {
+            if(other.dim_num > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+            {
+                data = std::move(other.data);
+            }
+            else
+            {
+                std::memcpy(stack_data, other.stack_data, other.dim_num * sizeof(idx_type));
+            }
+            dim_num = other.dim_num;
+            return *this;
+        }
+
+        void push_back(idx_type idx)
+        {
+            resize(size() + 1);
+            this->operator[](size() - 1) = idx;
+        }
+
+        void resize(idx_type size)
+        {
+            if(size < 0 || size == dim_num)
+                return;
+            if(size > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+            {
+                if(dim_num > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+                {
+                    idx_type move_size = (size > dim_num ? dim_num: size);
+                    std::unique_ptr<idx_type[]> temp(new idx_type[size]);
+                    std::memcpy(temp.get(), data.get(), move_size * sizeof(idx_type));
+                    data = std::move(temp);
+                }
+                else
+                {
+                    data = std::unique_ptr<idx_type[]>(new idx_type[size]);
+                    std::memcpy(data.get(), stack_data, dim_num * sizeof(idx_type));
+                }
+            }
+            else
+            {
+                if(dim_num > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+                {
+                    data = std::unique_ptr<idx_type[]>(nullptr);
+                    if (size != 0)
+                        std::memcpy(stack_data, data.get(), size * sizeof(idx_type));
+                }
+            }
+            dim_num = size;
+        }
+
+        idx_type size() const { return this->dim_num; }
+
+        idx_type& operator[](idx_type dim)
+        {
+            if(dim<0 || dim >= dim_num)
+                throw std::runtime_error("index out of dim vector size");
+            
+            if(dim_num > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+                return data[dim];
+            else
+                return stack_data[dim];
+        }
+
+        idx_type operator[](idx_type dim) const
+        {
+            if(dim<0 || dim >= dim_num)
+                throw std::runtime_error("index out of dim vector size");
+            
+            if(dim_num > DIMVECTOR_SMALL_VECTOR_OPTIMIZATION)
+                return data[dim];
+            else
+                return stack_data[dim];
+        }
+    };
+}
+
+#endif

+ 185 - 1
traph/include/traph/core/tensor.h

@@ -1,9 +1,193 @@
 
+#include<traph/core/type.h>
+#include<traph/core/index.h>
+#include<traph/core/utils.h>
 
 namespace traph
 {
+    // The real representation of all tensors.
+    template<typename T>
+    class TensorStorage
+    {
+    public:
+        std::unique_ptr<T[]> data;
+        idx_type len;
+        TensorStorage()
+            :data(nullptr), len(0)
+        {
+
+        }
+
+        void resize(idx_type size)
+        {
+            if(size < 0 || size == len)
+                return;
+            idx_type move_size = (size > len ? len: size);
+            std::unique_ptr<T[]> temp(new idx_type[size]);
+            std::memcpy(temp.get(), data.get(), move_size * sizeof(idx_type));
+            data = std::move(temp);
+
+            len = size;
+        }
+
+        void resize(const DimVector& dimensions)
+        {
+            idx_type size = 1;
+            for(idx_type i = 0; i < dimensions.size(); ++i)
+            {
+                size *= dimensions[i];
+            }
+
+            if(size < 0 || size == len)
+                return;
+            idx_type move_size = (size > len ? len: size);
+            std::unique_ptr<T[]> temp(new idx_type[size]);
+            std::memcpy(temp.get(), data.get(), move_size * sizeof(idx_type));
+            data = std::move(temp);
+
+            len = size;
+        }
+    };
+
    // Memory layout of a tensor's backing buffer.
    enum layout_type
    {
        row_major,    // last index varies fastest (C order)
        column_major  // first index varies fastest (Fortran order)
    };
+
+    // ndarray
+    template<typename T>
     class Tensor
     {
-        
+    private:
+        std::unique_ptr<TensorStorage<T>> rep;
+        DimVector dimensions;
+        idx_type offset;
+		DimVector strides;
+        layout_type order;
+    private:
+        void auto_strides()
+        {
+            idx_type dim_num = dimensions.size();
+            strides.resize(dim_num);
+            size_type stride = 1;
+            if(order == layout_type::column_major)
+            {
+                for(idx_type i = 0; i < dim_num; ++i)
+                {
+                    strides[i] = stride;
+                    stride *= dimensions[i];
+                }
+            }
+            else
+            {
+                for(idx_type i = dim_num - 1; i >= 0; --i)
+                {
+                    strides[i] = stride;
+                    stride *= dimensions[i];
+                }
+            }
+        }
+    public:
+        Tensor()
+            :rep(new TensorStorage<T>),
+            dimensions(), offset(0), strides(), order(layout_type::column_major)
+        {
+        }
+
+        explicit Tensor(const DimVector& dimensions)
+            :rep(new TensorStorage<T>),
+            dimensions(dimensions), offset(0), strides(), order(layout_type::column_major)
+        {
+            auto_strides();
+        }
+
+        explicit Tensor(const DimVector& dimensions, layout_type order)
+            :rep(new TensorStorage<T>),
+            dimensions(dimensions), offset(0), strides(), order(order)
+        {
+
+        }
+
+        explicit Tensor(const DimVector& dimensions, const DimVector& strides)
+            :rep(new TensorStorage<T>),
+            dimensions(dimensions), offset(0), strides(strides), order(layout_type::column_major)
+        {
+            auto_strides();
+        }
+
+        explicit Tensor(const DimVector& dimensions, const DimVector& strides, layout_type order)
+            :rep(new TensorStorage<T>),
+            dimensions(dimensions), offset(0), strides(strides), order(order)
+        {
+
+        }
+
+        Tensor(const T& t)
+            :rep(new TensorStorage<T>),
+            dimensions(), offset(0), strides(), order(order)
+        {
+            dimensions.resize(1);
+            auto_strides();
+        }
+
+        Tensor(nested_initializer_list_t<T, 1> t)
+            :rep(new TensorStorage<T>),
+            dimensions(), offset(0), strides(), order(order)
+        {
+            dimensions.resize(1);
+            auto_strides();
+        }
+        Tensor(nested_initializer_list_t<T, 2> t)
+            :rep(new TensorStorage<T>),
+            dimensions(), offset(0), strides(), order(order)
+        {
+            dimensions.resize(2);
+            auto_strides();
+        }
+
+        Tensor(nested_initializer_list_t<T, 3> t)
+            :rep(new TensorStorage<T>),
+            dimensions(), offset(0), strides(), order(order)
+        {
+            dimensions.resize(3);
+            auto_strides();
+        }
+        Tensor(nested_initializer_list_t<T, 4> t)
+            :rep(new TensorStorage<T>),
+            dimensions(), offset(0), strides(), order(order)
+        {
+            dimensions.resize(4);
+            auto_strides();
+        }
+        Tensor(nested_initializer_list_t<T, 5> t)
+            :rep(new TensorStorage<T>),
+            dimensions(), offset(0), strides(), order(order)
+        {
+            dimensions.resize(5);
+            auto_strides();
+        }
+
+        void reshape(std::initializer_list<idx_type> t)
+        {
+
+        }
+
+        Tensor& operator ()(idx_type idx)
+        {
+            
+        }
+
+        template<class... Args>
+        Tensor& operator ()(idx_type idx, Args... args)
+        {
+            
+        }
+
+        template<class... Args>
+        const Tensor& operator ()(idx_type idx, Args... args) const
+        {
+            
+        }
     };
 }

+ 22 - 0
traph/include/traph/core/type.h

@@ -0,0 +1,22 @@
#ifndef TRAPH_TYPE_H
#define TRAPH_TYPE_H

#include <cstdint>

namespace traph
{
    // Fixed-width scalar aliases used throughout traph.
    using f32 = float;
    using f64 = double;
    using i8 = std::int8_t;
    using i16 = std::int16_t;
    using i32 = std::int32_t;
    using i64 = std::int64_t;
    using u8 = std::uint8_t;
    using u16 = std::uint16_t;
    using u32 = std::uint32_t;
    using u64 = std::uint64_t;
    // Index/size types for tensor shapes, strides and element counts.
    // NOTE(review): signed 32-bit caps tensors at ~2^31 elements — confirm
    // that is intended before adding large-tensor support.
    using idx_type = i32;
    using size_type = i32;
}

#endif

+ 27 - 0
traph/include/traph/core/utils.h

@@ -0,0 +1,27 @@
#ifndef TENSORA_UTILS_H_
#define TENSORA_UTILS_H_

#include <initializer_list>

#include <traph/core/type.h>
#include <traph/core/index.h>

namespace traph
{
    // Recursive metafunction: nested_initializer_list<T, I>::type is a
    // std::initializer_list nested I levels deep with element type T, e.g.
    // nested_initializer_list<int, 2>::type ==
    //     std::initializer_list<std::initializer_list<int>>.
    template <class T, idx_type I>
    struct nested_initializer_list
    {
        using type = std::initializer_list<typename nested_initializer_list<T, I - 1>::type>;
    };

    // Base case: zero nesting levels is just the element type itself.
    template <class T>
    struct nested_initializer_list<T, 0>
    {
        using type = T;
    };

    // Convenience alias for the nested list type.
    template <class T, idx_type I>
    using nested_initializer_list_t = typename nested_initializer_list<T, I>::type;
}

#endif

+ 3 - 1
traph/source/CMakeLists.txt

@@ -1,4 +1,6 @@
 INCLUDE_DIRECTORIES(${PROJECT_SOURCE_DIR}/traph/include)
 INCLUDE_DIRECTORIES(${PROJECT_SOURCE_DIR}/traph/contrib)
 
-ADD_SUBDIRECTORY(core)
+ADD_SUBDIRECTORY(core)
+ADD_SUBDIRECTORY(interface)
+ADD_SUBDIRECTORY(test)

+ 1 - 1
traph/source/core/CMakeLists.txt

@@ -9,6 +9,6 @@ SET(CORE_LIST
 	${SOURCE_PATH}/tensor.cpp
 )
 
-ADD_LIBRARY(${LIB_OUTNAME} SHARED ${CORE_LIST})
+ADD_LIBRARY(${LIB_OUTNAME} ${CORE_LIST})
 
 

+ 20 - 0
traph/source/interface/CMakeLists.txt

@@ -0,0 +1,20 @@
# Build the SWIG-generated Python extension wrapping the traph core tensor
# types. SWIG_ADD_LIBRARY(traph_tensor ...) produces a module named
# _traph_tensor (e.g. _traph_tensor.pyd on Windows).
SET(LIB_NAME interface)
SET(LIB_OUTNAME traph-${LIB_NAME})

SET(SOURCE_PATH ${TRAPH_PATH_SOURCE}/${LIB_NAME})

FIND_PACKAGE(SWIG REQUIRED)
INCLUDE(${SWIG_USE_FILE})

# NOTE(review): not REQUIRED — if Python dev files are missing, configuration
# succeeds and the build fails later at link time; consider
# FIND_PACKAGE(PythonLibs REQUIRED).
FIND_PACKAGE(PythonLibs)
INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_PATH})

INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})

SET(CMAKE_SWIG_FLAGS "")

# Treat the .i file as C++ and let SWIG follow #include directives.
SET_SOURCE_FILES_PROPERTIES(traph_tensor.i PROPERTIES CPLUSPLUS ON)
SET_SOURCE_FILES_PROPERTIES(traph_tensor.i PROPERTIES SWIG_FLAGS "-includeall")
SWIG_ADD_LIBRARY(traph_tensor LANGUAGE python SOURCES traph_tensor.i)
SWIG_LINK_LIBRARIES(traph_tensor ${PYTHON_LIBRARIES})
SWIG_LINK_LIBRARIES(traph_tensor traph-core)

+ 38 - 0
traph/source/interface/traph_tensor.i

@@ -0,0 +1,38 @@
// SWIG interface for the traph tensor module: exposes TensorStorage and
// Tensor to Python, instantiated as tensor_f32 / tensor_f64 below.
%module traph_tensor
%{
    #include<traph/core/type.h>
    #include<traph/core/tensor.h>
    using namespace traph;
%}

// Mirror of traph::TensorStorage (traph/core/tensor.h).
// NOTE(review): SWIG has no default typemap for std::unique_ptr members —
// confirm these members wrap correctly or mark them immutable/hidden.
template<class T>
class TensorStorage
{
public:
    std::unique_ptr<T[]> data;
    idx_type len;

    TensorStorage();
    void resize(idx_type size);
    void resize(const DimVector& dimensions);
};

// Mirror of traph::Tensor. Only constructors are exposed for now.
// NOTE(review): DimVector itself is not wrapped here, so these constructors
// may be uncallable from Python — verify.
template<class T>
class Tensor
{
private:
    std::unique_ptr<TensorStorage<T>> rep;
    DimVector dimensions;
    idx_type offset;
    DimVector strides;
    layout_type order;
public:
    Tensor();
    Tensor(const DimVector& dimensions);
    Tensor(const DimVector& dimensions, layout_type order);
    Tensor(const DimVector& dimensions, const DimVector& strides);
    Tensor(const DimVector& dimensions, const DimVector& strides, layout_type order);
};

// Concrete Python classes for the float/double instantiations.
%template(tensor_f32) Tensor<f32>;
%template(tensor_f64) Tensor<f64>;

+ 14 - 0
traph/source/test/CMakeLists.txt

@@ -0,0 +1,14 @@
# Build the core smoke test.
SET(LIB_NAME test)
SET(LIB_OUTNAME traph-${LIB_NAME})

SET(HEADER_PATH ${TRAPH_PATH_HEADER}/${LIB_NAME})
SET(SOURCE_PATH ${TRAPH_PATH_SOURCE}/${LIB_NAME})

SET(TEST_LIST
	${SOURCE_PATH}/main.cpp
)

# FIX: main.cpp defines main(), so this target must be an executable —
# a SHARED library built from a main() TU is never runnable as a test.
ADD_EXECUTABLE(${LIB_OUTNAME} ${TEST_LIST})
target_link_libraries(${LIB_OUTNAME} traph-core)

+ 7 - 0
traph/source/test/main.cpp

@@ -0,0 +1,7 @@
#include <traph/core/tensor.h>

// Smoke test: verifies that a default-constructed Tensor<float> compiles
// and constructs/destructs without error.
int main()
{
    traph::Tensor<float> t;
    return 0;
}