Quellcode durchsuchen

update code structure

JasonWang vor 7 Jahren
Ursprung
Commit
0d74e0b860

+ 8 - 1
.vscode/settings.json

@@ -68,6 +68,13 @@
         "xstring": "cpp",
         "xtr1common": "cpp",
         "xtree": "cpp",
-        "xutility": "cpp"
+        "xutility": "cpp",
+        "cctype": "cpp",
+        "cwctype": "cpp",
+        "optional": "cpp",
+        "set": "cpp",
+        "string_view": "cpp",
+        "typeindex": "cpp",
+        "variant": "cpp"
     }
 }

+ 5 - 0
CMakeLists.txt

@@ -16,6 +16,11 @@ SET(TRAPH_PATH_HEADER ${TRAPH_PATH_INCLUDE}/traph CACHE STRING "Adds a path to T
 SET(TRAPH_PATH_SOURCE ${TRAPH_PATH}/traph/source CACHE STRING "Adds a path to TRAPH source" FORCE)
 SET(TRAPH_PATH_DEPENDENCIES ${TRAPH_PATH}/traph/contrib CACHE STRING "Adds a path to TRAPH dependencies" FORCE)
 
+find_package(Boost)
+if(Boost_FOUND)
+	message(STATUS "Boost found: ${Boost_INCLUDE_DIRS}")
+endif()
+
 SET(TRAPH_ACCELERATE 1 CACHE STRING
 "Specify the feature Possible values:
 1 - mkl

+ 11 - 0
traph/include/traph/core/log.h

@@ -0,0 +1,11 @@
+#ifndef TRAPH_CORE_LOG_H_
+#define TRAPH_CORE_LOG_H_
+
+#include <cstdint>
+
+namespace traph
+{
+    
+}
+
+#endif

+ 2 - 2
traph/include/traph/core/type.h

@@ -1,5 +1,5 @@
-#ifndef TRAPH_TYPE_H
-#define TRAPH_TYPE_H
+#ifndef TRAPH_CORE_TYPE_H_
+#define TRAPH_CORE_TYPE_H_
 
 #include <cstdint>
 

+ 15 - 7
traph/include/traph/core/arithmetic.h → traph/include/traph/tensor/arithmetic.h

@@ -5,9 +5,9 @@
 #include <cmath>
 
 #include <traph/core/type.h>
-#include <traph/core/index.h>
-#include <traph/core/utils.h>
-#include <traph/core/tensor.h>
+#include <traph/tensor/index.h>
+#include <traph/tensor/utils.h>
+#include <traph/tensor/tensor.h>
 
 namespace traph
 {
@@ -37,6 +37,7 @@ namespace traph
 		return result;
 	}
 
+	// add fallback
 	template<class T>
 	Tensor<T> add(const Tensor<T> &t, T v)
 	{
@@ -50,10 +51,8 @@ namespace traph
 		return result;
 	}
 
-	template<>
 	Tensor<f32> add(const Tensor<f32> &t, f32 v);
 
-	template<>
 	Tensor<f64> add(const Tensor<f64> &t, f64 v);
 
 	template<class T>
@@ -102,6 +101,7 @@ namespace traph
 		}
 	}
 
+	// matmul fallback
 	template<class T>
 	Tensor<T> matmul(const Tensor<T> &a, const Tensor<T> &b)
 	{
@@ -131,10 +131,18 @@ namespace traph
 		return result;
 	}
 
-	template<>
+	Tensor<u8> matmul(const Tensor<u8> &a, const Tensor<u8> &b);
+
+	Tensor<i8> matmul(const Tensor<i8> &a, const Tensor<i8> &b);
+
+	Tensor<i16> matmul(const Tensor<i16> &a, const Tensor<i16> &b);
+
+	Tensor<i32> matmul(const Tensor<i32> &a, const Tensor<i32> &b);
+
+	Tensor<i64> matmul(const Tensor<i64> &a, const Tensor<i64> &b);
+
 	Tensor<f32> matmul(const Tensor<f32> &a, const Tensor<f32> &b);
 
-	template<>
 	Tensor<f64> matmul(const Tensor<f64> &a, const Tensor<f64> &b);
 }
 

+ 0 - 0
traph/include/traph/core/index.h → traph/include/traph/tensor/index.h


+ 3 - 3
traph/include/traph/core/slice.h → traph/include/traph/tensor/slice.h

@@ -6,9 +6,9 @@
 #include <vector>
 
 #include <traph/core/type.h>
-#include <traph/core/index.h>
-#include <traph/core/utils.h>
-#include <traph/core/tensor.h>
+#include <traph/tensor/index.h>
+#include <traph/tensor/utils.h>
+#include <traph/tensor/tensor.h>
 
 namespace traph
 {

+ 2 - 2
traph/include/traph/core/tensor.h → traph/include/traph/tensor/tensor.h

@@ -6,8 +6,8 @@
 
 
 #include<traph/core/type.h>
-#include<traph/core/index.h>
-#include<traph/core/utils.h>
+#include<traph/tensor/index.h>
+#include<traph/tensor/utils.h>
 
 namespace traph
 {

+ 1 - 1
traph/include/traph/core/utils.h → traph/include/traph/tensor/utils.h

@@ -4,7 +4,7 @@
 #include <initializer_list>
 
 #include <traph/core/type.h>
-#include <traph/core/index.h>
+#include <traph/tensor/index.h>
 
 namespace traph
 {

+ 3 - 3
traph/include/traph/core/view.h → traph/include/traph/tensor/view.h

@@ -4,9 +4,9 @@
 #include <utility>
 
 #include <traph/core/type.h>
-#include <traph/core/index.h>
-#include <traph/core/utils.h>
-#include <traph/core/tensor.h>
+#include <traph/tensor/index.h>
+#include <traph/tensor/utils.h>
+#include <traph/tensor/tensor.h>
 
 namespace traph
 {

+ 5 - 0
traph/source/CMakeLists.txt

@@ -1,6 +1,11 @@
 INCLUDE_DIRECTORIES(${PROJECT_SOURCE_DIR}/traph/include)
 INCLUDE_DIRECTORIES(${PROJECT_SOURCE_DIR}/traph/contrib)
 
+if(Boost_FOUND)
+	INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIRS})
+endif()
+
 ADD_SUBDIRECTORY(core)
+ADD_SUBDIRECTORY(tensor)
 ADD_SUBDIRECTORY(interface)
 ADD_SUBDIRECTORY(test)

+ 2 - 7
traph/source/core/CMakeLists.txt

@@ -6,13 +6,8 @@ SET(SOURCE_PATH ${TRAPH_PATH_SOURCE}/${LIB_NAME})
 
 SET(CORE_LIST
 	${HEADER_PATH}/type.h
-	${HEADER_PATH}/utils.h
-	${HEADER_PATH}/slice.h
-	${HEADER_PATH}/index.h
-	${HEADER_PATH}/arithmetic.h
-	${SOURCE_PATH}/arithmetic.cpp
-	${HEADER_PATH}/tensor.h
-	${SOURCE_PATH}/tensor.cpp
+	${HEADER_PATH}/log.h
+	${SOURCE_PATH}/log.cpp
 )
 
 ADD_LIBRARY(${LIB_OUTNAME} ${CORE_LIST})

+ 0 - 98
traph/source/core/arithmetic.cpp

@@ -1,98 +0,0 @@
-
-#include<stdexcept>
-
-#include <traph/core/arithmetic.h>
-
-#ifdef TRAPH_BUILD_OPENBLAS
-#include<traph/core/openblas_backend.h>
-#endif
-
-#ifdef TRAPH_BUILD_MKL
-#include<mkl.h>
-#include<mkl_blas.h>
-#include<mkl_cblas.h>
-#endif
-
-namespace traph
-{
-	template<>
-	Tensor<f32> add(const Tensor<f32> &t, f32 v)
-	{
-		Tensor<f32> result(t.size());
-#ifdef TRAPH_BUILD_MKL
-		result.fill_(v);
-		cblas_saxpy(t.size().flat_size(), 1.f, t.data(), 1, result.data(), 1);
-#endif
-		return result;
-	}
-
-	template<>
-	Tensor<f64> add(const Tensor<f64> &t, f64 v)
-	{
-		Tensor<f64> result(t.size());
-#ifdef TRAPH_BUILD_MKL
-		result.fill_(v);
-		cblas_daxpy(t.size().flat_size(), 1.f, t.data(), 1, result.data(), 1);
-#endif
-		return result;
-	}
-
-    template<>
-	Tensor<f32> matmul(const Tensor<f32> &a, const Tensor<f32> &b)
-    {
-		// check
-		matmul_check(a, b);
-		// result
-		Tensor<f32> result = zeros<f32>({ a.size()[0], b.size()[1] });
-
-#ifdef TRAPH_BUILD_MKL
-		CBLAS_LAYOUT a_layout = a.layout() == layout_type::column_major ? CBLAS_LAYOUT::CblasColMajor : CBLAS_LAYOUT::CblasRowMajor;
-
-		cblas_sgemm(a_layout,
-			CBLAS_TRANSPOSE::CblasNoTrans,
-			CBLAS_TRANSPOSE::CblasNoTrans,
-			a.size()[0],
-			b.size()[1],
-			a.size()[1],
-			1.f,
-			a.data(),
-			a.size()[0],
-			b.data(),
-			b.size()[0],
-			0.f,
-			result.data(),
-			result.size()[0]);
-#endif
-		return result;
-    }
-
-	template<>
-	Tensor<f64> matmul(const Tensor<f64> &a, const Tensor<f64> &b)
-	{
-		// check
-		matmul_check(a, b);
-		// result
-		Tensor<f64> result = zeros<f64>({ a.size()[0], b.size()[1] });
-
-#ifdef TRAPH_BUILD_MKL
-		CBLAS_LAYOUT a_layout = a.layout() == layout_type::column_major ? CBLAS_LAYOUT::CblasColMajor : CBLAS_LAYOUT::CblasRowMajor;
-
-		cblas_dgemm(a_layout,
-			CBLAS_TRANSPOSE::CblasNoTrans,
-			CBLAS_TRANSPOSE::CblasNoTrans,
-			a.size()[0],
-			b.size()[1],
-			a.size()[1],
-			1.f,
-			a.data(),
-			a.size()[0],
-			b.data(),
-			b.size()[0],
-			0.f,
-			result.data(),
-			result.size()[0]);
-#endif
-
-		return result;
-	}
-}

+ 6 - 0
traph/source/core/log.cpp

@@ -0,0 +1,6 @@
+#include <traph/core/log.h>
+
+namespace traph
+{
+    
+}

+ 2 - 2
traph/source/interface/traph_tensor.i

@@ -1,8 +1,8 @@
 %module traph_tensor
 %{
     #include<traph/core/type.h>
-    #include<traph/core/tensor.h>
-    #include<traph/core/index.h>
+    #include<traph/tensor/tensor.h>
+    #include<traph/tensor/index.h>
     using namespace traph;
 %}
 

+ 42 - 0
traph/source/tensor/CMakeLists.txt

@@ -0,0 +1,42 @@
+SET(LIB_NAME tensor)
+SET(LIB_OUTNAME traph-${LIB_NAME})
+
+SET(HEADER_PATH ${TRAPH_PATH_HEADER}/${LIB_NAME})
+SET(SOURCE_PATH ${TRAPH_PATH_SOURCE}/${LIB_NAME})
+
+SET(CORE_LIST
+	${HEADER_PATH}/utils.h
+	${HEADER_PATH}/slice.h
+	${HEADER_PATH}/index.h
+	${HEADER_PATH}/arithmetic.h
+	${SOURCE_PATH}/arithmetic.cpp
+	${HEADER_PATH}/tensor.h
+	${SOURCE_PATH}/tensor.cpp
+)
+
+ADD_LIBRARY(${LIB_OUTNAME} ${CORE_LIST})
+
+IF(Boost_FOUND)
+	target_link_libraries(${LIB_OUTNAME} ${Boost_LIBRARIES})
+ENDIF()
+
+IF(TRAPH_ACCELERATE EQUAL 1)
+	if (BLAS_FOUND)
+		target_link_libraries(${LIB_OUTNAME} ${BLAS_LIBRARIES})
+	endif()
+ELSEIF(TRAPH_ACCELERATE EQUAL 2)
+	if (BLAS_FOUND)
+		target_link_libraries(${LIB_OUTNAME} ${BLAS_LIBRARIES})
+	endif()
+ELSEIF(TRAPH_ACCELERATE EQUAL 3)
+	find_package(clBLAS CONFIG REQUIRED)
+	target_link_libraries(${LIB_OUTNAME} clBLAS)
+ELSEIF(TRAPH_ACCELERATE EQUAL 4)
+	# find_package(clBLAS CONFIG REQUIRED)
+	# target_link_libraries(${LIB_OUTNAME} PRIVATE clBLAS)
+ELSE()
+	MESSAGE(FATAL_ERROR "Unsupported TRAPH_ACCELERATE value: ${TRAPH_ACCELERATE}")
+ENDIF()
+
+
+

+ 182 - 0
traph/source/tensor/arithmetic.cpp

@@ -0,0 +1,182 @@
+
+#include <stdexcept>
+#include <algorithm>
+
+#include <traph/tensor/arithmetic.h>
+
+#include <eigen3/Eigen/Dense>
+
+#ifdef TRAPH_BUILD_OPENBLAS
+#include <traph/core/openblas_backend.h>
+#endif
+
+#ifdef TRAPH_BUILD_MKL
+#include <mkl.h>
+#include <mkl_blas.h>
+#include <mkl_cblas.h>
+#endif
+
+namespace traph
+{
+	Tensor<f32> add(const Tensor<f32> &t, f32 v)
+	{
+		Tensor<f32> result(t.size());
+#ifdef TRAPH_BUILD_MKL
+		result.fill_(v);
+		cblas_saxpy(t.size().flat_size(), 1.f, t.data(), 1, result.data(), 1);
+#endif
+		return result;
+	}
+
+	Tensor<f64> add(const Tensor<f64> &t, f64 v)
+	{
+		Tensor<f64> result(t.size());
+#ifdef TRAPH_BUILD_MKL
+		result.fill_(v);
+		cblas_daxpy(t.size().flat_size(), 1.f, t.data(), 1, result.data(), 1);
+#endif
+		return result;
+	}
+
+	Tensor<u8> matmul(const Tensor<u8> &a, const Tensor<u8> &b)
+	{
+		// check
+		matmul_check(a, b);
+		// result
+		Tensor<u8> result = zeros<u8>({ a.size()[0], b.size()[1] });
+
+		// copy data
+		Eigen::Map<const Eigen::Matrix<u8, Eigen::Dynamic, Eigen::Dynamic>> eigen_a(a.data() + a.offset(), a.size()[0], a.size()[1]);
+		Eigen::Map<const Eigen::Matrix<u8, Eigen::Dynamic, Eigen::Dynamic>> eigen_b(b.data() + b.offset(), b.size()[0], b.size()[1]);
+
+		Eigen::Matrix<u8, Eigen::Dynamic, Eigen::Dynamic> eigen_c = eigen_a * eigen_b;
+		// copy to result
+		std::copy(eigen_c.data(), eigen_c.data() + a.size()[0] * b.size()[1], result.data());
+		return result;
+	}
+
+	Tensor<i8> matmul(const Tensor<i8> &a, const Tensor<i8> &b)
+	{
+		// check
+		matmul_check(a, b);
+		// result
+		Tensor<i8> result = zeros<i8>({ a.size()[0], b.size()[1] });
+
+		// copy data
+		Eigen::Map<const Eigen::Matrix<i8, Eigen::Dynamic, Eigen::Dynamic>> eigen_a(a.data() + a.offset(), a.size()[0], a.size()[1]);
+		Eigen::Map<const Eigen::Matrix<i8, Eigen::Dynamic, Eigen::Dynamic>> eigen_b(b.data() + b.offset(), b.size()[0], b.size()[1]);
+
+		Eigen::Matrix<i8, Eigen::Dynamic, Eigen::Dynamic> eigen_c = eigen_a * eigen_b;
+		// copy to result
+		std::copy(eigen_c.data(), eigen_c.data() + a.size()[0] * b.size()[1], result.data());
+		return result;
+	}
+
+	Tensor<i16> matmul(const Tensor<i16> &a, const Tensor<i16> &b)
+	{
+		// check
+		matmul_check(a, b);
+		// result
+		Tensor<i16> result = zeros<i16>({ a.size()[0], b.size()[1] });
+
+		// copy data
+		Eigen::Map<const Eigen::Matrix<i16, Eigen::Dynamic, Eigen::Dynamic>> eigen_a(a.data() + a.offset(), a.size()[0], a.size()[1]);
+		Eigen::Map<const Eigen::Matrix<i16, Eigen::Dynamic, Eigen::Dynamic>> eigen_b(b.data() + b.offset(), b.size()[0], b.size()[1]);
+
+		Eigen::Matrix<i16, Eigen::Dynamic, Eigen::Dynamic> eigen_c = eigen_a * eigen_b;
+		// copy to result
+		std::copy(eigen_c.data(), eigen_c.data() + a.size()[0] * b.size()[1], result.data());
+		return result;
+	}
+
+	Tensor<i32> matmul(const Tensor<i32> &a, const Tensor<i32> &b)
+	{
+		// check
+		matmul_check(a, b);
+		// result
+		Tensor<i32> result = zeros<i32>({ a.size()[0], b.size()[1] });
+
+		// copy data
+		Eigen::Map<const Eigen::Matrix<i32, Eigen::Dynamic, Eigen::Dynamic>> eigen_a(a.data() + a.offset(), a.size()[0], a.size()[1]);
+		Eigen::Map<const Eigen::Matrix<i32, Eigen::Dynamic, Eigen::Dynamic>> eigen_b(b.data() + b.offset(), b.size()[0], b.size()[1]);
+
+		Eigen::Matrix<i32, Eigen::Dynamic, Eigen::Dynamic> eigen_c = eigen_a * eigen_b;
+		// copy to result
+		std::copy(eigen_c.data(), eigen_c.data() + a.size()[0] * b.size()[1], result.data());
+		return result;
+	}
+
+	Tensor<i64> matmul(const Tensor<i64> &a, const Tensor<i64> &b)
+	{
+		// check
+		matmul_check(a, b);
+		// result
+		Tensor<i64> result = zeros<i64>({ a.size()[0], b.size()[1] });
+
+		// copy data
+		Eigen::Map<const Eigen::Matrix<i64, Eigen::Dynamic, Eigen::Dynamic>> eigen_a(a.data() + a.offset(), a.size()[0], a.size()[1]);
+		Eigen::Map<const Eigen::Matrix<i64, Eigen::Dynamic, Eigen::Dynamic>> eigen_b(b.data() + b.offset(), b.size()[0], b.size()[1]);
+
+		Eigen::Matrix<i64, Eigen::Dynamic, Eigen::Dynamic> eigen_c = eigen_a * eigen_b;
+		// copy to result
+		std::copy(eigen_c.data(), eigen_c.data() + a.size()[0] * b.size()[1], result.data());
+		return result;
+	}
+
+	Tensor<f32> matmul(const Tensor<f32> &a, const Tensor<f32> &b)
+	{
+		// check
+		matmul_check(a, b);
+		// result
+		Tensor<f32> result = zeros<f32>({ a.size()[0], b.size()[1] });
+
+	#ifdef TRAPH_BUILD_MKL
+		CBLAS_LAYOUT a_layout = a.layout() == layout_type::column_major ? CBLAS_LAYOUT::CblasColMajor : CBLAS_LAYOUT::CblasRowMajor;
+
+		cblas_sgemm(a_layout,
+			CBLAS_TRANSPOSE::CblasNoTrans,
+			CBLAS_TRANSPOSE::CblasNoTrans,
+			a.size()[0],
+			b.size()[1],
+			a.size()[1],
+			1.f,
+			a.data(),
+			a.size()[0],
+			b.data(),
+			b.size()[0],
+			0.f,
+			result.data(),
+			result.size()[0]);
+	#endif
+		return result;
+	}
+
+	Tensor<f64> matmul(const Tensor<f64> &a, const Tensor<f64> &b)
+	{
+		// check
+		matmul_check(a, b);
+		// result
+		Tensor<f64> result = zeros<f64>({ a.size()[0], b.size()[1] });
+
+#ifdef TRAPH_BUILD_MKL
+		CBLAS_LAYOUT a_layout = a.layout() == layout_type::column_major ? CBLAS_LAYOUT::CblasColMajor : CBLAS_LAYOUT::CblasRowMajor;
+
+		cblas_dgemm(a_layout,
+			CBLAS_TRANSPOSE::CblasNoTrans,
+			CBLAS_TRANSPOSE::CblasNoTrans,
+			a.size()[0],
+			b.size()[1],
+			a.size()[1],
+			1.f,
+			a.data(),
+			a.size()[0],
+			b.data(),
+			b.size()[0],
+			0.f,
+			result.data(),
+			result.size()[0]);
+#endif
+
+		return result;
+	}
+}

+ 0 - 0
traph/source/core/tensor.cpp → traph/source/tensor/tensor.cpp


+ 1 - 0
traph/source/test/CMakeLists.txt

@@ -10,5 +10,6 @@ SET(TEST_LIST
 
 add_executable(${LIB_OUTNAME} ${TEST_LIST})
 target_link_libraries(${LIB_OUTNAME} traph-core)
+target_link_libraries(${LIB_OUTNAME} traph-tensor)
 
 

+ 7 - 7
traph/source/test/main.cpp

@@ -1,5 +1,5 @@
-#include <traph/core/tensor.h>
-#include <traph/core/arithmetic.h>
+#include <traph/tensor/tensor.h>
+#include <traph/tensor/arithmetic.h>
 
 int main()
 {
@@ -9,10 +9,10 @@ int main()
     traph::Tensor<float> result = traph::matmul(a, w);
 	*/
 	// traph::Tensor<float> result2 = traph::add(a, 1.f);
-	/*
-	traph::Tensor<int> a = traph::zeros<int>({ 1000, 1000 });
-	a.fill_(-1);
-	traph::Tensor<int> b = traph::abs(a);
-	*/
+	
+	traph::Tensor<traph::f32> a = traph::zeros<traph::f32>({ 5000, 5000 });
+	traph::Tensor<traph::f32> b = traph::zeros<traph::f32>({ 5000, 5000 });
+	traph::Tensor<traph::f32> c = traph::matmul(a, b);
+	
     return 0;
 }