Parcourir la source

remove new/delete and malloc/free

Summary: See title

Reviewed By: nicolasvasilache

Differential Revision: D6352410

fbshipit-source-id: e73399bb7633937ce4e506045965854b4ee81961
Christian Puhrsch il y a 8 ans
Parent
commit
d647be0324
11 fichiers modifiés avec 158 ajouts et 182 suppressions
  1. +6 -5
      python/fastText/pybind/fasttext_pybind.cc
  2. +1 -1
      runtests.py
  3. +5 -5
      src/fasttext.cc
  4. +24 -56
      src/matrix.cc
  5. +46 -23
      src/matrix.h
  6. +2 -2
      src/model.cc
  7. +9 -8
      src/productquantizer.cc
  8. +19 -28
      src/qmatrix.cc
  9. +2 -3
      src/qmatrix.h
  10. +23 -44
      src/vector.cc
  11. +21 -7
      src/vector.h

+ 6 - 5
python/fastText/pybind/fasttext_pybind.cc

@@ -71,11 +71,11 @@ PYBIND11_MODULE(fasttext_pybind, m) {
       .def(py::init<ssize_t>())
       .def_buffer([](fasttext::Vector& m) -> py::buffer_info {
         return py::buffer_info(
-            m.data_,
+            m.data(),
             sizeof(fasttext::real),
             py::format_descriptor<fasttext::real>::format(),
             1,
-            {m.m_},
+            {m.size()},
             {sizeof(fasttext::real)});
       });
 
@@ -85,12 +85,13 @@ PYBIND11_MODULE(fasttext_pybind, m) {
       .def(py::init<ssize_t, ssize_t>())
       .def_buffer([](fasttext::Matrix& m) -> py::buffer_info {
         return py::buffer_info(
-            m.data_,
+            m.data(),
             sizeof(fasttext::real),
             py::format_descriptor<fasttext::real>::format(),
             2,
-            {m.m_, m.n_},
-            {sizeof(fasttext::real) * m.n_, sizeof(fasttext::real) * (int64_t)1});
+            {m.size(0), m.size(1)},
+            {sizeof(fasttext::real) * m.size(1),
+             sizeof(fasttext::real) * (int64_t)1});
       });
 
   py::class_<fasttext::FastText>(m, "fasttext")

+ 1 - 1
runtests.py

@@ -40,7 +40,7 @@ if __name__ == "__main__":
         help="run integration tests",
         action="store_true"
     )
-    parser.add_argument("--data_dir", help="Full path to data directory")
+    parser.add_argument("--data-dir", help="Full path to data directory")
     args = parser.parse_args()
     if args.unit_tests:
         run_tests(gen_unit_tests())

+ 5 - 5
src/fasttext.cc

@@ -265,9 +265,9 @@ void FastText::printInfo(real progress, real loss, std::ostream& log_stream) {
 }
 
 std::vector<int32_t> FastText::selectEmbeddings(int32_t cutoff) const {
-  Vector norms(input_->m_);
+  Vector norms(input_->size(0));
   input_->l2NormRow(norms);
-  std::vector<int32_t> idx(input_->m_, 0);
+  std::vector<int32_t> idx(input_->size(0), 0);
   std::iota(idx.begin(), idx.end(), 0);
   auto eosid = dict_->getId(Dictionary::EOS);
   std::sort(idx.begin(), idx.end(),
@@ -287,7 +287,7 @@ void FastText::quantize(const Args qargs) {
   args_->qout = qargs.qout;
   args_->output = qargs.output;
 
-  if (qargs.cutoff > 0 && qargs.cutoff < input_->m_) {
+  if (qargs.cutoff > 0 && qargs.cutoff < input_->size(0)) {
     auto idx = selectEmbeddings(qargs.cutoff);
     dict_->prune(idx);
     std::shared_ptr<Matrix> ninput =
@@ -618,7 +618,7 @@ void FastText::loadVectors(std::string filename) {
     words.push_back(word);
     dict_->add(word);
     for (size_t j = 0; j < dim; j++) {
-      in >> mat->data_[i * dim + j];
+      in >> mat->at(i, j);
     }
   }
   in.close();
@@ -631,7 +631,7 @@ void FastText::loadVectors(std::string filename) {
     int32_t idx = dict_->getId(words[i]);
     if (idx < 0 || idx >= dict_->nwords()) continue;
     for (size_t j = 0; j < dim; j++) {
-      input_->data_[idx * dim + j] = mat->data_[i * dim + j];
+      input_->at(idx, j) = mat->at(i, j);
     }
   }
 }

+ 24 - 56
src/matrix.cc

@@ -9,8 +9,6 @@
 
 #include "matrix.h"
 
-#include <assert.h>
-
 #include <random>
 #include <exception>
 #include <stdexcept>
@@ -20,43 +18,12 @@
 
 namespace fasttext {
 
-Matrix::Matrix() {
-  m_ = 0;
-  n_ = 0;
-  data_ = nullptr;
-}
-
-Matrix::Matrix(int64_t m, int64_t n) {
-  m_ = m;
-  n_ = n;
-  data_ = new real[m * n];
-}
-
-Matrix::Matrix(const Matrix& other) {
-  m_ = other.m_;
-  n_ = other.n_;
-  data_ = new real[m_ * n_];
-  for (int64_t i = 0; i < (m_ * n_); i++) {
-    data_[i] = other.data_[i];
-  }
-}
-
-Matrix& Matrix::operator=(const Matrix& other) {
-  Matrix temp(other);
-  m_ = temp.m_;
-  n_ = temp.n_;
-  std::swap(data_, temp.data_);
-  return *this;
-}
+Matrix::Matrix() : Matrix(0, 0) {}
 
-Matrix::~Matrix() {
-  delete[] data_;
-}
+Matrix::Matrix(int64_t m, int64_t n) : data_(m * n), m_(m), n_(n) {}
 
 void Matrix::zero() {
-  for (int64_t i = 0; i < (m_ * n_); i++) {
-      data_[i] = 0.0;
-  }
+  std::fill(data_.begin(), data_.end(), 0.0);
 }
 
 void Matrix::uniform(real a) {
@@ -73,7 +40,7 @@ real Matrix::dotRow(const Vector& vec, int64_t i) const {
   assert(vec.size() == n_);
   real d = 0.0;
   for (int64_t j = 0; j < n_; j++) {
-    d += at(i, j) * vec.data_[j];
+    d += at(i, j) * vec[j];
   }
   if (std::isnan(d)) {
     throw std::runtime_error("Encountered NaN.");
@@ -86,15 +53,17 @@ void Matrix::addRow(const Vector& vec, int64_t i, real a) {
   assert(i < m_);
   assert(vec.size() == n_);
   for (int64_t j = 0; j < n_; j++) {
-    data_[i * n_ + j] += a * vec.data_[j];
+    data_[i * n_ + j] += a * vec[j];
   }
 }
 
 void Matrix::multiplyRow(const Vector& nums, int64_t ib, int64_t ie) {
-  if (ie == -1) {ie = m_;}
+  if (ie == -1) {
+    ie = m_;
+  }
   assert(ie <= nums.size());
   for (auto i = ib; i < ie; i++) {
-    real n = nums[i-ib];
+    real n = nums[i - ib];
     if (n != 0) {
       for (auto j = 0; j < n_; j++) {
         at(i, j) *= n;
@@ -104,10 +73,12 @@ void Matrix::multiplyRow(const Vector& nums, int64_t ib, int64_t ie) {
 }
 
 void Matrix::divideRow(const Vector& denoms, int64_t ib, int64_t ie) {
-  if (ie == -1) {ie = m_;}
+  if (ie == -1) {
+    ie = m_;
+  }
   assert(ie <= denoms.size());
   for (auto i = ib; i < ie; i++) {
-    real n = denoms[i-ib];
+    real n = denoms[i - ib];
     if (n != 0) {
       for (auto j = 0; j < n_; j++) {
         at(i, j) /= n;
@@ -119,8 +90,7 @@ void Matrix::divideRow(const Vector& denoms, int64_t ib, int64_t ie) {
 real Matrix::l2NormRow(int64_t i) const {
   auto norm = 0.0;
   for (auto j = 0; j < n_; j++) {
-    const real v = at(i,j);
-    norm += v * v;
+    norm += at(i, j) * at(i, j);
   }
   if (std::isnan(norm)) {
     throw std::runtime_error("Encountered NaN.");
@@ -130,23 +100,21 @@ real Matrix::l2NormRow(int64_t i) const {
 
 void Matrix::l2NormRow(Vector& norms) const {
   assert(norms.size() == m_);
-    for (auto i = 0; i < m_; i++) {
-      norms[i] = l2NormRow(i);
-    }
+  for (auto i = 0; i < m_; i++) {
+    norms[i] = l2NormRow(i);
+  }
 }
 
 void Matrix::save(std::ostream& out) {
-  out.write((char*) &m_, sizeof(int64_t));
-  out.write((char*) &n_, sizeof(int64_t));
-  out.write((char*) data_, m_ * n_ * sizeof(real));
+  out.write((char*)&m_, sizeof(int64_t));
+  out.write((char*)&n_, sizeof(int64_t));
+  out.write((char*)data_.data(), m_ * n_ * sizeof(real));
 }
 
 void Matrix::load(std::istream& in) {
-  in.read((char*) &m_, sizeof(int64_t));
-  in.read((char*) &n_, sizeof(int64_t));
-  delete[] data_;
-  data_ = new real[m_ * n_];
-  in.read((char*) data_, m_ * n_ * sizeof(real));
+  in.read((char*)&m_, sizeof(int64_t));
+  in.read((char*)&n_, sizeof(int64_t));
+  data_ = std::vector<real>(m_ * n_);
+  in.read((char*)data_.data(), m_ * n_ * sizeof(real));
 }
-
 }

+ 46 - 23
src/matrix.h

@@ -12,7 +12,9 @@
 #include <cstdint>
 #include <istream>
 #include <ostream>
+#include <vector>
 
+#include <assert.h>
 #include "real.h"
 
 namespace fasttext {
@@ -20,35 +22,56 @@ namespace fasttext {
 class Vector;
 
 class Matrix {
+ protected:
+  std::vector<real> data_;
+  const int64_t m_;
+  const int64_t n_;
 
-  public:
-    real* data_;
-    int64_t m_;
-    int64_t n_;
+ public:
+  Matrix();
+  explicit Matrix(int64_t, int64_t);
+  Matrix(const Matrix&) = default;
+  Matrix& operator=(const Matrix&) = delete;
 
-    Matrix();
-    Matrix(int64_t, int64_t);
-    Matrix(const Matrix&);
-    Matrix& operator=(const Matrix&);
-    ~Matrix();
+  inline real* data() {
+    return data_.data();
+  }
+  inline const real* data() const {
+    return data_.data();
+  }
 
-    inline const real& at(int64_t i, int64_t j) const {return data_[i * n_ + j];};
-    inline real& at(int64_t i, int64_t j) {return data_[i * n_ + j];};
+  inline const real& at(int64_t i, int64_t j) const {
+    return data_[i * n_ + j];
+  };
+  inline real& at(int64_t i, int64_t j) {
+    return data_[i * n_ + j];
+  };
 
+  inline int64_t size(int64_t dim) const {
+    assert(dim == 0 || dim == 1);
+    if (dim == 0) {
+      return m_;
+    }
+    return n_;
+  }
+  inline int64_t rows() const {
+    return m_;
+  }
+  inline int64_t cols() const {
+    return n_;
+  }
+  void zero();
+  void uniform(real);
+  real dotRow(const Vector&, int64_t) const;
+  void addRow(const Vector&, int64_t, real);
 
-    void zero();
-    void uniform(real);
-    real dotRow(const Vector&, int64_t) const;
-    void addRow(const Vector&, int64_t, real);
+  void multiplyRow(const Vector& nums, int64_t ib = 0, int64_t ie = -1);
+  void divideRow(const Vector& denoms, int64_t ib = 0, int64_t ie = -1);
 
-    void multiplyRow(const Vector& nums, int64_t ib = 0, int64_t ie = -1);
-    void divideRow(const Vector& denoms, int64_t ib = 0, int64_t ie = -1);
+  real l2NormRow(int64_t i) const;
+  void l2NormRow(Vector& norms) const;
 
-    real l2NormRow(int64_t i) const;
-    void l2NormRow(Vector& norms) const;
-
-    void save(std::ostream&);
-    void load(std::istream&);
+  void save(std::ostream&);
+  void load(std::istream&);
 };
-
 }

+ 2 - 2
src/model.cc

@@ -26,14 +26,14 @@ Model::Model(
     std::shared_ptr<Args> args,
     int32_t seed)
     : hidden_(args->dim),
-      output_(wo->m_),
+      output_(wo->size(0)),
       grad_(args->dim),
       rng(seed),
       quant_(false) {
   wi_ = wi;
   wo_ = wo;
   args_ = args;
-  osz_ = wo->m_;
+  osz_ = wo->size(0);
   hsz_ = args->dim;
   negpos = 0;
   loss_ = 0.0;

+ 9 - 8
src/productquantizer.cc

@@ -119,12 +119,11 @@ void ProductQuantizer::kmeans(const real *x, real* c, int32_t n, int32_t d) {
   for (auto i = 0; i < ksub_; i++) {
     memcpy (&c[i * d], x + perm[i] * d, d * sizeof(real));
   }
-  uint8_t* codes = new uint8_t[n];
+  auto codes = std::vector<uint8_t>(n);
   for (auto i = 0; i < niter_; i++) {
-    Estep(x, c, codes, d, n);
-    MStep(x, c, codes, d, n);
+    Estep(x, c, codes.data(), d, n);
+    MStep(x, c, codes.data(), d, n);
   }
-  delete [] codes;
 }
 
 void ProductQuantizer::train(int32_t n, const real * x) {
@@ -136,16 +135,18 @@ void ProductQuantizer::train(int32_t n, const real * x) {
   std::iota(perm.begin(), perm.end(), 0);
   auto d = dsub_;
   auto np = std::min(n, max_points_);
-  real* xslice = new real[np * dsub_];
+  auto xslice = std::vector<real>(np * dsub_);
   for (auto m = 0; m < nsubq_; m++) {
     if (m == nsubq_-1) {d = lastdsub_;}
     if (np != n) {std::shuffle(perm.begin(), perm.end(), rng);}
     for (auto j = 0; j < np; j++) {
-      memcpy (xslice + j * d, x + perm[j] * dim_ + m * dsub_, d * sizeof(real));
+      memcpy(
+          xslice.data() + j * d,
+          x + perm[j] * dim_ + m * dsub_,
+          d * sizeof(real));
     }
-    kmeans(xslice, get_centroids(m, 0), np, d);
+    kmeans(xslice.data(), get_centroids(m, 0), np, d);
   }
-  delete [] xslice;
 }
 
 real ProductQuantizer::mulcode(const Vector& x, const uint8_t* codes,

+ 19 - 28
src/qmatrix.cc

@@ -18,47 +18,38 @@ QMatrix::QMatrix() : qnorm_(false),
   m_(0), n_(0), codesize_(0) {}
 
 QMatrix::QMatrix(const Matrix& mat, int32_t dsub, bool qnorm)
-      : qnorm_(qnorm), m_(mat.m_), n_(mat.n_),
+      : qnorm_(qnorm), m_(mat.size(0)), n_(mat.size(1)),
         codesize_(m_ * ((n_ + dsub - 1) / dsub)) {
-  if (codesize_ > 0) {
-    codes_ = new uint8_t[codesize_];
-  }
+  codes_.resize(codesize_);
   pq_ = std::unique_ptr<ProductQuantizer>( new ProductQuantizer(n_, dsub));
   if (qnorm_) {
-    norm_codes_ = new uint8_t[m_];
+    norm_codes_.resize(m_);
     npq_ = std::unique_ptr<ProductQuantizer>( new ProductQuantizer(1, 1));
   }
   quantize(mat);
 }
 
-QMatrix::~QMatrix() {
-  if (codesize_ > 0) {
-    delete[] codes_;
-  }
-  if (qnorm_) { delete[] norm_codes_; }
-}
-
 void QMatrix::quantizeNorm(const Vector& norms) {
   assert(qnorm_);
-  assert(norms.m_ == m_);
-  auto dataptr = norms.data_;
+  assert(norms.size() == m_);
+  auto dataptr = norms.data();
   npq_->train(m_, dataptr);
-  npq_->compute_codes(dataptr, norm_codes_, m_);
+  npq_->compute_codes(dataptr, norm_codes_.data(), m_);
 }
 
 void QMatrix::quantize(const Matrix& matrix) {
-  assert(n_ == matrix.n_);
-  assert(m_ == matrix.m_);
+  assert(m_ == matrix.size(0));
+  assert(n_ == matrix.size(1));
   Matrix temp(matrix);
   if (qnorm_) {
-    Vector norms(temp.m_);
+    Vector norms(temp.size(0));
     temp.l2NormRow(norms);
     temp.divideRow(norms);
     quantizeNorm(norms);
   }
-  auto dataptr = temp.data_;
+  auto dataptr = temp.data();
   pq_->train(m_, dataptr);
-  pq_->compute_codes(dataptr, codes_, m_);
+  pq_->compute_codes(dataptr, codes_.data(), m_);
 }
 
 void QMatrix::addToVector(Vector& x, int32_t t) const {
@@ -66,7 +57,7 @@ void QMatrix::addToVector(Vector& x, int32_t t) const {
   if (qnorm_) {
     norm = npq_->get_centroids(0, norm_codes_[t])[0];
   }
-  pq_->addcode(x, codes_, t, norm);
+  pq_->addcode(x, codes_.data(), t, norm);
 }
 
 real QMatrix::dotRow(const Vector& vec, int64_t i) const {
@@ -77,7 +68,7 @@ real QMatrix::dotRow(const Vector& vec, int64_t i) const {
   if (qnorm_) {
     norm = npq_->get_centroids(0, norm_codes_[i])[0];
   }
-  return pq_->mulcode(vec, codes_, i, norm);
+  return pq_->mulcode(vec, codes_.data(), i, norm);
 }
 
 int64_t QMatrix::getM() const {
@@ -93,10 +84,10 @@ void QMatrix::save(std::ostream& out) {
     out.write((char*) &m_, sizeof(m_));
     out.write((char*) &n_, sizeof(n_));
     out.write((char*) &codesize_, sizeof(codesize_));
-    out.write((char*) codes_, codesize_ * sizeof(uint8_t));
+    out.write((char*) codes_.data(), codesize_ * sizeof(uint8_t));
     pq_->save(out);
     if (qnorm_) {
-      out.write((char*) norm_codes_, m_ * sizeof(uint8_t));
+      out.write((char*) norm_codes_.data(), m_ * sizeof(uint8_t));
       npq_->save(out);
     }
 }
@@ -106,13 +97,13 @@ void QMatrix::load(std::istream& in) {
     in.read((char*) &m_, sizeof(m_));
     in.read((char*) &n_, sizeof(n_));
     in.read((char*) &codesize_, sizeof(codesize_));
-    codes_ = new uint8_t[codesize_];
-    in.read((char*) codes_, codesize_ * sizeof(uint8_t));
+    codes_ = std::vector<uint8_t>(codesize_);
+    in.read((char*) codes_.data(), codesize_ * sizeof(uint8_t));
     pq_ = std::unique_ptr<ProductQuantizer>( new ProductQuantizer());
     pq_->load(in);
     if (qnorm_) {
-      norm_codes_ = new uint8_t[m_];
-      in.read((char*) norm_codes_, m_ * sizeof(uint8_t));
+      norm_codes_ = std::vector<uint8_t>(m_);
+      in.read((char*) norm_codes_.data(), m_ * sizeof(uint8_t));
       npq_ = std::unique_ptr<ProductQuantizer>( new ProductQuantizer());
       npq_->load(in);
     }

+ 2 - 3
src/qmatrix.h

@@ -30,8 +30,8 @@ class QMatrix {
     std::unique_ptr<ProductQuantizer> pq_;
     std::unique_ptr<ProductQuantizer> npq_;
 
-    uint8_t* codes_;
-    uint8_t* norm_codes_;
+    std::vector<uint8_t> codes_;
+    std::vector<uint8_t> norm_codes_;
 
     bool qnorm_;
 
@@ -44,7 +44,6 @@ class QMatrix {
 
     QMatrix();
     QMatrix(const Matrix&, int32_t, bool);
-    ~QMatrix();
 
     int64_t getM() const;
     int64_t getN() const;

+ 23 - 44
src/vector.cc

@@ -19,67 +19,54 @@
 
 namespace fasttext {
 
-Vector::Vector(int64_t m) {
-  m_ = m;
-  data_ = new real[m];
-}
-
-Vector::~Vector() {
-  delete[] data_;
-}
-
-int64_t Vector::size() const {
-  return m_;
-}
+Vector::Vector(int64_t m) : data_(m) {}
 
 void Vector::zero() {
-  for (int64_t i = 0; i < m_; i++) {
-    data_[i] = 0.0;
-  }
+  std::fill(data_.begin(), data_.end(), 0.0);
 }
 
 real Vector::norm() const {
   real sum = 0;
-  for (int64_t i = 0; i < m_; i++) {
+  for (int64_t i = 0; i < size(); i++) {
     sum += data_[i] * data_[i];
   }
   return std::sqrt(sum);
 }
 
 void Vector::mul(real a) {
-  for (int64_t i = 0; i < m_; i++) {
+  for (int64_t i = 0; i < size(); i++) {
     data_[i] *= a;
   }
 }
 
 void Vector::addVector(const Vector& source) {
-  assert(m_ == source.m_);
-  for (int64_t i = 0; i < m_; i++) {
+  assert(size() == source.size());
+  for (int64_t i = 0; i < size(); i++) {
     data_[i] += source.data_[i];
   }
 }
 
 void Vector::addVector(const Vector& source, real s) {
-  assert(m_ == source.m_);
-  for (int64_t i = 0; i < m_; i++) {
+  assert(size() == source.size());
+  for (int64_t i = 0; i < size(); i++) {
     data_[i] += s * source.data_[i];
   }
 }
 
 void Vector::addRow(const Matrix& A, int64_t i) {
   assert(i >= 0);
-  assert(i < A.m_);
-  assert(m_ == A.n_);
-  for (int64_t j = 0; j < A.n_; j++) {
+  assert(i < A.size(0));
+  assert(size() == A.size(1));
+  for (int64_t j = 0; j < A.size(1); j++) {
     data_[j] += A.at(i, j);
   }
 }
 
 void Vector::addRow(const Matrix& A, int64_t i, real a) {
   assert(i >= 0);
-  assert(i < A.m_);
-  assert(m_ == A.n_);
-  for (int64_t j = 0; j < A.n_; j++) {
+  assert(i < A.size(0));
+  assert(size() == A.size(1));
+  for (int64_t j = 0; j < A.size(1); j++) {
     data_[j] += a * A.at(i, j);
   }
 }
@@ -90,17 +77,17 @@ void Vector::addRow(const QMatrix& A, int64_t i) {
 }
 
 void Vector::mul(const Matrix& A, const Vector& vec) {
-  assert(A.m_ == m_);
-  assert(A.n_ == vec.m_);
-  for (int64_t i = 0; i < m_; i++) {
+  assert(A.size(0) == size());
+  assert(A.size(1) == vec.size());
+  for (int64_t i = 0; i < size(); i++) {
     data_[i] = A.dotRow(vec, i);
   }
 }
 
 void Vector::mul(const QMatrix& A, const Vector& vec) {
-  assert(A.getM() == m_);
-  assert(A.getN() == vec.m_);
-  for (int64_t i = 0; i < m_; i++) {
+  assert(A.getM() == size());
+  assert(A.getN() == vec.size());
+  for (int64_t i = 0; i < size(); i++) {
     data_[i] = A.dotRow(vec, i);
   }
 }
@@ -108,7 +95,7 @@ void Vector::mul(const QMatrix& A, const Vector& vec) {
 int64_t Vector::argmax() {
   real max = data_[0];
   int64_t argmax = 0;
-  for (int64_t i = 1; i < m_; i++) {
+  for (int64_t i = 1; i < size(); i++) {
     if (data_[i] > max) {
       max = data_[i];
       argmax = i;
@@ -117,19 +104,11 @@ int64_t Vector::argmax() {
   return argmax;
 }
 
-real& Vector::operator[](int64_t i) {
-  return data_[i];
-}
-
-const real& Vector::operator[](int64_t i) const {
-  return data_[i];
-}
-
 std::ostream& operator<<(std::ostream& os, const Vector& v)
 {
   os << std::setprecision(5);
-  for (int64_t j = 0; j < v.m_; j++) {
-    os << v.data_[j] << ' ';
+  for (int64_t j = 0; j < v.size(); j++) {
+    os << v[j] << ' ';
   }
   return os;
 }

+ 21 - 7
src/vector.h

@@ -11,6 +11,7 @@
 
 #include <cstdint>
 #include <ostream>
+#include <vector>
 
 #include "real.h"
 
@@ -21,17 +22,30 @@ class QMatrix;
 
 class Vector {
 
-  public:
-    int64_t m_;
-    real* data_;
+  protected:
+    std::vector<real> data_;
 
+  public:
     explicit Vector(int64_t);
-    ~Vector();
+    Vector(const Vector&) = delete;
+    Vector& operator=(const Vector&) = delete;
 
-    real& operator[](int64_t);
-    const real& operator[](int64_t) const;
+    inline real* data() {
+      return data_.data();
+    }
+    inline const real* data() const {
+      return data_.data();
+    }
+    inline real& operator[](int64_t i) {
+      return data_[i];
+    }
+    inline const real& operator[](int64_t i) const {
+      return data_[i];
+    }
 
-    int64_t size() const;
+    inline int64_t size() const {
+      return data_.size();
+    }
     void zero();
     void mul(real);
     real norm() const;