diff --git a/command/server.go b/command/server.go
index 90ff52b7990a..a9a82ad2633a 100644
--- a/command/server.go
+++ b/command/server.go
@@ -75,12 +75,15 @@ func (c *ServerCommand) Run(args []string) int {
 	var dev, verifyOnly, devHA, devTransactional, devLeasedKV, devThreeNode bool
 	var configPath []string
 	var logLevel, devRootTokenID, devListenAddress, devPluginDir string
+	var devLatency, devLatencyJitter int
 	flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
 	flags.BoolVar(&dev, "dev", false, "")
 	flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "")
 	flags.StringVar(&devListenAddress, "dev-listen-address", "", "")
 	flags.StringVar(&devPluginDir, "dev-plugin-dir", "", "")
 	flags.StringVar(&logLevel, "log-level", "info", "")
+	flags.IntVar(&devLatency, "dev-latency", 0, "")
+	flags.IntVar(&devLatencyJitter, "dev-latency-jitter", 20, "")
 	flags.BoolVar(&verifyOnly, "verify-only", false, "")
 	flags.BoolVar(&devHA, "dev-ha", false, "")
 	flags.BoolVar(&devTransactional, "dev-transactional", false, "")
@@ -266,7 +269,14 @@ func (c *ServerCommand) Run(args []string) int {
 		if devPluginDir != "" {
 			coreConfig.PluginDirectory = devPluginDir
 		}
-
+		if devLatency > 0 {
+			injectLatency := time.Duration(devLatency) * time.Millisecond
+			if _, txnOK := backend.(physical.Transactional); txnOK {
+				coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger)
+			} else {
+				coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger)
+			}
+		}
 	}
 
 	if devThreeNode {
diff --git a/physical/latency.go b/physical/latency.go
new file mode 100644
index 000000000000..3253036da05e
--- /dev/null
+++ b/physical/latency.go
@@ -0,0 +1,90 @@
+package physical
+
+import (
+	"math/rand"
+	"time"
+
+	log "github.com/mgutz/logxi/v1"
+)
+
+const (
+	// DefaultJitterPercent is used when NewLatencyInjector is given a jitter percent outside the 0-100 range
+	DefaultJitterPercent = 20
+)
+
+// LatencyInjector is used to add latency into underlying physical requests
+type LatencyInjector struct {
+	backend       Backend
+	latency       time.Duration
+	jitterPercent int
+	random        *rand.Rand
+}
+
+// TransactionalLatencyInjector is the transactional version of the latency
+// injector
+type TransactionalLatencyInjector struct {
+	*LatencyInjector
+	Transactional
+}
+
+// NewLatencyInjector returns a wrapped physical backend to simulate latency
+func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector {
+	if jitter < 0 || jitter > 100 {
+		jitter = DefaultJitterPercent
+	}
+	logger.Info("physical/latency: creating latency injector")
+
+	return &LatencyInjector{
+		backend:       b,
+		latency:       latency,
+		jitterPercent: jitter,
+		random:        rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+	}
+}
+
+// NewTransactionalLatencyInjector creates a new transactional LatencyInjector
+func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
+	return &TransactionalLatencyInjector{
+		LatencyInjector: NewLatencyInjector(b, latency, jitter, logger),
+		Transactional:   b.(Transactional),
+	}
+}
+
+func (l *LatencyInjector) addLatency() {
+	// Pick a random percentage between (100 - jitterPercent) and (100 + jitterPercent)
+	min := 100 - l.jitterPercent
+	max := 100 + l.jitterPercent
+	percent := l.random.Intn(max-min) + min
+	latencyDuration := time.Duration(int(l.latency) * percent / 100)
+	time.Sleep(latencyDuration)
+}
+
+// Put is a latent put request
+func (l *LatencyInjector) Put(entry *Entry) error {
+	l.addLatency()
+	return l.backend.Put(entry)
+}
+
+// Get is a latent get request
+func (l *LatencyInjector) Get(key string) (*Entry, error) {
+	l.addLatency()
+	return l.backend.Get(key)
+}
+
+// Delete is a latent delete request
+func (l *LatencyInjector) Delete(key string) error {
+	l.addLatency()
+	return l.backend.Delete(key)
+}
+
+// List is a latent list request
+func (l *LatencyInjector) List(prefix string) ([]string, error) {
+	l.addLatency()
+	return l.backend.List(prefix)
+}
+
+// Transaction is a latent transaction request
+func (l *TransactionalLatencyInjector) Transaction(txns []TxnEntry) error {
+	l.addLatency()
+	return l.Transactional.Transaction(txns)
+}
diff --git a/vault/expiration.go b/vault/expiration.go
index d7b00af3914b..628df8e973b0 100644
--- a/vault/expiration.go
+++ b/vault/expiration.go
@@ -60,10 +60,9 @@ type ExpirationManager struct {
 	pending     map[string]*time.Timer
 	pendingLock sync.RWMutex
 
-	tidyLock int64
+	tidyLock int32
 
-	// A set of locks to handle restoration
-	restoreMode        int64
+	restoreMode        int32
 	restoreModeLock    sync.RWMutex
 	restoreRequestLock sync.RWMutex
 	restoreLocks       []*locksutil.LockEntry
@@ -76,7 +75,6 @@
 func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, logger log.Logger) *ExpirationManager {
 	if logger == nil {
 		logger = log.New("expiration_manager")
-
 	}
 
 	exp := &ExpirationManager{
@@ -119,7 +117,7 @@ func (c *Core) setupExpiration() error {
 			c.logger.Error("expiration: error shutting down core: %v", err)
 		}
 	}
-	go c.expiration.Restore(errorFunc, 0)
+	go c.expiration.Restore(errorFunc)
 
 	return nil
 }
@@ -150,7 +148,7 @@ func (m *ExpirationManager) unlockLease(leaseID string) {
 
 // inRestoreMode returns if we are currently in restore mode
 func (m *ExpirationManager) inRestoreMode() bool {
-	return atomic.LoadInt64(&m.restoreMode) == 1
+	return atomic.LoadInt32(&m.restoreMode) == 1
 }
 
 // Tidy cleans up the dangling storage entries for leases. It scans the storage
@@ -166,12 +164,12 @@ func (m *ExpirationManager) Tidy() error {
 
 	var tidyErrors *multierror.Error
 
-	if !atomic.CompareAndSwapInt64(&m.tidyLock, 0, 1) {
+	if !atomic.CompareAndSwapInt32(&m.tidyLock, 0, 1) {
 		m.logger.Warn("expiration: tidy operation on leases is already in progress")
 		return fmt.Errorf("tidy operation on leases is already in progress")
 	}
 
-	defer atomic.CompareAndSwapInt64(&m.tidyLock, 1, 0)
+	defer atomic.CompareAndSwapInt32(&m.tidyLock, 1, 0)
 
 	m.logger.Info("expiration: beginning tidy operation on leases")
 	defer m.logger.Info("expiration: finished tidy operation on leases")
@@ -270,13 +268,13 @@ func (m *ExpirationManager) Tidy() error {
 
 // Restore is used to recover the lease states when starting.
 // This is used after starting the vault.
-func (m *ExpirationManager) Restore(errorFunc func(), loadDelay time.Duration) (retErr error) {
+func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) {
 	defer func() {
 		// Turn off restore mode. We can do this safely without the lock because
 		// if restore mode finished successfully, restore mode was already
 		// disabled with the lock. In an error state, this will allow the
 		// Stop() function to shut everything down.
-		atomic.StoreInt64(&m.restoreMode, 0)
+		atomic.StoreInt32(&m.restoreMode, 0)
 
 		switch {
 		case retErr == nil:
@@ -324,7 +322,7 @@ func (m *ExpirationManager) Restore(errorFunc func(), loadDelay time.Duration) (
 				return
 			}
 
-			err := m.processRestore(leaseID, loadDelay)
+			err := m.processRestore(leaseID)
 			if err != nil {
 				errs <- err
 				continue
@@ -391,7 +389,7 @@ func (m *ExpirationManager) Restore(errorFunc func(), loadDelay time.Duration) (
 	m.restoreModeLock.Lock()
 	m.restoreLoaded = sync.Map{}
 	m.restoreLocks = nil
-	atomic.StoreInt64(&m.restoreMode, 0)
+	atomic.StoreInt32(&m.restoreMode, 0)
 	m.restoreModeLock.Unlock()
 
 	m.logger.Info("expiration: lease restore complete")
@@ -400,7 +398,7 @@ func (m *ExpirationManager) Restore(errorFunc func(), loadDelay time.Duration) (
 
 // processRestore takes a lease and restores it in the expiration manager if it has
 // not already been seen
-func (m *ExpirationManager) processRestore(leaseID string, loadDelay time.Duration) error {
+func (m *ExpirationManager) processRestore(leaseID string) error {
 	m.restoreRequestLock.RLock()
 	defer m.restoreRequestLock.RUnlock()
 
@@ -417,11 +415,6 @@ func (m *ExpirationManager) processRestore(leaseID string, loadDelay time.Durati
 		return nil
 	}
 
-	// Useful for testing to add latency to all load requests
-	if loadDelay > 0 {
-		time.Sleep(loadDelay)
-	}
-
 	// Load lease and restore expiration timer
 	_, err := m.loadEntryInternal(leaseID, true, false)
 	if err != nil {
diff --git a/vault/expiration_test.go b/vault/expiration_test.go
index 4df5fb42699f..144bd16b045f 100644
--- a/vault/expiration_test.go
+++ b/vault/expiration_test.go
@@ -37,7 +37,7 @@ func TestExpiration_Tidy(t *testing.T) {
 	var err error
 
 	exp := mockExpiration(t)
-	if err := exp.Restore(nil, 0); err != nil {
+	if err := exp.Restore(nil); err != nil {
 		t.Fatal(err)
 	}
 
@@ -341,7 +341,7 @@ func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend,
 
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		err = exp.Restore(nil, 0)
+		err = exp.Restore(nil)
 		// Restore
 		if err != nil {
 			b.Fatalf("err: %v", err)
@@ -399,7 +399,7 @@ func TestExpiration_Restore(t *testing.T) {
 	}
 
 	// Restore
-	err = exp.Restore(nil, 0)
+	err = exp.Restore(nil)
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
diff --git a/website/source/api/secret/databases/mysql-maria.html.md b/website/source/api/secret/databases/mysql-maria.html.md
index 7ee78fcb87c5..0a64ab48b4e3 100644
--- a/website/source/api/secret/databases/mysql-maria.html.md
+++ b/website/source/api/secret/databases/mysql-maria.html.md
@@ -42,7 +42,7 @@ has a number of parameters to further configure a connection.
 {
   "plugin_name": "mysql-database-plugin",
   "allowed_roles": "readonly",
-  "connection_url": "root:mysql@tcp(127.0.0.1:3306)/"
+  "connection_url": "root:mysql@tcp(127.0.0.1:3306)/",
   "max_open_connections": 5,
   "max_connection_lifetime": "5s",
 }
diff --git a/website/source/intro/vs/hsm.html.md b/website/source/intro/vs/hsm.html.md
index d203690cbbab..34476e5784cb 100644
--- a/website/source/intro/vs/hsm.html.md
+++ b/website/source/intro/vs/hsm.html.md
@@ -14,8 +14,9 @@ device that is meant to secure various secrets using protections against
 access and tampering at both the software and hardware layers.
 
 The primary issue with HSMs is that they are expensive and not very cloud
-friendly. Amazon provides CloudHSM, but the minimum price point to even begin
-using CloudHSM is in the thousands of US dollars.
+friendly. An exception to the latter is Amazon's CloudHSM service, which is
+friendly for AWS users but still costs more than $14k per year per instance,
+and is not as useful for heterogeneous cloud architectures.
 
 Once an HSM is up and running, configuring it is generally very tedious, and
 the API to request secrets is also difficult to use. Example: CloudHSM requires
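
For reference, with the changes above latency injection can be switched on in dev mode via the new flags, for example: vault server -dev -dev-latency=50 -dev-latency-jitter=20. The sketch below is illustrative only and not part of the change; it assumes the era's physical.Backend interface (Put/Get/Delete/List), a caller-supplied backend and logger, and a hypothetical wrapper function name. It mirrors the wiring added to command/server.go: wrap the backend, preserving transaction support when the underlying backend offers it.

package latencyexample // hypothetical package, for illustration only

import (
	"time"

	log "github.com/mgutz/logxi/v1"

	"github.com/hashicorp/vault/physical"
)

// wrapWithLatency wraps an existing physical backend so every Put/Get/Delete/List
// (and Transaction, when supported) sleeps for roughly latencyMs milliseconds,
// varied by +/- jitterPercent, before reaching storage.
func wrapWithLatency(backend physical.Backend, latencyMs, jitterPercent int, logger log.Logger) physical.Backend {
	latency := time.Duration(latencyMs) * time.Millisecond
	if _, ok := backend.(physical.Transactional); ok {
		// Keep transaction support if the underlying backend offers it.
		return physical.NewTransactionalLatencyInjector(backend, latency, jitterPercent, logger)
	}
	return physical.NewLatencyInjector(backend, latency, jitterPercent, logger)
}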