Skip to content

Commit

Permalink
Merge remote-tracking branch 'oss/master' into generic-to-kv
Browse files Browse the repository at this point in the history
* oss/master:
  Adding latency injector option to -dev mode for storage operations (#3289)
  fix: add missing comma to payload (#3308)
  Switching atomic vars to int32 to address panic on some architectures (#3314)
  Update AWS CloudHSM comparison. (#3311)
  • Loading branch information
Chris Hoffman committed Sep 11, 2017
2 parents d4df8cb + 09f2725 commit 9686e5d
Show file tree
Hide file tree
Showing 6 changed files with 119 additions and 25 deletions.
12 changes: 11 additions & 1 deletion command/server.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,12 +75,15 @@ func (c *ServerCommand) Run(args []string) int {
var dev, verifyOnly, devHA, devTransactional, devLeasedKV, devThreeNode bool
var configPath []string
var logLevel, devRootTokenID, devListenAddress, devPluginDir string
var devLatency, devLatencyJitter int
flags := c.Meta.FlagSet("server", meta.FlagSetDefault)
flags.BoolVar(&dev, "dev", false, "")
flags.StringVar(&devRootTokenID, "dev-root-token-id", "", "")
flags.StringVar(&devListenAddress, "dev-listen-address", "", "")
flags.StringVar(&devPluginDir, "dev-plugin-dir", "", "")
flags.StringVar(&logLevel, "log-level", "info", "")
flags.IntVar(&devLatency, "dev-latency", 0, "")
flags.IntVar(&devLatencyJitter, "dev-latency-jitter", 20, "")
flags.BoolVar(&verifyOnly, "verify-only", false, "")
flags.BoolVar(&devHA, "dev-ha", false, "")
flags.BoolVar(&devTransactional, "dev-transactional", false, "")
Expand Down Expand Up @@ -266,7 +269,14 @@ func (c *ServerCommand) Run(args []string) int {
if devPluginDir != "" {
coreConfig.PluginDirectory = devPluginDir
}

if devLatency > 0 {
injectLatency := time.Duration(devLatency) * time.Millisecond
if _, txnOK := backend.(physical.Transactional); txnOK {
coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger)
} else {
coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, devLatencyJitter, c.logger)
}
}
}

if devThreeNode {
Expand Down
90 changes: 90 additions & 0 deletions physical/latency.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
package physical

import (
"math/rand"
"time"

log "github.com/mgutz/logxi/v1"
)

const (
	// DefaultJitterPercent is the jitter percentage applied when an
	// out-of-range value (below 0 or above 100) is passed to
	// NewLatencyInjector.
	DefaultJitterPercent = 20
)

// LatencyInjector is used to add latency into underlying physical requests
type LatencyInjector struct {
	backend       Backend       // wrapped physical backend that requests are forwarded to
	latency       time.Duration // base delay added before each request
	jitterPercent int           // jitter applied to latency, as a percentage (0-100)
	random        *rand.Rand    // jitter source; NOTE(review): *rand.Rand is not safe for concurrent use — confirm callers serialize operations
}

// TransactionalLatencyInjector is the transactional version of the latency
// injector
type TransactionalLatencyInjector struct {
	*LatencyInjector // provides the latent Put/Get/Delete/List operations
	Transactional    // underlying transactional backend, wrapped by Transaction below
}

// NewLatencyInjector returns a wrapped physical backend to simulate latency
func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector {
if jitter < 0 || jitter > 100 {
jitter = DefaultJitterPercent
}
logger.Info("physical/latency: creating latency injector")

return &LatencyInjector{
backend: b,
latency: latency,
jitterPercent: jitter,
random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
}
}

// NewTransactionalLatencyInjector creates a new transactional LatencyInjector.
// b must also implement Transactional; the type assertion below panics
// otherwise.
func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
	inner := NewLatencyInjector(b, latency, jitter, logger)
	return &TransactionalLatencyInjector{
		LatencyInjector: inner,
		Transactional:   b.(Transactional),
	}
}

// addLatency sleeps for the configured base latency, randomly scaled up or
// down by at most jitterPercent percent of itself.
func (l *LatencyInjector) addLatency() {
	// Calculate a scaling factor between (100-jitter)% and (100+jitter)%.
	// Skip the random draw entirely when jitter is zero: rand.Intn panics
	// when its argument is <= 0, so the previous code crashed for a jitter
	// percentage of 0 (e.g. -dev-latency-jitter=0).
	percent := 100
	if l.jitterPercent > 0 {
		min := 100 - l.jitterPercent
		max := 100 + l.jitterPercent
		percent = l.random.Intn(max-min) + min
	}
	// Multiply in the time.Duration (int64) domain; the previous conversion
	// through int truncated the duration on 32-bit platforms.
	// NOTE(review): l.random is not goroutine-safe — confirm backend
	// operations are not issued concurrently, or guard this with a mutex.
	time.Sleep(l.latency * time.Duration(percent) / 100)
}

// Put is a latent put request
func (l *LatencyInjector) Put(entry *Entry) error {
l.addLatency()
return l.backend.Put(entry)
}

// Get is a latent get request
func (l *LatencyInjector) Get(key string) (*Entry, error) {
l.addLatency()
return l.backend.Get(key)
}

// Delete is a latent delete request
func (l *LatencyInjector) Delete(key string) error {
l.addLatency()
return l.backend.Delete(key)
}

// List is a latent list request
func (l *LatencyInjector) List(prefix string) ([]string, error) {
l.addLatency()
return l.backend.List(prefix)
}

// Transaction is a latent transaction request
func (l *TransactionalLatencyInjector) Transaction(txns []TxnEntry) error {
l.addLatency()
return l.Transactional.Transaction(txns)
}
29 changes: 11 additions & 18 deletions vault/expiration.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,10 +60,9 @@ type ExpirationManager struct {
pending map[string]*time.Timer
pendingLock sync.RWMutex

tidyLock int64
tidyLock int32

// A set of locks to handle restoration
restoreMode int64
restoreMode int32
restoreModeLock sync.RWMutex
restoreRequestLock sync.RWMutex
restoreLocks []*locksutil.LockEntry
Expand All @@ -76,7 +75,6 @@ type ExpirationManager struct {
func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, logger log.Logger) *ExpirationManager {
if logger == nil {
logger = log.New("expiration_manager")

}

exp := &ExpirationManager{
Expand Down Expand Up @@ -119,7 +117,7 @@ func (c *Core) setupExpiration() error {
c.logger.Error("expiration: error shutting down core: %v", err)
}
}
go c.expiration.Restore(errorFunc, 0)
go c.expiration.Restore(errorFunc)

return nil
}
Expand Down Expand Up @@ -150,7 +148,7 @@ func (m *ExpirationManager) unlockLease(leaseID string) {

// inRestoreMode returns if we are currently in restore mode
func (m *ExpirationManager) inRestoreMode() bool {
return atomic.LoadInt64(&m.restoreMode) == 1
return atomic.LoadInt32(&m.restoreMode) == 1
}

// Tidy cleans up the dangling storage entries for leases. It scans the storage
Expand All @@ -166,12 +164,12 @@ func (m *ExpirationManager) Tidy() error {

var tidyErrors *multierror.Error

if !atomic.CompareAndSwapInt64(&m.tidyLock, 0, 1) {
if !atomic.CompareAndSwapInt32(&m.tidyLock, 0, 1) {
m.logger.Warn("expiration: tidy operation on leases is already in progress")
return fmt.Errorf("tidy operation on leases is already in progress")
}

defer atomic.CompareAndSwapInt64(&m.tidyLock, 1, 0)
defer atomic.CompareAndSwapInt32(&m.tidyLock, 1, 0)

m.logger.Info("expiration: beginning tidy operation on leases")
defer m.logger.Info("expiration: finished tidy operation on leases")
Expand Down Expand Up @@ -270,13 +268,13 @@ func (m *ExpirationManager) Tidy() error {

// Restore is used to recover the lease states when starting.
// This is used after starting the vault.
func (m *ExpirationManager) Restore(errorFunc func(), loadDelay time.Duration) (retErr error) {
func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) {
defer func() {
// Turn off restore mode. We can do this safely without the lock because
// if restore mode finished successfully, restore mode was already
// disabled with the lock. In an error state, this will allow the
// Stop() function to shut everything down.
atomic.StoreInt64(&m.restoreMode, 0)
atomic.StoreInt32(&m.restoreMode, 0)

switch {
case retErr == nil:
Expand Down Expand Up @@ -324,7 +322,7 @@ func (m *ExpirationManager) Restore(errorFunc func(), loadDelay time.Duration) (
return
}

err := m.processRestore(leaseID, loadDelay)
err := m.processRestore(leaseID)
if err != nil {
errs <- err
continue
Expand Down Expand Up @@ -391,7 +389,7 @@ func (m *ExpirationManager) Restore(errorFunc func(), loadDelay time.Duration) (
m.restoreModeLock.Lock()
m.restoreLoaded = sync.Map{}
m.restoreLocks = nil
atomic.StoreInt64(&m.restoreMode, 0)
atomic.StoreInt32(&m.restoreMode, 0)
m.restoreModeLock.Unlock()

m.logger.Info("expiration: lease restore complete")
Expand All @@ -400,7 +398,7 @@ func (m *ExpirationManager) Restore(errorFunc func(), loadDelay time.Duration) (

// processRestore takes a lease and restores it in the expiration manager if it has
// not already been seen
func (m *ExpirationManager) processRestore(leaseID string, loadDelay time.Duration) error {
func (m *ExpirationManager) processRestore(leaseID string) error {
m.restoreRequestLock.RLock()
defer m.restoreRequestLock.RUnlock()

Expand All @@ -417,11 +415,6 @@ func (m *ExpirationManager) processRestore(leaseID string, loadDelay time.Durati
return nil
}

// Useful for testing to add latency to all load requests
if loadDelay > 0 {
time.Sleep(loadDelay)
}

// Load lease and restore expiration timer
_, err := m.loadEntryInternal(leaseID, true, false)
if err != nil {
Expand Down
6 changes: 3 additions & 3 deletions vault/expiration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ func TestExpiration_Tidy(t *testing.T) {
var err error

exp := mockExpiration(t)
if err := exp.Restore(nil, 0); err != nil {
if err := exp.Restore(nil); err != nil {
t.Fatal(err)
}

Expand Down Expand Up @@ -341,7 +341,7 @@ func benchmarkExpirationBackend(b *testing.B, physicalBackend physical.Backend,

b.ResetTimer()
for i := 0; i < b.N; i++ {
err = exp.Restore(nil, 0)
err = exp.Restore(nil)
// Restore
if err != nil {
b.Fatalf("err: %v", err)
Expand Down Expand Up @@ -399,7 +399,7 @@ func TestExpiration_Restore(t *testing.T) {
}

// Restore
err = exp.Restore(nil, 0)
err = exp.Restore(nil)
if err != nil {
t.Fatalf("err: %v", err)
}
Expand Down
2 changes: 1 addition & 1 deletion website/source/api/secret/databases/mysql-maria.html.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ has a number of parameters to further configure a connection.
{
"plugin_name": "mysql-database-plugin",
"allowed_roles": "readonly",
"connection_url": "root:mysql@tcp(127.0.0.1:3306)/"
"connection_url": "root:mysql@tcp(127.0.0.1:3306)/",
"max_open_connections": 5,
"max_connection_lifetime": "5s",
}
Expand Down
5 changes: 3 additions & 2 deletions website/source/intro/vs/hsm.html.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,9 @@ device that is meant to secure various secrets using protections against access
and tampering at both the software and hardware layers.

The primary issue with HSMs is that they are expensive and not very cloud
friendly. Amazon provides CloudHSM, but the minimum price point to even begin
using CloudHSM is in the thousands of US dollars.
friendly. An exception to the latter is Amazon's CloudHSM service, which is
friendly for AWS users but still costs more than $14k per year per instance,
and is not as useful for heterogeneous cloud architectures.

Once an HSM is up and running, configuring it is generally very tedious, and
the API to request secrets is also difficult to use. Example: CloudHSM requires
Expand Down

0 comments on commit 9686e5d

Please sign in to comment.