diff --git a/examples/login/README.md b/examples/login/README.md index c7a16b3..40f40e9 100644 --- a/examples/login/README.md +++ b/examples/login/README.md @@ -12,16 +12,16 @@ Packets are tiny, so it is great for benchmarking the request latency. pip install uvicorn fastapi websocket-client requests tqdm fire cd examples && uvicorn sum.fastapi_server:app --log-level critical & cd .. -python examples/bench.py "sum.fastapi_client.ClientREST" --progress -python examples/bench.py "sum.fastapi_client.ClientWebSocket" --progress +python examples/bench.py "fastapi_client.ClientREST" --progress +python examples/bench.py "fastapi_client.ClientWebSocket" --progress kill %% ``` Want to dispatch more clients and aggregate statistics? ```sh -python examples/bench.py "sum.fastapi_client.ClientREST" --threads 8 -python examples/bench.py "sum.fastapi_client.ClientWebSocket" --threads 8 +python examples/bench.py "fastapi_client.ClientREST" --threads 8 +python examples/bench.py "fastapi_client.ClientWebSocket" --threads 8 ``` ### UCall @@ -34,9 +34,9 @@ sudo apt-get install cmake g++ build-essential cmake -DCMAKE_BUILD_TYPE=Release -B ./build_release && make -C ./build_release ./build_release/build/bin/ucall_example_sum_posix & ./build_release/build/bin/ucall_example_sum_uring & -python examples/bench.py "sum.jsonrpc_client.ClientTCP" --progress -python examples/bench.py "sum.jsonrpc_client.ClientHTTP" --progress -python examples/bench.py "sum.jsonrpc_client.ClientHTTPBatches" --progress +python examples/bench.py "jsonrpc_client.CaseTCP" --progress +python examples/bench.py "jsonrpc_client.CaseHTTP" --progress +python examples/bench.py "jsonrpc_client.CaseHTTPBatches" --progress kill %% ``` @@ -49,31 +49,39 @@ Want to customize server settings? Want to dispatch more clients and aggregate more accurate statistics? 
```sh -python examples/bench.py "sum.jsonrpc_client.ClientTCP" --threads 32 --seconds 100 -python examples/bench.py "sum.jsonrpc_client.ClientHTTP" --threads 32 --seconds 100 -python examples/bench.py "sum.jsonrpc_client.ClientHTTPBatches" --threads 32 --seconds 100 +python examples/bench.py "jsonrpc_client.CaseTCP" --threads 32 --seconds 100 +python examples/bench.py "jsonrpc_client.CaseHTTP" --threads 32 --seconds 100 +python examples/bench.py "jsonrpc_client.CaseHTTPBatches" --threads 32 --seconds 100 ``` A lot has been said about the speed of Python code ~~or the lack of~~. -To get more accurate numbers for mean request latency, you can use the GoLang version: +To get more accurate numbers for mean request latency, you can use `wrk` for HTTP requests: ```sh -go run ./examples/sum/ucall_client.go +wrk -t1 -c32 -d2s http://localhost:8545/ -s json.lua +wrk -t1 -c32 -d2s http://localhost:8545/ -s json16.lua +``` + +Or the GoLang version for JSON-RPC requests: + +```sh +go run ./examples/login/jsonrpc_client.go -h +go run ./examples/login/jsonrpc_client.go -b 100 ``` Or push it even further dispatching dozens of processes with GNU `parallel` utility: ```sh sudo apt install parallel -parallel go run ./examples/sum/ucall_client.go run ::: {1..32} +parallel go run ./examples/login/jsonrpc_client.go run ::: {1..32} ``` ### gRPC Results ```sh pip install grpcio grpcio-tools -python ./examples/sum/grpc_server.py & -python examples/bench.py "sum.grpc_client.gRPCClient" --progress -python examples/bench.py "sum.grpc_client.gRPCClient" --threads 32 +python ./examples/login/grpc_server.py & +python examples/bench.py "grpc_client.ValidateClient" --progress +python examples/bench.py "grpc_client.ValidateClient" --threads 32 kill %% ``` diff --git a/examples/login/json.lua b/examples/login/json.lua new file mode 100755 index 0000000..8c18642 --- /dev/null +++ b/examples/login/json.lua @@ -0,0 +1,13 @@ +-- example script demonstrating HTTP pipelining + +init = function(args) + local
r = {} + wrk.headers["Content-Type"] = "application/json" + --wrk.headers["Connection"] = "Keep-Alive" + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + req = table.concat(r) +end + +request = function() + return req +end diff --git a/examples/login/json16.lua b/examples/login/json16.lua new file mode 100755 index 0000000..45fb27e --- /dev/null +++ b/examples/login/json16.lua @@ -0,0 +1,28 @@ +-- example script demonstrating HTTP pipelining + +init = function(args) + local r = {} + wrk.headers["Content-Type"] = "application/json" + --wrk.headers["Connection"] = "Keep-Alive" + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, 
wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + table.insert(r, wrk.format('POST','/', nil, '{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":55,"session_id":21},"id":0}')) + req = table.concat(r) +end + +request = function() + return req +end diff --git a/examples/login/jsonrpc_client.go b/examples/login/jsonrpc_client.go index 4ce894d..7657e13 100644 --- a/examples/login/jsonrpc_client.go +++ b/examples/login/jsonrpc_client.go @@ -7,26 +7,30 @@ import ( "math/rand" "net" "os" - "strconv" "time" + "bytes" + "flag" +) + +var( + limitSeconds int + limitTransmits int + port int + batch int + html bool + req string ) func main() { - limitTransmits := 1_000_000_000 - limitSeconds := 100 - argsWithoutProg := os.Args[1:] - if len(argsWithoutProg) == 2 { - if argsWithoutProg[0] == "time" { - s, _ := strconv.ParseInt(argsWithoutProg[1], 10, 0) - limitSeconds = int(s) - } else if argsWithoutProg[0] == "cycles" { - s, _ := strconv.ParseInt(argsWithoutProg[1], 10, 0) - limitTransmits = int(s) - } - } + flag.IntVar(&port, "p", 8545, "port") + flag.IntVar(&limitSeconds, "s", 2, "Stop after n seconds") + flag.IntVar(&limitTransmits, "n", 1_000_000, "Stop after n requests") + 
flag.IntVar(&batch, "b", 0, "Batch n requests together") +	flag.BoolVar(&html, "html", false, "Send an HTTP request instead of raw JSON-RPC") +	flag.Parse() -	servAddr := "localhost:8545" +	servAddr := fmt.Sprintf("localhost:%d", port) 	tcpAddr, err := net.ResolveTCPAddr("tcp", servAddr) 	if err != nil { 		println("ResolveTCPAddr failed:", err.Error()) @@ -38,6 +42,28 @@ func main() { 	restarts := 0 	transmits := 0 + 	var buffer bytes.Buffer + 	if batch > 0 { 		for i := 0; i < batch; i++ { 			a := rand.Intn(1000) 			b := rand.Intn(1000) 			buffer.WriteString(fmt.Sprintf(`{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":%d,"session_id":%d},"id":0}`, a, b)) 		} 	} + 	a := rand.Intn(1000) 	b := rand.Intn(1000) + 	if html { 		jRPC := fmt.Sprintf(`{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":%d,"session_id":%d},"id":0}`, a, b) 		req = fmt.Sprintf("POST / HTTP/1.1\r\nHost: localhost:%d\r\nUser-Agent: python-requests/2.31.0\r\nAccept-Encoding: gzip, deflate\r\nAccept: */*\r\nConnection: keep-alive\r\nContent-Length: %d\r\nContent-Type: application/json\r\n\r\n%s", port, len(jRPC), jRPC) 		_ = req 	} else { 		req = fmt.Sprintf(`{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":%d,"session_id":%d},"id":0}`, a, b) 		_ = req 	} + 	for { 		conn, err := net.DialTCP("tcp", nil, tcpAddr) 		if err != nil { @@ -46,11 +72,14 @@ 		} 		for { - 			a := rand.Intn(1000) - 			b := rand.Intn(1000) - 			jsonRPC := fmt.Sprintf(`{"jsonrpc":"2.0","method":"validate_session","params":{"user_id":%d,"session_id":%d},"id":0}`, a, b) - 			_, err = conn.Write([]byte(jsonRPC)) + + 			if batch > 0 { + 				_, err = conn.Write(buffer.Bytes()) + 			} else { + 				_, err = conn.Write([]byte(req)) + 			} 			if err != nil { + 				//fmt.Printf("Write Error: %v\n", err) 				break 			} @@ -63,7 +92,7 @@ 			} 			transmits++ 		} 		conn.Close() 		if transmits >= limitTransmits || time.Since(start).Seconds() >= float64(limitSeconds) { 			break @@ -74,9 +103,14 @@ 	elapsed := time.Since(start)
latency := float64(elapsed.Microseconds()) / float64(transmits) speed := float64(transmits) / float64(elapsed.Seconds()) - fmt.Printf("Took %s to perform %d queries\n", elapsed, transmits) + if batch > 0 { + speed *= float64(batch) + fmt.Printf("Took %s to perform %d queries with %d cmds per query\n", elapsed, transmits, batch) + } else { + fmt.Printf("Took %s to perform %d queries\n", elapsed, transmits) + } fmt.Printf("Mean latency is %.1f microsecond\n", latency) - fmt.Printf("Resulting in %.1f requests/second\n", speed) + fmt.Printf("Resulting in %.1f commands/second\n", speed) fmt.Printf("Recreating %d TCP connections\n", restarts) }