Skip to content

Commit e6cf1ec

Browse files
author
Junchao Lyu
committed
add integration tests for local cache
1 parent 974d35c commit e6cf1ec

5 files changed

Lines changed: 160 additions & 96 deletions

File tree

test/integration/integration_test.go

Lines changed: 130 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -41,29 +41,47 @@ func newDescriptorStatusLegacy(
4141
}
4242
}
4343

44+
4445
func TestBasicConfig(t *testing.T) {
45-
t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false"))
46-
t.Run("WithPerSecondRedis", testBasicConfig("8085", "true"))
46+
t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0"))
47+
t.Run("WithPerSecondRedis", testBasicConfig("8085", "true", "0"))
48+
t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfig("8083", "false", "1000"))
49+
t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("8085", "true", "1000"))
4750
}
51+
52+
4853
func TestBasicTLSConfig(t *testing.T) {
49-
t.Run("WithoutPerSecondRedisTLS", testBasicConfigAuthTLS("8087", "false"))
50-
t.Run("WithPerSecondRedisTLS", testBasicConfigAuthTLS("8089", "true"))
54+
t.Run("WithoutPerSecondRedisTLS", testBasicConfigAuthTLS("8087", "false", "0"))
55+
t.Run("WithPerSecondRedisTLS", testBasicConfigAuthTLS("8089", "true", "0"))
56+
t.Run("WithoutPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("8087", "false", "1000"))
57+
t.Run("WithPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("8089", "true", "1000"))
5158
}
52-
func testBasicConfigAuthTLS(grpcPort, perSecond string) func(*testing.T) {
59+
60+
func testBasicConfigAuthTLS(grpcPort, perSecond string, local_cache_size string) func(*testing.T) {
5361
os.Setenv("REDIS_PERSECOND_URL", "localhost:16382")
5462
os.Setenv("REDIS_URL", "localhost:16381")
5563
os.Setenv("REDIS_AUTH", "password123")
5664
os.Setenv("REDIS_PERSECOND_AUTH", "password123")
57-
return testBasicBaseConfig(grpcPort, perSecond)
65+
return testBasicBaseConfig(grpcPort, perSecond, local_cache_size)
5866
}
59-
func testBasicConfig(grpcPort, perSecond string) func(*testing.T) {
67+
68+
func testBasicConfig(grpcPort, perSecond string, local_cache_size string) func(*testing.T) {
6069
os.Setenv("REDIS_PERSECOND_URL", "localhost:6380")
6170
os.Setenv("REDIS_URL", "localhost:6379")
6271
os.Setenv("REDIS_TLS", "false")
6372
os.Setenv("REDIS_PERSECOND_TLS", "false")
64-
return testBasicBaseConfig(grpcPort, perSecond)
73+
return testBasicBaseConfig(grpcPort, perSecond, local_cache_size)
6574
}
66-
func testBasicBaseConfig(grpcPort, perSecond string) func(*testing.T) {
75+
76+
func getCacheKey(cacheKey string, enableLocalCache bool) string {
77+
if enableLocalCache {
78+
return cacheKey + "_local"
79+
}
80+
81+
return cacheKey
82+
}
83+
84+
func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) func(*testing.T) {
6785
return func(t *testing.T) {
6886
os.Setenv("REDIS_PERSECOND", perSecond)
6987
os.Setenv("PORT", "8082")
@@ -73,6 +91,10 @@ func testBasicBaseConfig(grpcPort, perSecond string) func(*testing.T) {
7391
os.Setenv("RUNTIME_SUBDIRECTORY", "ratelimit")
7492
os.Setenv("REDIS_PERSECOND_SOCKET_TYPE", "tcp")
7593
os.Setenv("REDIS_SOCKET_TYPE", "tcp")
94+
os.Setenv("LOCAL_CACHE_SIZE", local_cache_size)
95+
96+
local_cache_size_val, _ := strconv.Atoi(local_cache_size)
97+
enable_local_cache := local_cache_size_val > 0
7698

7799
go func() {
78100
runner.Run()
@@ -89,7 +111,7 @@ func testBasicBaseConfig(grpcPort, perSecond string) func(*testing.T) {
89111

90112
response, err := c.ShouldRateLimit(
91113
context.Background(),
92-
common.NewRateLimitRequest("foo", [][][2]string{{{"hello", "world"}}}, 1))
114+
common.NewRateLimitRequest("foo", [][][2]string{{{getCacheKey("hello", enable_local_cache), "world"}}}, 1))
93115
assert.Equal(
94116
&pb.RateLimitResponse{
95117
OverallCode: pb.RateLimitResponse_OK,
@@ -99,7 +121,7 @@ func testBasicBaseConfig(grpcPort, perSecond string) func(*testing.T) {
99121

100122
response, err = c.ShouldRateLimit(
101123
context.Background(),
102-
common.NewRateLimitRequest("basic", [][][2]string{{{"key1", "foo"}}}, 1))
124+
common.NewRateLimitRequest("basic", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1))
103125
assert.Equal(
104126
&pb.RateLimitResponse{
105127
OverallCode: pb.RateLimitResponse_OK,
@@ -115,7 +137,7 @@ func testBasicBaseConfig(grpcPort, perSecond string) func(*testing.T) {
115137
response, err = c.ShouldRateLimit(
116138
context.Background(),
117139
common.NewRateLimitRequest(
118-
"another", [][][2]string{{{"key2", strconv.Itoa(randomInt)}}}, 1))
140+
"another", [][][2]string{{{getCacheKey("key2", enable_local_cache), strconv.Itoa(randomInt)}}}, 1))
119141

120142
status := pb.RateLimitResponse_OK
121143
limitRemaining := uint32(20 - (i + 1))
@@ -141,8 +163,8 @@ func testBasicBaseConfig(grpcPort, perSecond string) func(*testing.T) {
141163
common.NewRateLimitRequest(
142164
"another",
143165
[][][2]string{
144-
{{"key2", strconv.Itoa(randomInt)}},
145-
{{"key3", strconv.Itoa(randomInt)}}}, 1))
166+
{{getCacheKey("key2", enable_local_cache), strconv.Itoa(randomInt)}},
167+
{{getCacheKey("key3", enable_local_cache), strconv.Itoa(randomInt)}}}, 1))
146168

147169
status := pb.RateLimitResponse_OK
148170
limitRemaining1 := uint32(20 - (i + 1))
@@ -165,101 +187,113 @@ func testBasicBaseConfig(grpcPort, perSecond string) func(*testing.T) {
165187
}
166188

167189
func TestBasicConfigLegacy(t *testing.T) {
168-
os.Setenv("PORT", "8082")
169-
os.Setenv("GRPC_PORT", "8083")
170-
os.Setenv("DEBUG_PORT", "8084")
171-
os.Setenv("RUNTIME_ROOT", "runtime/current")
172-
os.Setenv("RUNTIME_SUBDIRECTORY", "ratelimit")
190+
t.Run("testBasicConfigLegacy", testBasicConfigLegacy("0"))
191+
t.Run("testBasicConfigLegacyWithLocalCache", testBasicConfigLegacy("1000"))
192+
}
173193

174-
os.Setenv("REDIS_PERSECOND_URL", "localhost:6380")
175-
os.Setenv("REDIS_URL", "localhost:6379")
176-
os.Setenv("REDIS_TLS", "false")
177-
os.Setenv("REDIS_PERSECOND_TLS", "false")
178-
go func() {
179-
runner.Run()
180-
}()
181-
182-
// HACK: Wait for the server to come up. Make a hook that we can wait on.
183-
time.Sleep(100 * time.Millisecond)
184-
185-
assert := assert.New(t)
186-
conn, err := grpc.Dial("localhost:8083", grpc.WithInsecure())
187-
assert.NoError(err)
188-
defer conn.Close()
189-
c := pb_legacy.NewRateLimitServiceClient(conn)
190-
191-
response, err := c.ShouldRateLimit(
192-
context.Background(),
193-
common.NewRateLimitRequestLegacy("foo", [][][2]string{{{"hello", "world"}}}, 1))
194-
assert.Equal(
195-
&pb_legacy.RateLimitResponse{
196-
OverallCode: pb_legacy.RateLimitResponse_OK,
197-
Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}},
198-
response)
199-
assert.NoError(err)
200-
201-
response, err = c.ShouldRateLimit(
202-
context.Background(),
203-
common.NewRateLimitRequestLegacy("basic_legacy", [][][2]string{{{"key1", "foo"}}}, 1))
204-
assert.Equal(
205-
&pb_legacy.RateLimitResponse{
206-
OverallCode: pb_legacy.RateLimitResponse_OK,
207-
Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
208-
newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, pb_legacy.RateLimit_SECOND, 49)}},
209-
response)
210-
assert.NoError(err)
211-
212-
// Now come up with a random key, and go over limit for a minute limit which should always work.
213-
r := rand.New(rand.NewSource(time.Now().UnixNano()))
214-
randomInt := r.Int()
215-
for i := 0; i < 25; i++ {
216-
response, err = c.ShouldRateLimit(
217-
context.Background(),
218-
common.NewRateLimitRequestLegacy(
219-
"another", [][][2]string{{{"key2", strconv.Itoa(randomInt)}}}, 1))
220-
221-
status := pb_legacy.RateLimitResponse_OK
222-
limitRemaining := uint32(20 - (i + 1))
223-
if i >= 20 {
224-
status = pb_legacy.RateLimitResponse_OVER_LIMIT
225-
limitRemaining = 0
226-
}
194+
func testBasicConfigLegacy(local_cache_size string) func(*testing.T) {
195+
return func(t *testing.T) {
196+
os.Setenv("PORT", "8082")
197+
os.Setenv("GRPC_PORT", "8083")
198+
os.Setenv("DEBUG_PORT", "8084")
199+
os.Setenv("RUNTIME_ROOT", "runtime/current")
200+
os.Setenv("RUNTIME_SUBDIRECTORY", "ratelimit")
201+
202+
os.Setenv("REDIS_PERSECOND_URL", "localhost:6380")
203+
os.Setenv("REDIS_URL", "localhost:6379")
204+
os.Setenv("REDIS_TLS", "false")
205+
os.Setenv("REDIS_PERSECOND_TLS", "false")
206+
os.Setenv("LOCAL_CACHE_SIZE", local_cache_size)
207+
local_cache_size_val, _ := strconv.Atoi(local_cache_size)
208+
enable_local_cache := local_cache_size_val > 0
209+
210+
211+
go func() {
212+
runner.Run()
213+
}()
227214

215+
// HACK: Wait for the server to come up. Make a hook that we can wait on.
216+
time.Sleep(100 * time.Millisecond)
217+
218+
assert := assert.New(t)
219+
conn, err := grpc.Dial("localhost:8083", grpc.WithInsecure())
220+
assert.NoError(err)
221+
defer conn.Close()
222+
c := pb_legacy.NewRateLimitServiceClient(conn)
223+
224+
response, err := c.ShouldRateLimit(
225+
context.Background(),
226+
common.NewRateLimitRequestLegacy("foo", [][][2]string{{{getCacheKey("hello", enable_local_cache), "world"}}}, 1))
228227
assert.Equal(
229228
&pb_legacy.RateLimitResponse{
230-
OverallCode: status,
231-
Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
232-
newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimit_MINUTE, limitRemaining)}},
229+
OverallCode: pb_legacy.RateLimitResponse_OK,
230+
Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}},
233231
response)
234232
assert.NoError(err)
235-
}
236233

237-
// Limit now against 2 keys in the same domain.
238-
randomInt = r.Int()
239-
for i := 0; i < 15; i++ {
240234
response, err = c.ShouldRateLimit(
241235
context.Background(),
242-
common.NewRateLimitRequestLegacy(
243-
"another_legacy",
244-
[][][2]string{
245-
{{"key2", strconv.Itoa(randomInt)}},
246-
{{"key3", strconv.Itoa(randomInt)}}}, 1))
247-
248-
status := pb_legacy.RateLimitResponse_OK
249-
limitRemaining1 := uint32(20 - (i + 1))
250-
limitRemaining2 := uint32(10 - (i + 1))
251-
if i >= 10 {
252-
status = pb_legacy.RateLimitResponse_OVER_LIMIT
253-
limitRemaining2 = 0
254-
}
255-
236+
common.NewRateLimitRequestLegacy("basic_legacy", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1))
256237
assert.Equal(
257238
&pb_legacy.RateLimitResponse{
258-
OverallCode: status,
239+
OverallCode: pb_legacy.RateLimitResponse_OK,
259240
Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
260-
newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimit_MINUTE, limitRemaining1),
261-
newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimit_HOUR, limitRemaining2)}},
241+
newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, pb_legacy.RateLimit_SECOND, 49)}},
262242
response)
263243
assert.NoError(err)
244+
245+
// Now come up with a random key, and go over limit for a minute limit which should always work.
246+
r := rand.New(rand.NewSource(time.Now().UnixNano()))
247+
randomInt := r.Int()
248+
for i := 0; i < 25; i++ {
249+
response, err = c.ShouldRateLimit(
250+
context.Background(),
251+
common.NewRateLimitRequestLegacy(
252+
"another", [][][2]string{{{getCacheKey("key2", enable_local_cache), strconv.Itoa(randomInt)}}}, 1))
253+
254+
status := pb_legacy.RateLimitResponse_OK
255+
limitRemaining := uint32(20 - (i + 1))
256+
if i >= 20 {
257+
status = pb_legacy.RateLimitResponse_OVER_LIMIT
258+
limitRemaining = 0
259+
}
260+
261+
assert.Equal(
262+
&pb_legacy.RateLimitResponse{
263+
OverallCode: status,
264+
Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
265+
newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimit_MINUTE, limitRemaining)}},
266+
response)
267+
assert.NoError(err)
268+
}
269+
270+
// Limit now against 2 keys in the same domain.
271+
randomInt = r.Int()
272+
for i := 0; i < 15; i++ {
273+
response, err = c.ShouldRateLimit(
274+
context.Background(),
275+
common.NewRateLimitRequestLegacy(
276+
"another_legacy",
277+
[][][2]string{
278+
{{getCacheKey("key2", enable_local_cache), strconv.Itoa(randomInt)}},
279+
{{getCacheKey("key3", enable_local_cache), strconv.Itoa(randomInt)}}}, 1))
280+
281+
status := pb_legacy.RateLimitResponse_OK
282+
limitRemaining1 := uint32(20 - (i + 1))
283+
limitRemaining2 := uint32(10 - (i + 1))
284+
if i >= 10 {
285+
status = pb_legacy.RateLimitResponse_OVER_LIMIT
286+
limitRemaining2 = 0
287+
}
288+
289+
assert.Equal(
290+
&pb_legacy.RateLimitResponse{
291+
OverallCode: status,
292+
Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
293+
newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimit_MINUTE, limitRemaining1),
294+
newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimit_HOUR, limitRemaining2)}},
295+
response)
296+
assert.NoError(err)
297+
}
264298
}
265299
}

test/integration/runtime/current/ratelimit/config/another.yaml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,3 +9,13 @@ descriptors:
99
rate_limit:
1010
unit: hour
1111
requests_per_unit: 10
12+
13+
- key: key2_local
14+
rate_limit:
15+
unit: minute
16+
requests_per_unit: 20
17+
18+
- key: key3_local
19+
rate_limit:
20+
unit: hour
21+
requests_per_unit: 10

test/integration/runtime/current/ratelimit/config/another_legacy.yaml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,3 +9,13 @@ descriptors:
99
rate_limit:
1010
unit: hour
1111
requests_per_unit: 10
12+
13+
- key: key2_local
14+
rate_limit:
15+
unit: minute
16+
requests_per_unit: 20
17+
18+
- key: key3_local
19+
rate_limit:
20+
unit: hour
21+
requests_per_unit: 10

test/integration/runtime/current/ratelimit/config/basic.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,3 +4,8 @@ descriptors:
44
rate_limit:
55
unit: second
66
requests_per_unit: 50
7+
8+
- key: key1_local
9+
rate_limit:
10+
unit: second
11+
requests_per_unit: 50

test/integration/runtime/current/ratelimit/config/basic_legacy.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,3 +4,8 @@ descriptors:
44
rate_limit:
55
unit: second
66
requests_per_unit: 50
7+
8+
- key: key1_local
9+
rate_limit:
10+
unit: second
11+
requests_per_unit: 50

0 commit comments

Comments
 (0)