147 changes: 97 additions & 50 deletions src/lib.rs
@@ -500,32 +500,64 @@ mod integration_tests {

// Test the integrated VTS status with upstream stats

// Clear any existing data to ensure clean test state
// Create completely fresh manager state for this test to avoid race conditions
{
let mut manager = match VTS_MANAGER.write() {
Ok(guard) => guard,
Err(poisoned) => poisoned.into_inner(),
};
manager.stats.clear();
manager.upstream_zones.clear();
manager.connections = Default::default();

// Complete reset to ensure deterministic test state
*manager = VtsStatsManager::new();
}

// Set up connection statistics for the test
update_connection_stats(1, 0, 1, 0, 16, 16);

// Add some sample server zone data
update_server_zone_stats("example.com", 200, 1024, 2048, 150);
update_server_zone_stats("example.com", 404, 512, 256, 80);
update_server_zone_stats("api.example.com", 200, 2048, 4096, 200);
// Add some sample server zone data with unique identifiers for this test
update_server_zone_stats("test1-example.com", 200, 1024, 2048, 150);
update_server_zone_stats("test1-example.com", 404, 512, 256, 80);
update_server_zone_stats("test1-api.example.com", 200, 2048, 4096, 200);

// Add some upstream stats
update_upstream_zone_stats("backend_pool", "192.168.1.10:80", 100, 50, 1500, 800, 200);
update_upstream_zone_stats("backend_pool", "192.168.1.11:80", 150, 75, 2000, 1000, 200);
update_upstream_zone_stats("backend_pool", "192.168.1.10:80", 120, 60, 1200, 600, 404);
// Add some upstream stats with unique identifiers for this test
update_upstream_zone_stats(
"test1-backend_pool",
"192.168.1.10:80",
100,
50,
1500,
800,
200,
);
update_upstream_zone_stats(
"test1-backend_pool",
"192.168.1.11:80",
150,
75,
2000,
1000,
200,
);
update_upstream_zone_stats(
"test1-backend_pool",
"192.168.1.10:80",
120,
60,
1200,
600,
404,
);

update_upstream_zone_stats("api_pool", "192.168.2.10:8080", 80, 40, 800, 400, 200);
update_upstream_zone_stats("api_pool", "192.168.2.11:8080", 300, 200, 3000, 1500, 500);
update_upstream_zone_stats("test1-api_pool", "192.168.2.10:8080", 80, 40, 800, 400, 200);
update_upstream_zone_stats(
"test1-api_pool",
"192.168.2.11:8080",
300,
200,
3000,
1500,
500,
);

// Generate VTS status content
let status_content = generate_vts_status_content();
@@ -542,11 +574,11 @@ mod integration_tests {
assert!(status_content.contains("nginx_vts_upstream_requests_total"));
assert!(status_content.contains("nginx_vts_upstream_responses_total"));

// Verify specific upstream metrics
assert!(status_content.contains("backend_pool"));
// Verify specific upstream metrics with test-unique identifiers
assert!(status_content.contains("test1-backend_pool"));
assert!(status_content.contains("192.168.1.10:80"));
assert!(status_content.contains("192.168.1.11:80"));
assert!(status_content.contains("api_pool"));
assert!(status_content.contains("test1-api_pool"));

println!("=== Generated VTS Status Content ===");
println!("{}", status_content);
@@ -555,25 +587,43 @@

#[test]
fn test_issue6_complete_metrics_output() {
// Clear any existing data
let _lock = GLOBAL_VTS_TEST_MUTEX
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());

// Create completely fresh manager state for this test
{
let mut manager = match VTS_MANAGER.write() {
Ok(guard) => guard,
Err(poisoned) => poisoned.into_inner(),
};
manager.stats.clear();
manager.upstream_zones.clear();
manager.connections = Default::default();
*manager = VtsStatsManager::new();
}

// Set up test data similar to ISSUE6.md requirements
// Set up test data similar to ISSUE6.md requirements with unique identifiers
update_connection_stats(1, 0, 1, 0, 16, 16);
update_server_zone_stats("example.com", 200, 50000, 2000000, 125);
update_server_zone_stats("example.com", 404, 5000, 100000, 50);
update_upstream_zone_stats("backend", "10.0.0.1:8080", 50, 25, 750000, 250000, 200);
update_upstream_zone_stats("backend", "10.0.0.2:8080", 60, 30, 680000, 230000, 200);
update_server_zone_stats("test2-example.com", 200, 50000, 2000000, 125);
update_server_zone_stats("test2-example.com", 404, 5000, 100000, 50);
update_upstream_zone_stats(
"test2-backend",
"10.0.0.1:8080",
50,
25,
750000,
250000,
200,
);
update_upstream_zone_stats(
"test2-backend",
"10.0.0.2:8080",
60,
30,
680000,
230000,
200,
);
update_upstream_zone_stats(
"api_backend",
"test2-api_backend",
"192.168.1.10:9090",
80,
40,
@@ -600,53 +650,50 @@ mod integration_tests {
assert!(content.contains("nginx_vts_connections_total{state=\"accepted\"} 16"));
assert!(content.contains("nginx_vts_connections_total{state=\"handled\"} 16"));

// Verify server zone metrics
// Verify server zone metrics with test-unique identifiers
assert!(content.contains("# HELP nginx_vts_server_requests_total Total number of requests"));
assert!(content.contains("nginx_vts_server_requests_total{zone=\"example.com\"}"));
assert!(content.contains("nginx_vts_server_requests_total{zone=\"test2-example.com\"}"));
assert!(content.contains("# HELP nginx_vts_server_bytes_total Total bytes transferred"));
assert!(
content.contains("nginx_vts_server_bytes_total{zone=\"example.com\",direction=\"in\"}")
);
assert!(content
.contains("nginx_vts_server_bytes_total{zone=\"example.com\",direction=\"out\"}"));
.contains("nginx_vts_server_bytes_total{zone=\"test2-example.com\",direction=\"in\"}"));
assert!(content.contains(
"nginx_vts_server_bytes_total{zone=\"test2-example.com\",direction=\"out\"}"
));

// Verify upstream metrics are still present
// Verify upstream metrics are still present with test-unique identifiers
assert!(content.contains(
"nginx_vts_upstream_requests_total{upstream=\"backend\",server=\"10.0.0.1:8080\"}"
"nginx_vts_upstream_requests_total{upstream=\"test2-backend\",server=\"10.0.0.1:8080\"}"
));
assert!(content.contains("nginx_vts_upstream_requests_total{upstream=\"api_backend\",server=\"192.168.1.10:9090\"}"));
assert!(content.contains("nginx_vts_upstream_requests_total{upstream=\"test2-api_backend\",server=\"192.168.1.10:9090\"}"));
}

#[test]
#[ignore] // Temporarily ignored due to test isolation issues
fn test_vts_stats_persistence() {
let _lock = GLOBAL_VTS_TEST_MUTEX
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner());

// Test that stats persist across multiple updates

// Clear any existing data to ensure clean test state
// Create completely fresh manager state for this test
{
let mut manager = match VTS_MANAGER.write() {
Ok(guard) => guard,
Err(poisoned) => poisoned.into_inner(),
};
manager.stats.clear();
manager.upstream_zones.clear();
manager.connections = Default::default();
*manager = VtsStatsManager::new();
}

let initial_content = generate_vts_status_content();
let _initial_backend_requests = if initial_content.contains("persistence_test_backend") {
let _initial_backend_requests = if initial_content.contains("test3-persistence_backend") {
1
} else {
0
};

// Add stats - two requests to same server, one request to different server
// Add stats - two requests to same server, one request to different server with unique identifiers
update_upstream_zone_stats(
"persistence_test_backend",
"test3-persistence_backend",
"10.0.0.1:80",
100,
50,
@@ -655,7 +702,7 @@ mod integration_tests {
200,
);
update_upstream_zone_stats(
"persistence_test_backend",
"test3-persistence_backend",
"10.0.0.1:80",
120,
60,
@@ -664,7 +711,7 @@ mod integration_tests {
200,
);
update_upstream_zone_stats(
"persistence_test_backend",
"test3-persistence_backend",
"10.0.0.2:80",
80,
40,
@@ -674,19 +721,19 @@ mod integration_tests {
);

let content1 = generate_vts_status_content();
assert!(content1.contains("persistence_test_backend"));
assert!(content1.contains("test3-persistence_backend"));

let content2 = generate_vts_status_content();
// Verify metrics are present (no longer check summary format)
assert!(content2.contains("nginx_vts_upstream_requests_total"));

// Verify final state (allow for some flexibility in race conditions)
// Verify final state (deterministic since we reset the manager)
let manager = VTS_MANAGER
.read()
.unwrap_or_else(|poisoned| poisoned.into_inner());

// Check that the upstream zone exists and has servers
let backend_zone = manager.get_upstream_zone("persistence_test_backend");
// Check that the upstream zone exists and has servers with test-unique identifiers
let backend_zone = manager.get_upstream_zone("test3-persistence_backend");
assert!(backend_zone.is_some(), "Backend zone should exist");

let zone = backend_zone.unwrap();
31 changes: 0 additions & 31 deletions src/stats.rs
@@ -191,29 +191,6 @@ impl VtsStatsManager {
}
}

pub fn init_shared_memory(&mut self, cf: *mut ngx_conf_t) -> Result<(), &'static str> {
unsafe {
let _pool = (*cf).pool;
let mut name = ngx_string!("vts_stats_zone");
let size = 1024 * 1024; // 1MB shared memory

let shm_zone = ngx_shared_memory_add(
cf,
&mut name,
size,
&raw const crate::ngx_http_vts_module as *const _ as *mut _,
);
if shm_zone.is_null() {
return Err("Failed to allocate shared memory zone");
}

(*shm_zone).init = Some(vts_init_shm_zone);
(*shm_zone).data = self as *mut _ as *mut c_void;
self.shared_zone = Some(shm_zone);
}
Ok(())
}

pub fn update_request_stats(
&self,
server_name: &str,
@@ -258,11 +235,3 @@ impl VtsStatsManager {

unsafe impl Send for VtsStatsManager {}
unsafe impl Sync for VtsStatsManager {}

// Shared memory zone initialization callback
extern "C" fn vts_init_shm_zone(shm_zone: *mut ngx_shm_zone_t, _data: *mut c_void) -> ngx_int_t {
// Initialize shared memory structures here
// _data parameter added to match expected signature
let _ = shm_zone; // Suppress unused warning
NGX_OK as ngx_int_t
}