
Commit

Fix error thrown on overview page
Signed-off-by: Chenyang Ji <[email protected]>
ansjcy committed Nov 6, 2024
1 parent dcda4b8 commit 5b9be60
Showing 2 changed files with 221 additions and 7 deletions.
208 changes: 208 additions & 0 deletions public/pages/TopNQueries/TopNQueries.test.tsx
@@ -0,0 +1,208 @@
/*
 * Copyright OpenSearch Contributors
 * SPDX-License-Identifier: Apache-2.0
 */

import React from 'react';
import { render, screen, fireEvent, waitFor } from '@testing-library/react';
import '@testing-library/jest-dom/extend-expect';
import { MemoryRouter } from 'react-router-dom';
import TopNQueries, { QUERY_INSIGHTS, CONFIGURATION } from './TopNQueries';
import { CoreStart } from 'opensearch-dashboards/public';

jest.mock('../QueryInsights/QueryInsights', () => () => <div>Mocked QueryInsights</div>);
jest.mock('../Configuration/Configuration', () => () => <div>Mocked Configuration</div>);
jest.mock('../QueryDetails/QueryDetails', () => () => <div>Mocked QueryDetails</div>);

const mockCore = ({
  http: {
    get: jest.fn(),
    put: jest.fn(),
  },
} as unknown) as CoreStart;

const setUpDefaultEnabledSettings = () => {
  const mockLatencyResponse = { response: { top_queries: [{ id: '1' }, { id: '2' }] } };
  const mockCpuResponse = { response: { top_queries: [{ id: '2' }, { id: '3' }] } };
  const mockMemoryResponse = { response: { top_queries: [{ id: '1' }] } };
  // Mock API responses for each metric
  (mockCore.http.get as jest.Mock).mockImplementation((endpoint) => {
    if (endpoint === '/api/top_queries/latency') return Promise.resolve(mockLatencyResponse);
    if (endpoint === '/api/top_queries/cpu') return Promise.resolve(mockCpuResponse);
    if (endpoint === '/api/top_queries/memory') return Promise.resolve(mockMemoryResponse);
    return Promise.resolve({ response: { top_queries: [] } });
  });
  // Mock API response for all metrics enabled
  const mockSettingsResponse = {
    response: {
      persistent: {
        search: {
          insights: {
            top_queries: {
              latency: { enabled: 'true', top_n_size: '10', window_size: '1h' },
              cpu: { enabled: 'true', top_n_size: '10', window_size: '1h' },
              memory: { enabled: 'true', top_n_size: '5', window_size: '30m' },
            },
          },
        },
      },
    },
  };
  (mockCore.http.get as jest.Mock).mockResolvedValueOnce(mockSettingsResponse);
};

describe('TopNQueries Component', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  it('renders and switches tabs correctly', () => {
    render(
      <MemoryRouter initialEntries={[QUERY_INSIGHTS]}>
        <TopNQueries core={mockCore} />
      </MemoryRouter>
    );

    // Check for Query Insights tab content
    expect(screen.getByText('Mocked QueryInsights')).toBeInTheDocument();
    expect(screen.getByText('Top N queries')).toBeInTheDocument();
    expect(screen.getByText('Configuration')).toBeInTheDocument();

    // Switch to Configuration tab
    fireEvent.click(screen.getByText('Configuration'));
    expect(screen.getByText('Mocked Configuration')).toBeInTheDocument();
  });

  it('updates settings in retrieveConfigInfo based on API response', async () => {
    const mockSettingsResponse = {
      response: {
        persistent: {
          search: {
            insights: {
              top_queries: {
                latency: { enabled: 'true', top_n_size: '10', window_size: '1h' },
                cpu: { enabled: 'false' },
                memory: { enabled: 'true', top_n_size: '5', window_size: '30m' },
              },
            },
          },
        },
      },
    };
    (mockCore.http.get as jest.Mock).mockResolvedValueOnce(mockSettingsResponse);
    render(
      <MemoryRouter initialEntries={[CONFIGURATION]}>
        <TopNQueries core={mockCore} />
      </MemoryRouter>
    );
    await waitFor(() => {
      expect(mockCore.http.get).toHaveBeenCalledWith('/api/settings');
      expect(screen.getByText('Mocked Configuration')).toBeInTheDocument();
    });
  });

  it('fetches queries for all metrics in retrieveQueries', async () => {
    setUpDefaultEnabledSettings();
    render(
      <MemoryRouter initialEntries={[QUERY_INSIGHTS]}>
        <TopNQueries core={mockCore} />
      </MemoryRouter>
    );
    await waitFor(() => {
      // Verify each endpoint is called
      expect(mockCore.http.get).toHaveBeenCalledWith('/api/settings');
      expect(mockCore.http.get).toHaveBeenCalledWith(
        '/api/top_queries/latency',
        expect.any(Object)
      );
      expect(mockCore.http.get).toHaveBeenCalledWith('/api/top_queries/cpu', expect.any(Object));
      expect(mockCore.http.get).toHaveBeenCalledWith('/api/top_queries/memory', expect.any(Object));
      // Check that deduplicated queries would be displayed in QueryInsights
      expect(screen.getByText('Mocked QueryInsights')).toBeInTheDocument();
    });
  });

  it('fetches queries for only enabled metrics in retrieveQueries', async () => {
    const mockResponse = { response: { top_queries: [{ id: '1' }, { id: '2' }] } };
    // Mock API responses for each metric
    (mockCore.http.get as jest.Mock).mockImplementation((endpoint) => {
      if (endpoint === '/api/top_queries/latency') return Promise.resolve(mockResponse);
      if (endpoint === '/api/top_queries/cpu') return Promise.resolve(mockResponse);
      if (endpoint === '/api/top_queries/memory') return Promise.resolve(mockResponse);
      return Promise.resolve({ response: { top_queries: [] } });
    });
    // Mock API response with only one metric enabled
    const mockSettingsResponse = {
      response: {
        persistent: {
          search: {
            insights: {
              top_queries: {
                latency: { enabled: 'true', top_n_size: '10', window_size: '1h' },
                cpu: { enabled: 'false' },
                memory: { enabled: 'false' },
              },
            },
          },
        },
      },
    };
    (mockCore.http.get as jest.Mock).mockResolvedValueOnce(mockSettingsResponse);
    render(
      <MemoryRouter initialEntries={[QUERY_INSIGHTS]}>
        <TopNQueries core={mockCore} />
      </MemoryRouter>
    );
    await waitFor(() => {
      // Verify that only the enabled endpoint is called
      expect(mockCore.http.get).toHaveBeenCalledWith('/api/settings');
      expect(mockCore.http.get).toHaveBeenCalledWith(
        '/api/top_queries/latency',
        expect.any(Object)
      );
      expect(mockCore.http.get).not.toHaveBeenCalledWith(
        '/api/top_queries/cpu',
        expect.any(Object)
      );
      expect(mockCore.http.get).not.toHaveBeenCalledWith(
        '/api/top_queries/memory',
        expect.any(Object)
      );
      // Check that deduplicated queries would be displayed in QueryInsights
      expect(screen.getByText('Mocked QueryInsights')).toBeInTheDocument();
    });
  });

  it('updates time range and fetches data when time range changes', async () => {
    setUpDefaultEnabledSettings();
    (mockCore.http.get as jest.Mock).mockResolvedValueOnce({ response: { top_queries: [] } });
    // Render with initial time range
    const { rerender } = render(
      <MemoryRouter initialEntries={[QUERY_INSIGHTS]}>
        <TopNQueries core={mockCore} initialStart="now-1d" initialEnd="now" />
      </MemoryRouter>
    );
    // Mock a new response for the time range update
    (mockCore.http.get as jest.Mock).mockResolvedValueOnce({
      response: { top_queries: [{ id: 'newQuery' }] },
    });
    // Re-render with an updated time range to simulate a change
    rerender(
      <MemoryRouter initialEntries={[QUERY_INSIGHTS]}>
        <TopNQueries core={mockCore} initialStart="now-7d" initialEnd="now" />
      </MemoryRouter>
    );
    // Verify that the component re-fetches data for the new time range
    await waitFor(() => {
      // 1 initial call for settings, plus 3 metric calls each for the initial render and the re-render
      expect(mockCore.http.get).toHaveBeenCalledTimes(7);
      expect(mockCore.http.get).toHaveBeenCalledWith('/api/settings');
      expect(mockCore.http.get).toHaveBeenCalledWith(
        '/api/top_queries/latency',
        expect.any(Object)
      );
      expect(mockCore.http.get).toHaveBeenCalledWith('/api/top_queries/cpu', expect.any(Object));
      expect(mockCore.http.get).toHaveBeenCalledWith('/api/top_queries/memory', expect.any(Object));
    });
  });
});
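A note on the mocking pattern in setUpDefaultEnabledSettings above: the helper registers a default implementation for the per-metric endpoints and then queues a one-time resolved value for the settings payload. In Jest, values queued with mockResolvedValueOnce are consumed before the default supplied by mockImplementation, so, assuming the component's first GET is /api/settings, the settings request receives the all-metrics-enabled payload while later requests fall through to the per-endpoint implementation. A minimal sketch of that ordering (the payload strings here are placeholders, not values from the component):

const get = jest.fn();
get.mockImplementation(() => Promise.resolve('top-queries payload')); // default for every call
get.mockResolvedValueOnce('settings payload'); // consumed by the next call only

it('drains once-values before the default implementation', async () => {
  await expect(get()).resolves.toBe('settings payload'); // first call takes the queued value
  await expect(get()).resolves.toBe('top-queries payload'); // later calls hit the default
});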
20 changes: 13 additions & 7 deletions public/pages/TopNQueries/TopNQueries.tsx
@@ -23,12 +23,20 @@ export interface MetricSettings {
   currTimeUnit: string;
 }
 
-const TopNQueries = ({ core }: { core: CoreStart }) => {
+const TopNQueries = ({
+  core,
+  initialStart = 'now-1d',
+  initialEnd = 'now',
+}: {
+  core: CoreStart;
+  initialStart?: string;
+  initialEnd?: string;
+}) => {
   const history = useHistory();
   const location = useLocation();
   const [loading, setLoading] = useState(false);
-  const [currStart, setStart] = useState('now-1d');
-  const [currEnd, setEnd] = useState('now');
+  const [currStart, setStart] = useState(initialStart);
+  const [currEnd, setEnd] = useState(initialEnd);
   const [recentlyUsedRanges, setRecentlyUsedRanges] = useState([
     { start: currStart, end: currEnd },
   ]);
@@ -172,10 +180,8 @@ const TopNQueries = ({ core }: { core: CoreStart }) => {
     if (get) {
       try {
         const resp = await core.http.get('/api/settings');
-        const settings = resp.response.persistent.search.insights.top_queries;
-        const latency = settings.latency;
-        const cpu = settings.cpu;
-        const memory = settings.memory;
+        const { latency, cpu, memory } =
+          resp?.response?.persistent?.search?.insights?.top_queries || {};
         if (latency !== undefined && latency.enabled === 'true') {
           const [time, timeUnits] = latency.window_size.match(/\D+|\d+/g);
           setMetricSettings('latency', {
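The behavioral fix in this hunk is the guarded read of the settings path. With the old chained access, any missing intermediate key (for example, a cluster where the search.insights.top_queries settings have never been set) throws a TypeError, which is presumably the error on the overview page that the commit title refers to. Optional chaining plus the || {} fallback turns the same situation into three undefined values, which the existing enabled === 'true' checks already handle. A small sketch of the difference, using a hypothetical response shape rather than a real API payload:

// Hypothetical settings response with no search.insights block configured.
const resp: any = { response: { persistent: {} } };

// Old access path: throws TypeError ("Cannot read properties of undefined").
// const settings = resp.response.persistent.search.insights.top_queries;

// New access path: each missing link short-circuits to undefined, and the
// || {} fallback keeps the destructuring itself from throwing.
const { latency, cpu, memory } =
  resp?.response?.persistent?.search?.insights?.top_queries || {};

console.log(latency, cpu, memory); // undefined undefined undefined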
