mirror of
https://github.com/LukeHagar/vercel.git
synced 2025-12-09 21:07:46 +00:00
[tests] Cache token when possible (#5179)
This PR solves an issue where tests would sometimes be rate-limited when generating a token. This can be solved by caching the token, as we already do for E2E tests. Example failure: https://github.com/vercel/vercel/runs/1098030228#step:11:9376 I also updated the `@vercel/node` tests to compare the error message exactly, the same way we do it for `@vercel/static-build`.
This commit is contained in:
// Performs an API request, attaching a Bearer token unless the caller already
// supplied an Authorization header. Token acquisition is delegated to
// fetchCachedToken() so the rate-limited token endpoint is only hit when the
// cached token is missing or has been used MAX_COUNT times.
//
// @param {string} url - API endpoint to request.
// @param {object} [opts] - fetch options; opts.headers is created if missing
//   (note: opts is mutated in place, matching the original behavior).
// @returns {Promise<*>} the result of fetchApi(url, opts).
async function fetchWithAuth(url, opts = {}) {
  if (!opts.headers) opts.headers = {};
  if (!opts.headers.Authorization) {
    opts.headers.Authorization = `Bearer ${await fetchCachedToken()}`;
  }
  return await fetchApi(url, opts);
}
|
||||
|
||||
// Returns the cached auth token, refreshing it via fetchTokenWithRetry() only
// when no token is cached yet or the token has already been handed out
// MAX_COUNT times. Keeps test runs from hammering the rate-limited token
// endpoint.
//
// @returns {Promise<string>} the (possibly freshly fetched) token.
async function fetchCachedToken() {
  currentCount += 1;
  // Refresh when the cache is cold or the usage budget is exhausted.
  const needsRefresh = !token || currentCount === MAX_COUNT;
  if (needsRefresh) {
    currentCount = 0;
    token = await fetchTokenWithRetry();
  }
  return token;
}
|
||||
|
||||
async function fetchTokenWithRetry(retries = 5) {
|
||||
const {
|
||||
NOW_TOKEN,
|
||||
@@ -248,5 +246,6 @@ module.exports = {
|
||||
fetchWithAuth,
|
||||
nowDeploy,
|
||||
fetchTokenWithRetry,
|
||||
fetchCachedToken,
|
||||
fileModeSymbol,
|
||||
};
|
||||
|
||||
Reference in New Issue
Block a user