Mirror of https://github.com/LukeHagar/vercel.git (synced 2025-12-10 04:22:12 +00:00)
This reverts commit e9026c7a69, which was causing some builds using Turbo + Next.js to fail with:
```
...writing to cache...
17:02:42.820 | Traced Next.js server files in: 169.874ms
17:02:42.871 | Error: Config file was not found at "/vercel/output/config.json"
17:02:42.871 |     at f3 (/var/task/sandbox.js:239:2647)
17:02:42.872 |     at async TCe (/var/task/sandbox.js:245:4681)
17:02:42.872 |     at async WBt (/var/task/sandbox.js:261:1990)
17:02:42.872 |     at async zBt (/var/task/sandbox.js:261:1791)
```
INC-442
```diff
@@ -9,6 +9,7 @@ import {
   Prerender,
   getLambdaOptionsFromFunction,
   getPlatformEnv,
+  streamToBuffer,
   NowBuildError,
   isSymbolicLink,
   NodejsLambda,
@@ -24,9 +25,11 @@ import type {
   RouteWithSrc,
 } from '@vercel/routing-utils';
 import { Sema } from 'async-sema';
+import crc32 from 'buffer-crc32';
 import fs, { lstat, stat } from 'fs-extra';
 import path from 'path';
 import semver from 'semver';
+import zlib from 'zlib';
 import url from 'url';
 import { createRequire } from 'module';
 import escapeStringRegexp from 'escape-string-regexp';
@@ -41,6 +44,7 @@ import {
   MIB,
   KIB,
   MAX_UNCOMPRESSED_LAMBDA_SIZE,
+  LAMBDA_RESERVED_COMPRESSED_SIZE,
   LAMBDA_RESERVED_UNCOMPRESSED_SIZE,
 } from './constants';
```
```diff
@@ -673,34 +677,35 @@ export function getFilesMapFromReasons(
   return parentFilesMap;
 }
 
-export const collectTracedFiles = async (
-  file: string,
-  baseDir: string,
-  lstatResults: { [key: string]: ReturnType<typeof lstat> },
-  lstatSema: Sema,
-  reasons: NodeFileTraceReasons,
-  files: { [filePath: string]: FileFsRef }
-) => {
-  const reason = reasons.get(file);
-  if (reason && reason.type.includes('initial')) {
-    // Initial files are manually added to the lambda later
-    return;
-  }
-  const filePath = path.join(baseDir, file);
-
-  if (!lstatResults[filePath]) {
-    lstatResults[filePath] = lstatSema
-      .acquire()
-      .then(() => lstat(filePath))
-      .finally(() => lstatSema.release());
-  }
-  const { mode } = await lstatResults[filePath];
-
-  files[file] = new FileFsRef({
-    fsPath: path.join(baseDir, file),
-    mode,
-  });
-};
+export const collectTracedFiles =
+  (
+    baseDir: string,
+    lstatResults: { [key: string]: ReturnType<typeof lstat> },
+    lstatSema: Sema,
+    reasons: NodeFileTraceReasons,
+    files: { [filePath: string]: FileFsRef }
+  ) =>
+  async (file: string) => {
+    const reason = reasons.get(file);
+    if (reason && reason.type.includes('initial')) {
+      // Initial files are manually added to the lambda later
+      return;
+    }
+    const filePath = path.join(baseDir, file);
+
+    if (!lstatResults[filePath]) {
+      lstatResults[filePath] = lstatSema
+        .acquire()
+        .then(() => lstat(filePath))
+        .finally(() => lstatSema.release());
+    }
+    const { mode } = await lstatResults[filePath];
+
+    files[file] = new FileFsRef({
+      fsPath: path.join(baseDir, file),
+      mode,
+    });
+  };
 
 export const ExperimentalTraceVersion = `9.0.4-canary.1`;
 
```
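The restored `collectTracedFiles` is curried: the shared context (base dir, lstat cache, semaphore, trace reasons, output map) is bound once, and the returned async closure is mapped over each traced file. Note that the cache stores the lstat *promise* rather than its result, so concurrent lookups of the same path coalesce into one fs call. Here is a minimal standalone sketch of that memoized-promise pattern; the concurrency cap and test path are hypothetical:

```ts
import { Stats } from 'fs';
import { lstat } from 'fs-extra';
import { Sema } from 'async-sema';

// Cache the promise itself, not the resolved value: callers that ask for
// the same path before the first lstat settles share a single fs call.
const lstatResults: { [path: string]: Promise<Stats> } = {};
const lstatSema = new Sema(10); // hypothetical concurrency cap

function cachedLstat(filePath: string): Promise<Stats> {
  if (!lstatResults[filePath]) {
    lstatResults[filePath] = lstatSema
      .acquire()
      .then(() => lstat(filePath))
      .finally(() => lstatSema.release());
  }
  return lstatResults[filePath];
}

async function main() {
  // Two concurrent lookups of the same path trigger only one lstat.
  const [a, b] = await Promise.all([
    cachedLstat('./package.json'),
    cachedLstat('./package.json'),
  ]);
  console.log(a.mode === b.mode); // true
}

main();
```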
```diff
@@ -711,6 +716,8 @@ export type PseudoLayer = {
 export type PseudoFile = {
   file: FileFsRef;
   isSymlink: false;
+  crc32: number;
+  compBuffer: Buffer;
   uncompressedSize: number;
 };
 
@@ -720,57 +727,54 @@ export type PseudoSymbolicLink = {
   symlinkTarget: string;
 };
 
+const compressBuffer = (buf: Buffer): Promise<Buffer> => {
+  return new Promise((resolve, reject) => {
+    zlib.deflateRaw(
+      buf,
+      { level: zlib.constants.Z_BEST_COMPRESSION },
+      (err, compBuf) => {
+        if (err) return reject(err);
+        resolve(compBuf);
+      }
+    );
+  });
+};
+
 export type PseudoLayerResult = {
   pseudoLayer: PseudoLayer;
   pseudoLayerBytes: number;
 };
 
-export async function createPseudoLayer({
-  files,
-  lstatSema,
-  lstatResults,
-}: {
-  lstatSema: Sema;
-  lstatResults: { [key: string]: ReturnType<typeof lstat> };
-  files: {
-    [fileName: string]: FileFsRef;
-  };
+export async function createPseudoLayer(files: {
+  [fileName: string]: FileFsRef;
 }): Promise<PseudoLayerResult> {
-  let pseudoLayerBytes = 0;
   const pseudoLayer: PseudoLayer = {};
-  const fileNames = Object.keys(files);
+  let pseudoLayerBytes = 0;
 
-  await Promise.all(
-    fileNames.map(async fileName => {
-      await lstatSema.acquire();
-      const file = files[fileName];
-
-      if (isSymbolicLink(file.mode)) {
-        const symlinkTarget = await fs.readlink(file.fsPath);
-        pseudoLayer[fileName] = {
-          file,
-          isSymlink: true,
-          symlinkTarget,
-        };
-      } else {
-        if (!lstatResults[file.fsPath]) {
-          lstatResults[file.fsPath] = lstatSema
-            .acquire()
-            .then(() => lstat(file.fsPath))
-            .finally(() => lstatSema.release());
-        }
-        const { size } = await lstatResults[file.fsPath];
-
-        pseudoLayerBytes += size;
-        pseudoLayer[fileName] = {
-          file,
-          isSymlink: false,
-          uncompressedSize: size,
-        };
-      }
-      lstatSema.release();
-    })
-  );
+  for (const fileName of Object.keys(files)) {
+    const file = files[fileName];
+
+    if (isSymbolicLink(file.mode)) {
+      const symlinkTarget = await fs.readlink(file.fsPath);
+      pseudoLayer[fileName] = {
+        file,
+        isSymlink: true,
+        symlinkTarget,
+      };
+    } else {
+      const origBuffer = await streamToBuffer(file.toStream());
+      const compBuffer = await compressBuffer(origBuffer);
+      pseudoLayerBytes += compBuffer.byteLength;
+      pseudoLayer[fileName] = {
+        file,
+        compBuffer,
+        isSymlink: false,
+        crc32: crc32.unsigned(origBuffer),
+        uncompressedSize: origBuffer.byteLength,
+      };
+    }
+  }
   return { pseudoLayer, pseudoLayerBytes };
 }
```
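This restores per-file deflate compression to pseudo-layer creation: each regular file's bytes are compressed with `zlib.deflateRaw` at maximum level, the layer total counts compressed bytes, and a CRC32 of the original contents is kept alongside both sizes. A small self-contained sketch of the same accounting, with a made-up payload:

```ts
import zlib from 'zlib';
import crc32 from 'buffer-crc32';

// Same promisified deflateRaw wrapper the diff restores.
const compressBuffer = (buf: Buffer): Promise<Buffer> =>
  new Promise((resolve, reject) => {
    zlib.deflateRaw(
      buf,
      { level: zlib.constants.Z_BEST_COMPRESSION },
      (err, compBuf) => (err ? reject(err) : resolve(compBuf))
    );
  });

async function main() {
  const origBuffer = Buffer.from('hello '.repeat(1000)); // hypothetical payload
  const compBuffer = await compressBuffer(origBuffer);

  // The pseudo layer records both sizes plus the CRC of the original bytes.
  console.log({
    uncompressedSize: origBuffer.byteLength, // 6000
    compressedSize: compBuffer.byteLength,   // far smaller for repetitive data
    crc32: crc32.unsigned(origBuffer),
  });
}

main();
```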
```diff
@@ -780,6 +784,10 @@ interface CreateLambdaFromPseudoLayersOptions extends LambdaOptionsWithFiles {
   nextVersion?: string;
 }
 
+// measured with 1, 2, 5, 10, and `os.cpus().length || 5`
+// and sema(1) produced the best results
+const createLambdaSema = new Sema(1);
+
 export async function createLambdaFromPseudoLayers({
   files: baseFiles,
   layers,
@@ -787,6 +795,8 @@ export async function createLambdaFromPseudoLayers({
   nextVersion,
   ...lambdaOptions
 }: CreateLambdaFromPseudoLayersOptions) {
+  await createLambdaSema.acquire();
+
   const files: Files = {};
   const addedFiles = new Set();
 
@@ -813,6 +823,8 @@ export async function createLambdaFromPseudoLayers({
     addedFiles.add(fileName);
   }
 
+  createLambdaSema.release();
+
   return new NodejsLambda({
     ...lambdaOptions,
     ...(isStreaming
```
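`new Sema(1)` acts as a mutex here: lambda zips are assembled one at a time even though the callers run concurrently, which bounds peak memory during buffer concatenation (the restored comment notes that concurrency 1 benchmarked best). A standalone sketch of the acquire/release pattern, with a hypothetical work function:

```ts
import { Sema } from 'async-sema';

const createLambdaSema = new Sema(1); // one zip assembled at a time

async function buildOne(name: string): Promise<string> {
  await createLambdaSema.acquire();
  try {
    // ...expensive buffer concatenation would happen here...
    await new Promise(resolve => setTimeout(resolve, 10));
    return `${name}.zip`;
  } finally {
    // The real code releases before constructing NodejsLambda; try/finally
    // is the defensive variant in case the critical section throws.
    createLambdaSema.release();
  }
}

async function main() {
  // All five run "in parallel" but enter the critical section one by one.
  const zips = await Promise.all(['a', 'b', 'c', 'd', 'e'].map(buildOne));
  console.log(zips);
}

main();
```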
```diff
@@ -1409,6 +1421,7 @@ export type LambdaGroup = {
   isApiLambda: boolean;
   pseudoLayer: PseudoLayer;
   pseudoLayerBytes: number;
+  pseudoLayerUncompressedBytes: number;
 };
 
 export async function getPageLambdaGroups({
@@ -1421,6 +1434,8 @@ export async function getPageLambdaGroups({
   compressedPages,
   tracedPseudoLayer,
   initialPseudoLayer,
+  initialPseudoLayerUncompressed,
+  lambdaCompressedByteLimit,
   internalPages,
   pageExtensions,
 }: {
@@ -1439,6 +1454,8 @@ export async function getPageLambdaGroups({
   };
   tracedPseudoLayer: PseudoLayer;
   initialPseudoLayer: PseudoLayerResult;
+  initialPseudoLayerUncompressed: number;
+  lambdaCompressedByteLimit: number;
   internalPages: string[];
   pageExtensions?: string[];
 }) {
@@ -1480,16 +1497,19 @@ export async function getPageLambdaGroups({
       group.isPrerenders === isPrerenderRoute;
 
     if (matches) {
-      let newTracedFilesUncompressedSize = group.pseudoLayerBytes;
+      let newTracedFilesSize = group.pseudoLayerBytes;
+      let newTracedFilesUncompressedSize = group.pseudoLayerUncompressedBytes;
 
       for (const newPage of newPages) {
         Object.keys(pageTraces[newPage] || {}).map(file => {
           if (!group.pseudoLayer[file]) {
             const item = tracedPseudoLayer[file] as PseudoFile;
 
+            newTracedFilesSize += item.compBuffer?.byteLength || 0;
             newTracedFilesUncompressedSize += item.uncompressedSize || 0;
           }
         });
+        newTracedFilesSize += compressedPages[newPage].compBuffer.byteLength;
         newTracedFilesUncompressedSize +=
           compressedPages[newPage].uncompressedSize;
       }
@@ -1497,8 +1517,11 @@ export async function getPageLambdaGroups({
       const underUncompressedLimit =
         newTracedFilesUncompressedSize <
         MAX_UNCOMPRESSED_LAMBDA_SIZE - LAMBDA_RESERVED_UNCOMPRESSED_SIZE;
+      const underCompressedLimit =
+        newTracedFilesSize <
+        lambdaCompressedByteLimit - LAMBDA_RESERVED_COMPRESSED_SIZE;
 
-      return underUncompressedLimit;
+      return underUncompressedLimit && underCompressedLimit;
     }
     return false;
   });
@@ -1512,6 +1535,7 @@ export async function getPageLambdaGroups({
       isPrerenders: isPrerenderRoute,
       isApiLambda: !!isApiPage(page),
       pseudoLayerBytes: initialPseudoLayer.pseudoLayerBytes,
+      pseudoLayerUncompressedBytes: initialPseudoLayerUncompressed,
       pseudoLayer: Object.assign({}, initialPseudoLayer.pseudoLayer),
     };
     groups.push(newGroup);
@@ -1521,16 +1545,21 @@ export async function getPageLambdaGroups({
     for (const newPage of newPages) {
       Object.keys(pageTraces[newPage] || {}).map(file => {
         const pseudoItem = tracedPseudoLayer[file] as PseudoFile;
+        const compressedSize = pseudoItem?.compBuffer?.byteLength || 0;
 
         if (!matchingGroup!.pseudoLayer[file]) {
           matchingGroup!.pseudoLayer[file] = pseudoItem;
-          matchingGroup!.pseudoLayerBytes += pseudoItem.uncompressedSize || 0;
+          matchingGroup!.pseudoLayerBytes += compressedSize;
+          matchingGroup!.pseudoLayerUncompressedBytes +=
+            pseudoItem.uncompressedSize || 0;
         }
       });
 
       // ensure the page file itself is accounted for when grouping as
       // large pages can be created that can push the group over the limit
       matchingGroup!.pseudoLayerBytes +=
+        compressedPages[newPage].compBuffer.byteLength;
+      matchingGroup!.pseudoLayerUncompressedBytes +=
         compressedPages[newPage].uncompressedSize;
     }
   }
```
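With compressed tracking restored, a page may join an existing group only if both running totals stay under budget: compressed bytes under `lambdaCompressedByteLimit` minus `LAMBDA_RESERVED_COMPRESSED_SIZE`, and uncompressed bytes under `MAX_UNCOMPRESSED_LAMBDA_SIZE` minus `LAMBDA_RESERVED_UNCOMPRESSED_SIZE`. A worked sketch of that admission check follows; the constant values are illustrative stand-ins, not the real values from `./constants`:

```ts
// Hypothetical stand-ins for the constants imported from './constants'.
const MIB = 1024 * 1024;
const MAX_UNCOMPRESSED_LAMBDA_SIZE = 250 * MIB;
const LAMBDA_RESERVED_COMPRESSED_SIZE = 5 * MIB;
const LAMBDA_RESERVED_UNCOMPRESSED_SIZE = 25 * MIB;

interface Group {
  pseudoLayerBytes: number;             // compressed running total
  pseudoLayerUncompressedBytes: number; // uncompressed running total
}

function canAddToGroup(
  group: Group,
  pageCompressed: number,
  pageUncompressed: number,
  lambdaCompressedByteLimit: number
): boolean {
  const newCompressed = group.pseudoLayerBytes + pageCompressed;
  const newUncompressed =
    group.pseudoLayerUncompressedBytes + pageUncompressed;

  const underCompressedLimit =
    newCompressed < lambdaCompressedByteLimit - LAMBDA_RESERVED_COMPRESSED_SIZE;
  const underUncompressedLimit =
    newUncompressed <
    MAX_UNCOMPRESSED_LAMBDA_SIZE - LAMBDA_RESERVED_UNCOMPRESSED_SIZE;

  return underCompressedLimit && underUncompressedLimit;
}

// A group at 38 MiB compressed / 180 MiB uncompressed admits a
// 3 MiB / 30 MiB page against a 50 MiB compressed limit:
// 41 < 45 compressed and 210 < 225 uncompressed, so true.
console.log(
  canAddToGroup(
    { pseudoLayerBytes: 38 * MIB, pseudoLayerUncompressedBytes: 180 * MIB },
    3 * MIB,
    30 * MIB,
    50 * MIB
  )
);
```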
```diff
@@ -1542,6 +1571,7 @@ export const outputFunctionFileSizeInfo = (
   pages: string[],
   pseudoLayer: PseudoLayer,
   pseudoLayerBytes: number,
+  pseudoLayerUncompressedBytes: number,
   compressedPages: {
     [page: string]: PseudoFile;
   }
@@ -1553,10 +1583,15 @@ export const outputFunctionFileSizeInfo = (
       ', '
     )}`
   );
-  exceededLimitOutput.push(['Large Dependencies', 'Uncompressed size']);
+  exceededLimitOutput.push([
+    'Large Dependencies',
+    'Uncompressed size',
+    'Compressed size',
+  ]);
 
   const dependencies: {
     [key: string]: {
+      compressed: number;
       uncompressed: number;
     };
   } = {};
@@ -1568,16 +1603,19 @@ export const outputFunctionFileSizeInfo = (
 
       if (!dependencies[depKey]) {
        dependencies[depKey] = {
+          compressed: 0,
           uncompressed: 0,
         };
       }
 
+      dependencies[depKey].compressed += fileItem.compBuffer.byteLength;
       dependencies[depKey].uncompressed += fileItem.uncompressedSize;
     }
   }
 
   for (const page of pages) {
     dependencies[`pages/${page}`] = {
+      compressed: compressedPages[page].compBuffer.byteLength,
       uncompressed: compressedPages[page].uncompressedSize,
     };
   }
@@ -1589,10 +1627,10 @@ export const outputFunctionFileSizeInfo = (
       const aDep = dependencies[a];
       const bDep = dependencies[b];
 
-      if (aDep.uncompressed > bDep.uncompressed) {
+      if (aDep.compressed > bDep.compressed) {
         return -1;
       }
-      if (aDep.uncompressed < bDep.uncompressed) {
+      if (aDep.compressed < bDep.compressed) {
         return 1;
       }
       return 0;
@@ -1600,11 +1638,15 @@ export const outputFunctionFileSizeInfo = (
     .forEach(depKey => {
       const dep = dependencies[depKey];
 
-      if (dep.uncompressed < 500 * KIB) {
+      if (dep.compressed < 100 * KIB && dep.uncompressed < 500 * KIB) {
         // ignore smaller dependencies to reduce noise
         return;
       }
-      exceededLimitOutput.push([depKey, prettyBytes(dep.uncompressed)]);
+      exceededLimitOutput.push([
+        depKey,
+        prettyBytes(dep.uncompressed),
+        prettyBytes(dep.compressed),
+      ]);
       numLargeDependencies += 1;
     });
 
@@ -1615,7 +1657,11 @@ export const outputFunctionFileSizeInfo = (
   }
 
   exceededLimitOutput.push([]);
-  exceededLimitOutput.push(['All dependencies', prettyBytes(pseudoLayerBytes)]);
+  exceededLimitOutput.push([
+    'All dependencies',
+    prettyBytes(pseudoLayerUncompressedBytes),
+    prettyBytes(pseudoLayerBytes),
+  ]);
 
   console.log(
     textTable(exceededLimitOutput, {
```
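The size report gains a "Compressed size" column, sorts dependencies by compressed size descending, and now skips an entry only when it is small on both axes (under 100 KiB compressed and under 500 KiB uncompressed). A sketch of that sort-and-filter step over a hypothetical dependency map:

```ts
const KIB = 1024;

interface DepSize {
  compressed: number;
  uncompressed: number;
}

// Hypothetical aggregated sizes per top-level dependency.
const dependencies: { [key: string]: DepSize } = {
  'node_modules/react-dom': { compressed: 320 * KIB, uncompressed: 1200 * KIB },
  'node_modules/lodash': { compressed: 90 * KIB, uncompressed: 550 * KIB },
  'node_modules/tslib': { compressed: 4 * KIB, uncompressed: 20 * KIB },
};

const rows = Object.keys(dependencies)
  .sort((a, b) => {
    const aDep = dependencies[a];
    const bDep = dependencies[b];
    // largest compressed size first, mirroring the restored comparator
    if (aDep.compressed > bDep.compressed) return -1;
    if (aDep.compressed < bDep.compressed) return 1;
    return 0;
  })
  // drop an entry only if BOTH compressed < 100 KiB AND uncompressed < 500 KiB
  .filter(key => {
    const dep = dependencies[key];
    return !(dep.compressed < 100 * KIB && dep.uncompressed < 500 * KIB);
  });

console.log(rows);
// ['node_modules/react-dom', 'node_modules/lodash']
// lodash stays: 90 KiB compressed is small, but 550 KiB uncompressed is not.
```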
```diff
@@ -1626,11 +1672,13 @@ export const outputFunctionFileSizeInfo = (
 
 export const detectLambdaLimitExceeding = async (
   lambdaGroups: LambdaGroup[],
+  compressedSizeLimit: number,
   compressedPages: {
     [page: string]: PseudoFile;
   }
 ) => {
   // show debug info if within 5 MB of exceeding the limit
+  const COMPRESSED_SIZE_LIMIT_CLOSE = compressedSizeLimit - 5 * MIB;
   const UNCOMPRESSED_SIZE_LIMIT_CLOSE = MAX_UNCOMPRESSED_LAMBDA_SIZE - 5 * MIB;
 
   let numExceededLimit = 0;
@@ -1640,9 +1688,13 @@ export const detectLambdaLimitExceeding = async (
   // pre-iterate to see if we are going to exceed the limit
   // or only get close so our first log line can be correct
   const filteredGroups = lambdaGroups.filter(group => {
-    const exceededLimit = group.pseudoLayerBytes > MAX_UNCOMPRESSED_LAMBDA_SIZE;
+    const exceededLimit =
+      group.pseudoLayerBytes > compressedSizeLimit ||
+      group.pseudoLayerUncompressedBytes > MAX_UNCOMPRESSED_LAMBDA_SIZE;
 
-    const closeToLimit = group.pseudoLayerBytes > UNCOMPRESSED_SIZE_LIMIT_CLOSE;
+    const closeToLimit =
+      group.pseudoLayerBytes > COMPRESSED_SIZE_LIMIT_CLOSE ||
+      group.pseudoLayerUncompressedBytes > UNCOMPRESSED_SIZE_LIMIT_CLOSE;
 
     if (
       closeToLimit ||
@@ -1665,6 +1717,8 @@ export const detectLambdaLimitExceeding = async (
   if (numExceededLimit || numCloseToLimit) {
     console.log(
       `Warning: Max serverless function size of ${prettyBytes(
+        compressedSizeLimit
+      )} compressed or ${prettyBytes(
         MAX_UNCOMPRESSED_LAMBDA_SIZE
       )} uncompressed${numExceededLimit ? '' : ' almost'} reached`
     );
@@ -1678,6 +1732,7 @@ export const detectLambdaLimitExceeding = async (
       group.pages,
       group.pseudoLayer,
       group.pseudoLayerBytes,
+      group.pseudoLayerUncompressedBytes,
       compressedPages
     );
   }
```
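`detectLambdaLimitExceeding` now receives the compressed limit as an argument and flags groups that exceed, or come within 5 MiB of, either ceiling. A worked sketch of the two threshold checks; the uncompressed ceiling below is a stand-in value:

```ts
const MIB = 1024 * 1024;
const MAX_UNCOMPRESSED_LAMBDA_SIZE = 250 * MIB; // hypothetical stand-in

function classifyGroup(
  pseudoLayerBytes: number,             // compressed total
  pseudoLayerUncompressedBytes: number, // uncompressed total
  compressedSizeLimit: number
) {
  // "close" means within 5 MiB of either limit
  const COMPRESSED_SIZE_LIMIT_CLOSE = compressedSizeLimit - 5 * MIB;
  const UNCOMPRESSED_SIZE_LIMIT_CLOSE = MAX_UNCOMPRESSED_LAMBDA_SIZE - 5 * MIB;

  const exceededLimit =
    pseudoLayerBytes > compressedSizeLimit ||
    pseudoLayerUncompressedBytes > MAX_UNCOMPRESSED_LAMBDA_SIZE;

  const closeToLimit =
    pseudoLayerBytes > COMPRESSED_SIZE_LIMIT_CLOSE ||
    pseudoLayerUncompressedBytes > UNCOMPRESSED_SIZE_LIMIT_CLOSE;

  return { exceededLimit, closeToLimit };
}

// 47 MiB compressed against a 50 MiB limit: not exceeded, but within 5 MiB.
console.log(classifyGroup(47 * MIB, 200 * MIB, 50 * MIB));
// { exceededLimit: false, closeToLimit: true }
```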
```diff
@@ -2512,14 +2567,15 @@ export function normalizeEdgeFunctionPath(
       appPathRoutesManifest[ogRoute] ||
       shortPath.replace(/(^|\/)(page|route)$/, '')
     ).replace(/^\//, '');
+
+    if (!shortPath || shortPath === '/') {
+      shortPath = 'index';
+    }
   }
 
   if (shortPath.startsWith('pages/')) {
     shortPath = shortPath.replace(/^pages\//, '');
   }
-  if (!shortPath || shortPath === '/') {
-    shortPath = 'index';
-  }
 
   return shortPath;
 }
```
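The index fallback moves inside the app-route branch, so an app route whose normalized short path collapses to an empty string (or "/") resolves to "index" before the later `pages/` prefix stripping, and plain pages no longer get the fallback at the end. A small sketch of that ordering with a hypothetical manifest; the surrounding function shape is inferred from the hunk:

```ts
// Hypothetical app-route manifest for illustration.
const appPathRoutesManifest: { [route: string]: string } = {
  '/page': '/',
};

function normalizeShortPath(shortPath: string, isAppPath: boolean): string {
  if (isAppPath) {
    const ogRoute = `/${shortPath}`;
    shortPath = (
      appPathRoutesManifest[ogRoute] ||
      shortPath.replace(/(^|\/)(page|route)$/, '')
    ).replace(/^\//, '');

    // moved fallback: empty app paths resolve to "index" here
    if (!shortPath || shortPath === '/') {
      shortPath = 'index';
    }
  }

  if (shortPath.startsWith('pages/')) {
    shortPath = shortPath.replace(/^pages\//, '');
  }

  return shortPath;
}

console.log(normalizeShortPath('page', true));        // 'index'
console.log(normalizeShortPath('pages/blog', false)); // 'blog'
```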