diff --git a/cmd/evrtools/main.go b/cmd/evrtools/main.go index cea300d..a6cf782 100644 --- a/cmd/evrtools/main.go +++ b/cmd/evrtools/main.go @@ -7,6 +7,7 @@ import ( "io" "os" "path/filepath" + "strings" "github.com/EchoTools/evrFileTools/pkg/manifest" ) @@ -20,6 +21,8 @@ var ( preserveGroups bool forceOverwrite bool useDecimalName bool + exportTypes string + quickMode bool ) func init() { @@ -31,6 +34,8 @@ func init() { flag.BoolVar(&preserveGroups, "preserve-groups", false, "Preserve frame grouping in output") flag.BoolVar(&forceOverwrite, "force", false, "Allow non-empty output directory") flag.BoolVar(&useDecimalName, "decimal-names", false, "Use decimal format for filenames (default is hex)") + flag.StringVar(&exportTypes, "export", "", "Comma-separated list of types to export (textures, tints)") + flag.BoolVar(&quickMode, "quick", false, "Quick swap mode (modifies game files in-place)") } func main() { @@ -134,11 +139,37 @@ func runExtract() error { } defer pkg.Close() + var filterTypes []int64 + if exportTypes != "" { + for _, t := range strings.Split(exportTypes, ",") { + switch strings.TrimSpace(t) { + case "textures": + // Use variables to avoid constant overflow checks for negative int64s + t1 := uint64(0xBEAC1969CB7B8861) + t2 := uint64(0x4A4C32C49300B8A0) + t3 := uint64(0xe2efe7289d5985b8) + t4 := uint64(0x489bb35d53ca50e9) + filterTypes = append(filterTypes, + int64(t1), // -4707359568332879775 + int64(t2), // 5353709876897953952 + int64(t3), // -2094201140079393352 + int64(t4), // 5231972605540061417 + ) + case "tints": + filterTypes = append(filterTypes, + int64(uint64(0x24CBFD54E9A7F2EA)), // Folder: 24cbfd54e9a7f2ea + int64(uint64(0x32f30fe361939dee)), // 3671295590506143214 + ) + } + } + } + fmt.Println("Extracting files...") if err := pkg.Extract( outputDir, manifest.WithPreserveGroups(preserveGroups), manifest.WithDecimalNames(useDecimalName), + manifest.WithTypeFilter(filterTypes), ); err != nil { return fmt.Errorf("extract: %w", err) } @@ 
-154,6 +185,21 @@ func runBuild() error { return fmt.Errorf("scan files: %w", err) } + // If dataDir is provided, we are in "repack" mode where we merge original files + if dataDir != "" { + manifestPath := filepath.Join(dataDir, "manifests", packageName) + if _, err := os.Stat(manifestPath); err == nil { + if quickMode { + m, err := manifest.ReadFile(manifestPath) + if err != nil { + return fmt.Errorf("read manifest: %w", err) + } + return manifest.QuickRepack(m, files, dataDir, packageName) + } + return runRepack(files) + } + } + totalFiles := 0 for _, group := range files { totalFiles += len(group) @@ -180,3 +226,14 @@ func runBuild() error { fmt.Printf("Build complete. Output written to %s\n", outputDir) return nil } + +func runRepack(inputFiles [][]manifest.ScannedFile) error { + fmt.Println("Loading original manifest for repacking...") + manifestPath := filepath.Join(dataDir, "manifests", packageName) + m, err := manifest.ReadFile(manifestPath) + if err != nil { + return fmt.Errorf("read manifest: %w", err) + } + + return manifest.Repack(m, inputFiles, outputDir, packageName, dataDir) +} diff --git a/cmd/texconv/main.go b/cmd/texconv/main.go index 633cdc0..8fd6827 100644 --- a/cmd/texconv/main.go +++ b/cmd/texconv/main.go @@ -25,6 +25,7 @@ import ( "image" "image/png" "io" + "math" "os" "path/filepath" "strings" @@ -64,8 +65,12 @@ const ( DXGIFormatBC6HSF16 = 96 DXGIFormatBC7Unorm = 98 // High quality DXGIFormatBC7UnormSRGB = 99 + DXGIFormatR8Unorm = 61 // Grayscale + DXGIFormatR11G11B10Float = 26 // Packed Float DXGIFormatR8G8B8A8Unorm = 28 // Uncompressed RGBA DXGIFormatR8G8B8A8UnormSRGB = 29 + DXGIFormatB8G8R8A8UnormSRGB = 91 // BGRA sRGB + DXGIFormatB8G8R8A8Typeless = 87 // BGRA Typeless ) // DDSHeader represents the main DDS file header (124 bytes) @@ -219,7 +224,7 @@ func decodeDDS(inputPath, outputPath string) error { } // Decompress to RGBA - rgba, err := decompressBC(compressedData, info) + img, err := decompressBC(compressedData, info) if err != nil 
{ return fmt.Errorf("decompress: %w", err) } @@ -231,7 +236,7 @@ func decodeDDS(inputPath, outputPath string) error { } defer outFile.Close() - if err := png.Encode(outFile, rgba); err != nil { + if err := png.Encode(outFile, img); err != nil { return fmt.Errorf("encode png: %w", err) } @@ -465,6 +470,26 @@ func parseDDSHeader(r io.ReadSeeker) (*TextureInfo, error) { info.FormatName = "BC7" info.Compression = "BC7" info.BytesPerPixel = 1 + case DXGIFormatR8Unorm: + info.FormatName = "R8_UNORM" + info.Compression = "None" + info.BytesPerPixel = 1 + case DXGIFormatR11G11B10Float: + info.FormatName = "R11G11B10_FLOAT" + info.Compression = "None" + info.BytesPerPixel = 4 + case DXGIFormatR8G8B8A8Unorm, DXGIFormatR8G8B8A8UnormSRGB: + info.FormatName = "RGBA8" + info.Compression = "None" + info.BytesPerPixel = 4 + case DXGIFormatB8G8R8A8UnormSRGB: + info.FormatName = "BGRA8" + info.Compression = "None" + info.BytesPerPixel = 4 + case DXGIFormatB8G8R8A8Typeless: + info.FormatName = "BGRA8_TYPELESS" + info.Compression = "None" + info.BytesPerPixel = 4 default: return nil, fmt.Errorf("unsupported DXGI format: %d", info.Format) } @@ -498,32 +523,56 @@ func calculateMipSize(width, height, format uint32) uint32 { DXGIFormatBC6HUF16, DXGIFormatBC6HSF16, DXGIFormatBC7Unorm, DXGIFormatBC7UnormSRGB: return blockW * blockH * 16 // 16 bytes per block + case DXGIFormatR8Unorm: + return width * height + case DXGIFormatR11G11B10Float: + return width * height * 4 + case DXGIFormatR8G8B8A8Unorm, DXGIFormatR8G8B8A8UnormSRGB: + return width * height * 4 + case DXGIFormatB8G8R8A8UnormSRGB: + return width * height * 4 + case DXGIFormatB8G8R8A8Typeless: + return width * height * 4 default: return width * height * 4 // Fallback: uncompressed RGBA } } // decompressBC decompresses BC-compressed data to RGBA -func decompressBC(data []byte, info *TextureInfo) (*image.RGBA, error) { - rgba := image.NewRGBA(image.Rect(0, 0, int(info.Width), int(info.Height))) +func decompressBC(data []byte, info 
*TextureInfo) (*image.NRGBA, error) { + nrgba := image.NewNRGBA(image.Rect(0, 0, int(info.Width), int(info.Height))) + + isSRGB := info.Format == DXGIFormatBC1UnormSRGB || + info.Format == DXGIFormatBC3UnormSRGB || + info.Format == DXGIFormatBC7UnormSRGB switch info.Format { case DXGIFormatBC1Unorm, DXGIFormatBC1UnormSRGB: - return decompressBC1(data, int(info.Width), int(info.Height)) + return decompressBC1(data, int(info.Width), int(info.Height), isSRGB) case DXGIFormatBC3Unorm, DXGIFormatBC3UnormSRGB: - return decompressBC3(data, int(info.Width), int(info.Height)) + return decompressBC3(data, int(info.Width), int(info.Height), isSRGB) case DXGIFormatBC5Unorm, DXGIFormatBC5SNorm: return decompressBC5(data, int(info.Width), int(info.Height)) + case DXGIFormatR8Unorm: + return decompressR8(data, int(info.Width), int(info.Height)) + case DXGIFormatR11G11B10Float: + return decompressR11G11B10Float(data, int(info.Width), int(info.Height)) + case DXGIFormatR8G8B8A8Unorm, DXGIFormatR8G8B8A8UnormSRGB: + return decompressRGBA(data, int(info.Width), int(info.Height)) + case DXGIFormatB8G8R8A8UnormSRGB: + return decompressBGRA(data, int(info.Width), int(info.Height)) + case DXGIFormatB8G8R8A8Typeless: + return decompressBGRA(data, int(info.Width), int(info.Height)) default: return nil, fmt.Errorf("decompression not implemented for format: %s", info.FormatName) } - return rgba, nil + return nrgba, nil } // decompressBC1 decompresses BC1/DXT1 to RGBA -func decompressBC1(data []byte, width, height int) (*image.RGBA, error) { - rgba := image.NewRGBA(image.Rect(0, 0, width, height)) +func decompressBC1(data []byte, width, height int, isSRGB bool) (*image.NRGBA, error) { + nrgba := image.NewNRGBA(image.Rect(0, 0, width, height)) blockW := (width + 3) / 4 blockH := (height + 3) / 4 @@ -541,40 +590,73 @@ func decompressBC1(data []byte, width, height int) (*image.RGBA, error) { offset += 4 // Decode RGB565 - r0 := uint8((c0 >> 11) * 255 / 31) - g0 := uint8(((c0 >> 5) & 0x3F) * 255 / 
63) - b0 := uint8((c0 & 0x1F) * 255 / 31) - - r1 := uint8((c1 >> 11) * 255 / 31) - g1 := uint8(((c1 >> 5) & 0x3F) * 255 / 63) - b1 := uint8((c1 & 0x1F) * 255 / 31) + r0_5 := (c0 >> 11) & 0x1F + g0_6 := (c0 >> 5) & 0x3F + b0_5 := c0 & 0x1F + r0_8 := uint8((r0_5 << 3) | (r0_5 >> 2)) + g0_8 := uint8((g0_6 << 2) | (g0_6 >> 4)) + b0_8 := uint8((b0_5 << 3) | (b0_5 >> 2)) + + r1_5 := (c1 >> 11) & 0x1F + g1_6 := (c1 >> 5) & 0x3F + b1_5 := c1 & 0x1F + r1_8 := uint8((r1_5 << 3) | (r1_5 >> 2)) + g1_8 := uint8((g1_6 << 2) | (g1_6 >> 4)) + b1_8 := uint8((b1_5 << 3) | (b1_5 >> 2)) // Color palette var colors [4][4]uint8 - colors[0] = [4]uint8{r0, g0, b0, 255} - colors[1] = [4]uint8{r1, g1, b1, 255} - - if c0 > c1 { - colors[2] = [4]uint8{ - (2*r0 + r1) / 3, - (2*g0 + g1) / 3, - (2*b0 + b1) / 3, - 255, + + if isSRGB { + lr0 := srgbToLinear(r0_8) + lg0 := srgbToLinear(g0_8) + lb0 := srgbToLinear(b0_8) + lr1 := srgbToLinear(r1_8) + lg1 := srgbToLinear(g1_8) + lb1 := srgbToLinear(b1_8) + + var linearColors [4][3]float32 + linearColors[0] = [3]float32{lr0, lg0, lb0} + linearColors[1] = [3]float32{lr1, lg1, lb1} + + if c0 > c1 { + linearColors[2] = [3]float32{(2*lr0 + lr1) / 3, (2*lg0 + lg1) / 3, (2*lb0 + lb1) / 3} + linearColors[3] = [3]float32{(lr0 + 2*lr1) / 3, (lg0 + 2*lg1) / 3, (lb0 + 2*lb1) / 3} + } else { + linearColors[2] = [3]float32{(lr0 + lr1) / 2, (lg0 + lg1) / 2, (lb0 + lb1) / 2} + linearColors[3] = [3]float32{0, 0, 0} } - colors[3] = [4]uint8{ - (r0 + 2*r1) / 3, - (g0 + 2*g1) / 3, - (b0 + 2*b1) / 3, - 255, + + for i := 0; i < 4; i++ { + colors[i][0] = linearToSrgb(linearColors[i][0]) + colors[i][1] = linearToSrgb(linearColors[i][1]) + colors[i][2] = linearToSrgb(linearColors[i][2]) + colors[i][3] = 255 + } + if c0 <= c1 { + colors[3][3] = 0 } } else { - colors[2] = [4]uint8{ - (r0 + r1) / 2, - (g0 + g1) / 2, - (b0 + b1) / 2, - 255, + colors[0] = [4]uint8{r0_8, g0_8, b0_8, 255} + colors[1] = [4]uint8{r1_8, g1_8, b1_8, 255} + + if c0 > c1 { + colors[2] = [4]uint8{ + 
(2*r0_8 + r1_8) / 3, + (2*g0_8 + g1_8) / 3, + (2*b0_8 + b1_8) / 3, + 255, + } + colors[3] = [4]uint8{ + (r0_8 + 2*r1_8) / 3, + (g0_8 + 2*g1_8) / 3, + (b0_8 + 2*b1_8) / 3, + 255, + } + } else { + colors[2] = [4]uint8{(r0_8 + r1_8) / 2, (g0_8 + g1_8) / 2, (b0_8 + b1_8) / 2, 255} + colors[3] = [4]uint8{0, 0, 0, 0} // Transparent } - colors[3] = [4]uint8{0, 0, 0, 0} // Transparent } // Read index bits @@ -594,22 +676,22 @@ func decompressBC1(data []byte, width, height int) (*image.RGBA, error) { idx := (indices >> (2 * (py*4 + px))) & 3 color := colors[idx] - offset := rgba.PixOffset(x, y) - rgba.Pix[offset+0] = color[0] - rgba.Pix[offset+1] = color[1] - rgba.Pix[offset+2] = color[2] - rgba.Pix[offset+3] = color[3] + offset := nrgba.PixOffset(x, y) + nrgba.Pix[offset+0] = color[0] + nrgba.Pix[offset+1] = color[1] + nrgba.Pix[offset+2] = color[2] + nrgba.Pix[offset+3] = color[3] } } } } - return rgba, nil + return nrgba, nil } // decompressBC3 decompresses BC3/DXT5 to RGBA -func decompressBC3(data []byte, width, height int) (*image.RGBA, error) { - rgba := image.NewRGBA(image.Rect(0, 0, width, height)) +func decompressBC3(data []byte, width, height int, isSRGB bool) (*image.NRGBA, error) { + nrgba := image.NewNRGBA(image.Rect(0, 0, width, height)) blockW := (width + 3) / 4 blockH := (height + 3) / 4 @@ -651,19 +733,46 @@ func decompressBC3(data []byte, width, height int) (*image.RGBA, error) { c1 := uint16(data[offset+2]) | uint16(data[offset+3])<<8 offset += 4 - r0 := uint8((c0 >> 11) * 255 / 31) - g0 := uint8(((c0 >> 5) & 0x3F) * 255 / 63) - b0 := uint8((c0 & 0x1F) * 255 / 31) + r0_5 := (c0 >> 11) & 0x1F + g0_6 := (c0 >> 5) & 0x3F + b0_5 := c0 & 0x1F + r0_8 := uint8((r0_5 << 3) | (r0_5 >> 2)) + g0_8 := uint8((g0_6 << 2) | (g0_6 >> 4)) + b0_8 := uint8((b0_5 << 3) | (b0_5 >> 2)) - r1 := uint8((c1 >> 11) * 255 / 31) - g1 := uint8(((c1 >> 5) & 0x3F) * 255 / 63) - b1 := uint8((c1 & 0x1F) * 255 / 31) + r1_5 := (c1 >> 11) & 0x1F + g1_6 := (c1 >> 5) & 0x3F + b1_5 := c1 & 0x1F 
+ r1_8 := uint8((r1_5 << 3) | (r1_5 >> 2)) + g1_8 := uint8((g1_6 << 2) | (g1_6 >> 4)) + b1_8 := uint8((b1_5 << 3) | (b1_5 >> 2)) var colors [4][3]uint8 - colors[0] = [3]uint8{r0, g0, b0} - colors[1] = [3]uint8{r1, g1, b1} - colors[2] = [3]uint8{(2*r0 + r1) / 3, (2*g0 + g1) / 3, (2*b0 + b1) / 3} - colors[3] = [3]uint8{(r0 + 2*r1) / 3, (g0 + 2*g1) / 3, (b0 + 2*b1) / 3} + if isSRGB { + lr0 := srgbToLinear(r0_8) + lg0 := srgbToLinear(g0_8) + lb0 := srgbToLinear(b0_8) + lr1 := srgbToLinear(r1_8) + lg1 := srgbToLinear(g1_8) + lb1 := srgbToLinear(b1_8) + + var linearColors [4][3]float32 + linearColors[0] = [3]float32{lr0, lg0, lb0} + linearColors[1] = [3]float32{lr1, lg1, lb1} + linearColors[2] = [3]float32{(2*lr0 + lr1) / 3, (2*lg0 + lg1) / 3, (2*lb0 + lb1) / 3} + linearColors[3] = [3]float32{(lr0 + 2*lr1) / 3, (lg0 + 2*lg1) / 3, (lb0 + 2*lb1) / 3} + + for i := 0; i < 4; i++ { + colors[i][0] = linearToSrgb(linearColors[i][0]) + colors[i][1] = linearToSrgb(linearColors[i][1]) + colors[i][2] = linearToSrgb(linearColors[i][2]) + } + } else { + colors[0] = [3]uint8{r0_8, g0_8, b0_8} + colors[1] = [3]uint8{r1_8, g1_8, b1_8} + colors[2] = [3]uint8{(2*r0_8 + r1_8) / 3, (2*g0_8 + g1_8) / 3, (2*b0_8 + b1_8) / 3} + colors[3] = [3]uint8{(r0_8 + 2*r1_8) / 3, (g0_8 + 2*g1_8) / 3, (b0_8 + 2*b1_8) / 3} + } colorIndices := uint32(data[offset]) | uint32(data[offset+1])<<8 | uint32(data[offset+2])<<16 | uint32(data[offset+3])<<24 @@ -685,26 +794,159 @@ func decompressBC3(data []byte, width, height int) (*image.RGBA, error) { color := colors[colorIdx] alpha := alphas[alphaIdx] - pixOffset := rgba.PixOffset(x, y) - rgba.Pix[pixOffset+0] = color[0] - rgba.Pix[pixOffset+1] = color[1] - rgba.Pix[pixOffset+2] = color[2] - rgba.Pix[pixOffset+3] = alpha + pixOffset := nrgba.PixOffset(x, y) + nrgba.Pix[pixOffset+0] = color[0] + nrgba.Pix[pixOffset+1] = color[1] + nrgba.Pix[pixOffset+2] = color[2] + nrgba.Pix[pixOffset+3] = alpha } } } } - return rgba, nil + return nrgba, nil } // decompressBC5 
decompresses BC5 (normal maps) to RGBA -func decompressBC5(data []byte, width, height int) (*image.RGBA, error) { +func decompressBC5(data []byte, width, height int) (*image.NRGBA, error) { // BC5 stores two channels (RG for normal maps) // We'll decode them and reconstruct Z = sqrt(1 - X^2 - Y^2) return nil, fmt.Errorf("BC5 decompression not yet implemented") } +// decompressR8 decompresses R8_UNORM (grayscale) to RGBA +func decompressR8(data []byte, width, height int) (*image.NRGBA, error) { + nrgba := image.NewNRGBA(image.Rect(0, 0, width, height)) + if len(data) < width*height { + return nil, fmt.Errorf("data truncated") + } + + offset := 0 + for y := 0; y < height; y++ { + for x := 0; x < width; x++ { + v := data[offset] + offset++ + pixOffset := nrgba.PixOffset(x, y) + nrgba.Pix[pixOffset+0] = v + nrgba.Pix[pixOffset+1] = v + nrgba.Pix[pixOffset+2] = v + nrgba.Pix[pixOffset+3] = 255 + } + } + return nrgba, nil +} + +// decompressRGBA decompresses uncompressed RGBA to RGBA +func decompressRGBA(data []byte, width, height int) (*image.NRGBA, error) { + nrgba := image.NewNRGBA(image.Rect(0, 0, width, height)) + if len(data) < width*height*4 { + return nil, fmt.Errorf("data truncated") + } + copy(nrgba.Pix, data[:width*height*4]) + return nrgba, nil +} + +// decompressBGRA decompresses uncompressed BGRA to RGBA +func decompressBGRA(data []byte, width, height int) (*image.NRGBA, error) { + nrgba := image.NewNRGBA(image.Rect(0, 0, width, height)) + if len(data) < width*height*4 { + return nil, fmt.Errorf("data truncated") + } + + count := width * height + for i := 0; i < count; i++ { + offset := i * 4 + b := data[offset] + g := data[offset+1] + r := data[offset+2] + a := data[offset+3] + + nrgba.Pix[offset] = r + nrgba.Pix[offset+1] = g + nrgba.Pix[offset+2] = b + nrgba.Pix[offset+3] = a + } + return nrgba, nil +} + +// decompressR11G11B10Float decompresses packed float format to RGBA +func decompressR11G11B10Float(data []byte, width, height int) (*image.NRGBA, 
error) { + nrgba := image.NewNRGBA(image.Rect(0, 0, width, height)) + if len(data) < width*height*4 { + return nil, fmt.Errorf("data truncated") + } + + offset := 0 + for y := 0; y < height; y++ { + for x := 0; x < width; x++ { + packed := uint32(data[offset]) | uint32(data[offset+1])<<8 | uint32(data[offset+2])<<16 | uint32(data[offset+3])<<24 + offset += 4 + + r := f11ToF32(packed & 0x7FF) + g := f11ToF32((packed >> 11) & 0x7FF) + b := f10ToF32((packed >> 22) & 0x3FF) + + // Clamp to 0-255 + r8 := uint8(math.Min(255, math.Max(0, float64(r)*255))) + g8 := uint8(math.Min(255, math.Max(0, float64(g)*255))) + b8 := uint8(math.Min(255, math.Max(0, float64(b)*255))) + + pixOffset := nrgba.PixOffset(x, y) + nrgba.Pix[pixOffset+0] = r8 + nrgba.Pix[pixOffset+1] = g8 + nrgba.Pix[pixOffset+2] = b8 + nrgba.Pix[pixOffset+3] = 255 + } + } + return nrgba, nil +} + +func f11ToF32(u uint32) float32 { + exponent := (u >> 6) & 0x1F + mantissa := u & 0x3F + if exponent == 0 { + if mantissa == 0 { + return 0.0 + } + return float32(mantissa) / 64.0 * (1.0 / 16384.0) + } else if exponent == 31 { + return 65504.0 + } + return float32(math.Pow(2, float64(exponent)-15)) * (1.0 + float32(mantissa)/64.0) +} + +func f10ToF32(u uint32) float32 { + exponent := (u >> 5) & 0x1F + mantissa := u & 0x1F + if exponent == 0 { + if mantissa == 0 { + return 0.0 + } + return float32(mantissa) / 32.0 * (1.0 / 16384.0) + } else if exponent == 31 { + return 65504.0 + } + return float32(math.Pow(2, float64(exponent)-15)) * (1.0 + float32(mantissa)/32.0) +} + +// srgbToLinear converts an sRGB byte value to a linear float32 value. +func srgbToLinear(c uint8) float32 { + v := float32(c) / 255.0 + if v <= 0.04045 { + return v / 12.92 + } + return float32(math.Pow(float64((v+0.055)/1.055), 2.4)) +} + +// linearToSrgb converts a linear float32 value to an sRGB byte value. 
+func linearToSrgb(v float32) uint8 { + if v <= 0.0031308 { + return uint8(math.Min(255, math.Max(0, float64(v)*12.92*255.0))) + } + srgb := 1.055*math.Pow(float64(v), 1.0/2.4) - 0.055 + return uint8(math.Min(255, math.Max(0, srgb*255.0))) +} + // writeDDSFile writes a complete DDS file with DX10 header func writeDDSFile(w io.Writer, width, height, mipCount, dxgiFormat uint32, compressedData []byte) error { // Calculate pitch/linear size diff --git a/make b/make new file mode 100644 index 0000000..e69de29 diff --git a/pkg/manifest/builder.go b/pkg/manifest/builder.go index 20c523c..7a0523d 100644 --- a/pkg/manifest/builder.go +++ b/pkg/manifest/builder.go @@ -6,6 +6,7 @@ import ( "math" "os" "path/filepath" + "strings" "github.com/DataDog/zstd" ) @@ -16,6 +17,11 @@ const ( // MaxPackageSize is the maximum size of a single package file. MaxPackageSize = math.MaxInt32 + + // MaxFrameSize is the maximum size of a single uncompressed frame. + // This prevents frames from becoming too large when grouping files, + // which can cause memory issues or overflows during decompression. + MaxFrameSize = 1 * 1024 * 1024 ) // Builder constructs packages and manifests from a set of files. 
@@ -91,24 +97,40 @@ func (b *Builder) Build(fileGroups [][]ScannedFile) (*Manifest, error) { } for _, file := range group { - data, err := os.ReadFile(file.Path) + var data []byte + var err error + + if file.Path != "" { + data, err = os.ReadFile(file.Path) + } else if file.SrcPackage != nil && file.SrcContent != nil { + data, err = file.SrcPackage.ReadContent(file.SrcContent) + if err != nil && strings.Contains(err.Error(), "too short") { + fmt.Printf("Warning: skipping corrupted file %x/%x: %v\n", file.TypeSymbol, file.FileSymbol, err) + data = []byte{} + err = nil + } + } else { + err = fmt.Errorf("no source for file %x/%x", file.TypeSymbol, file.FileSymbol) + } + if err != nil { - return nil, fmt.Errorf("read file %s: %w", file.Path, err) + return nil, fmt.Errorf("read file %x/%x: %w", file.TypeSymbol, file.FileSymbol, err) } - manifest.FrameContents = append(manifest.FrameContents, FrameContent{ - TypeSymbol: file.TypeSymbol, - FileSymbol: file.FileSymbol, - FrameIndex: frameIndex, - DataOffset: currentOffset, - Size: uint32(len(data)), - Alignment: 1, - }) + // Check if adding this file would exceed max frame size + // We only split if the frame is not empty to ensure we don't loop infinitely on large files + if currentFrame.Len() > 0 && currentFrame.Len()+len(data) > MaxFrameSize { + if err := b.writeFrame(manifest, ¤tFrame, frameIndex); err != nil { + return nil, err + } + frameIndex++ + currentFrame.Reset() + currentOffset = 0 + } - manifest.Metadata = append(manifest.Metadata, FileMetadata{ - TypeSymbol: file.TypeSymbol, - FileSymbol: file.FileSymbol, - }) + if !file.SkipManifest { + b.addFileToManifest(manifest, file, frameIndex, currentOffset) + } currentFrame.Write(data) currentOffset += uint32(len(data)) @@ -131,24 +153,45 @@ func (b *Builder) Build(fileGroups [][]ScannedFile) (*Manifest, error) { return manifest, nil } +func (b *Builder) addFileToManifest(manifest *Manifest, file ScannedFile, frameIndex, offset uint32) { + alignment := uint32(1) + + 
manifest.FrameContents = append(manifest.FrameContents, FrameContent{ + TypeSymbol: file.TypeSymbol, + FileSymbol: file.FileSymbol, + FrameIndex: frameIndex, + DataOffset: offset, + Size: file.Size, + Alignment: alignment, + }) + + manifest.Metadata = append(manifest.Metadata, FileMetadata{ + TypeSymbol: file.TypeSymbol, + FileSymbol: file.FileSymbol, + }) +} + func (b *Builder) writeFrame(manifest *Manifest, data *bytes.Buffer, index uint32) error { compressed, err := zstd.CompressLevel(nil, data.Bytes(), b.compressionLevel) if err != nil { return fmt.Errorf("compress frame %d: %w", index, err) } + return b.writeCompressedFrame(manifest, compressed, uint32(data.Len())) +} +func (b *Builder) writeCompressedFrame(manifest *Manifest, compressed []byte, uncompressedSize uint32) error { packageIndex := manifest.Header.PackageCount - 1 packagePath := filepath.Join(b.outputDir, "packages", fmt.Sprintf("%s_%d", b.packageName, packageIndex)) // Check if we need a new package file + // We use os.Stat to get the actual file size to ensure the manifest offset is correct var offset uint32 - if len(manifest.Frames) > 0 { - lastFrame := manifest.Frames[len(manifest.Frames)-1] - offset = lastFrame.Offset + lastFrame.CompressedSize + if info, err := os.Stat(packagePath); err == nil { + offset = uint32(info.Size()) } maxSize := int64(MaxPackageSize) - if int64(offset) >= maxSize || int64(offset)+int64(len(compressed)) > maxSize { + if int64(offset)+int64(len(compressed)) > maxSize { manifest.Header.PackageCount++ packageIndex++ packagePath = filepath.Join(b.outputDir, "packages", fmt.Sprintf("%s_%d", b.packageName, packageIndex)) @@ -162,14 +205,14 @@ func (b *Builder) writeFrame(manifest *Manifest, data *bytes.Buffer, index uint3 defer f.Close() if _, err := f.Write(compressed); err != nil { - return fmt.Errorf("write frame %d: %w", index, err) + return fmt.Errorf("write compressed data: %w", err) } manifest.Frames = append(manifest.Frames, Frame{ PackageIndex: packageIndex, 
Offset: offset, CompressedSize: uint32(len(compressed)), - Length: uint32(data.Len()), + Length: uncompressedSize, }) b.incrementSection(&manifest.Header.Frames, 1) diff --git a/pkg/manifest/package.go b/pkg/manifest/package.go index de00e26..e11feb3 100644 --- a/pkg/manifest/package.go +++ b/pkg/manifest/package.go @@ -14,6 +14,10 @@ import ( type Package struct { manifest *Manifest files []packageFile + + // Decompression cache + lastFrameIdx uint32 + lastFrameData []byte } type packageFile interface { @@ -31,8 +35,9 @@ func OpenPackage(manifest *Manifest, basePath string) (*Package, error) { count := manifest.PackageCount() pkg := &Package{ - manifest: manifest, - files: make([]packageFile, count), + manifest: manifest, + files: make([]packageFile, count), + lastFrameIdx: ^uint32(0), // Invalid index } for i := range count { @@ -58,6 +63,7 @@ func (p *Package) Close() error { } } } + p.lastFrameData = nil return lastErr } @@ -66,6 +72,84 @@ func (p *Package) Manifest() *Manifest { return p.manifest } +// ReadContent reads the data for a specific file content. 
+func (p *Package) ReadContent(fc *FrameContent) ([]byte, error) { + // Check cache + if p.lastFrameData != nil && p.lastFrameIdx == fc.FrameIndex { + if uint32(len(p.lastFrameData)) < fc.DataOffset+fc.Size { + return nil, fmt.Errorf("frame data too short for content") + } + return p.lastFrameData[fc.DataOffset : fc.DataOffset+fc.Size], nil + } + + // Load frame + if int(fc.FrameIndex) >= len(p.manifest.Frames) { + return nil, fmt.Errorf("invalid frame index %d", fc.FrameIndex) + } + frame := p.manifest.Frames[fc.FrameIndex] + + if frame.Length == 0 { + return nil, nil + } + + // Read compressed data + if int(frame.PackageIndex) >= len(p.files) { + return nil, fmt.Errorf("invalid package index %d", frame.PackageIndex) + } + file := p.files[frame.PackageIndex] + if _, err := file.Seek(int64(frame.Offset), io.SeekStart); err != nil { + return nil, fmt.Errorf("seek frame %d: %w", fc.FrameIndex, err) + } + + compressed := make([]byte, frame.CompressedSize) + if _, err := io.ReadFull(file, compressed); err != nil { + return nil, fmt.Errorf("read frame %d: %w", fc.FrameIndex, err) + } + + // Decompress + decompressed, err := zstd.Decompress(nil, compressed) + if err != nil { + return nil, fmt.Errorf("decompress frame %d: %w", fc.FrameIndex, err) + } + + // Update cache + p.lastFrameIdx = fc.FrameIndex + p.lastFrameData = decompressed + + if uint32(len(decompressed)) < fc.DataOffset+fc.Size { + return nil, fmt.Errorf("decompressed frame too short") + } + + return decompressed[fc.DataOffset : fc.DataOffset+fc.Size], nil +} + +// ReadRawFrame reads the raw compressed data for a specific frame. 
+func (p *Package) ReadRawFrame(frameIndex uint32) ([]byte, error) { + if int(frameIndex) >= len(p.manifest.Frames) { + return nil, fmt.Errorf("invalid frame index %d", frameIndex) + } + frame := p.manifest.Frames[frameIndex] + + if frame.Length == 0 { + return nil, nil + } + + if int(frame.PackageIndex) >= len(p.files) { + return nil, fmt.Errorf("invalid package index %d", frame.PackageIndex) + } + file := p.files[frame.PackageIndex] + if _, err := file.Seek(int64(frame.Offset), io.SeekStart); err != nil { + return nil, fmt.Errorf("seek frame %d: %w", frameIndex, err) + } + + compressed := make([]byte, frame.CompressedSize) + if _, err := io.ReadFull(file, compressed); err != nil { + return nil, fmt.Errorf("read frame %d: %w", frameIndex, err) + } + + return compressed, nil +} + // Extract extracts all files from the package to the output directory. func (p *Package) Extract(outputDir string, opts ...ExtractOption) error { cfg := &extractConfig{} @@ -117,6 +201,10 @@ func (p *Package) Extract(outputDir string, opts ...ExtractOption) error { // Extract files from this frame using pre-built index contents := frameIndex[uint32(frameIdx)] for _, fc := range contents { + if len(cfg.allowedTypes) > 0 && !cfg.allowedTypes[fc.TypeSymbol] { + continue + } + var fileName string if cfg.decimalNames { fileName = strconv.FormatInt(fc.FileSymbol, 10) @@ -154,6 +242,7 @@ func (p *Package) Extract(outputDir string, opts ...ExtractOption) error { type extractConfig struct { preserveGroups bool decimalNames bool + allowedTypes map[int64]bool } // ExtractOption configures extraction behavior. @@ -172,3 +261,15 @@ func WithDecimalNames(decimal bool) ExtractOption { c.decimalNames = decimal } } + +// WithTypeFilter configures extraction to only include specific file types. 
+func WithTypeFilter(types []int64) ExtractOption { + return func(c *extractConfig) { + if len(types) > 0 { + c.allowedTypes = make(map[int64]bool, len(types)) + for _, t := range types { + c.allowedTypes[t] = true + } + } + } +} diff --git a/pkg/manifest/repack.go b/pkg/manifest/repack.go new file mode 100644 index 0000000..25fffe3 --- /dev/null +++ b/pkg/manifest/repack.go @@ -0,0 +1,723 @@ +package manifest + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "os" + "path/filepath" + "runtime" + "sort" + "sync" + + "github.com/DataDog/zstd" +) + +// Pools to eliminate GC overhead +var ( + readPool = sync.Pool{New: func() interface{} { return make([]byte, 0, 1024*1024) }} + decompPool = sync.Pool{New: func() interface{} { return make([]byte, 0, 4*1024*1024) }} + compPool = sync.Pool{New: func() interface{} { return make([]byte, 0, 1024*1024) }} + constructionPool = sync.Pool{New: func() interface{} { return bytes.NewBuffer(make([]byte, 0, 4*1024*1024)) }} +) + +type frameResult struct { + index int + data []byte + err error + decompressedSize uint32 + isModified bool + shouldSkip bool + rawReadBuf []byte + decompBuf []byte +} + +type fcWrapper struct { + index int + fc FrameContent +} + +type packageWriter struct { + fileHandle *os.File + pkgIndex uint32 + outputDir string + pkgName string + created map[uint32]bool + currentOffset int64 + minPkgIndex uint32 +} + +func (pw *packageWriter) write(manifest *Manifest, data []byte, decompressedSize uint32) error { + os.MkdirAll(fmt.Sprintf("%s/packages", pw.outputDir), 0777) + + cEntry := Frame{} + if len(manifest.Frames) > 0 { + cEntry = manifest.Frames[len(manifest.Frames)-1] + } + activePackageNum := cEntry.PackageIndex + + // Ensure we don't write to protected original packages + if activePackageNum < pw.minPkgIndex { + activePackageNum = pw.minPkgIndex + } + + // Ensure manifest knows about this package + if manifest.Header.PackageCount <= activePackageNum { + manifest.Header.PackageCount = 
activePackageNum + 1 + } + + // Check if the current frame forces a rotation, BUT only if we are still in the same package. + // If we moved to a new package (activePackageNum > cEntry.PackageIndex), the offset of cEntry is irrelevant. + if activePackageNum == cEntry.PackageIndex { + if int64(cEntry.Offset)+int64(cEntry.CompressedSize)+int64(len(data)) > math.MaxInt32 { + activePackageNum++ + manifest.Header.PackageCount = activePackageNum + 1 + } + } + + // Open file and verify size constraints (handling existing files or rotation) + for { + if pw.fileHandle == nil || pw.pkgIndex != activePackageNum { + if pw.fileHandle != nil { + pw.fileHandle.Close() + } + + currentPackagePath := fmt.Sprintf("%s/packages/%s_%d", pw.outputDir, pw.pkgName, activePackageNum) + flags := os.O_RDWR | os.O_CREATE | os.O_APPEND + + if !pw.created[activePackageNum] { + flags = os.O_RDWR | os.O_CREATE | os.O_TRUNC + pw.created[activePackageNum] = true + } + + f, err := os.OpenFile(currentPackagePath, flags, 0777) + if err != nil { + return err + } + pw.fileHandle = f + pw.pkgIndex = activePackageNum + + stat, err := pw.fileHandle.Stat() + if err != nil { + return fmt.Errorf("stat package file: %w", err) + } + pw.currentOffset = stat.Size() + } + + // Check if data fits in the current package + if pw.currentOffset+int64(len(data)) > math.MaxInt32 { + activePackageNum++ + manifest.Header.PackageCount = activePackageNum + 1 + continue // Retry with next package + } + break // Fits + } + + if _, err := pw.fileHandle.Write(data); err != nil { + return err + } + + newEntry := Frame{ + PackageIndex: activePackageNum, + Offset: uint32(pw.currentOffset), + CompressedSize: uint32(len(data)), + Length: decompressedSize, + } + if int64(newEntry.Offset)+int64(newEntry.CompressedSize) > math.MaxInt32 { + newEntry.Offset = 0 + } + + manifest.Frames = append(manifest.Frames, newEntry) + incrementSection(&manifest.Header.Frames, 1) + pw.currentOffset += int64(len(data)) + + return nil +} + +func (pw 
*packageWriter) close() { + if pw.fileHandle != nil { + pw.fileHandle.Close() + pw.fileHandle = nil + } +} + +func incrementSection(s *Section, count int) { + s.Count += uint64(count) + s.ElementCount += uint64(count) + s.Length += s.ElementSize * uint64(count) +} + +func Repack(manifest *Manifest, fileMap [][]ScannedFile, outputDir, packageName, dataDir string) error { + fmt.Println("Mapping modified files...") + + totalFiles := 0 + for _, chunk := range fileMap { + totalFiles += len(chunk) + } + + modifiedFilesLookupTable := make(map[[128]byte]ScannedFile, totalFiles) + frameContentsLookupTable := make(map[[128]byte]FrameContent, manifest.Header.FrameContents.ElementCount) + modifiedFrames := make(map[uint32]bool) + + for _, v := range manifest.FrameContents { + buf := [128]byte{} + binary.LittleEndian.PutUint64(buf[0:64], uint64(v.TypeSymbol)) + binary.LittleEndian.PutUint64(buf[64:128], uint64(v.FileSymbol)) + frameContentsLookupTable[buf] = v + } + + for _, fileGroup := range fileMap { + for _, v := range fileGroup { + buf := [128]byte{} + binary.LittleEndian.PutUint64(buf[0:64], uint64(v.TypeSymbol)) + binary.LittleEndian.PutUint64(buf[64:128], uint64(v.FileSymbol)) + + if content, ok := frameContentsLookupTable[buf]; ok { + modifiedFrames[content.FrameIndex] = true + modifiedFilesLookupTable[buf] = v + } + } + } + fmt.Printf("Mapped %d files to modify.\n", len(modifiedFilesLookupTable)) + + contentsByFrame := make(map[uint32][]fcWrapper) + for k, v := range manifest.FrameContents { + contentsByFrame[v.FrameIndex] = append(contentsByFrame[v.FrameIndex], fcWrapper{index: k, fc: v}) + } + + newManifest := *manifest + newManifest.Frames = make([]Frame, 0) + origFramesHeader := manifest.Header.Frames + newManifest.Header.PackageCount = 1 + newManifest.Header.Frames = Section{ + Unk1: origFramesHeader.Unk1, + Unk2: origFramesHeader.Unk2, + ElementSize: 16, + } + + packages := make(map[uint32]*os.File) + for i := 0; i < int(manifest.Header.PackageCount); i++ { + 
pFilePath := fmt.Sprintf("%s/packages/%s_%d", dataDir, packageName, i) + f, err := os.Open(pFilePath) + if err != nil { + return fmt.Errorf("failed to open package %s: %v", pFilePath, err) + } + packages[uint32(i)] = f + defer f.Close() + } + + totalFrames := int(manifest.Header.Frames.ElementCount) + lookaheadSize := runtime.NumCPU() * 16 + futureResults := make(chan chan frameResult, lookaheadSize) + writer := &packageWriter{outputDir: outputDir, pkgName: packageName, created: make(map[uint32]bool)} + defer writer.close() + + go func() { + defer close(futureResults) + for i := 0; i < totalFrames; i++ { + resultChan := make(chan frameResult, 1) + futureResults <- resultChan + + go func(idx int, ch chan frameResult) { + v := manifest.Frames[idx] + isMod := modifiedFrames[uint32(idx)] + res := frameResult{index: idx, isModified: isMod, decompressedSize: v.Length} + + rawReadBuf := readPool.Get().([]byte) + if cap(rawReadBuf) < int(v.CompressedSize) { + rawReadBuf = make([]byte, int(v.CompressedSize)) + } else { + rawReadBuf = rawReadBuf[:v.CompressedSize] + } + res.rawReadBuf = rawReadBuf + + activeFile := packages[v.PackageIndex] + if v.CompressedSize > 0 { + if _, err := activeFile.ReadAt(rawReadBuf, int64(v.Offset)); err != nil { + if v.Length == 0 { + res.shouldSkip = true + ch <- res + return + } + res.err = err + ch <- res + return + } + } + + if !isMod { + res.data = rawReadBuf + ch <- res + return + } + + decompBuf := decompPool.Get().([]byte) + decompBytes, err := zstd.Decompress(decompBuf[:0], rawReadBuf) + if err != nil { + res.err = err + ch <- res + return + } + res.decompBuf = decompBytes + + bufObj := constructionPool.Get() + constructionBuf := bufObj.(*bytes.Buffer) + constructionBuf.Reset() + defer constructionPool.Put(bufObj) + + sorted := make([]fcWrapper, 0) + if contents, ok := contentsByFrame[uint32(idx)]; ok { + sorted = append(sorted, contents...) 
+ } + sort.Slice(sorted, func(a, b int) bool { + return sorted[a].fc.DataOffset < sorted[b].fc.DataOffset + }) + + for j := 0; j < len(sorted); j++ { + buf := [128]byte{} + binary.LittleEndian.PutUint64(buf[0:64], uint64(sorted[j].fc.TypeSymbol)) + binary.LittleEndian.PutUint64(buf[64:128], uint64(sorted[j].fc.FileSymbol)) + + if modFile, exists := modifiedFilesLookupTable[buf]; exists && modFile.FileSymbol != 0 { + modData, err := os.ReadFile(modFile.Path) + if err != nil { + res.err = err + ch <- res + return + } + constructionBuf.Write(modData) + } else { + start := sorted[j].fc.DataOffset + end := start + sorted[j].fc.Size + constructionBuf.Write(decompBytes[start:end]) + } + } + + compBuf := compPool.Get().([]byte) + encodedData, err := zstd.CompressLevel(compBuf[:0], constructionBuf.Bytes(), zstd.BestSpeed) + if err != nil { + res.err = fmt.Errorf("compress frame: %w", err) + ch <- res + return + } + res.data = encodedData + res.decompressedSize = uint32(constructionBuf.Len()) + + ch <- res + }(i, resultChan) + } + }() + + fmt.Println("Starting repack...") + for resultCh := range futureResults { + res := <-resultCh + if res.err != nil { + return res.err + } + + if res.shouldSkip { + if res.rawReadBuf != nil { + readPool.Put(res.rawReadBuf) + } + if res.decompBuf != nil { + decompPool.Put(res.decompBuf) + } + if res.isModified && res.data != nil { + compPool.Put(res.data) + } + continue + } + + if res.isModified { + sorted := make([]fcWrapper, 0) + if contents, ok := contentsByFrame[uint32(res.index)]; ok { + sorted = append(sorted, contents...) 
+ } + sort.Slice(sorted, func(a, b int) bool { + return sorted[a].fc.DataOffset < sorted[b].fc.DataOffset + }) + + currentOffset := uint32(0) + for j := 0; j < len(sorted); j++ { + buf := [128]byte{} + binary.LittleEndian.PutUint64(buf[0:64], uint64(sorted[j].fc.TypeSymbol)) + binary.LittleEndian.PutUint64(buf[64:128], uint64(sorted[j].fc.FileSymbol)) + + size := sorted[j].fc.Size + if modFile, exists := modifiedFilesLookupTable[buf]; exists && modFile.FileSymbol != 0 { + size = modFile.Size + } + + newManifest.FrameContents[sorted[j].index] = FrameContent{ + TypeSymbol: sorted[j].fc.TypeSymbol, + FileSymbol: sorted[j].fc.FileSymbol, + FrameIndex: sorted[j].fc.FrameIndex, + DataOffset: currentOffset, + Size: size, + Alignment: sorted[j].fc.Alignment, + } + currentOffset += size + } + } + + if err := writer.write(&newManifest, res.data, res.decompressedSize); err != nil { + return err + } + + if res.isModified { + if res.rawReadBuf != nil { + readPool.Put(res.rawReadBuf) + } + if res.decompBuf != nil { + decompPool.Put(res.decompBuf) + } + if res.data != nil { + compPool.Put(res.data) + } + } else { + if res.data != nil { + readPool.Put(res.data) + } + } + } + + writer.close() + + actualPkgCount := uint32(0) + for { + path := fmt.Sprintf("%s/packages/%s_%d", outputDir, packageName, actualPkgCount) + if _, err := os.Stat(path); err != nil { + break + } + actualPkgCount++ + } + newManifest.Header.PackageCount = actualPkgCount + + for i := uint32(0); i < newManifest.Header.PackageCount; i++ { + path := fmt.Sprintf("%s/packages/%s_%d", outputDir, packageName, i) + stats, err := os.Stat(path) + if err != nil { + continue + } + newEntry := Frame{ + PackageIndex: i, + Offset: uint32(stats.Size()), + CompressedSize: 0, Length: 0, + } + newManifest.Frames = append(newManifest.Frames, newEntry) + incrementSection(&newManifest.Header.Frames, 1) + } + + newManifest.Frames = append(newManifest.Frames, Frame{}) + incrementSection(&newManifest.Header.Frames, 1) + + manifestDir := 
filepath.Join(outputDir, "manifests")
	if err := os.MkdirAll(manifestDir, 0755); err != nil {
		return fmt.Errorf("create manifest dir: %w", err)
	}

	return WriteFile(filepath.Join(manifestDir, packageName), &newManifest)
}

// QuickRepack modifies the existing package files in-place by appending new
// frames and updating the manifest. This avoids rewriting the entire package
// set. A ".bak" copy of the original manifest is kept (and reloaded on later
// runs) so repeated quick swaps always start from the pristine layout.
func QuickRepack(manifest *Manifest, fileMap [][]ScannedFile, dataDir, packageName string) error {
	manifestPath := filepath.Join(dataDir, "manifests", packageName)
	originalManifestPath := manifestPath + ".bak"

	// 1. Backup/Restore Logic: ensure we have a clean original manifest.
	// Migrate a legacy "_original" backup to the ".bak" name (best-effort;
	// if the rename fails the ".bak" path is simply created fresh below).
	if _, err := os.Stat(manifestPath + "_original"); err == nil {
		if _, err := os.Stat(originalManifestPath); os.IsNotExist(err) {
			os.Rename(manifestPath+"_original", originalManifestPath)
		}
	}

	if _, err := os.Stat(originalManifestPath); err == nil {
		// Backup exists, load it as the source of truth.
		fmt.Println("Loading original manifest from backup...")
		origM, err := ReadFile(originalManifestPath)
		if err != nil {
			return fmt.Errorf("failed to read backup manifest: %w", err)
		}
		*manifest = *origM
	} else {
		// No backup yet: create one from the current (assumed original)
		// manifest. This must not fail silently — we are about to mutate the
		// manifest in place, and without a backup the original layout would
		// be unrecoverable.
		fmt.Println("Creating backup of original manifest...")
		input, err := os.ReadFile(manifestPath)
		if err != nil {
			return fmt.Errorf("read manifest for backup: %w", err)
		}
		if err := os.WriteFile(originalManifestPath, input, 0644); err != nil {
			return fmt.Errorf("write manifest backup: %w", err)
		}
	}

	// Packages below this index belong to the original install and must not
	// be truncated by the writer.
	minSafePackageIndex := manifest.Header.PackageCount

	// 2. Open Package
	pkgPath := filepath.Join(dataDir, "packages", packageName)
	srcPkg, err := OpenPackage(manifest, pkgPath)
	if err != nil {
		return fmt.Errorf("failed to open source package: %w", err)
	}
	defer srcPkg.Close()

	fmt.Println("Starting Quick Swap (In-Place Modification)...")

	totalFiles := 0
	for _, chunk := range fileMap {
		totalFiles += len(chunk)
	}

	modifiedFilesLookupTable := make(map[[128]byte]ScannedFile, totalFiles)
	frameContentsLookupTable := make(map[[128]byte]FrameContent, manifest.Header.FrameContents.ElementCount)

	for _, v := range manifest.FrameContents {
		buf := [128]byte{}
		binary.LittleEndian.PutUint64(buf[0:64], uint64(v.TypeSymbol))
		binary.LittleEndian.PutUint64(buf[64:128], uint64(v.FileSymbol))
		frameContentsLookupTable[buf] = v
	}

	for _, fileGroup := range fileMap {
		for _, v := range fileGroup {
			buf := [128]byte{}
			binary.LittleEndian.PutUint64(buf[0:64], uint64(v.TypeSymbol))
			binary.LittleEndian.PutUint64(buf[64:128], uint64(v.FileSymbol))

			if _, ok := frameContentsLookupTable[buf]; ok {
				modifiedFilesLookupTable[buf] = v
			}
		}
	}

	// Drop inputs that are byte-identical to what the package already holds;
	// sorted by (frame, offset) so package reads are roughly sequential.
	fmt.Println("Checking for identical files...")
	type checkItem struct {
		key [128]byte
		fc  FrameContent
		mod ScannedFile
	}
	var checks []checkItem
	for key, modFile := range modifiedFilesLookupTable {
		if fc, ok := frameContentsLookupTable[key]; ok {
			checks = append(checks, checkItem{key, fc, modFile})
		}
	}

	sort.Slice(checks, func(i, j int) bool {
		if checks[i].fc.FrameIndex != checks[j].fc.FrameIndex {
			return checks[i].fc.FrameIndex < checks[j].fc.FrameIndex
		}
		return checks[i].fc.DataOffset < checks[j].fc.DataOffset
	})

	skippedCount := 0
	for _, item := range checks {
		newData, err := os.ReadFile(item.mod.Path)
		if err != nil {
			return fmt.Errorf("read input %s: %w", item.mod.Path, err)
		}

		// Size mismatch means definitely changed; only same-size content is
		// worth the cost of reading the old bytes back for comparison.
		if uint32(len(newData)) == item.fc.Size {
			oldData, err := srcPkg.ReadContent(&item.fc)
			if err == nil && bytes.Equal(newData, oldData) {
				delete(modifiedFilesLookupTable, item.key)
				skippedCount++
			}
		}
	}

	if skippedCount > 0 {
		fmt.Printf("Skipped %d identical files.\n", skippedCount)
	}

	if len(modifiedFilesLookupTable) == 0 {
		fmt.Println("No files changed. Nothing to repack.")
		return nil
	}

	affectedFrames := make(map[uint32]bool)
	for key := range modifiedFilesLookupTable {
		if fc, ok := frameContentsLookupTable[key]; ok {
			affectedFrames[fc.FrameIndex] = true
		}
	}
	fmt.Printf("Mapped %d files to modify across %d frames.\n", len(modifiedFilesLookupTable), len(affectedFrames))

	contentsByFrame := make(map[uint32][]fcWrapper)
	for k, v := range manifest.FrameContents {
		if affectedFrames[v.FrameIndex] {
			contentsByFrame[v.FrameIndex] = append(contentsByFrame[v.FrameIndex], fcWrapper{index: k, fc: v})
		}
	}

	// Mark all existing packages as "created" so the writer appends to them
	// instead of truncating.
	createdMap := make(map[uint32]bool)
	for i := uint32(0); i < manifest.Header.PackageCount; i++ {
		createdMap[i] = true
	}

	writer := &packageWriter{
		outputDir:   dataDir,
		pkgName:     packageName,
		created:     createdMap,
		minPkgIndex: minSafePackageIndex,
	}
	defer writer.close()

	var framesToProcess []int
	for idx := range affectedFrames {
		framesToProcess = append(framesToProcess, int(idx))
	}
	sort.Ints(framesToProcess)

	// Bounded lookahead: frame construction runs concurrently, consumption
	// below is strictly ordered.
	lookaheadSize := runtime.NumCPU() * 4
	futureResults := make(chan chan frameResult, lookaheadSize)

	go func() {
		defer close(futureResults)
		for _, idx := range framesToProcess {
			resultChan := make(chan frameResult, 1)
			futureResults <- resultChan

			go func(idx int, ch chan frameResult) {
				v := manifest.Frames[idx]
				res := frameResult{index: idx, isModified: true, decompressedSize: v.Length}

				rawReadBuf := readPool.Get().([]byte)
				if cap(rawReadBuf) < int(v.CompressedSize) {
					rawReadBuf = make([]byte, int(v.CompressedSize))
				} else {
					rawReadBuf = rawReadBuf[:v.CompressedSize]
				}
				res.rawReadBuf = rawReadBuf

				if int(v.PackageIndex) >= len(srcPkg.files) {
					res.err = fmt.Errorf("invalid package index %d", v.PackageIndex)
					ch <- res
					return
				}
				activeFile := srcPkg.files[v.PackageIndex]

				if v.CompressedSize > 0 {
					if _, err := activeFile.ReadAt(rawReadBuf, int64(v.Offset)); err != nil {
						res.err = err
						ch <- res
						return
					}
				}

				decompBuf := decompPool.Get().([]byte)
				decompBytes, err := zstd.Decompress(decompBuf[:0], rawReadBuf)
				if err != nil {
					res.err = err
					ch <- res
					return
				}
				res.decompBuf = decompBytes

				bufObj := constructionPool.Get()
				constructionBuf := bufObj.(*bytes.Buffer)
				constructionBuf.Reset()
				defer constructionPool.Put(bufObj)

				sorted := make([]fcWrapper, 0)
				if contents, ok := contentsByFrame[uint32(idx)]; ok {
					sorted = append(sorted, contents...)
				}
				sort.Slice(sorted, func(a, b int) bool {
					return sorted[a].fc.DataOffset < sorted[b].fc.DataOffset
				})

				// Rebuild the frame payload, substituting modified files.
				for j := 0; j < len(sorted); j++ {
					buf := [128]byte{}
					binary.LittleEndian.PutUint64(buf[0:64], uint64(sorted[j].fc.TypeSymbol))
					binary.LittleEndian.PutUint64(buf[64:128], uint64(sorted[j].fc.FileSymbol))

					if modFile, exists := modifiedFilesLookupTable[buf]; exists && modFile.FileSymbol != 0 {
						modData, err := os.ReadFile(modFile.Path)
						if err != nil {
							res.err = err
							ch <- res
							return
						}
						constructionBuf.Write(modData)
					} else {
						start := sorted[j].fc.DataOffset
						end := start + sorted[j].fc.Size
						if end > uint32(len(decompBytes)) {
							res.err = fmt.Errorf("frame content out of bounds")
							ch <- res
							return
						}
						constructionBuf.Write(decompBytes[start:end])
					}
				}

				compBuf := compPool.Get().([]byte)
				// Fixed: the compression error was previously discarded with
				// a blank identifier; writing an undefined buffer in-place
				// into live game files would corrupt the package.
				encodedData, err := zstd.CompressLevel(compBuf[:0], constructionBuf.Bytes(), zstd.BestSpeed)
				if err != nil {
					res.err = fmt.Errorf("compress frame: %w", err)
					ch <- res
					return
				}
				res.data = encodedData
				res.decompressedSize = uint32(constructionBuf.Len())

				ch <- res
			}(idx, resultChan)
		}
	}()

	fmt.Println("Writing modified frames...")
	for resultCh := range futureResults {
		res := <-resultCh
		if res.err != nil {
			return res.err
		}

		// The rebuilt frame is appended, so its contents are re-pointed at a
		// brand-new frame index at the end of the frame table.
		newFrameIndex :=
len(manifest.Frames) + + sorted := make([]fcWrapper, 0) + if contents, ok := contentsByFrame[uint32(res.index)]; ok { + sorted = append(sorted, contents...) + } + sort.Slice(sorted, func(a, b int) bool { + return sorted[a].fc.DataOffset < sorted[b].fc.DataOffset + }) + + currentOffset := uint32(0) + for j := 0; j < len(sorted); j++ { + buf := [128]byte{} + binary.LittleEndian.PutUint64(buf[0:64], uint64(sorted[j].fc.TypeSymbol)) + binary.LittleEndian.PutUint64(buf[64:128], uint64(sorted[j].fc.FileSymbol)) + + size := sorted[j].fc.Size + if modFile, exists := modifiedFilesLookupTable[buf]; exists && modFile.FileSymbol != 0 { + size = modFile.Size + } + + manifest.FrameContents[sorted[j].index] = FrameContent{ + TypeSymbol: sorted[j].fc.TypeSymbol, + FileSymbol: sorted[j].fc.FileSymbol, + FrameIndex: uint32(newFrameIndex), + DataOffset: currentOffset, + Size: size, + Alignment: sorted[j].fc.Alignment, + } + currentOffset += size + } + + if err := writer.write(manifest, res.data, res.decompressedSize); err != nil { + return err + } + + if res.rawReadBuf != nil { + readPool.Put(res.rawReadBuf) + } + if res.decompBuf != nil { + decompPool.Put(res.decompBuf) + } + if res.data != nil { + compPool.Put(res.data) + } + } + + writer.close() + + fmt.Printf("Updating manifest: %s\n", manifestPath) + return WriteFile(manifestPath, manifest) +} diff --git a/pkg/manifest/scanner.go b/pkg/manifest/scanner.go index 5e7ceaa..1618644 100644 --- a/pkg/manifest/scanner.go +++ b/pkg/manifest/scanner.go @@ -14,6 +14,11 @@ type ScannedFile struct { FileSymbol int64 Path string Size uint32 + + // Source for repacking (optional) + SrcPackage *Package + SrcContent *FrameContent + SkipManifest bool } // ScanFiles walks the input directory and returns files grouped by chunk number. 
@@ -29,26 +34,50 @@ func ScanFiles(inputDir string) ([][]ScannedFile, error) { return nil } - // Parse directory structure - dir := filepath.Dir(path) - parts := strings.Split(filepath.ToSlash(dir), "/") - if len(parts) < 3 { - return fmt.Errorf("invalid path structure: %s", path) + relPath, err := filepath.Rel(inputDir, path) + if err != nil { + return fmt.Errorf("failed to get relative path: %w", err) } - chunkNum, err := strconv.ParseInt(parts[len(parts)-3], 10, 64) - if err != nil { - return fmt.Errorf("parse chunk number: %w", err) + // Normalize separators + relPath = filepath.ToSlash(relPath) + parts := strings.Split(relPath, "/") + + var chunkNum int64 = 0 + var typeStr, fileStr string + + if len(parts) == 3 { + if c, err := strconv.ParseInt(parts[0], 10, 64); err == nil { + chunkNum = c + typeStr = parts[1] + fileStr = parts[2] + } else { + typeStr = parts[1] + fileStr = parts[2] + } + } else if len(parts) == 2 { + typeStr = parts[0] + fileStr = parts[1] + } else { + return nil // Skip } - typeSymbol, err := strconv.ParseInt(parts[len(parts)-2], 10, 64) + parseSymbol := func(s string) (int64, error) { + s = strings.TrimSuffix(s, filepath.Ext(s)) + if u, err := strconv.ParseUint(s, 16, 64); err == nil { + return int64(u), nil + } + return strconv.ParseInt(s, 10, 64) + } + + typeSymbol, err := parseSymbol(typeStr) if err != nil { - return fmt.Errorf("parse type symbol: %w", err) + return nil } - fileSymbol, err := strconv.ParseInt(filepath.Base(path), 10, 64) + fileSymbol, err := parseSymbol(fileStr) if err != nil { - return fmt.Errorf("parse file symbol: %w", err) + return nil } size := info.Size()