1
0
Fork 0
mirror of https://github.com/Luzifer/scs-extract.git synced 2024-12-20 13:31:16 +00:00

Adapt to SCS# v2 archive format for v1.50+

This commit is contained in:
Knut Ahlers 2024-11-25 16:01:02 +01:00
parent 49b920f208
commit 7324e14da4
Signed by: luzifer
SSH key fingerprint: SHA256:/xtE5lCgiRDQr8SLxHMS92ZBlACmATUmF1crK16Ks4E
6 changed files with 457 additions and 223 deletions

View file

@ -1,3 +1,7 @@
// Package b0rkhash contains a broken implementation of the Google
// CityHash algorithm to access the SCS archive files of ETS2
//
//nolint:mnd
package b0rkhash package b0rkhash
import ( import (
@ -65,9 +69,9 @@ func hashLen0to16(s []byte, length int) uint64 {
} }
if length > 0 { if length > 0 {
a := uint8(s[0]) a := s[0]
b := uint8(s[length>>1]) b := s[length>>1]
c := uint8(s[length-1]) c := s[length-1]
y := uint32(a) + (uint32(b) << 8) y := uint32(a) + (uint32(b) << 8)
z := uint32(length) + (uint32(c) << 2) z := uint32(length) + (uint32(c) << 2)
return shiftMix(uint64(y)*k2^uint64(z)*k3) * k2 return shiftMix(uint64(y)*k2^uint64(z)*k3) * k2
@ -84,7 +88,7 @@ func hashLen17to32(s []byte, length int) uint64 {
c := fetch64(s[length-8:]) * k2 c := fetch64(s[length-8:]) * k2
d := fetch64(s[length-16:]) * k0 d := fetch64(s[length-16:]) * k0
return hashLen16(rotate(a-b, 43)+rotate(c, 30)+d, return hashLen16(rotate(a-b, 43)+rotate(c, 30)+d,
a+rotate(b^k3, 20)-c+uint64(length)) a+rotate(b^k3, 20)-c+uint64(length)) //#nosec:G115 // Should never be negative
} }
// Return a 16-byte hash for 48 bytes. Quick and dirty. // Return a 16-byte hash for 48 bytes. Quick and dirty.
@ -113,7 +117,7 @@ func weakHashLen32WithSeedsByte(s []byte, a, b uint64) Uint128 {
// Return an 8-byte hash for 33 to 64 bytes. // Return an 8-byte hash for 33 to 64 bytes.
func hashLen33to64(s []byte, length int) uint64 { func hashLen33to64(s []byte, length int) uint64 {
z := fetch64(s[24:]) z := fetch64(s[24:])
a := fetch64(s) + (uint64(length)+fetch64(s[length-16:]))*k0 a := fetch64(s) + (uint64(length)+fetch64(s[length-16:]))*k0 //#nosec:G115 // Should never be negative
b := rotate(a+z, 52) b := rotate(a+z, 52)
c := rotate(a, 37) c := rotate(a, 37)
a += fetch64(s[8:]) a += fetch64(s[8:])
@ -156,8 +160,8 @@ func CityHash64(s []byte) uint64 {
x = x*k1 + fetch64(s) x = x*k1 + fetch64(s)
// Decrease len to the nearest multiple of 64, and operate on 64-byte chunks. // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
tmpLength := uint32(length) tmpLength := uint32(length) //#nosec:G115 // Should never be negative
tmpLength = uint32(tmpLength-1) & ^uint32(63) tmpLength -= 1 & ^uint32(63)
for { for {
x = rotate(x+y+v.Low64()+fetch64(s[8:]), 37) * k1 x = rotate(x+y+v.Low64()+fetch64(s[8:]), 37) * k1
y = rotate(y+v.High64()+fetch64(s[48:]), 42) * k1 y = rotate(y+v.High64()+fetch64(s[48:]), 42) * k1

21
go.mod
View file

@ -1,13 +1,18 @@
module github.com/Luzifer/scs-extract module github.com/Luzifer/scs-extract
go 1.13 go 1.22
toolchain go1.23.2
require ( require (
github.com/Luzifer/go_helpers/v2 v2.9.1 github.com/Luzifer/go_helpers/v2 v2.25.0
github.com/Luzifer/rconfig/v2 v2.2.1 github.com/Luzifer/rconfig/v2 v2.5.2
github.com/onsi/ginkgo v1.10.2 // indirect github.com/sirupsen/logrus v1.9.3
github.com/onsi/gomega v1.7.0 // indirect )
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.4.2 require (
github.com/tenfyzhong/cityhash v0.0.0-20181130044406-4c2731b5918c github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/sys v0.27.0 // indirect
gopkg.in/validator.v2 v2.0.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
) )

74
go.sum
View file

@ -1,52 +1,32 @@
github.com/Luzifer/go_helpers v2.8.1+incompatible h1:9YvrAn7pU2viK5vRpAnI+0gyz+Tw8rxWHVIYHi642zk= github.com/Luzifer/go_helpers/v2 v2.25.0 h1:k1J4gd1+BfuokTDoWgcgib9P5mdadjzKEgbtKSVe46k=
github.com/Luzifer/go_helpers/v2 v2.9.1 h1:MVUOlD6tJ2m/iTF0hllnI/QVZH5kI+TikUm1WRGg/c4= github.com/Luzifer/go_helpers/v2 v2.25.0/go.mod h1:KSVUdAJAav5cWGyB5oKGxmC27HrKULVTOxwPS/Kr+pc=
github.com/Luzifer/go_helpers/v2 v2.9.1/go.mod h1:ZnWxPjyCdQ4rZP3kNiMSUW/7FigU1X9Rz8XopdJ5ZCU= github.com/Luzifer/rconfig/v2 v2.5.2 h1:4Bfp8mTrCCK/xghUmUbh/qtKiLZA6RC0tHTgqkNw1m4=
github.com/Luzifer/rconfig v2.2.0+incompatible h1:Kle3+rshPM7LxciOheaR4EfHUzibkDDGws04sefQ5m8= github.com/Luzifer/rconfig/v2 v2.5.2/go.mod h1:HnqUWg+NQh60/neUqfMDDDo5d1v8UPuhwKR1HqM4VWQ=
github.com/Luzifer/rconfig/v2 v2.2.1 h1:zcDdLQlnlzwcBJ8E0WFzOkQE1pCMn3EbX0dFYkeTczg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/Luzifer/rconfig/v2 v2.2.1/go.mod h1:OKIX0/JRZrPJ/ZXXWklQEFXA6tBfWaljZbW37w+sqBw=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/leekchan/gtf v0.0.0-20190214083521-5fba33c5b00b/go.mod h1:thNruaSwydMhkQ8dXzapABF9Sc1Tz08ZBcDdgott9RA=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/tenfyzhong/cityhash v0.0.0-20181130044406-4c2731b5918c h1:Y3PUeYj+OuuOy7BnbIvtGOj9YDKjPHsZyN/2AJqN32s= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tenfyzhong/cityhash v0.0.0-20181130044406-4c2731b5918c/go.mod h1:Izvvi9mFtnF9nbPc2Z/gazIliNnYtxOsbQnFYpmxbfc= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8=
gopkg.in/validator.v2 v2.0.0-20180514200540-135c24b11c19 h1:WB265cn5OpO+hK3pikC9hpP1zI/KTwmyMFKloW9eOVc= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/validator.v2 v2.0.0-20180514200540-135c24b11c19/go.mod h1:o4V0GXN9/CAmCsvJ0oXYZvrZOe7syiDZSN1GWGZTGzc= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

83
main.go
View file

@ -9,9 +9,11 @@ import (
"github.com/Luzifer/go_helpers/v2/str" "github.com/Luzifer/go_helpers/v2/str"
"github.com/Luzifer/rconfig/v2" "github.com/Luzifer/rconfig/v2"
"github.com/Luzifer/scs-extract/scs" "github.com/Luzifer/scs-extract/scs"
log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
// dirPermissions is the file mode for directories created during
// extraction (rwxr-x---). This must be an octal literal: the previous
// value 0x750 was hexadecimal (decimal 1872 == mode 03520), which set
// the sticky bit and dropped owner-read instead of granting 0750.
const dirPermissions = 0o750
var ( var (
cfg = struct { cfg = struct {
Dest string `flag:"dest,d" default:"." description:"Path prefix to use to extract files to"` Dest string `flag:"dest,d" default:"." description:"Path prefix to use to extract files to"`
@ -23,24 +25,32 @@ var (
version = "dev" version = "dev"
) )
func init() { func initApp() (err error) {
if err := rconfig.ParseAndValidate(&cfg); err != nil { if err = rconfig.ParseAndValidate(&cfg); err != nil {
log.Fatalf("Unable to parse commandline options: %s", err) return fmt.Errorf("parsing CLI options: %w", err)
}
l, err := logrus.ParseLevel(cfg.LogLevel)
if err != nil {
return fmt.Errorf("parsing log-level: %w", err)
}
logrus.SetLevel(l)
return nil
}
//nolint:gocyclo // simple loop routine, fine to understand
func main() {
var err error
if err = initApp(); err != nil {
logrus.WithError(err).Fatal("initializing app")
} }
if cfg.VersionAndExit { if cfg.VersionAndExit {
fmt.Printf("scs-extract %s\n", version) fmt.Printf("scs-extract %s\n", version) //nolint:forbidigo
os.Exit(0) os.Exit(0)
} }
if l, err := log.ParseLevel(cfg.LogLevel); err != nil {
log.WithError(err).Fatal("Unable to parse log level")
} else {
log.SetLevel(l)
}
}
func main() {
var ( var (
archive string archive string
extract []string extract []string
@ -49,9 +59,9 @@ func main() {
switch len(rconfig.Args()) { switch len(rconfig.Args()) {
case 1: case 1:
// No positional arguments // No positional arguments
log.Fatal("No SCS archive given") logrus.Fatal("no SCS archive given")
case 2: case 2: //nolint:mnd
archive = rconfig.Args()[1] archive = rconfig.Args()[1]
default: default:
@ -59,32 +69,32 @@ func main() {
extract = rconfig.Args()[2:] extract = rconfig.Args()[2:]
} }
f, err := os.Open(archive) f, err := os.Open(archive) //#nosec:G304 // Intended to open arbitrary files
if err != nil { if err != nil {
log.WithError(err).Fatal("Unable to open input file") logrus.WithError(err).Fatal("opening input file")
} }
defer f.Close() defer f.Close() //nolint:errcheck // will be closed by program exit
r, err := scs.NewReader(f, 0) r, err := scs.NewReader(f)
if err != nil { if err != nil {
log.WithError(err).Fatal("Unable to read SCS file headers") logrus.WithError(err).Fatal("reading SCS file headers")
} }
log.WithField("no_files", len(r.Files)).Debug("Opened archive") logrus.WithField("no_files", len(r.Files)).Debug("opened archive")
destInfo, err := os.Stat(cfg.Dest) destInfo, err := os.Stat(cfg.Dest)
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
log.WithError(err).Fatal("Unable to access destination") logrus.WithError(err).Fatal("accessing destination")
} }
if err := os.MkdirAll(cfg.Dest, 0755); err != nil { if err := os.MkdirAll(cfg.Dest, dirPermissions); err != nil {
log.WithError(err).Fatal("Unable to create destination directory") logrus.WithError(err).Fatal("creating destination directory")
} }
} }
if destInfo != nil && !destInfo.IsDir() { if destInfo != nil && !destInfo.IsDir() {
log.Fatal("Destination exists and is no directory") logrus.Fatal("destination exists and is no directory")
} }
for _, file := range r.Files { for _, file := range r.Files {
@ -93,40 +103,39 @@ func main() {
continue continue
} }
if file.Type == scs.EntryTypeCompressedNames || file.Type == scs.EntryTypeCompressedNamesCopy || if file.IsDirectory {
file.Type == scs.EntryTypeUncompressedNames || file.Type == scs.EntryTypeUncompressedNamesCopy {
// Don't care about directories, if they contain files they will be created // Don't care about directories, if they contain files they will be created
continue continue
} }
if !cfg.Extract { if !cfg.Extract {
// Not asked to extract, do not extract // Not asked to extract, do not extract
fmt.Println(file.Name) fmt.Println(file.Name) //nolint:forbidigo // Intended to print file list
continue continue
} }
destPath := path.Join(cfg.Dest, file.Name) destPath := path.Join(cfg.Dest, file.Name)
if err := os.MkdirAll(path.Dir(destPath), 0755); err != nil { if err := os.MkdirAll(path.Dir(destPath), dirPermissions); err != nil {
log.WithError(err).Fatal("Unable to create directory") logrus.WithError(err).Fatal("creating directory")
} }
src, err := file.Open() src, err := file.Open()
if err != nil { if err != nil {
log.WithError(err).Fatal("Unable to open file from archive") logrus.WithError(err).Fatal("opening file from archive")
} }
dest, err := os.Create(destPath) dest, err := os.Create(destPath) //#nosec:G304 // Intended to create files at given location
if err != nil { if err != nil {
log.WithError(err).Fatal("Unable to create destination file") logrus.WithError(err).Fatal("creating destination file")
} }
if _, err = io.Copy(dest, src); err != nil { if _, err = io.Copy(dest, src); err != nil {
log.WithError(err).Fatal("Unable to write file contents") logrus.WithError(err).WithField("name", file.Name).Fatal("Unable to write file contents")
} }
dest.Close() dest.Close() //nolint:errcheck,gosec,revive // Will be closed by program exit
src.Close() src.Close() //nolint:errcheck,gosec // Will be closed by program exit
log.WithField("file", file.Name).Info("File extracted") logrus.WithField("file", file.Name).Info("File extracted")
} }
} }

89
scs/meta.go Normal file
View file

@ -0,0 +1,89 @@
package scs
// offsetBlockSize is the size in bytes of one offset block: the
// archive stores payload offsets as block counts, which are
// multiplied by this value to obtain absolute byte offsets.
const offsetBlockSize = 16 // byte
type (
	// iMetaEntry is implemented by every decoded metadata record and
	// copies the record's values into a normalized catalogMetaEntry.
	iMetaEntry interface {
		Fill(*catalogMetaEntry)
	}

	// metaEntry pairs a record's type header with its decoded payload
	// so both can contribute to the resulting catalogMetaEntry.
	metaEntry struct {
		t metaEntryType
		p iMetaEntry
	}

	// metaEntryBrokenOctal holds a 24-bit little-endian integer as
	// stored on disk (see Uint32).
	metaEntryBrokenOctal [3]byte
	// metaEntryBrokenOctalImage holds a 32-bit little-endian integer
	// whose top byte also carries compression flag bits
	// (see IsCompressed).
	metaEntryBrokenOctalImage [4]byte

	// metaEntryType is the common header preceding every metadata
	// record: a 24-bit entry index followed by the record type.
	// Field order mirrors the on-disk layout and must not change.
	metaEntryType struct {
		Index metaEntryBrokenOctal
		Type  catalogMetaEntryType
	}

	// metaEntryDir is the on-disk layout of a directory record.
	// Field order mirrors the on-disk layout and must not change.
	metaEntryDir struct {
		CompressedSize metaEntryBrokenOctal
		Flags          byte
		Size           uint32
		Unknown2       uint32 // purpose unknown — TODO identify
		OffsetBlock    uint32 // payload offset in 16-byte blocks
	}

	// metaEntryFile is the on-disk layout of a plain file record.
	// Field order mirrors the on-disk layout and must not change.
	metaEntryFile struct {
		CompressedSize metaEntryBrokenOctal
		Flags          byte
		Size           uint32
		Unknown2       uint32 // purpose unknown — TODO identify
		OffsetBlock    uint32 // payload offset in 16-byte blocks
	}

	// metaEntryImage is the on-disk layout of an image/texture record.
	// Field order mirrors the on-disk layout and must not change.
	metaEntryImage struct {
		Unknown1       uint64 // purpose unknown — TODO identify
		TextureWidth   uint16
		TextureHeight  uint16
		ImgFlags       uint32
		SampleFlags    uint32
		CompressedSize metaEntryBrokenOctalImage
		Unknown3       [8]byte // purpose unknown — TODO identify
		OffsetBlock    uint32  // payload offset in 16-byte blocks
	}
)
// Fill decodes the record index from the type header and then lets
// the wrapped payload populate the remaining fields of c.
func (m metaEntry) Fill(c *catalogMetaEntry) {
	c.Index = m.t.Index.Uint32()
	m.p.Fill(c)
}
// Fill converts the on-disk directory record into a normalized
// catalog entry. The stored offset is a count of 16-byte blocks and
// is expanded here to an absolute byte offset.
func (m metaEntryDir) Fill(c *catalogMetaEntry) {
	c.IsDirectory = true
	c.Flags = m.Flags
	c.Size = m.Size
	c.CompressedSize = m.CompressedSize.Uint32()
	c.Offset = offsetBlockSize * uint64(m.OffsetBlock)
}
// Fill converts the on-disk file record into a normalized catalog
// entry. The stored offset is a count of 16-byte blocks and is
// expanded here to an absolute byte offset.
func (m metaEntryFile) Fill(c *catalogMetaEntry) {
	c.Flags = m.Flags
	c.Size = m.Size
	c.CompressedSize = m.CompressedSize.Uint32()
	c.Offset = offsetBlockSize * uint64(m.OffsetBlock)
}
// Fill converts the on-disk image record into a normalized catalog
// entry. Image records carry no separate plain-size field, so Size is
// taken from the same stored size value as CompressedSize; the
// compression flag lives in the high nibble of that value's top byte.
// NOTE(review): Uint32 includes the flag nibble in the value — confirm
// against the archive format whether it should be masked out.
func (m metaEntryImage) Fill(c *catalogMetaEntry) {
	stored := m.CompressedSize.Uint32()
	c.CompressedSize = stored
	c.Size = stored
	c.IsCompressed = m.CompressedSize.IsCompressed()
	c.Offset = offsetBlockSize * uint64(m.OffsetBlock)
}
// Uint32 decodes the 24-bit little-endian value stored in m.
func (m metaEntryBrokenOctal) Uint32() uint32 {
	var v uint32
	for i := len(m) - 1; i >= 0; i-- {
		v = v<<8 | uint32(m[i])
	}
	return v
}
// IsCompressed reports whether any flag bit in the high nibble of the
// final byte is set, which marks the payload as compressed.
func (m metaEntryBrokenOctalImage) IsCompressed() bool {
	const compressionMask = 0xf0 // high nibble of the top byte
	return m[3]&compressionMask != 0
}
// Uint32 decodes the 32-bit little-endian value stored in m. Note
// that the top byte also carries the compression flag bits, which are
// included in the returned value (see IsCompressed).
func (m metaEntryBrokenOctalImage) Uint32() uint32 {
	var v uint32
	for i := len(m) - 1; i >= 0; i-- {
		v = v<<8 | uint32(m[i])
	}
	return v
}

View file

@ -1,204 +1,351 @@
// Package scs contains a reader for SCS# archive files
package scs package scs
import ( import (
"bufio"
"bytes" "bytes"
"compress/flate" "compress/flate"
"compress/zlib"
"encoding/binary" "encoding/binary"
"errors"
"fmt"
"io" "io"
"io/ioutil"
"path" "path"
"reflect" "sort"
"strings" "strings"
"github.com/pkg/errors"
"github.com/Luzifer/scs-extract/b0rkhash" "github.com/Luzifer/scs-extract/b0rkhash"
) )
const (
	// flagIsDirectory marks a metadata entry whose payload is a
	// directory listing rather than file contents.
	flagIsDirectory = 0x10
	// supportedVersion is the only SCS# archive version this reader
	// understands (v2, shipped with game version 1.50+).
	supportedVersion = 0x2
	// zipHeaderSize is the number of leading bytes skipped before
	// handing a compressed payload to the raw-deflate reader —
	// presumably the two-byte zlib header; confirm against the format.
	zipHeaderSize = 0x2
)
type (
	// File represents a file inside the SCS# archive
	File struct {
		Name           string
		CompressedSize uint32 // stored (possibly compressed) size in bytes
		Hash           uint64 // CityHash64 of the slash-separated archive path
		IsCompressed   bool
		IsDirectory    bool
		Size           uint32 // uncompressed size in bytes

		archiveReader io.ReaderAt
		offset        uint64 // absolute byte offset of the payload in the archive
	}

	// Reader contains a parser for the archive and after creation will
	// hold a list of files ready to be opened from the archive
	Reader struct {
		Files []*File

		header        fileHeader
		entryTable    []catalogEntry
		metadataTable map[uint32]catalogMetaEntry
		archiveReader io.ReaderAt
	}

	// fileHeader is the fixed-size archive header at offset 0.
	// Field order mirrors the on-disk layout and must not change.
	fileHeader struct {
		Magic                    [4]byte // expected to be "SCS#"
		Version                  uint16  // expected to be supportedVersion
		Salt                     uint16
		HashMethod               [4]byte // expected to be "CITY"
		EntryCount               uint32
		EntryTableLength         uint32 // compressed length in bytes
		MetadataEntriesCount     uint32
		MetadataTableLength      uint32 // compressed length in bytes
		EntryTableStart          uint64 // absolute byte offset
		MetadataTableStart       uint64 // absolute byte offset
		SecurityDescriptorOffset uint32
		Platform                 byte
	}

	// catalogEntry is one record of the entry table, linking a hashed
	// path to its metadata record(s).
	// Field order mirrors the on-disk layout and must not change.
	catalogEntry struct {
		Hash          uint64
		MetadataIndex uint32
		MetadataCount uint16
		Flags         uint16
	}

	// catalogMetaEntry is the normalized, in-memory form of a decoded
	// metadata record (see scs/meta.go for the on-disk variants).
	catalogMetaEntry struct {
		Index          uint32
		Offset         uint64 // absolute byte offset of the payload
		CompressedSize uint32
		Size           uint32
		Flags          byte
		IsDirectory    bool
		IsCompressed   bool
	}

	// catalogMetaEntryType identifies the record type of a metadata
	// table entry.
	catalogMetaEntryType byte
)
// Known metadata record types found in the metadata table. Only the
// directory, plain-file and image types are currently handled by
// parseMetadataTable; the others are listed for completeness.
const (
	metaEntryTypeImage           catalogMetaEntryType = 1
	metaEntryTypeSample          catalogMetaEntryType = 2
	metaEntryTypeMipProxy        catalogMetaEntryType = 3
	metaEntryTypeInlineDirectory catalogMetaEntryType = 4
	metaEntryTypePlain           catalogMetaEntryType = 128
	metaEntryTypeDirectory       catalogMetaEntryType = 129
	metaEntryTypeMip0            catalogMetaEntryType = 130
	metaEntryTypeMip1            catalogMetaEntryType = 131
	metaEntryTypeMipTail         catalogMetaEntryType = 132
)
var ( var (
scsMagic = []byte("SCS#")
scsHashMethod = []byte("CITY")
localeRootPathHash = b0rkhash.CityHash64([]byte("locale")) localeRootPathHash = b0rkhash.CityHash64([]byte("locale"))
rootPathHash = b0rkhash.CityHash64([]byte("")) rootPathHash = b0rkhash.CityHash64([]byte(""))
) )
type CatalogEntry struct { // NewReader opens the archive from the given io.ReaderAt and parses
HashedPath uint64 // the header information
Offset int32 func NewReader(r io.ReaderAt) (out *Reader, err error) {
_ int32 // Read the header
Type EntryType var header fileHeader
CRC uint32 if err = binary.Read(
Size int32 io.NewSectionReader(r, 0, int64(binary.Size(fileHeader{}))),
ZSize int32 binary.LittleEndian,
} &header,
); err != nil {
type EntryType int32 return nil, fmt.Errorf("reading header: %w", err)
// See https://forum.scssoft.com/viewtopic.php?p=644638#p644638
const (
EntryTypeUncompressedFile EntryType = iota
EntryTypeUncompressedNames
EntryTypeCompressedFile
EntryTypeCompressedNames
EntryTypeUncompressedFileCopy
EntryTypeUncompressedNamesCopy
EntryTypeCompressedFileCopy
EntryTypeCompressedNamesCopy
)
type File struct {
Name string
archiveReader io.ReaderAt
CatalogEntry
}
func (f *File) Open() (io.ReadCloser, error) {
var rc io.ReadCloser
switch f.Type {
case EntryTypeCompressedFile, EntryTypeCompressedFileCopy, EntryTypeCompressedNames, EntryTypeCompressedNamesCopy:
r := io.NewSectionReader(f.archiveReader, int64(f.Offset+2), int64(f.ZSize))
rc = flate.NewReader(r)
case EntryTypeUncompressedFile, EntryTypeUncompressedFileCopy, EntryTypeUncompressedNames, EntryTypeUncompressedNamesCopy:
r := io.NewSectionReader(f.archiveReader, int64(f.Offset), int64(f.Size))
rc = ioutil.NopCloser(r)
}
return rc, nil
}
type Reader struct {
Files []*File
}
func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
var magic = make([]byte, 4)
n, err := r.ReadAt(magic, 0)
if err != nil || n != 4 {
return nil, errors.Wrap(err, "Unable to read file magic")
} }
if !reflect.DeepEqual(magic, []byte{0x53, 0x43, 0x53, 0x23}) { // Sanity checks
return nil, errors.New("Did not receive expected file magic") if !bytes.Equal(header.Magic[:], scsMagic) {
return nil, fmt.Errorf("unexpected magic header")
} }
var entries = make([]byte, 4) if !bytes.Equal(header.HashMethod[:], scsHashMethod) {
n, err = r.ReadAt(entries, 0xC) return nil, fmt.Errorf("unexpected hash method")
if err != nil || n != 4 {
return nil, errors.Wrap(err, "Unable to read entry count")
} }
var entryCount int32 if header.Version != supportedVersion {
if err = binary.Read(bytes.NewReader(entries), binary.LittleEndian, &entryCount); err != nil { return nil, fmt.Errorf("unsupported archive version: %d", header.Version)
return nil, errors.Wrap(err, "Unable to parse entry count")
} }
out := &Reader{} // Do the real parsing
out = &Reader{
var offset int64 = 0x1000
for i := int32(0); i < entryCount; i++ {
var hdr = make([]byte, 32)
n, err = r.ReadAt(hdr, offset)
if err != nil || n != 32 {
return nil, errors.Wrap(err, "Unable to read file header")
}
var e = CatalogEntry{}
if err = binary.Read(bytes.NewReader(hdr), binary.LittleEndian, &e); err != nil {
return nil, errors.Wrap(err, "Unable to parse file header")
}
out.Files = append(out.Files, &File{
CatalogEntry: e,
archiveReader: r, archiveReader: r,
}) header: header,
offset += 32 }
if err = out.parseEntryTable(); err != nil {
return nil, fmt.Errorf("parsing entry table: %w", err)
}
if err = out.parseMetadataTable(); err != nil {
return nil, fmt.Errorf("parsing metadata table: %w", err)
}
for _, e := range out.entryTable {
meta := out.metadataTable[e.MetadataIndex+uint32(e.MetadataCount)]
f := File{
CompressedSize: meta.CompressedSize,
Hash: e.Hash,
IsCompressed: meta.IsCompressed || (meta.Flags&flagIsDirectory) != 0,
IsDirectory: meta.IsDirectory,
Size: meta.Size,
archiveReader: r,
offset: meta.Offset,
}
out.Files = append(out.Files, &f)
} }
return out, out.populateFileNames() return out, out.populateFileNames()
} }
func (r *Reader) populateFileNames() error { // Open opens the file for reading
func (f *File) Open() (io.ReadCloser, error) {
var rc io.ReadCloser
if f.IsCompressed {
r := io.NewSectionReader(f.archiveReader, int64(f.offset+zipHeaderSize), int64(f.CompressedSize)) //#nosec:G115 // int64 wraps at 9EB - We don't have to care for a LONG time
rc = flate.NewReader(r)
} else {
r := io.NewSectionReader(f.archiveReader, int64(f.offset), int64(f.Size)) //#nosec:G115 // int64 wraps at 9EB - We don't have to care for a LONG time
rc = io.NopCloser(r)
}
return rc, nil
}
// parseEntryTable reads and decompresses the zlib-packed entry table
// from the region described by the archive header, then orders the
// entries by metadata index so they can later be matched against the
// metadata table.
func (r *Reader) parseEntryTable() error {
	section := io.NewSectionReader(
		r.archiveReader,
		int64(r.header.EntryTableStart), //#nosec:G115 // int64 wraps at 9EB - We don't have to care for a LONG time
		int64(r.header.EntryTableLength),
	)

	rd, err := zlib.NewReader(section)
	if err != nil {
		return fmt.Errorf("opening entry-table reader: %w", err)
	}
	defer rd.Close() //nolint:errcheck

	// The header tells us exactly how many records to expect.
	for n := uint32(0); n < r.header.EntryCount; n++ {
		var entry catalogEntry
		if err = binary.Read(rd, binary.LittleEndian, &entry); err != nil {
			return fmt.Errorf("reading entry: %w", err)
		}
		r.entryTable = append(r.entryTable, entry)
	}

	sort.Slice(r.entryTable, func(a, b int) bool {
		return r.entryTable[a].MetadataIndex < r.entryTable[b].MetadataIndex
	})

	return nil
}
// parseMetadataTable reads and decompresses the zlib-packed metadata
// table and indexes every decoded record by its entry index. The
// table is a stream of type-tagged records consumed until EOF.
func (r *Reader) parseMetadataTable() error {
	r.metadataTable = make(map[uint32]catalogMetaEntry)

	section := io.NewSectionReader(
		r.archiveReader,
		int64(r.header.MetadataTableStart), //#nosec:G115 // int64 wraps at 9EB - We don't have to care for a LONG time
		int64(r.header.MetadataTableLength),
	)

	mtReader, err := zlib.NewReader(section)
	if err != nil {
		return fmt.Errorf("opening metadata-table reader: %w", err)
	}
	defer mtReader.Close() //nolint:errcheck

	for {
		// Each record starts with a common header: index + type.
		var header metaEntryType
		if err = binary.Read(mtReader, binary.LittleEndian, &header); err != nil {
			if errors.Is(err, io.EOF) {
				// Clean end of the record stream.
				return nil
			}
			return fmt.Errorf("reading meta-type-header: %w", err)
		}

		// Decode the type-specific payload.
		var payload iMetaEntry
		switch header.Type {
		case metaEntryTypeDirectory:
			var rec metaEntryDir
			if err = binary.Read(mtReader, binary.LittleEndian, &rec); err != nil {
				return fmt.Errorf("reading dir definition: %w", err)
			}
			payload = metaEntry{t: header, p: rec}

		case metaEntryTypePlain:
			var rec metaEntryFile
			if err = binary.Read(mtReader, binary.LittleEndian, &rec); err != nil {
				return fmt.Errorf("reading file definition: %w", err)
			}
			payload = metaEntry{t: header, p: rec}

		case metaEntryTypeImage:
			var rec metaEntryImage
			if err = binary.Read(mtReader, binary.LittleEndian, &rec); err != nil {
				return fmt.Errorf("reading image definition: %w", err)
			}
			payload = metaEntry{t: header, p: rec}

		default:
			return fmt.Errorf("unhandled file type: %v", header.Type)
		}

		var entry catalogMetaEntry
		payload.Fill(&entry)
		r.metadataTable[entry.Index] = entry
	}
}
func (r *Reader) populateFileNames() (err error) {
// first seek root entry, without the archive is not usable for us // first seek root entry, without the archive is not usable for us
var entry *File var entry *File
for _, e := range r.Files { for _, f := range r.Files {
if e.HashedPath == rootPathHash { if f.Hash == rootPathHash {
entry = e entry = f
entry.Name = "" entry.Name = ""
break break
} else if e.HashedPath == localeRootPathHash { } else if f.Hash == localeRootPathHash {
entry = e entry = f
entry.Name = "locale" entry.Name = "locale"
break break
} }
} }
if entry == nil || if entry == nil {
(entry.ZSize == 0 && entry.Size == 0) || // We found no suitable entrypoint
(entry.Type != EntryTypeCompressedNames && return fmt.Errorf("no root entry found")
entry.Type != EntryTypeCompressedNamesCopy &&
entry.Type != EntryTypeUncompressedNames &&
entry.Type != EntryTypeUncompressedNamesCopy) {
return errors.New("No root path entry found or root path empty")
} }
return r.populateFileTree(entry) if err = r.setFilenamesFromDir(entry); err != nil {
return fmt.Errorf("setting filenames: %w", err)
}
return nil
} }
func (r *Reader) populateFileTree(node *File) error { func (r *Reader) setFilenamesFromDir(node *File) error {
f, err := node.Open() f, err := node.Open()
if err != nil { if err != nil {
return errors.Wrap(err, "Unable to open file") return fmt.Errorf("opening file: %w", err)
} }
defer f.Close() defer f.Close() //nolint:errcheck
var entries []string var entryCount uint32
if err = binary.Read(f, binary.LittleEndian, &entryCount); err != nil {
scanner := bufio.NewScanner(f) return fmt.Errorf("reading entry count: %w", err)
for scanner.Scan() {
entries = append(entries, scanner.Text())
} }
if err := scanner.Err(); err != nil { if entryCount == 0 {
return errors.Wrap(err, "Unable to read from file") // Listing without any files
return fmt.Errorf("no entries in directory listing")
} }
for _, entry := range entries { stringLengths := make([]byte, entryCount)
if err = binary.Read(f, binary.LittleEndian, &stringLengths); err != nil {
return fmt.Errorf("reading string lengths: %w", err)
}
for i := uint32(0); i < entryCount; i++ {
var ( var (
hash uint64 hash uint64
name = make([]byte, stringLengths[i])
recurse bool recurse bool
) )
if entry[0] == '*' { if err = binary.Read(f, binary.LittleEndian, &name); err != nil {
// Directory here return fmt.Errorf("reading name: %w", err)
recurse = true
entry = entry[1:]
} }
hash = b0rkhash.CityHash64([]byte(strings.TrimPrefix(path.Join(node.Name, entry), "/"))) if name[0] == '/' {
// Directory entry
recurse = true
name = name[1:]
}
hash = b0rkhash.CityHash64([]byte(strings.TrimPrefix(path.Join(node.Name, string(name)), "/")))
var next *File var next *File
for _, rf := range r.Files { for _, rf := range r.Files {
if rf.HashedPath == hash { if rf.Hash == hash {
next = rf next = rf
break break
} }
} }
if next == nil { if next == nil {
return errors.Errorf("Found missing reference: %s", path.Join(node.Name, entry)) return fmt.Errorf("reference to void: %s", path.Join(node.Name, string(name)))
} }
next.Name = strings.TrimPrefix(path.Join(node.Name, entry), "/") next.Name = strings.TrimPrefix(path.Join(node.Name, string(name)), "/")
if recurse { if recurse {
if err = r.populateFileTree(next); err != nil { if err = r.setFilenamesFromDir(next); err != nil {
return err return err
} }
} }
} }
return nil return nil