mirror of
https://github.com/optim-enterprises-bv/vault.git
synced 2025-11-02 03:27:54 +00:00
Vault Debug (#7375)
* cli: initial work on debug; server-status target * debug: add metrics capture target (#7376) * check against DR secondary * debug: add compression * refactor check into preflight func * debug: set short test time on tests, fix exit code bug * debug: use temp dir for output on tests * debug: use mholt/archiver for compression * first pass on adding pprof * use logger for output * refactor polling target capture logic * debug: poll and collect replication status * debug: poll and collect host-info; rename output files and collection refactor * fix comments * add archive test; fix bugs found * rename flag name to singular target * add target output test; scaffold other tests cases * debug/test: add pprof and index file tests * debug/test: add min timing check tests * debug: fix index gen race and collection goroutine race * debug: extend archive tests, handle race between program exit and polling goroutines * update docstring * debug: correctly add to pollingWg * debug: add config target support * debug: don't wait on interrupt shutdown; add file exists unit tests * move pprof bits into its goroutine * debug: skip empty metrics and some pprof file creation if permission denied, add matching unit test * address comments and feedback * Vault debug using run.Group (#7658) * debug: switch to use oklog/run.Group * debug: use context to cancel requests and interrupt rungroups. * debug: trigger the first interval properly * debug: metrics collection should use metrics interval * debug: add missing continue on metrics error * debug: remove the use of buffered chan to trigger first interval * debug: don't shadow BaseCommand's client, properly block on interval capture failures * debug: actually use c.cachedClient everywhere * go mod vendor * debug: run all pprof in goroutines; bump pprof timings in tests to reduce flakiness * debug: update help text
This commit is contained in:
committed by
GitHub
parent
b51735af4c
commit
0b3777f2aa
@@ -157,6 +157,17 @@ func (b *BaseCommand) PredictVaultPolicies() complete.Predictor {
|
||||
return NewPredict().VaultPolicies()
|
||||
}
|
||||
|
||||
func (b *BaseCommand) PredictVaultDebugTargets() complete.Predictor {
|
||||
return complete.PredictSet(
|
||||
"config",
|
||||
"host",
|
||||
"metrics",
|
||||
"pprof",
|
||||
"replication-status",
|
||||
"server-status",
|
||||
)
|
||||
}
|
||||
|
||||
// VaultFiles returns a predictor for Vault "files". This is a public API for
|
||||
// consumers, but you probably want BaseCommand.PredictVaultFiles instead.
|
||||
func (p *Predict) VaultFiles() complete.Predictor {
|
||||
|
||||
@@ -249,6 +249,12 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) {
|
||||
BaseCommand: getBaseCommand(),
|
||||
}, nil
|
||||
},
|
||||
"debug": func() (cli.Command, error) {
|
||||
return &DebugCommand{
|
||||
BaseCommand: getBaseCommand(),
|
||||
ShutdownCh: MakeShutdownCh(),
|
||||
}, nil
|
||||
},
|
||||
"delete": func() (cli.Command, error) {
|
||||
return &DeleteCommand{
|
||||
BaseCommand: getBaseCommand(),
|
||||
|
||||
973
command/debug.go
Normal file
973
command/debug.go
Normal file
@@ -0,0 +1,973 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/vault/api"
|
||||
gatedwriter "github.com/hashicorp/vault/helper/gated-writer"
|
||||
"github.com/hashicorp/vault/sdk/helper/logging"
|
||||
"github.com/hashicorp/vault/sdk/helper/strutil"
|
||||
"github.com/hashicorp/vault/sdk/version"
|
||||
"github.com/mholt/archiver"
|
||||
"github.com/mitchellh/cli"
|
||||
"github.com/oklog/run"
|
||||
"github.com/posener/complete"
|
||||
)
|
||||
|
||||
// Tunables and format constants for the debug command.
const (
	// debugIndexVersion tracks the canonical version in the index file
	// for compatibility with future format/layout changes on the bundle.
	debugIndexVersion = 1

	// debugMinInterval is the minimum acceptable interval capture value. This
	// value applies to duration and all interval-related flags.
	debugMinInterval = 5 * time.Second

	// debugDurationGrace is the grace period added to duration to allow for
	// "last frame" capture if the interval falls into the last duration time
	// value. For instance, using default values, adding a grace duration lets
	// the command capture 5 intervals (0, 30, 60, 90, and 120th second) before
	// exiting.
	debugDurationGrace = 1 * time.Second

	// debugCompressionExt is the default compression extension used if
	// compression is enabled.
	debugCompressionExt = ".tar.gz"

	// fileFriendlyTimeFormat is the time format used for file and directory
	// naming. It is a Go reference-time layout with dashes instead of colons
	// so the result is safe to use in file names on all platforms.
	fileFriendlyTimeFormat = "2006-01-02T15-04-05Z"
)
|
||||
|
||||
// debugIndex represents the data structure in the index file. It records the
// parameters the capture ran with, the resulting file layout, and any errors
// encountered, and is serialized to index.json at the end of the run.
type debugIndex struct {
	Version                int                    `json:"version"`
	VaultAddress           string                 `json:"vault_address"`
	ClientVersion          string                 `json:"client_version"`
	Timestamp              time.Time              `json:"timestamp"`
	DurationSeconds        int                    `json:"duration_seconds"`
	IntervalSeconds        int                    `json:"interval_seconds"`
	MetricsIntervalSeconds int                    `json:"metrics_interval_seconds"`
	Compress               bool                   `json:"compress"`
	RawArgs                []string               `json:"raw_args"`
	Targets                []string               `json:"targets"`
	// Output maps directory/file names to the layout built by generateIndex.
	Output map[string]interface{} `json:"output"`
	// Errors collects every non-fatal capture error recorded via captureError.
	Errors []*captureError `json:"errors"`
}
|
||||
|
||||
// captureError holds an error entry that can occur during polling capture.
// It includes the timestamp, the target, and the error itself. Entries are
// accumulated into debugIndex.Errors rather than aborting the run.
type captureError struct {
	TargetError string    `json:"error"`
	Target      string    `json:"target"`
	Timestamp   time.Time `json:"timestamp"`
}
|
||||
|
||||
// Compile-time checks that DebugCommand satisfies the CLI interfaces.
var _ cli.Command = (*DebugCommand)(nil)
var _ cli.CommandAutocomplete = (*DebugCommand)(nil)

// DebugCommand implements "vault debug": it probes a Vault server over a
// period of time and bundles the collected diagnostics into an output
// directory or compressed archive.
type DebugCommand struct {
	*BaseCommand

	// Flag values populated by Flags().
	flagCompress        bool
	flagDuration        time.Duration
	flagInterval        time.Duration
	flagMetricsInterval time.Duration
	flagOutput          string
	flagTargets         []string

	// debugIndex is used to keep track of the index state, which gets written
	// to a file at the end.
	debugIndex *debugIndex

	// skipTimingChecks bypasses timing-related checks, used primarily for tests
	skipTimingChecks bool
	// logger is the logger used for outputting capture progress
	logger hclog.Logger

	// ShutdownCh is used to capture interrupt signal and end polling capture
	ShutdownCh chan struct{}

	// Collection slices to hold data gathered by the polling goroutines;
	// each is persisted to its own JSON file after polling completes.
	hostInfoCollection          []map[string]interface{}
	metricsCollection           []map[string]interface{}
	replicationStatusCollection []map[string]interface{}
	serverStatusCollection      []map[string]interface{}

	// cachedClient holds the client retrieved during preflight
	cachedClient *api.Client

	// errLock is used to lock error capture into the index file
	errLock sync.Mutex
}
|
||||
|
||||
// AutocompleteArgs returns the predictor for positional-argument completion.
func (c *DebugCommand) AutocompleteArgs() complete.Predictor {
	// Predict targets
	return c.PredictVaultDebugTargets()
}
|
||||
|
||||
// AutocompleteFlags returns flag-name completions derived from Flags().
func (c *DebugCommand) AutocompleteFlags() complete.Flags {
	return c.Flags().Completions()
}
|
||||
|
||||
// Flags returns the flag sets for this command: the shared HTTP connection
// flags plus the debug-specific command options.
func (c *DebugCommand) Flags() *FlagSets {
	set := c.flagSet(FlagSetHTTP)

	f := set.NewFlagSet("Command Options")

	// Compression defaults to on; preflight adjusts the output path
	// accordingly.
	f.BoolVar(&BoolVar{
		Name:    "compress",
		Target:  &c.flagCompress,
		Default: true,
		Usage:   "Toggles whether to compress output package",
	})

	f.DurationVar(&DurationVar{
		Name:       "duration",
		Target:     &c.flagDuration,
		Completion: complete.PredictAnything,
		Default:    2 * time.Minute,
		Usage:      "Duration to run the command.",
	})

	f.DurationVar(&DurationVar{
		Name:       "interval",
		Target:     &c.flagInterval,
		Completion: complete.PredictAnything,
		Default:    30 * time.Second,
		Usage:      "The polling interval at which to collect profiling data and server state.",
	})

	f.DurationVar(&DurationVar{
		Name:       "metrics-interval",
		Target:     &c.flagMetricsInterval,
		Completion: complete.PredictAnything,
		Default:    10 * time.Second,
		Usage:      "The polling interval at which to collect metrics data.",
	})

	// No default: preflight generates a timestamped name when unset.
	f.StringVar(&StringVar{
		Name:       "output",
		Target:     &c.flagOutput,
		Completion: complete.PredictAnything,
		Usage:      "Specifies the output path for the debug package.",
	})

	f.StringSliceVar(&StringSliceVar{
		Name:   "target",
		Target: &c.flagTargets,
		Usage: "Target to capture, defaulting to all if none specified. " +
			"This can be specified multiple times to capture multiple targets. " +
			"Available targets are: config, host, metrics, pprof, " +
			"replication-status, server-status.",
	})

	return set
}
|
||||
|
||||
// Help returns the long-form help text, with the generated flag usage
// appended.
func (c *DebugCommand) Help() string {
	helpText := `
Usage: vault debug [options]

  Probes a specific Vault server node for a specified period of time, recording
  information about the node, its cluster, and its host environment. The
  information collected is packaged and written to the specified path.

  Certain endpoints that this command uses require ACL permissions to access.
  If not permitted, the information from these endpoints will not be part of the
  output. The command uses the Vault address and token as specified via
  the login command, environment variables, or CLI flags.

  To create a debug package using default duration and interval values in the
  current directory that captures all applicable targets:

  $ vault debug

  To create a debug package with a specific duration and interval in the current
  directory that capture all applicable targets:

  $ vault debug -duration=10m -interval=1m

  To create a debug package in the current directory with a specific sub-set of
  targets:

  $ vault debug -target=host -target=metrics

` + c.Flags().Help()

	return helpText
}
|
||||
|
||||
// Run executes the debug command: parse flags, validate via preflight,
// capture static then polling targets, write the index file, and optionally
// compress the bundle. Exit codes: 0 on success, 1 for usage/validation/
// bundling errors, 2 for capture errors.
func (c *DebugCommand) Run(args []string) int {
	f := c.Flags()

	if err := f.Parse(args); err != nil {
		c.UI.Error(err.Error())
		return 1
	}

	parsedArgs := f.Args()
	if len(parsedArgs) > 0 {
		c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(parsedArgs)))
		return 1
	}

	// Initialize the logger for debug output. The gated writer buffers log
	// lines until Flush below, so the header output prints first.
	logWriter := &gatedwriter.Writer{Writer: os.Stderr}
	if c.logger == nil {
		c.logger = logging.NewVaultLoggerWithWriter(logWriter, hclog.Trace)
	}

	// preflight validates flags, checks connectivity, creates the output
	// directory, and populates c.debugIndex and c.cachedClient.
	dstOutputFile, err := c.preflight(args)
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error during validation: %s", err))
		return 1
	}

	// Print debug information
	c.UI.Output("==> Starting debug capture...")
	c.UI.Info(fmt.Sprintf("         Vault Address: %s", c.debugIndex.VaultAddress))
	c.UI.Info(fmt.Sprintf("        Client Version: %s", c.debugIndex.ClientVersion))
	c.UI.Info(fmt.Sprintf("              Duration: %s", c.flagDuration))
	c.UI.Info(fmt.Sprintf("              Interval: %s", c.flagInterval))
	c.UI.Info(fmt.Sprintf("      Metrics Interval: %s", c.flagMetricsInterval))
	c.UI.Info(fmt.Sprintf("               Targets: %s", strings.Join(c.flagTargets, ", ")))
	c.UI.Info(fmt.Sprintf("                Output: %s", dstOutputFile))
	c.UI.Output("")

	// Release the log gate.
	logWriter.Flush()

	// Capture static information
	c.UI.Info("==> Capturing static information...")
	if err := c.captureStaticTargets(); err != nil {
		c.UI.Error(fmt.Sprintf("Error capturing static information: %s", err))
		return 2
	}

	c.UI.Output("")

	// Capture polling information; this blocks for the full duration (or
	// until interrupted via ShutdownCh).
	c.UI.Info("==> Capturing dynamic information...")
	if err := c.capturePollingTargets(); err != nil {
		c.UI.Error(fmt.Sprintf("Error capturing dynamic information: %s", err))
		return 2
	}

	c.UI.Output("Finished capturing information, bundling files...")

	// Generate index file
	if err := c.generateIndex(); err != nil {
		c.UI.Error(fmt.Sprintf("Error generating index: %s", err))
		return 1
	}

	if c.flagCompress {
		if err := c.compress(dstOutputFile); err != nil {
			c.UI.Error(fmt.Sprintf("Error encountered during bundle compression: %s", err))
			// We want to inform that data collection was captured and stored in
			// a directory even if compression fails
			c.UI.Info(fmt.Sprintf("Data written to: %s", c.flagOutput))
			return 1
		}
	}

	c.UI.Info(fmt.Sprintf("Success! Bundle written to: %s", dstOutputFile))
	return 0
}
|
||||
|
||||
// Synopsis returns the one-line description shown in command listings.
func (c *DebugCommand) Synopsis() string {
	return "Runs the debug command"
}
|
||||
|
||||
// generateIndex walks the output directory to build a layout of captured
// files (top-level files plus one entry per timestamped pprof sub-directory),
// stores it on the index, and writes the index out as index.json.
func (c *DebugCommand) generateIndex() error {
	outputLayout := map[string]interface{}{
		"files": []string{},
	}
	// Walk the directory to generate the output layout
	err := filepath.Walk(c.flagOutput, func(path string, info os.FileInfo, err error) error {
		// Prevent panic by handling failure accessing a path
		if err != nil {
			return err
		}

		// Skip the base dir
		if path == c.flagOutput {
			return nil
		}

		// If we're a directory, simply add a corresponding map
		if info.IsDir() {
			// Sub-directories are named with fileFriendlyTimeFormat, so the
			// name parses back into the interval's capture timestamp.
			parsedTime, err := time.Parse(fileFriendlyTimeFormat, info.Name())
			if err != nil {
				return err
			}

			outputLayout[info.Name()] = map[string]interface{}{
				"timestamp": parsedTime,
				"files":     []string{},
			}
			return nil
		}

		relPath, err := filepath.Rel(c.flagOutput, path)
		if err != nil {
			return err
		}

		// Files inside a sub-directory are appended to that directory's
		// "files" list; top-level files go into the root "files" list.
		dir, file := filepath.Split(relPath)
		if len(dir) != 0 {
			dir = strings.TrimSuffix(dir, "/")
			filesArr := outputLayout[dir].(map[string]interface{})["files"]
			outputLayout[dir].(map[string]interface{})["files"] = append(filesArr.([]string), file)
		} else {
			outputLayout["files"] = append(outputLayout["files"].([]string), file)
		}

		return nil
	})
	if err != nil {
		return fmt.Errorf("error generating directory output layout: %s", err)
	}

	c.debugIndex.Output = outputLayout

	// Marshal into json
	bytes, err := json.MarshalIndent(c.debugIndex, "", " ")
	if err != nil {
		return fmt.Errorf("error marshaling index file: %s", err)
	}

	// Write out file
	if err := ioutil.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0644); err != nil {
		return fmt.Errorf("error generating index file; %s", err)
	}

	return nil
}
|
||||
|
||||
// preflight performs various checks against the provided flags to ensure they
// are valid/reasonable values. It also takes care of instantiating a client and
// index object for use by the command. It returns the final bundle path
// (directory, or archive path when compression is enabled).
func (c *DebugCommand) preflight(rawArgs []string) (string, error) {
	if !c.skipTimingChecks {
		// Guard duration and interval values to acceptable values
		if c.flagDuration < debugMinInterval {
			c.UI.Info(fmt.Sprintf("Overwriting duration value %q to the minimum value of %q", c.flagDuration, debugMinInterval))
			c.flagDuration = debugMinInterval
		}
		if c.flagInterval < debugMinInterval {
			c.UI.Info(fmt.Sprintf("Overwriting interval value %q to the minimum value of %q", c.flagInterval, debugMinInterval))
			c.flagInterval = debugMinInterval
		}
		if c.flagMetricsInterval < debugMinInterval {
			c.UI.Info(fmt.Sprintf("Overwriting metrics interval value %q to the minimum value of %q", c.flagMetricsInterval, debugMinInterval))
			c.flagMetricsInterval = debugMinInterval
		}
	}

	// These timing checks are always applicable since interval shouldn't be
	// greater than the duration
	if c.flagInterval > c.flagDuration {
		c.UI.Info(fmt.Sprintf("Overwriting interval value %q to the duration value %q", c.flagInterval, c.flagDuration))
		c.flagInterval = c.flagDuration
	}
	if c.flagMetricsInterval > c.flagDuration {
		c.UI.Info(fmt.Sprintf("Overwriting metrics interval value %q to the duration value %q", c.flagMetricsInterval, c.flagDuration))
		c.flagMetricsInterval = c.flagDuration
	}

	if len(c.flagTargets) == 0 {
		c.flagTargets = c.defaultTargets()
	}

	// Make sure we can talk to the server
	client, err := c.Client()
	if err != nil {
		return "", fmt.Errorf("unable to create client to connect to Vault: %s", err)
	}
	if _, err := client.Sys().Health(); err != nil {
		return "", fmt.Errorf("unable to connect to the server: %s", err)
	}
	c.cachedClient = client

	captureTime := time.Now().UTC()
	if len(c.flagOutput) == 0 {
		formattedTime := captureTime.Format(fileFriendlyTimeFormat)
		c.flagOutput = fmt.Sprintf("vault-debug-%s", formattedTime)
	}

	// Strip trailing slash before proceeding
	c.flagOutput = strings.TrimSuffix(c.flagOutput, "/")

	// If compression is enabled, trim the extension so that the files are
	// written to a directory even if compression somehow fails. We ensure the
	// extension during compression. We also prevent overwriting if the file
	// already exists.
	dstOutputFile := c.flagOutput
	if c.flagCompress {
		if !strings.HasSuffix(dstOutputFile, ".tar.gz") && !strings.HasSuffix(dstOutputFile, ".tgz") {
			dstOutputFile = dstOutputFile + debugCompressionExt
		}

		// Ensure that the file doesn't already exist, and ensure that we always
		// trim the extension from flagOutput since we'll be progressively
		// writing to that.
		_, err := os.Stat(dstOutputFile)
		switch {
		case os.IsNotExist(err):
			c.flagOutput = strings.TrimSuffix(c.flagOutput, ".tar.gz")
			c.flagOutput = strings.TrimSuffix(c.flagOutput, ".tgz")
		case err != nil:
			return "", fmt.Errorf("unable to stat file: %s", err)
		default:
			// A nil stat error means the archive already exists.
			return "", fmt.Errorf("output file already exists: %s", dstOutputFile)
		}
	}

	// Stat check the directory to ensure we don't override any existing data.
	_, err = os.Stat(c.flagOutput)
	switch {
	case os.IsNotExist(err):
		err := os.MkdirAll(c.flagOutput, 0755)
		if err != nil {
			return "", fmt.Errorf("unable to create output directory: %s", err)
		}
	case err != nil:
		return "", fmt.Errorf("unable to stat directory: %s", err)
	default:
		return "", fmt.Errorf("output directory already exists: %s", c.flagOutput)
	}

	// Populate initial index fields
	c.debugIndex = &debugIndex{
		VaultAddress:           client.Address(),
		ClientVersion:          version.GetVersion().VersionNumber(),
		Compress:               c.flagCompress,
		DurationSeconds:        int(c.flagDuration.Seconds()),
		IntervalSeconds:        int(c.flagInterval.Seconds()),
		MetricsIntervalSeconds: int(c.flagMetricsInterval.Seconds()),
		RawArgs:                rawArgs,
		Version:                debugIndexVersion,
		Targets:                c.flagTargets,
		Timestamp:              captureTime,
		Errors:                 []*captureError{},
	}

	return dstOutputFile, nil
}
|
||||
|
||||
func (c *DebugCommand) defaultTargets() []string {
|
||||
return []string{"config", "host", "metrics", "pprof", "replication-status", "server-status"}
|
||||
}
|
||||
|
||||
// captureStaticTargets captures one-shot (non-polling) targets. Currently
// only the "config" target is static: it reads the sanitized configuration
// state once and persists it to config.json. Errors are recorded in the
// index rather than aborting.
func (c *DebugCommand) captureStaticTargets() error {
	// Capture configuration state
	if strutil.StrListContains(c.flagTargets, "config") {
		c.logger.Info("capturing configuration state")

		resp, err := c.cachedClient.Logical().Read("sys/config/state/sanitized")
		if err != nil {
			c.captureError("config", err)
			c.logger.Error("config: error capturing config state", "error", err)
		}

		if resp != nil && resp.Data != nil {
			collection := []map[string]interface{}{
				{
					"timestamp": time.Now().UTC(),
					"config":    resp.Data,
				},
			}
			if err := c.persistCollection(collection, "config.json"); err != nil {
				c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "config.json", err))
			}
		}
	}

	return nil
}
|
||||
|
||||
// capturePollingTargets captures all dynamic targets over the specified
// duration and interval. Each selected target runs as an actor in a
// run.Group; a watcher actor exits on interrupt (ShutdownCh) or when the
// duration (plus grace) elapses, and each target's interrupt handler cancels
// the shared context so all pollers stop together. Collected data is
// persisted to per-target JSON files once polling completes.
func (c *DebugCommand) capturePollingTargets() error {
	var g run.Group

	ctx, cancelFunc := context.WithTimeout(context.Background(), c.flagDuration+debugDurationGrace)

	// This run group watches for interrupt or duration
	g.Add(func() error {
		for {
			select {
			case <-c.ShutdownCh:
				return nil
			case <-ctx.Done():
				return nil
			}
		}
	}, func(error) {})

	// Collect host-info if target is specified
	if strutil.StrListContains(c.flagTargets, "host") {
		g.Add(func() error {
			c.collectHostInfo(ctx)
			return nil
		}, func(error) {
			cancelFunc()
		})
	}

	// Collect metrics if target is specified
	if strutil.StrListContains(c.flagTargets, "metrics") {
		g.Add(func() error {
			c.collectMetrics(ctx)
			return nil
		}, func(error) {
			cancelFunc()
		})
	}

	// Collect pprof data if target is specified
	if strutil.StrListContains(c.flagTargets, "pprof") {
		g.Add(func() error {
			c.collectPprof(ctx)
			return nil
		}, func(error) {
			cancelFunc()
		})
	}

	// Collect replication status if target is specified
	if strutil.StrListContains(c.flagTargets, "replication-status") {
		g.Add(func() error {
			c.collectReplicationStatus(ctx)
			return nil
		}, func(error) {
			cancelFunc()
		})
	}

	// Collect server status if target is specified
	if strutil.StrListContains(c.flagTargets, "server-status") {
		g.Add(func() error {
			c.collectServerStatus(ctx)
			return nil
		}, func(error) {
			cancelFunc()
		})
	}

	// We shouldn't bump across errors since none is returned by the interrupts,
	// but we error check for sanity here.
	if err := g.Run(); err != nil {
		return err
	}

	// Write collected data to their corresponding files
	if err := c.persistCollection(c.metricsCollection, "metrics.json"); err != nil {
		c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "metrics.json", err))
	}
	if err := c.persistCollection(c.serverStatusCollection, "server_status.json"); err != nil {
		c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "server_status.json", err))
	}
	if err := c.persistCollection(c.replicationStatusCollection, "replication_status.json"); err != nil {
		c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "replication_status.json", err))
	}
	if err := c.persistCollection(c.hostInfoCollection, "host_info.json"); err != nil {
		c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "host_info.json", err))
	}

	return nil
}
|
||||
|
||||
func (c *DebugCommand) collectHostInfo(ctx context.Context) {
|
||||
idxCount := 0
|
||||
intervalTicker := time.Tick(c.flagInterval)
|
||||
|
||||
for {
|
||||
if idxCount > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-intervalTicker:
|
||||
}
|
||||
}
|
||||
|
||||
c.logger.Info("capturing host information", "count", idxCount)
|
||||
idxCount++
|
||||
|
||||
r := c.cachedClient.NewRequest("GET", "/v1/sys/host-info")
|
||||
resp, err := c.cachedClient.RawRequestWithContext(ctx, r)
|
||||
if err != nil {
|
||||
c.captureError("host", err)
|
||||
}
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
|
||||
secret, err := api.ParseSecret(resp.Body)
|
||||
if err != nil {
|
||||
c.captureError("host", err)
|
||||
}
|
||||
if secret != nil && secret.Data != nil {
|
||||
hostEntry := secret.Data
|
||||
c.hostInfoCollection = append(c.hostInfoCollection, hostEntry)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *DebugCommand) collectMetrics(ctx context.Context) {
|
||||
idxCount := 0
|
||||
intervalTicker := time.Tick(c.flagMetricsInterval)
|
||||
|
||||
for {
|
||||
if idxCount > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-intervalTicker:
|
||||
}
|
||||
}
|
||||
|
||||
c.logger.Info("capturing metrics", "count", idxCount)
|
||||
idxCount++
|
||||
|
||||
healthStatus, err := c.cachedClient.Sys().Health()
|
||||
if err != nil {
|
||||
c.captureError("metrics", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check replication status. We skip on processing metrics if we're one
|
||||
// of the following (since the request will be forwarded):
|
||||
// 1. Any type of DR Node
|
||||
// 2. Non-DR, non-performance standby nodes
|
||||
switch {
|
||||
case healthStatus.ReplicationDRMode == "secondary":
|
||||
c.logger.Info("skipping metrics capture on DR secondary node")
|
||||
continue
|
||||
case healthStatus.Standby && !healthStatus.PerformanceStandby:
|
||||
c.logger.Info("skipping metrics on standby node")
|
||||
continue
|
||||
}
|
||||
|
||||
// Perform metrics request
|
||||
r := c.cachedClient.NewRequest("GET", "/v1/sys/metrics")
|
||||
resp, err := c.cachedClient.RawRequestWithContext(ctx, r)
|
||||
if err != nil {
|
||||
c.captureError("metrics", err)
|
||||
continue
|
||||
}
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
|
||||
metricsEntry := make(map[string]interface{})
|
||||
err := json.NewDecoder(resp.Body).Decode(&metricsEntry)
|
||||
if err != nil {
|
||||
c.captureError("metrics", err)
|
||||
continue
|
||||
}
|
||||
c.metricsCollection = append(c.metricsCollection, metricsEntry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// collectPprof polls the sys/pprof endpoints on every interval tick, writing
// each capture into a timestamp-named sub-directory under the output path.
// Goroutine and heap dumps are captured on every tick; the CPU profile and
// trace (which themselves run for a full interval) are skipped when the
// remaining duration cannot fit another interval. All four fetches for a
// tick run concurrently and are joined before the next tick. Returns when
// ctx is canceled.
func (c *DebugCommand) collectPprof(ctx context.Context) {
	idxCount := 0
	startTime := time.Now()
	intervalTicker := time.Tick(c.flagInterval)

	for {
		// First capture fires immediately; later ones wait for the ticker.
		if idxCount > 0 {
			select {
			case <-ctx.Done():
				return
			case <-intervalTicker:
			}
		}

		currentTimestamp := time.Now().UTC()
		c.logger.Info("capturing pprof data", "count", idxCount)
		idxCount++

		// Create a sub-directory for pprof data
		currentDir := currentTimestamp.Format(fileFriendlyTimeFormat)
		dirName := filepath.Join(c.flagOutput, currentDir)
		if err := os.MkdirAll(dirName, 0755); err != nil {
			c.UI.Error(fmt.Sprintf("Error creating sub-directory for time interval: %s", err))
			continue
		}

		var wg sync.WaitGroup

		// Capture goroutines
		wg.Add(1)
		go func() {
			defer wg.Done()
			data, err := pprofGoroutine(ctx, c.cachedClient)
			if err != nil {
				c.captureError("pprof.goroutine", err)
				return
			}

			err = ioutil.WriteFile(filepath.Join(dirName, "goroutine.prof"), data, 0644)
			if err != nil {
				c.captureError("pprof.goroutine", err)
			}
		}()

		// Capture heap
		wg.Add(1)
		go func() {
			defer wg.Done()
			data, err := pprofHeap(ctx, c.cachedClient)
			if err != nil {
				c.captureError("pprof.heap", err)
				return
			}

			err = ioutil.WriteFile(filepath.Join(dirName, "heap.prof"), data, 0644)
			if err != nil {
				c.captureError("pprof.heap", err)
			}
		}()

		// If the our remaining duration is less than the interval value
		// skip profile and trace.
		runDuration := currentTimestamp.Sub(startTime)
		if (c.flagDuration+debugDurationGrace)-runDuration < c.flagInterval {
			wg.Wait()
			continue
		}

		// Capture profile
		wg.Add(1)
		go func() {
			defer wg.Done()
			data, err := pprofProfile(ctx, c.cachedClient, c.flagInterval)
			if err != nil {
				c.captureError("pprof.profile", err)
				return
			}

			err = ioutil.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0644)
			if err != nil {
				c.captureError("pprof.profile", err)
			}
		}()

		// Capture trace
		wg.Add(1)
		go func() {
			defer wg.Done()
			data, err := pprofTrace(ctx, c.cachedClient, c.flagInterval)
			if err != nil {
				c.captureError("pprof.trace", err)
				return
			}

			err = ioutil.WriteFile(filepath.Join(dirName, "trace.out"), data, 0644)
			if err != nil {
				c.captureError("pprof.trace", err)
			}
		}()

		wg.Wait()
	}
}
|
||||
|
||||
func (c *DebugCommand) collectReplicationStatus(ctx context.Context) {
|
||||
idxCount := 0
|
||||
intervalTicker := time.Tick(c.flagInterval)
|
||||
|
||||
for {
|
||||
if idxCount > 0 {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-intervalTicker:
|
||||
}
|
||||
}
|
||||
|
||||
c.logger.Info("capturing replication status", "count", idxCount)
|
||||
idxCount++
|
||||
|
||||
r := c.cachedClient.NewRequest("GET", "/v1/sys/replication/status")
|
||||
resp, err := c.cachedClient.RawRequestWithContext(ctx, r)
|
||||
if err != nil {
|
||||
c.captureError("replication-status", err)
|
||||
}
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
|
||||
secret, err := api.ParseSecret(resp.Body)
|
||||
if err != nil {
|
||||
c.captureError("replication-status", err)
|
||||
}
|
||||
if replicationEntry := secret.Data; replicationEntry != nil {
|
||||
replicationEntry["timestamp"] = time.Now().UTC()
|
||||
c.replicationStatusCollection = append(c.replicationStatusCollection, replicationEntry)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// collectServerStatus polls health and seal status on every interval tick
// and appends a timestamped entry to c.serverStatusCollection. Returns when
// ctx is canceled. Note that an entry is appended even when one of the two
// lookups fails (the failed field is nil and the error is recorded in the
// index).
func (c *DebugCommand) collectServerStatus(ctx context.Context) {
	idxCount := 0
	intervalTicker := time.Tick(c.flagInterval)

	for {
		// First capture fires immediately; later ones wait for the ticker.
		if idxCount > 0 {
			select {
			case <-ctx.Done():
				return
			case <-intervalTicker:
			}
		}

		c.logger.Info("capturing server status", "count", idxCount)
		idxCount++

		healthInfo, err := c.cachedClient.Sys().Health()
		if err != nil {
			c.captureError("server-status.health", err)
		}
		sealInfo, err := c.cachedClient.Sys().SealStatus()
		if err != nil {
			c.captureError("server-status.seal", err)
		}

		statusEntry := map[string]interface{}{
			"timestamp": time.Now().UTC(),
			"health":    healthInfo,
			"seal":      sealInfo,
		}
		c.serverStatusCollection = append(c.serverStatusCollection, statusEntry)
	}
}
|
||||
|
||||
// persistCollection writes the collected data for a particular target onto the
|
||||
// specified file. If the collection is empty, it returns immediately.
|
||||
func (c *DebugCommand) persistCollection(collection []map[string]interface{}, outFile string) error {
|
||||
if len(collection) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write server-status file and update the index
|
||||
bytes, err := json.MarshalIndent(collection, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *DebugCommand) compress(dst string) error {
|
||||
tgz := archiver.NewTarGz()
|
||||
if err := tgz.Archive([]string{c.flagOutput}, dst); err != nil {
|
||||
return fmt.Errorf("failed to compress data: %s", err)
|
||||
}
|
||||
|
||||
// If everything is fine up to this point, remove original directory
|
||||
if err := os.RemoveAll(c.flagOutput); err != nil {
|
||||
return fmt.Errorf("failed to remove data directory: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func pprofGoroutine(ctx context.Context, client *api.Client) ([]byte, error) {
|
||||
req := client.NewRequest("GET", "/v1/sys/pprof/goroutine")
|
||||
resp, err := client.RawRequestWithContext(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func pprofHeap(ctx context.Context, client *api.Client) ([]byte, error) {
|
||||
req := client.NewRequest("GET", "/v1/sys/pprof/heap")
|
||||
resp, err := client.RawRequestWithContext(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func pprofProfile(ctx context.Context, client *api.Client, duration time.Duration) ([]byte, error) {
|
||||
seconds := int(duration.Seconds())
|
||||
secStr := strconv.Itoa(seconds)
|
||||
|
||||
req := client.NewRequest("GET", "/v1/sys/pprof/profile")
|
||||
req.Params.Add("seconds", secStr)
|
||||
resp, err := client.RawRequestWithContext(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func pprofTrace(ctx context.Context, client *api.Client, duration time.Duration) ([]byte, error) {
|
||||
seconds := int(duration.Seconds())
|
||||
secStr := strconv.Itoa(seconds)
|
||||
|
||||
req := client.NewRequest("GET", "/v1/sys/pprof/trace")
|
||||
req.Params.Add("seconds", secStr)
|
||||
resp, err := client.RawRequestWithContext(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// newCaptureError instantiates a new captureError.
|
||||
func (c *DebugCommand) captureError(target string, err error) {
|
||||
c.errLock.Lock()
|
||||
c.debugIndex.Errors = append(c.debugIndex.Errors, &captureError{
|
||||
TargetError: err.Error(),
|
||||
Target: target,
|
||||
Timestamp: time.Now().UTC(),
|
||||
})
|
||||
c.errLock.Unlock()
|
||||
}
|
||||
687
command/debug_test.go
Normal file
687
command/debug_test.go
Normal file
@@ -0,0 +1,687 @@
|
||||
package command
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/api"
|
||||
"github.com/mholt/archiver"
|
||||
"github.com/mitchellh/cli"
|
||||
)
|
||||
|
||||
func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) {
|
||||
tb.Helper()
|
||||
|
||||
ui := cli.NewMockUi()
|
||||
return ui, &DebugCommand{
|
||||
BaseCommand: &BaseCommand{
|
||||
UI: ui,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TestDebugCommand_Run exercises the debug command's top-level argument
// handling against a live test Vault server: a valid invocation succeeds and
// extra positional arguments are rejected with an error message.
func TestDebugCommand_Run(t *testing.T) {
	t.Parallel()

	testDir, err := ioutil.TempDir("", "vault-debug")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(testDir)

	cases := []struct {
		name string
		args []string
		out  string // substring expected in combined UI output
		code int    // expected exit code
	}{
		{
			"valid",
			[]string{
				"-duration=1s",
				fmt.Sprintf("-output=%s/valid", testDir),
			},
			"",
			0,
		},
		{
			"too_many_args",
			[]string{
				"-duration=1s",
				fmt.Sprintf("-output=%s/too_many_args", testDir),
				"foo",
			},
			"Too many arguments",
			1,
		},
	}

	for _, tc := range cases {
		tc := tc // capture range variable for the parallel subtest

		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			client, closer := testVaultServer(t)
			defer closer()

			ui, cmd := testDebugCommand(t)
			cmd.client = client
			// Skip the minimum duration/interval enforcement so short
			// timings can be used in tests.
			cmd.skipTimingChecks = true

			code := cmd.Run(tc.args)
			if code != tc.code {
				t.Errorf("expected %d to be %d", code, tc.code)
			}

			combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
			if !strings.Contains(combined, tc.out) {
				t.Fatalf("expected %q to contain %q", combined, tc.out)
			}
		})
	}
}
|
||||
|
||||
func TestDebugCommand_Archive(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
ext string
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
"no-ext",
|
||||
"",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"with-ext-tar-gz",
|
||||
".tar.gz",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"with-ext-tgz",
|
||||
".tgz",
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Create temp dirs for each test case since os.Stat and tgz.Walk
|
||||
// (called down below) exhibits raciness otherwise.
|
||||
testDir, err := ioutil.TempDir("", "vault-debug")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
client, closer := testVaultServer(t)
|
||||
defer closer()
|
||||
|
||||
ui, cmd := testDebugCommand(t)
|
||||
cmd.client = client
|
||||
cmd.skipTimingChecks = true
|
||||
|
||||
// We use tc.name as the base path and apply the extension per
|
||||
// test case.
|
||||
basePath := tc.name
|
||||
outputPath := filepath.Join(testDir, basePath+tc.ext)
|
||||
args := []string{
|
||||
"-duration=1s",
|
||||
fmt.Sprintf("-output=%s", outputPath),
|
||||
"-target=server-status",
|
||||
}
|
||||
|
||||
code := cmd.Run(args)
|
||||
if exp := 0; code != exp {
|
||||
t.Log(ui.OutputWriter.String())
|
||||
t.Log(ui.ErrorWriter.String())
|
||||
t.Fatalf("expected %d to be %d", code, exp)
|
||||
}
|
||||
// If we expect an error we're done here
|
||||
if tc.expectError {
|
||||
return
|
||||
}
|
||||
|
||||
expectedExt := tc.ext
|
||||
if expectedExt == "" {
|
||||
expectedExt = debugCompressionExt
|
||||
}
|
||||
|
||||
bundlePath := filepath.Join(testDir, basePath+expectedExt)
|
||||
_, err = os.Stat(bundlePath)
|
||||
if os.IsNotExist(err) {
|
||||
t.Log(ui.OutputWriter.String())
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tgz := archiver.NewTarGz()
|
||||
err = tgz.Walk(bundlePath, func(f archiver.File) error {
|
||||
fh, ok := f.Header.(*tar.Header)
|
||||
if !ok {
|
||||
t.Fatalf("invalid file header: %#v", f.Header)
|
||||
}
|
||||
|
||||
// Ignore base directory and index file
|
||||
if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") {
|
||||
return nil
|
||||
}
|
||||
|
||||
if fh.Name != filepath.Join(basePath, "server_status.json") {
|
||||
t.Fatalf("unxexpected file: %s", fh.Name)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDebugCommand_CaptureTargets runs the debug command against each
// non-pprof capture target (and their combination) and asserts that the
// produced archive contains exactly the expected output files, besides the
// base directory and index file.
func TestDebugCommand_CaptureTargets(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name          string
		targets       []string // -target flag values
		expectedFiles []string // files expected inside the bundle
	}{
		{
			"config",
			[]string{"config"},
			[]string{"config.json"},
		},
		{
			"host-info",
			[]string{"host"},
			[]string{"host_info.json"},
		},
		{
			"metrics",
			[]string{"metrics"},
			[]string{"metrics.json"},
		},
		{
			"replication-status",
			[]string{"replication-status"},
			[]string{"replication_status.json"},
		},
		{
			"server-status",
			[]string{"server-status"},
			[]string{"server_status.json"},
		},
		{
			"all-minus-pprof",
			[]string{"config", "host", "metrics", "replication-status", "server-status"},
			[]string{"config.json", "host_info.json", "metrics.json", "replication_status.json", "server_status.json"},
		},
	}

	for _, tc := range cases {
		tc := tc // capture range variable for the parallel subtest

		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			testDir, err := ioutil.TempDir("", "vault-debug")
			if err != nil {
				t.Fatal(err)
			}
			defer os.RemoveAll(testDir)

			client, closer := testVaultServer(t)
			defer closer()

			ui, cmd := testDebugCommand(t)
			cmd.client = client
			// Skip minimum duration/interval enforcement to keep the test fast.
			cmd.skipTimingChecks = true

			basePath := tc.name
			args := []string{
				"-duration=1s",
				fmt.Sprintf("-output=%s/%s", testDir, basePath),
			}
			for _, target := range tc.targets {
				args = append(args, fmt.Sprintf("-target=%s", target))
			}

			code := cmd.Run(args)
			if exp := 0; code != exp {
				t.Log(ui.ErrorWriter.String())
				t.Fatalf("expected %d to be %d", code, exp)
			}

			bundlePath := filepath.Join(testDir, basePath+debugCompressionExt)
			_, err = os.Open(bundlePath)
			if err != nil {
				t.Fatalf("failed to open archive: %s", err)
			}

			tgz := archiver.NewTarGz()
			err = tgz.Walk(bundlePath, func(f archiver.File) error {
				fh, ok := f.Header.(*tar.Header)
				if !ok {
					t.Fatalf("invalid file header: %#v", f.Header)
				}

				// Ignore base directory and index file
				if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") {
					return nil
				}

				for _, fileName := range tc.expectedFiles {
					if fh.Name == filepath.Join(basePath, fileName) {
						return nil
					}
				}

				// If we reach here, it means that this is an unexpected file
				return fmt.Errorf("unexpected file: %s", fh.Name)
			})
			if err != nil {
				t.Fatal(err)
			}
		})
	}
}
|
||||
|
||||
// TestDebugCommand_Pprof runs the debug command with the pprof target and
// checks that snapshot profiles (heap, goroutine) are captured on both the
// first and last frames, while polling outputs (profile, trace) are captured
// only on the first frame. Deliberately not parallel — presumably because the
// pprof capture is timing-sensitive; confirm before adding t.Parallel().
func TestDebugCommand_Pprof(t *testing.T) {
	testDir, err := ioutil.TempDir("", "vault-debug")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(testDir)

	client, closer := testVaultServer(t)
	defer closer()

	ui, cmd := testDebugCommand(t)
	cmd.client = client
	cmd.skipTimingChecks = true

	basePath := "pprof"
	outputPath := filepath.Join(testDir, basePath)
	// pprof requires a minimum interval of 1s, we set it to 2 to ensure it
	// runs through and reduce flakiness on slower systems.
	args := []string{
		"-compress=false",
		"-duration=2s",
		"-interval=2s",
		fmt.Sprintf("-output=%s", outputPath),
		"-target=pprof",
	}

	code := cmd.Run(args)
	if exp := 0; code != exp {
		t.Log(ui.ErrorWriter.String())
		t.Fatalf("expected %d to be %d", code, exp)
	}

	profiles := []string{"heap.prof", "goroutine.prof"}
	pollingProfiles := []string{"profile.prof", "trace.out"}

	// These are captures on the first (0th) and last (1st) frame
	for _, v := range profiles {
		files, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v))
		if len(files) != 2 {
			t.Errorf("2 output files should exist for %s: got: %v", v, files)
		}
	}

	// Since profile and trace are polling outputs, these only get captured
	// on the first (0th) frame.
	for _, v := range pollingProfiles {
		files, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v))
		if len(files) != 1 {
			t.Errorf("1 output file should exist for %s: got: %v", v, files)
		}
	}

	t.Log(ui.OutputWriter.String())
	t.Log(ui.ErrorWriter.String())
}
|
||||
|
||||
// TestDebugCommand_IndexFile runs an uncompressed capture and verifies that
// the generated index.json parses into a debugIndex with a non-empty output
// listing.
func TestDebugCommand_IndexFile(t *testing.T) {
	t.Parallel()

	testDir, err := ioutil.TempDir("", "vault-debug")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(testDir)

	client, closer := testVaultServer(t)
	defer closer()

	ui, cmd := testDebugCommand(t)
	cmd.client = client
	cmd.skipTimingChecks = true

	basePath := "index-test"
	outputPath := filepath.Join(testDir, basePath)
	// pprof requires a minimum interval of 1s
	args := []string{
		"-compress=false",
		"-duration=1s",
		"-interval=1s",
		"-metrics-interval=1s",
		fmt.Sprintf("-output=%s", outputPath),
	}

	code := cmd.Run(args)
	if exp := 0; code != exp {
		t.Log(ui.ErrorWriter.String())
		t.Fatalf("expected %d to be %d", code, exp)
	}

	content, err := ioutil.ReadFile(filepath.Join(outputPath, "index.json"))
	if err != nil {
		t.Fatal(err)
	}

	index := &debugIndex{}
	if err := json.Unmarshal(content, index); err != nil {
		t.Fatal(err)
	}
	if len(index.Output) == 0 {
		t.Fatalf("expected valid index file: got: %v", index)
	}
}
|
||||
|
||||
func TestDebugCommand_TimingChecks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
testDir, err := ioutil.TempDir("", "vault-debug")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
duration string
|
||||
interval string
|
||||
metricsInterval string
|
||||
}{
|
||||
{
|
||||
"short-values-all",
|
||||
"10ms",
|
||||
"10ms",
|
||||
"10ms",
|
||||
},
|
||||
{
|
||||
"short-duration",
|
||||
"10ms",
|
||||
"",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"short-interval",
|
||||
debugMinInterval.String(),
|
||||
"10ms",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"short-metrics-interval",
|
||||
debugMinInterval.String(),
|
||||
"",
|
||||
"10ms",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
tc := tc
|
||||
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, closer := testVaultServer(t)
|
||||
defer closer()
|
||||
|
||||
// If we are past the minimum duration + some grace, trigger shutdown
|
||||
// to prevent hanging
|
||||
grace := 10 * time.Second
|
||||
shutdownCh := make(chan struct{})
|
||||
go func() {
|
||||
time.AfterFunc(grace, func() {
|
||||
close(shutdownCh)
|
||||
})
|
||||
}()
|
||||
|
||||
ui, cmd := testDebugCommand(t)
|
||||
cmd.client = client
|
||||
cmd.ShutdownCh = shutdownCh
|
||||
|
||||
basePath := tc.name
|
||||
outputPath := filepath.Join(testDir, basePath)
|
||||
// pprof requires a minimum interval of 1s
|
||||
args := []string{
|
||||
"-target=server-status",
|
||||
fmt.Sprintf("-output=%s", outputPath),
|
||||
}
|
||||
if tc.duration != "" {
|
||||
args = append(args, fmt.Sprintf("-duration=%s", tc.duration))
|
||||
}
|
||||
if tc.interval != "" {
|
||||
args = append(args, fmt.Sprintf("-interval=%s", tc.interval))
|
||||
}
|
||||
if tc.metricsInterval != "" {
|
||||
args = append(args, fmt.Sprintf("-metrics-interval=%s", tc.metricsInterval))
|
||||
}
|
||||
|
||||
code := cmd.Run(args)
|
||||
if exp := 0; code != exp {
|
||||
t.Log(ui.ErrorWriter.String())
|
||||
t.Fatalf("expected %d to be %d", code, exp)
|
||||
}
|
||||
|
||||
if !strings.Contains(ui.OutputWriter.String(), "Duration: 5s") {
|
||||
t.Fatal("expected minimum duration value")
|
||||
}
|
||||
|
||||
if tc.interval != "" {
|
||||
if !strings.Contains(ui.OutputWriter.String(), " Interval: 5s") {
|
||||
t.Fatal("expected minimum interval value")
|
||||
}
|
||||
}
|
||||
|
||||
if tc.metricsInterval != "" {
|
||||
if !strings.Contains(ui.OutputWriter.String(), "Metrics Interval: 5s") {
|
||||
t.Fatal("expected minimum metrics interval value")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDebugCommand_NoConnection(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
client, err := api.NewClient(nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, cmd := testDebugCommand(t)
|
||||
cmd.client = client
|
||||
cmd.skipTimingChecks = true
|
||||
|
||||
args := []string{
|
||||
"-duration=1s",
|
||||
"-target=server-status",
|
||||
}
|
||||
|
||||
code := cmd.Run(args)
|
||||
if exp := 1; code != exp {
|
||||
t.Fatalf("expected %d to be %d", code, exp)
|
||||
}
|
||||
}
|
||||
|
||||
// TestDebugCommand_OutputExists verifies that the debug command refuses to
// run when the output destination already exists, both for the compressed
// (file conflict) and uncompressed (directory conflict) cases.
func TestDebugCommand_OutputExists(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name          string
		compress      bool
		outputFile    string
		expectedError string // substring expected in the UI output
	}{
		{
			"no-compress",
			false,
			"output-exists",
			"output directory already exists",
		},
		{
			"compress",
			true,
			"output-exist.tar.gz",
			"output file already exists",
		},
	}

	for _, tc := range cases {
		tc := tc // capture range variable for the parallel subtest

		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			testDir, err := ioutil.TempDir("", "vault-debug")
			if err != nil {
				t.Fatal(err)
			}
			defer os.RemoveAll(testDir)

			client, closer := testVaultServer(t)
			defer closer()

			ui, cmd := testDebugCommand(t)
			cmd.client = client
			cmd.skipTimingChecks = true

			outputPath := filepath.Join(testDir, tc.outputFile)

			// Create a conflicting file/directory
			if tc.compress {
				_, err = os.Create(outputPath)
				if err != nil {
					t.Fatal(err)
				}
			} else {
				err = os.Mkdir(outputPath, 0755)
				if err != nil {
					t.Fatal(err)
				}
			}

			args := []string{
				fmt.Sprintf("-compress=%t", tc.compress),
				"-duration=1s",
				"-interval=1s",
				"-metrics-interval=1s",
				fmt.Sprintf("-output=%s", outputPath),
			}

			code := cmd.Run(args)
			if exp := 1; code != exp {
				t.Log(ui.OutputWriter.String())
				t.Log(ui.ErrorWriter.String())
				t.Errorf("expected %d to be %d", code, exp)
			}

			output := ui.ErrorWriter.String() + ui.OutputWriter.String()
			if !strings.Contains(output, tc.expectedError) {
				t.Fatalf("expected %s, got: %s", tc.expectedError, output)
			}
		})
	}
}
|
||||
|
||||
// TestDebugCommand_PartialPermissions runs the debug command with a
// default-policy token and verifies that capture degrades gracefully: the
// bundle contains only the index, replication status, and server status
// outputs (the endpoints a default token can reach), with no files from the
// permission-denied targets.
func TestDebugCommand_PartialPermissions(t *testing.T) {
	t.Parallel()

	testDir, err := ioutil.TempDir("", "vault-debug")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(testDir)

	client, closer := testVaultServer(t)
	defer closer()

	// Create a new token with default policy
	resp, err := client.Logical().Write("auth/token/create", map[string]interface{}{
		"policies": "default",
	})
	if err != nil {
		t.Fatal(err)
	}

	client.SetToken(resp.Auth.ClientToken)

	ui, cmd := testDebugCommand(t)
	cmd.client = client
	cmd.skipTimingChecks = true

	basePath := "with-default-policy-token"
	args := []string{
		"-duration=1s",
		fmt.Sprintf("-output=%s/%s", testDir, basePath),
	}

	code := cmd.Run(args)
	if exp := 0; code != exp {
		t.Log(ui.ErrorWriter.String())
		t.Fatalf("expected %d to be %d", code, exp)
	}

	bundlePath := filepath.Join(testDir, basePath+debugCompressionExt)
	_, err = os.Open(bundlePath)
	if err != nil {
		t.Fatalf("failed to open archive: %s", err)
	}

	tgz := archiver.NewTarGz()
	err = tgz.Walk(bundlePath, func(f archiver.File) error {
		fh, ok := f.Header.(*tar.Header)
		if !ok {
			t.Fatalf("invalid file header: %#v", f.Header)
		}

		// Ignore base directory and index file
		if fh.Name == basePath+"/" {
			return nil
		}

		// Ignore directories, which still get created by pprof but should
		// otherwise be empty.
		if fh.FileInfo().IsDir() {
			return nil
		}

		switch {
		case fh.Name == filepath.Join(basePath, "index.json"):
		case fh.Name == filepath.Join(basePath, "replication_status.json"):
		case fh.Name == filepath.Join(basePath, "server_status.json"):
		default:
			return fmt.Errorf("unexpected file: %s", fh.Name)
		}

		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}
|
||||
4
go.mod
4
go.mod
@@ -33,6 +33,7 @@ require (
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d // indirect
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190412130859-3b1d194e553a
|
||||
github.com/dnaeon/go-vcr v1.0.1 // indirect
|
||||
github.com/dsnet/compress v0.0.1 // indirect
|
||||
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0
|
||||
github.com/fatih/color v1.7.0
|
||||
@@ -95,6 +96,7 @@ require (
|
||||
github.com/kr/text v0.1.0
|
||||
github.com/lib/pq v1.2.0
|
||||
github.com/mattn/go-colorable v0.1.2
|
||||
github.com/mholt/archiver v3.1.1+incompatible
|
||||
github.com/michaelklishin/rabbit-hole v1.5.0
|
||||
github.com/mitchellh/cli v1.0.0
|
||||
github.com/mitchellh/copystructure v1.0.0
|
||||
@@ -103,6 +105,7 @@ require (
|
||||
github.com/mitchellh/mapstructure v1.1.2
|
||||
github.com/mitchellh/reflectwalk v1.0.1
|
||||
github.com/ncw/swift v1.0.47
|
||||
github.com/nwaples/rardecode v1.0.0 // indirect
|
||||
github.com/oklog/run v1.0.0
|
||||
github.com/onsi/ginkgo v1.7.0 // indirect
|
||||
github.com/oracle/oci-go-sdk v7.0.0+incompatible
|
||||
@@ -120,6 +123,7 @@ require (
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
|
||||
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 // indirect
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
||||
go.etcd.io/bbolt v1.3.2
|
||||
go.etcd.io/etcd v0.0.0-20190412021913-f29b1ada1971
|
||||
go.uber.org/atomic v1.4.0
|
||||
|
||||
13
go.sum
13
go.sum
@@ -132,6 +132,9 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
|
||||
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
|
||||
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
|
||||
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
|
||||
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=
|
||||
@@ -413,6 +416,8 @@ github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f h1:Gsc9mVHLRqBjM
|
||||
github.com/keybase/go-crypto v0.0.0-20190403132359-d65b6b94177f/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
@@ -441,6 +446,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.0 h1:YNOwxxSJzSUARoD9KRZLz
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU=
|
||||
github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU=
|
||||
github.com/michaelklishin/rabbit-hole v1.5.0 h1:Bex27BiFDsijCM9D0ezSHqyy0kehpYHuNKaPqq/a4RM=
|
||||
github.com/michaelklishin/rabbit-hole v1.5.0/go.mod h1:vvI1uOitYZi0O5HEGXhaWC1XT80Gy+HvFheJ+5Krlhk=
|
||||
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
|
||||
@@ -477,6 +484,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
|
||||
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/nwaples/rardecode v1.0.0 h1:r7vGuS5akxOnR4JQSkko62RJ1ReCMXxQRPtxsiFMBOs=
|
||||
github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
|
||||
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
@@ -606,7 +615,11 @@ github.com/ugorji/go v1.1.2 h1:JON3E2/GPW2iDNGoSAusl1KDf5TRQ8k8q7Tp097pZGs=
|
||||
github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
||||
github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43 h1:BasDe+IErOQKrMVXab7UayvSlIpiyGwRvuX3EKYY7UA=
|
||||
github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA=
|
||||
github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=
|
||||
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
|
||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
|
||||
|
||||
36
vendor/github.com/dsnet/compress/.travis.yml
generated
vendored
Normal file
36
vendor/github.com/dsnet/compress/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
sudo: false
|
||||
language: go
|
||||
before_install:
|
||||
- curl -L https://github.com/google/brotli/archive/v1.0.2.tar.gz | tar -zxv
|
||||
- (cd brotli-1.0.2 && mkdir out && cd out && ../configure-cmake && make && sudo make install)
|
||||
- rm -rf brotli-1.0.2
|
||||
- curl -L https://github.com/facebook/zstd/archive/v1.3.2.tar.gz | tar -zxv
|
||||
- (cd zstd-1.3.2 && sudo make install)
|
||||
- rm -rf zstd-1.3.2
|
||||
- sudo ldconfig
|
||||
- mkdir /tmp/go1.12
|
||||
- curl -L -s https://dl.google.com/go/go1.12.linux-amd64.tar.gz | tar -zxf - -C /tmp/go1.12 --strip-components 1
|
||||
- unset GOROOT
|
||||
- (GO111MODULE=on /tmp/go1.12/bin/go mod vendor)
|
||||
- (cd /tmp && GO111MODULE=on /tmp/go1.12/bin/go get golang.org/x/lint/golint@8f45f776aaf18cebc8d65861cc70c33c60471952)
|
||||
- (cd /tmp && GO111MODULE=on /tmp/go1.12/bin/go get honnef.co/go/tools/cmd/staticcheck@2019.1)
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.9.x
|
||||
script:
|
||||
- go test -v -race ./...
|
||||
- go: 1.10.x
|
||||
script:
|
||||
- go test -v -race ./...
|
||||
- go: 1.11.x
|
||||
script:
|
||||
- go test -v -race ./...
|
||||
- go: 1.12.x
|
||||
script:
|
||||
- ./ztest.sh
|
||||
- go: master
|
||||
script:
|
||||
- go test -v -race ./...
|
||||
allow_failures:
|
||||
- go: master
|
||||
fast_finish: true
|
||||
24
vendor/github.com/dsnet/compress/LICENSE.md
generated
vendored
Normal file
24
vendor/github.com/dsnet/compress/LICENSE.md
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
Copyright © 2015, Joe Tsai and The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
* Neither the copyright holder nor the names of its contributors may be used to
|
||||
endorse or promote products derived from this software without specific prior
|
||||
written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
|
||||
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
75
vendor/github.com/dsnet/compress/README.md
generated
vendored
Normal file
75
vendor/github.com/dsnet/compress/README.md
generated
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
# Collection of compression libraries for Go #
|
||||
|
||||
[](https://godoc.org/github.com/dsnet/compress)
|
||||
[](https://travis-ci.org/dsnet/compress)
|
||||
[](https://goreportcard.com/report/github.com/dsnet/compress)
|
||||
|
||||
## Introduction ##
|
||||
|
||||
**NOTE: This library is in active development. As such, there are no guarantees about the stability of the API. The author reserves the right to arbitrarily break the API for any reason.**
|
||||
|
||||
This repository hosts a collection of compression related libraries. The goal of this project is to provide pure Go implementations for popular compression algorithms beyond what the Go standard library provides. The goals for these packages are as follows:
|
||||
* Maintainable: That the code remains well documented, well tested, readable, easy to maintain, and easy to verify that it conforms to the specification for the format being implemented.
|
||||
* Performant: To be able to compress and decompress within at least 80% of the rates that the C implementations are able to achieve.
|
||||
* Flexible: That the code provides low-level and fine granularity control over the compression streams similar to what the C APIs would provide.
|
||||
|
||||
Of these three, the first objective is often at odds with the other two objectives and provides interesting challenges. Higher performance can often be achieved by muddling abstraction layers or using non-intuitive low-level primitives. Also, more features and functionality, while useful in some situations, often complicates the API. Thus, this package will attempt to satisfy all the goals, but will defer to favoring maintainability when the performance or flexibility benefits are not significant enough.
|
||||
|
||||
|
||||
## Library Status ##
|
||||
|
||||
For the packages available, only some features are currently implemented:
|
||||
|
||||
| Package | Reader | Writer |
|
||||
| ------- | :----: | :----: |
|
||||
| brotli | :white_check_mark: | |
|
||||
| bzip2 | :white_check_mark: | :white_check_mark: |
|
||||
| flate | :white_check_mark: | |
|
||||
| xflate | :white_check_mark: | :white_check_mark: |
|
||||
|
||||
This library is in active development. As such, there are no guarantees about the stability of the API. The author reserves the right to arbitrarily break the API for any reason. When the library becomes more mature, it is planned to eventually conform to some strict versioning scheme like [Semantic Versioning](http://semver.org/).
|
||||
|
||||
However, in the meanwhile, this library does provide some basic API guarantees. For the types defined below, the method signatures are guaranteed to not change. Note that the author still reserves the right to change the fields within each ```Reader``` and ```Writer``` structs.
|
||||
```go
|
||||
type ReaderConfig struct { ... }
|
||||
type Reader struct { ... }
|
||||
func NewReader(io.Reader, *ReaderConfig) (*Reader, error) { ... }
|
||||
func (*Reader) Read([]byte) (int, error) { ... }
|
||||
func (*Reader) Close() error { ... }
|
||||
|
||||
type WriterConfig struct { ... }
|
||||
type Writer struct { ... }
|
||||
func NewWriter(io.Writer, *WriterConfig) (*Writer, error) { ... }
|
||||
func (*Writer) Write([]byte) (int, error) { ... }
|
||||
func (*Writer) Close() error { ... }
|
||||
```
|
||||
|
||||
To see what work still remains, see the [Task List](https://github.com/dsnet/compress/wiki/Task-List).
|
||||
|
||||
## Performance ##
|
||||
|
||||
See [Performance Metrics](https://github.com/dsnet/compress/wiki/Performance-Metrics).
|
||||
|
||||
|
||||
## Frequently Asked Questions ##
|
||||
|
||||
See [Frequently Asked Questions](https://github.com/dsnet/compress/wiki/Frequently-Asked-Questions).
|
||||
|
||||
|
||||
## Installation ##
|
||||
|
||||
Run the command:
|
||||
|
||||
```go get -u github.com/dsnet/compress```
|
||||
|
||||
This library requires `Go1.9` or higher in order to build.
|
||||
|
||||
|
||||
## Packages ##
|
||||
|
||||
| Package | Description |
|
||||
| :------ | :---------- |
|
||||
| [brotli](http://godoc.org/github.com/dsnet/compress/brotli) | Package brotli implements the Brotli format, described in RFC 7932. |
|
||||
| [bzip2](http://godoc.org/github.com/dsnet/compress/bzip2) | Package bzip2 implements the BZip2 compressed data format. |
|
||||
| [flate](http://godoc.org/github.com/dsnet/compress/flate) | Package flate implements the DEFLATE format, described in RFC 1951. |
|
||||
| [xflate](http://godoc.org/github.com/dsnet/compress/xflate) | Package xflate implements the XFLATE format, an random-access extension to DEFLATE. |
|
||||
74
vendor/github.com/dsnet/compress/api.go
generated
vendored
Normal file
74
vendor/github.com/dsnet/compress/api.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// Package compress is a collection of compression libraries.
|
||||
package compress
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
|
||||
"github.com/dsnet/compress/internal/errors"
|
||||
)
|
||||
|
||||
// The Error interface identifies all compression related errors.
|
||||
type Error interface {
|
||||
error
|
||||
CompressError()
|
||||
|
||||
// IsDeprecated reports the use of a deprecated and unsupported feature.
|
||||
IsDeprecated() bool
|
||||
|
||||
// IsCorrupted reports whether the input stream was corrupted.
|
||||
IsCorrupted() bool
|
||||
}
|
||||
|
||||
var _ Error = errors.Error{}
|
||||
|
||||
// ByteReader is an interface accepted by all decompression Readers.
|
||||
// It guarantees that the decompressor never reads more data than is necessary
|
||||
// from the underlying io.Reader.
|
||||
type ByteReader interface {
|
||||
io.Reader
|
||||
io.ByteReader
|
||||
}
|
||||
|
||||
var _ ByteReader = (*bufio.Reader)(nil)
|
||||
|
||||
// BufferedReader is an interface accepted by all decompression Readers.
|
||||
// It guarantees that the decompressor never reads more data than is necessary
|
||||
// from the underlying io.Reader. Since BufferedReader allows a decompressor
|
||||
// to peek at bytes further along in the stream without advancing the read
|
||||
// pointer, decompression can experience a significant performance gain when
|
||||
// provided a reader that satisfies this interface. Thus, a decompressor will
|
||||
// prefer this interface over ByteReader for performance reasons.
|
||||
//
|
||||
// The bufio.Reader satisfies this interface.
|
||||
type BufferedReader interface {
|
||||
io.Reader
|
||||
|
||||
// Buffered returns the number of bytes currently buffered.
|
||||
//
|
||||
// This value becomes invalid following the next Read/Discard operation.
|
||||
Buffered() int
|
||||
|
||||
// Peek returns the next n bytes without advancing the reader.
|
||||
//
|
||||
// If Peek returns fewer than n bytes, it also returns an error explaining
|
||||
// why the peek is short. Peek must support peeking of at least 8 bytes.
|
||||
// If 0 <= n <= Buffered(), Peek is guaranteed to succeed without reading
|
||||
// from the underlying io.Reader.
|
||||
//
|
||||
// This result becomes invalid following the next Read/Discard operation.
|
||||
Peek(n int) ([]byte, error)
|
||||
|
||||
// Discard skips the next n bytes, returning the number of bytes discarded.
|
||||
//
|
||||
// If Discard skips fewer than n bytes, it also returns an error.
|
||||
// If 0 <= n <= Buffered(), Discard is guaranteed to succeed without reading
|
||||
// from the underlying io.Reader.
|
||||
Discard(n int) (int, error)
|
||||
}
|
||||
|
||||
var _ BufferedReader = (*bufio.Reader)(nil)
|
||||
110
vendor/github.com/dsnet/compress/bzip2/bwt.go
generated
vendored
Normal file
110
vendor/github.com/dsnet/compress/bzip2/bwt.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package bzip2
|
||||
|
||||
import "github.com/dsnet/compress/bzip2/internal/sais"
|
||||
|
||||
// The Burrows-Wheeler Transform implementation used here is based on the
|
||||
// Suffix Array by Induced Sorting (SA-IS) methodology by Nong, Zhang, and Chan.
|
||||
// This implementation uses the sais algorithm originally written by Yuta Mori.
|
||||
//
|
||||
// The SA-IS algorithm runs in O(n) and outputs a Suffix Array. There is a
|
||||
// mathematical relationship between Suffix Arrays and the Burrows-Wheeler
|
||||
// Transform, such that a SA can be converted to a BWT in O(n) time.
|
||||
//
|
||||
// References:
|
||||
// http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.pdf
|
||||
// https://github.com/cscott/compressjs/blob/master/lib/BWT.js
|
||||
// https://www.quora.com/How-can-I-optimize-burrows-wheeler-transform-and-inverse-transform-to-work-in-O-n-time-O-n-space
|
||||
type burrowsWheelerTransform struct {
|
||||
buf []byte
|
||||
sa []int
|
||||
perm []uint32
|
||||
}
|
||||
|
||||
func (bwt *burrowsWheelerTransform) Encode(buf []byte) (ptr int) {
|
||||
if len(buf) == 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
// TODO(dsnet): Find a way to avoid the duplicate input string method.
|
||||
// We only need to do this because suffix arrays (by definition) only
|
||||
// operate non-wrapped suffixes of a string. On the other hand,
|
||||
// the BWT specifically used in bzip2 operate on a strings that wrap-around
|
||||
// when being sorted.
|
||||
|
||||
// Step 1: Concatenate the input string to itself so that we can use the
|
||||
// suffix array algorithm for bzip2's variant of BWT.
|
||||
n := len(buf)
|
||||
bwt.buf = append(append(bwt.buf[:0], buf...), buf...)
|
||||
if cap(bwt.sa) < 2*n {
|
||||
bwt.sa = make([]int, 2*n)
|
||||
}
|
||||
t := bwt.buf[:2*n]
|
||||
sa := bwt.sa[:2*n]
|
||||
|
||||
// Step 2: Compute the suffix array (SA). The input string, t, will not be
|
||||
// modified, while the results will be written to the output, sa.
|
||||
sais.ComputeSA(t, sa)
|
||||
|
||||
// Step 3: Convert the SA to a BWT. Since ComputeSA does not mutate the
|
||||
// input, we have two copies of the input; in buf and buf2. Thus, we write
|
||||
// the transformation to buf, while using buf2.
|
||||
var j int
|
||||
buf2 := t[n:]
|
||||
for _, i := range sa {
|
||||
if i < n {
|
||||
if i == 0 {
|
||||
ptr = j
|
||||
i = n
|
||||
}
|
||||
buf[j] = buf2[i-1]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return ptr
|
||||
}
|
||||
|
||||
func (bwt *burrowsWheelerTransform) Decode(buf []byte, ptr int) {
|
||||
if len(buf) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Step 1: Compute cumm, where cumm[ch] reports the total number of
|
||||
// characters that precede the character ch in the alphabet.
|
||||
var cumm [256]int
|
||||
for _, v := range buf {
|
||||
cumm[v]++
|
||||
}
|
||||
var sum int
|
||||
for i, v := range cumm {
|
||||
cumm[i] = sum
|
||||
sum += v
|
||||
}
|
||||
|
||||
// Step 2: Compute perm, where perm[ptr] contains a pointer to the next
|
||||
// byte in buf and the next pointer in perm itself.
|
||||
if cap(bwt.perm) < len(buf) {
|
||||
bwt.perm = make([]uint32, len(buf))
|
||||
}
|
||||
perm := bwt.perm[:len(buf)]
|
||||
for i, b := range buf {
|
||||
perm[cumm[b]] = uint32(i)
|
||||
cumm[b]++
|
||||
}
|
||||
|
||||
// Step 3: Follow each pointer in perm to the next byte, starting with the
|
||||
// origin pointer.
|
||||
if cap(bwt.buf) < len(buf) {
|
||||
bwt.buf = make([]byte, len(buf))
|
||||
}
|
||||
buf2 := bwt.buf[:len(buf)]
|
||||
i := perm[ptr]
|
||||
for j := range buf2 {
|
||||
buf2[j] = buf[i]
|
||||
i = perm[i]
|
||||
}
|
||||
copy(buf, buf2)
|
||||
}
|
||||
110
vendor/github.com/dsnet/compress/bzip2/common.go
generated
vendored
Normal file
110
vendor/github.com/dsnet/compress/bzip2/common.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// Package bzip2 implements the BZip2 compressed data format.
|
||||
//
|
||||
// Canonical C implementation:
|
||||
// http://bzip.org
|
||||
//
|
||||
// Unofficial format specification:
|
||||
// https://github.com/dsnet/compress/blob/master/doc/bzip2-format.pdf
|
||||
package bzip2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
|
||||
"github.com/dsnet/compress/internal"
|
||||
"github.com/dsnet/compress/internal/errors"
|
||||
)
|
||||
|
||||
// There does not exist a formal specification of the BZip2 format. As such,
|
||||
// much of this work is derived by either reverse engineering the original C
|
||||
// source code or using secondary sources.
|
||||
//
|
||||
// Significant amounts of fuzz testing is done to ensure that outputs from
|
||||
// this package is properly decoded by the C library. Furthermore, we test that
|
||||
// both this package and the C library agree about what inputs are invalid.
|
||||
//
|
||||
// Compression stack:
|
||||
// Run-length encoding 1 (RLE1)
|
||||
// Burrows-Wheeler transform (BWT)
|
||||
// Move-to-front transform (MTF)
|
||||
// Run-length encoding 2 (RLE2)
|
||||
// Prefix encoding (PE)
|
||||
//
|
||||
// References:
|
||||
// http://bzip.org/
|
||||
// https://en.wikipedia.org/wiki/Bzip2
|
||||
// https://code.google.com/p/jbzip2/
|
||||
|
||||
const (
|
||||
BestSpeed = 1
|
||||
BestCompression = 9
|
||||
DefaultCompression = 6
|
||||
)
|
||||
|
||||
const (
|
||||
hdrMagic = 0x425a // Hex of "BZ"
|
||||
blkMagic = 0x314159265359 // BCD of PI
|
||||
endMagic = 0x177245385090 // BCD of sqrt(PI)
|
||||
|
||||
blockSize = 100000
|
||||
)
|
||||
|
||||
func errorf(c int, f string, a ...interface{}) error {
|
||||
return errors.Error{Code: c, Pkg: "bzip2", Msg: fmt.Sprintf(f, a...)}
|
||||
}
|
||||
|
||||
func panicf(c int, f string, a ...interface{}) {
|
||||
errors.Panic(errorf(c, f, a...))
|
||||
}
|
||||
|
||||
// errWrap converts a lower-level errors.Error to be one from this package.
|
||||
// The replaceCode passed in will be used to replace the code for any errors
|
||||
// with the errors.Invalid code.
|
||||
//
|
||||
// For the Reader, set this to errors.Corrupted.
|
||||
// For the Writer, set this to errors.Internal.
|
||||
func errWrap(err error, replaceCode int) error {
|
||||
if cerr, ok := err.(errors.Error); ok {
|
||||
if errors.IsInvalid(cerr) {
|
||||
cerr.Code = replaceCode
|
||||
}
|
||||
err = errorf(cerr.Code, "%s", cerr.Msg)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
var errClosed = errorf(errors.Closed, "")
|
||||
|
||||
// crc computes the CRC-32 used by BZip2.
|
||||
//
|
||||
// The CRC-32 computation in bzip2 treats bytes as having bits in big-endian
|
||||
// order. That is, the MSB is read before the LSB. Thus, we can use the
|
||||
// standard library version of CRC-32 IEEE with some minor adjustments.
|
||||
//
|
||||
// The byte array is used as an intermediate buffer to swap the bits of every
|
||||
// byte of the input.
|
||||
type crc struct {
|
||||
val uint32
|
||||
buf [256]byte
|
||||
}
|
||||
|
||||
// update computes the CRC-32 of appending buf to c.
|
||||
func (c *crc) update(buf []byte) {
|
||||
cval := internal.ReverseUint32(c.val)
|
||||
for len(buf) > 0 {
|
||||
n := len(buf)
|
||||
if n > len(c.buf) {
|
||||
n = len(c.buf)
|
||||
}
|
||||
for i, b := range buf[:n] {
|
||||
c.buf[i] = internal.ReverseLUT[b]
|
||||
}
|
||||
cval = crc32.Update(cval, crc32.IEEETable, c.buf[:n])
|
||||
buf = buf[n:]
|
||||
}
|
||||
c.val = internal.ReverseUint32(cval)
|
||||
}
|
||||
13
vendor/github.com/dsnet/compress/bzip2/fuzz_off.go
generated
vendored
Normal file
13
vendor/github.com/dsnet/compress/bzip2/fuzz_off.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright 2016, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build !gofuzz
|
||||
|
||||
// This file exists to suppress fuzzing details from release builds.
|
||||
|
||||
package bzip2
|
||||
|
||||
type fuzzReader struct{}
|
||||
|
||||
func (*fuzzReader) updateChecksum(int64, uint32) {}
|
||||
77
vendor/github.com/dsnet/compress/bzip2/fuzz_on.go
generated
vendored
Normal file
77
vendor/github.com/dsnet/compress/bzip2/fuzz_on.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright 2016, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build gofuzz
|
||||
|
||||
// This file exists to export internal implementation details for fuzz testing.
|
||||
|
||||
package bzip2
|
||||
|
||||
func ForwardBWT(buf []byte) (ptr int) {
|
||||
var bwt burrowsWheelerTransform
|
||||
return bwt.Encode(buf)
|
||||
}
|
||||
|
||||
func ReverseBWT(buf []byte, ptr int) {
|
||||
var bwt burrowsWheelerTransform
|
||||
bwt.Decode(buf, ptr)
|
||||
}
|
||||
|
||||
type fuzzReader struct {
|
||||
Checksums Checksums
|
||||
}
|
||||
|
||||
// updateChecksum updates Checksums.
|
||||
//
|
||||
// If a valid pos is provided, it appends the (pos, val) pair to the slice.
|
||||
// Otherwise, it will update the last record with the new value.
|
||||
func (fr *fuzzReader) updateChecksum(pos int64, val uint32) {
|
||||
if pos >= 0 {
|
||||
fr.Checksums = append(fr.Checksums, Checksum{pos, val})
|
||||
} else {
|
||||
fr.Checksums[len(fr.Checksums)-1].Value = val
|
||||
}
|
||||
}
|
||||
|
||||
type Checksum struct {
|
||||
Offset int64 // Bit offset of the checksum
|
||||
Value uint32 // Checksum value
|
||||
}
|
||||
|
||||
type Checksums []Checksum
|
||||
|
||||
// Apply overwrites all checksum fields in d with the ones in cs.
|
||||
func (cs Checksums) Apply(d []byte) []byte {
|
||||
d = append([]byte(nil), d...)
|
||||
for _, c := range cs {
|
||||
setU32(d, c.Offset, c.Value)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func setU32(d []byte, pos int64, val uint32) {
|
||||
for i := uint(0); i < 32; i++ {
|
||||
bpos := uint64(pos) + uint64(i)
|
||||
d[bpos/8] &= ^byte(1 << (7 - bpos%8))
|
||||
d[bpos/8] |= byte(val>>(31-i)) << (7 - bpos%8)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify checks that all checksum fields in d matches those in cs.
|
||||
func (cs Checksums) Verify(d []byte) bool {
|
||||
for _, c := range cs {
|
||||
if getU32(d, c.Offset) != c.Value {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func getU32(d []byte, pos int64) (val uint32) {
|
||||
for i := uint(0); i < 32; i++ {
|
||||
bpos := uint64(pos) + uint64(i)
|
||||
val |= (uint32(d[bpos/8] >> (7 - bpos%8))) << (31 - i)
|
||||
}
|
||||
return val
|
||||
}
|
||||
28
vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go
generated
vendored
Normal file
28
vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// Package sais implements a linear time suffix array algorithm.
|
||||
package sais
|
||||
|
||||
//go:generate go run sais_gen.go byte sais_byte.go
|
||||
//go:generate go run sais_gen.go int sais_int.go
|
||||
|
||||
// This package ports the C sais implementation by Yuta Mori. The ports are
|
||||
// located in sais_byte.go and sais_int.go, which are identical to each other
|
||||
// except for the types. Since Go does not support generics, we use generators to
|
||||
// create the two files.
|
||||
//
|
||||
// References:
|
||||
// https://sites.google.com/site/yuta256/sais
|
||||
// https://www.researchgate.net/publication/221313676_Linear_Time_Suffix_Array_Construction_Using_D-Critical_Substrings
|
||||
// https://www.researchgate.net/publication/224176324_Two_Efficient_Algorithms_for_Linear_Time_Suffix_Array_Construction
|
||||
|
||||
// ComputeSA computes the suffix array of t and places the result in sa.
|
||||
// Both t and sa must be the same length.
|
||||
func ComputeSA(t []byte, sa []int) {
|
||||
if len(sa) != len(t) {
|
||||
panic("mismatching sizes")
|
||||
}
|
||||
computeSA_byte(t, sa, 0, len(t), 256)
|
||||
}
|
||||
661
vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go
generated
vendored
Normal file
661
vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go
generated
vendored
Normal file
@@ -0,0 +1,661 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// Code generated by sais_gen.go. DO NOT EDIT.
|
||||
|
||||
// ====================================================
|
||||
// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person
|
||||
// obtaining a copy of this software and associated documentation
|
||||
// files (the "Software"), to deal in the Software without
|
||||
// restriction, including without limitation the rights to use,
|
||||
// copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be
|
||||
// included in all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
// OTHER DEALINGS IN THE SOFTWARE.
|
||||
// ====================================================
|
||||
|
||||
package sais
|
||||
|
||||
func getCounts_byte(T []byte, C []int, n, k int) {
|
||||
var i int
|
||||
for i = 0; i < k; i++ {
|
||||
C[i] = 0
|
||||
}
|
||||
for i = 0; i < n; i++ {
|
||||
C[T[i]]++
|
||||
}
|
||||
}
|
||||
|
||||
func getBuckets_byte(C, B []int, k int, end bool) {
|
||||
var i, sum int
|
||||
if end {
|
||||
for i = 0; i < k; i++ {
|
||||
sum += C[i]
|
||||
B[i] = sum
|
||||
}
|
||||
} else {
|
||||
for i = 0; i < k; i++ {
|
||||
sum += C[i]
|
||||
B[i] = sum - C[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sortLMS1_byte(T []byte, SA, C, B []int, n, k int) {
|
||||
var b, i, j int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_byte(T, C, n, k)
|
||||
}
|
||||
getBuckets_byte(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i = 0; i < n; i++ {
|
||||
if j = SA[i]; j > 0 {
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
SA[i] = 0
|
||||
} else if j < 0 {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_byte(T, C, n, k)
|
||||
}
|
||||
getBuckets_byte(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i = n - 1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
b--
|
||||
if int(T[j]) > c1 {
|
||||
SA[b] = ^(j + 1)
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
SA[i] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postProcLMS1_byte(T []byte, SA []int, n, m int) int {
|
||||
var i, j, p, q, plen, qlen, name int
|
||||
var c0, c1 int
|
||||
var diff bool
|
||||
|
||||
// Compact all the sorted substrings into the first m items of SA.
|
||||
// 2*m must be not larger than n (provable).
|
||||
for i = 0; SA[i] < 0; i++ {
|
||||
SA[i] = ^SA[i]
|
||||
}
|
||||
if i < m {
|
||||
for j, i = i, i+1; ; i++ {
|
||||
if p = SA[i]; p < 0 {
|
||||
SA[j] = ^p
|
||||
j++
|
||||
SA[i] = 0
|
||||
if j == m {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Store the length of all substrings.
|
||||
i = n - 1
|
||||
j = n - 1
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
SA[m+((i+1)>>1)] = j - i
|
||||
j = i + 1
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find the lexicographic names of all substrings.
|
||||
name = 0
|
||||
qlen = 0
|
||||
for i, q = 0, n; i < m; i++ {
|
||||
p = SA[i]
|
||||
plen = SA[m+(p>>1)]
|
||||
diff = true
|
||||
if (plen == qlen) && ((q + plen) < n) {
|
||||
for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ {
|
||||
}
|
||||
if j == plen {
|
||||
diff = false
|
||||
}
|
||||
}
|
||||
if diff {
|
||||
name++
|
||||
q = p
|
||||
qlen = plen
|
||||
}
|
||||
SA[m+(p>>1)] = name
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func sortLMS2_byte(T []byte, SA, C, B, D []int, n, k int) {
|
||||
var b, i, j, t, d int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
getBuckets_byte(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
t = 1
|
||||
} else {
|
||||
t = 0
|
||||
}
|
||||
j += n
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i, d = 0, 0; i < n; i++ {
|
||||
if j = SA[i]; j > 0 {
|
||||
if n <= j {
|
||||
d += 1
|
||||
j -= n
|
||||
}
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
t = int(c0) << 1
|
||||
if int(T[j]) < c1 {
|
||||
t |= 1
|
||||
}
|
||||
if D[t] != d {
|
||||
j += n
|
||||
D[t] = d
|
||||
}
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
SA[i] = 0
|
||||
} else if j < 0 {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
for i = n - 1; 0 <= i; i-- {
|
||||
if SA[i] > 0 {
|
||||
if SA[i] < n {
|
||||
SA[i] += n
|
||||
for j = i - 1; SA[j] < n; j-- {
|
||||
}
|
||||
SA[j] -= n
|
||||
i = j
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
getBuckets_byte(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i, d = n-1, d+1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
if n <= j {
|
||||
d += 1
|
||||
j -= n
|
||||
}
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
t = int(c0) << 1
|
||||
if int(T[j]) > c1 {
|
||||
t |= 1
|
||||
}
|
||||
if D[t] != d {
|
||||
j += n
|
||||
D[t] = d
|
||||
}
|
||||
b--
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^(j + 1)
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
SA[i] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postProcLMS2_byte(SA []int, n, m int) int {
|
||||
var i, j, d, name int
|
||||
|
||||
// Compact all the sorted LMS substrings into the first m items of SA.
|
||||
name = 0
|
||||
for i = 0; SA[i] < 0; i++ {
|
||||
j = ^SA[i]
|
||||
if n <= j {
|
||||
name += 1
|
||||
}
|
||||
SA[i] = j
|
||||
}
|
||||
if i < m {
|
||||
for d, i = i, i+1; ; i++ {
|
||||
if j = SA[i]; j < 0 {
|
||||
j = ^j
|
||||
if n <= j {
|
||||
name += 1
|
||||
}
|
||||
SA[d] = j
|
||||
d++
|
||||
SA[i] = 0
|
||||
if d == m {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if name < m {
|
||||
// Store the lexicographic names.
|
||||
for i, d = m-1, name+1; 0 <= i; i-- {
|
||||
if j = SA[i]; n <= j {
|
||||
j -= n
|
||||
d--
|
||||
}
|
||||
SA[m+(j>>1)] = d
|
||||
}
|
||||
} else {
|
||||
// Unset flags.
|
||||
for i = 0; i < m; i++ {
|
||||
if j = SA[i]; n <= j {
|
||||
j -= n
|
||||
SA[i] = j
|
||||
}
|
||||
}
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func induceSA_byte(T []byte, SA, C, B []int, n, k int) {
|
||||
var b, i, j int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_byte(T, C, n, k)
|
||||
}
|
||||
getBuckets_byte(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
if j > 0 && int(T[j-1]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i = 0; i < n; i++ {
|
||||
j = SA[i]
|
||||
SA[i] = ^j
|
||||
if j > 0 {
|
||||
j--
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
if j > 0 && int(T[j-1]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_byte(T, C, n, k)
|
||||
}
|
||||
getBuckets_byte(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i = n - 1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
j--
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
b--
|
||||
if (j == 0) || (int(T[j-1]) > c1) {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
} else {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func computeSA_byte(T []byte, SA []int, fs, n, k int) {
|
||||
const (
|
||||
minBucketSize = 512
|
||||
sortLMS2Limit = 0x3fffffff
|
||||
)
|
||||
|
||||
var C, B, D, RA []int
|
||||
var bo int // Offset of B relative to SA
|
||||
var b, i, j, m, p, q, name, newfs int
|
||||
var c0, c1 int
|
||||
var flags uint
|
||||
|
||||
if k <= minBucketSize {
|
||||
C = make([]int, k)
|
||||
if k <= fs {
|
||||
bo = n + fs - k
|
||||
B = SA[bo:]
|
||||
flags = 1
|
||||
} else {
|
||||
B = make([]int, k)
|
||||
flags = 3
|
||||
}
|
||||
} else if k <= fs {
|
||||
C = SA[n+fs-k:]
|
||||
if k <= fs-k {
|
||||
bo = n + fs - 2*k
|
||||
B = SA[bo:]
|
||||
flags = 0
|
||||
} else if k <= 4*minBucketSize {
|
||||
B = make([]int, k)
|
||||
flags = 2
|
||||
} else {
|
||||
B = C
|
||||
flags = 8
|
||||
}
|
||||
} else {
|
||||
C = make([]int, k)
|
||||
B = C
|
||||
flags = 4 | 8
|
||||
}
|
||||
if n <= sortLMS2Limit && 2 <= (n/k) {
|
||||
if flags&1 > 0 {
|
||||
if 2*k <= fs-k {
|
||||
flags |= 32
|
||||
} else {
|
||||
flags |= 16
|
||||
}
|
||||
} else if flags == 0 && 2*k <= (fs-2*k) {
|
||||
flags |= 32
|
||||
}
|
||||
}
|
||||
|
||||
// Stage 1: Reduce the problem by at least 1/2.
|
||||
// Sort all the LMS-substrings.
|
||||
getCounts_byte(T, C, n, k)
|
||||
getBuckets_byte(C, B, k, true) // Find ends of buckets
|
||||
for i = 0; i < n; i++ {
|
||||
SA[i] = 0
|
||||
}
|
||||
b = -1
|
||||
i = n - 1
|
||||
j = n
|
||||
m = 0
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
if b >= 0 {
|
||||
SA[b] = j
|
||||
}
|
||||
B[c1]--
|
||||
b = B[c1]
|
||||
j = i
|
||||
m++
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if m > 1 {
|
||||
if flags&(16|32) > 0 {
|
||||
if flags&16 > 0 {
|
||||
D = make([]int, 2*k)
|
||||
} else {
|
||||
D = SA[bo-2*k:]
|
||||
}
|
||||
B[T[j+1]]++
|
||||
for i, j = 0, 0; i < k; i++ {
|
||||
j += C[i]
|
||||
if B[i] != j {
|
||||
SA[B[i]] += n
|
||||
}
|
||||
D[i] = 0
|
||||
D[i+k] = 0
|
||||
}
|
||||
sortLMS2_byte(T, SA, C, B, D, n, k)
|
||||
name = postProcLMS2_byte(SA, n, m)
|
||||
} else {
|
||||
sortLMS1_byte(T, SA, C, B, n, k)
|
||||
name = postProcLMS1_byte(T, SA, n, m)
|
||||
}
|
||||
} else if m == 1 {
|
||||
SA[b] = j + 1
|
||||
name = 1
|
||||
} else {
|
||||
name = 0
|
||||
}
|
||||
|
||||
// Stage 2: Solve the reduced problem.
|
||||
// Recurse if names are not yet unique.
|
||||
if name < m {
|
||||
newfs = n + fs - 2*m
|
||||
if flags&(1|4|8) == 0 {
|
||||
if k+name <= newfs {
|
||||
newfs -= k
|
||||
} else {
|
||||
flags |= 8
|
||||
}
|
||||
}
|
||||
RA = SA[m+newfs:]
|
||||
for i, j = m+(n>>1)-1, m-1; m <= i; i-- {
|
||||
if SA[i] != 0 {
|
||||
RA[j] = SA[i] - 1
|
||||
j--
|
||||
}
|
||||
}
|
||||
computeSA_int(RA, SA, newfs, m, name)
|
||||
|
||||
i = n - 1
|
||||
j = m - 1
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
RA[j] = i + 1
|
||||
j--
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for i = 0; i < m; i++ {
|
||||
SA[i] = RA[SA[i]]
|
||||
}
|
||||
if flags&4 > 0 {
|
||||
B = make([]int, k)
|
||||
C = B
|
||||
}
|
||||
if flags&2 > 0 {
|
||||
B = make([]int, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Stage 3: Induce the result for the original problem.
|
||||
if flags&8 > 0 {
|
||||
getCounts_byte(T, C, n, k)
|
||||
}
|
||||
// Put all left-most S characters into their buckets.
|
||||
if m > 1 {
|
||||
getBuckets_byte(C, B, k, true) // Find ends of buckets
|
||||
i = m - 1
|
||||
j = n
|
||||
p = SA[m-1]
|
||||
c1 = int(T[p])
|
||||
for {
|
||||
c0 = c1
|
||||
q = B[c0]
|
||||
for q < j {
|
||||
j--
|
||||
SA[j] = 0
|
||||
}
|
||||
for {
|
||||
j--
|
||||
SA[j] = p
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
p = SA[i]
|
||||
if c1 = int(T[p]); c1 != c0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for j > 0 {
|
||||
j--
|
||||
SA[j] = 0
|
||||
}
|
||||
}
|
||||
induceSA_byte(T, SA, C, B, n, k)
|
||||
}
|
||||
703
vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go
generated
vendored
Normal file
703
vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_gen.go
generated
vendored
Normal file
@@ -0,0 +1,703 @@
|
||||
// Copyright 2017, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/format"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) != 3 {
|
||||
log.Fatalf("Usage: %s GO_TYPE OUTPUT_FILE", os.Args[0])
|
||||
}
|
||||
typ := os.Args[1]
|
||||
path := os.Args[2]
|
||||
|
||||
b := new(bytes.Buffer)
|
||||
t := template.Must(template.New("source").Parse(source))
|
||||
if err := t.Execute(b, struct {
|
||||
Type, GeneratedMessage string
|
||||
}{typ, "// Code generated by sais_gen.go. DO NOT EDIT."}); err != nil {
|
||||
log.Fatalf("Template.Execute error: %v", err)
|
||||
}
|
||||
out, err := format.Source(bytes.TrimSpace(b.Bytes()))
|
||||
if err != nil {
|
||||
log.Fatalf("format.Source error: %v", err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path, out, 0644); err != nil {
|
||||
log.Fatalf("ioutil.WriteFile error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
const source = `
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
{{.GeneratedMessage}}
|
||||
|
||||
// ====================================================
|
||||
// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person
|
||||
// obtaining a copy of this software and associated documentation
|
||||
// files (the "Software"), to deal in the Software without
|
||||
// restriction, including without limitation the rights to use,
|
||||
// copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be
|
||||
// included in all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
// OTHER DEALINGS IN THE SOFTWARE.
|
||||
// ====================================================
|
||||
|
||||
package sais
|
||||
|
||||
func getCounts_{{.Type}}(T []{{.Type}}, C []int, n, k int) {
|
||||
var i int
|
||||
for i = 0; i < k; i++ {
|
||||
C[i] = 0
|
||||
}
|
||||
for i = 0; i < n; i++ {
|
||||
C[T[i]]++
|
||||
}
|
||||
}
|
||||
|
||||
func getBuckets_{{.Type}}(C, B []int, k int, end bool) {
|
||||
var i, sum int
|
||||
if end {
|
||||
for i = 0; i < k; i++ {
|
||||
sum += C[i]
|
||||
B[i] = sum
|
||||
}
|
||||
} else {
|
||||
for i = 0; i < k; i++ {
|
||||
sum += C[i]
|
||||
B[i] = sum - C[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sortLMS1_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) {
|
||||
var b, i, j int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i = 0; i < n; i++ {
|
||||
if j = SA[i]; j > 0 {
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
SA[i] = 0
|
||||
} else if j < 0 {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i = n - 1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
b--
|
||||
if int(T[j]) > c1 {
|
||||
SA[b] = ^(j + 1)
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
SA[i] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postProcLMS1_{{.Type}}(T []{{.Type}}, SA []int, n, m int) int {
|
||||
var i, j, p, q, plen, qlen, name int
|
||||
var c0, c1 int
|
||||
var diff bool
|
||||
|
||||
// Compact all the sorted substrings into the first m items of SA.
|
||||
// 2*m must be not larger than n (provable).
|
||||
for i = 0; SA[i] < 0; i++ {
|
||||
SA[i] = ^SA[i]
|
||||
}
|
||||
if i < m {
|
||||
for j, i = i, i+1; ; i++ {
|
||||
if p = SA[i]; p < 0 {
|
||||
SA[j] = ^p
|
||||
j++
|
||||
SA[i] = 0
|
||||
if j == m {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Store the length of all substrings.
|
||||
i = n - 1
|
||||
j = n - 1
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
SA[m+((i+1)>>1)] = j - i
|
||||
j = i + 1
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find the lexicographic names of all substrings.
|
||||
name = 0
|
||||
qlen = 0
|
||||
for i, q = 0, n; i < m; i++ {
|
||||
p = SA[i]
|
||||
plen = SA[m+(p>>1)]
|
||||
diff = true
|
||||
if (plen == qlen) && ((q + plen) < n) {
|
||||
for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ {
|
||||
}
|
||||
if j == plen {
|
||||
diff = false
|
||||
}
|
||||
}
|
||||
if diff {
|
||||
name++
|
||||
q = p
|
||||
qlen = plen
|
||||
}
|
||||
SA[m+(p>>1)] = name
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func sortLMS2_{{.Type}}(T []{{.Type}}, SA, C, B, D []int, n, k int) {
|
||||
var b, i, j, t, d int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
t = 1
|
||||
} else {
|
||||
t = 0
|
||||
}
|
||||
j += n
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i, d = 0, 0; i < n; i++ {
|
||||
if j = SA[i]; j > 0 {
|
||||
if n <= j {
|
||||
d += 1
|
||||
j -= n
|
||||
}
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
t = int(c0) << 1
|
||||
if int(T[j]) < c1 {
|
||||
t |= 1
|
||||
}
|
||||
if D[t] != d {
|
||||
j += n
|
||||
D[t] = d
|
||||
}
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
SA[i] = 0
|
||||
} else if j < 0 {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
for i = n - 1; 0 <= i; i-- {
|
||||
if SA[i] > 0 {
|
||||
if SA[i] < n {
|
||||
SA[i] += n
|
||||
for j = i - 1; SA[j] < n; j-- {
|
||||
}
|
||||
SA[j] -= n
|
||||
i = j
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i, d = n-1, d+1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
if n <= j {
|
||||
d += 1
|
||||
j -= n
|
||||
}
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
t = int(c0) << 1
|
||||
if int(T[j]) > c1 {
|
||||
t |= 1
|
||||
}
|
||||
if D[t] != d {
|
||||
j += n
|
||||
D[t] = d
|
||||
}
|
||||
b--
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^(j + 1)
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
SA[i] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postProcLMS2_{{.Type}}(SA []int, n, m int) int {
|
||||
var i, j, d, name int
|
||||
|
||||
// Compact all the sorted LMS substrings into the first m items of SA.
|
||||
name = 0
|
||||
for i = 0; SA[i] < 0; i++ {
|
||||
j = ^SA[i]
|
||||
if n <= j {
|
||||
name += 1
|
||||
}
|
||||
SA[i] = j
|
||||
}
|
||||
if i < m {
|
||||
for d, i = i, i+1; ; i++ {
|
||||
if j = SA[i]; j < 0 {
|
||||
j = ^j
|
||||
if n <= j {
|
||||
name += 1
|
||||
}
|
||||
SA[d] = j
|
||||
d++
|
||||
SA[i] = 0
|
||||
if d == m {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if name < m {
|
||||
// Store the lexicographic names.
|
||||
for i, d = m-1, name+1; 0 <= i; i-- {
|
||||
if j = SA[i]; n <= j {
|
||||
j -= n
|
||||
d--
|
||||
}
|
||||
SA[m+(j>>1)] = d
|
||||
}
|
||||
} else {
|
||||
// Unset flags.
|
||||
for i = 0; i < m; i++ {
|
||||
if j = SA[i]; n <= j {
|
||||
j -= n
|
||||
SA[i] = j
|
||||
}
|
||||
}
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func induceSA_{{.Type}}(T []{{.Type}}, SA, C, B []int, n, k int) {
|
||||
var b, i, j int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
getBuckets_{{.Type}}(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
if j > 0 && int(T[j-1]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i = 0; i < n; i++ {
|
||||
j = SA[i]
|
||||
SA[i] = ^j
|
||||
if j > 0 {
|
||||
j--
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
if j > 0 && int(T[j-1]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i = n - 1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
j--
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
b--
|
||||
if (j == 0) || (int(T[j-1]) > c1) {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
} else {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func computeSA_{{.Type}}(T []{{.Type}}, SA []int, fs, n, k int) {
|
||||
const (
|
||||
minBucketSize = 512
|
||||
sortLMS2Limit = 0x3fffffff
|
||||
)
|
||||
|
||||
var C, B, D, RA []int
|
||||
var bo int // Offset of B relative to SA
|
||||
var b, i, j, m, p, q, name, newfs int
|
||||
var c0, c1 int
|
||||
var flags uint
|
||||
|
||||
if k <= minBucketSize {
|
||||
C = make([]int, k)
|
||||
if k <= fs {
|
||||
bo = n + fs - k
|
||||
B = SA[bo:]
|
||||
flags = 1
|
||||
} else {
|
||||
B = make([]int, k)
|
||||
flags = 3
|
||||
}
|
||||
} else if k <= fs {
|
||||
C = SA[n+fs-k:]
|
||||
if k <= fs-k {
|
||||
bo = n + fs - 2*k
|
||||
B = SA[bo:]
|
||||
flags = 0
|
||||
} else if k <= 4*minBucketSize {
|
||||
B = make([]int, k)
|
||||
flags = 2
|
||||
} else {
|
||||
B = C
|
||||
flags = 8
|
||||
}
|
||||
} else {
|
||||
C = make([]int, k)
|
||||
B = C
|
||||
flags = 4 | 8
|
||||
}
|
||||
if n <= sortLMS2Limit && 2 <= (n/k) {
|
||||
if flags&1 > 0 {
|
||||
if 2*k <= fs-k {
|
||||
flags |= 32
|
||||
} else {
|
||||
flags |= 16
|
||||
}
|
||||
} else if flags == 0 && 2*k <= (fs-2*k) {
|
||||
flags |= 32
|
||||
}
|
||||
}
|
||||
|
||||
// Stage 1: Reduce the problem by at least 1/2.
|
||||
// Sort all the LMS-substrings.
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
for i = 0; i < n; i++ {
|
||||
SA[i] = 0
|
||||
}
|
||||
b = -1
|
||||
i = n - 1
|
||||
j = n
|
||||
m = 0
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
if b >= 0 {
|
||||
SA[b] = j
|
||||
}
|
||||
B[c1]--
|
||||
b = B[c1]
|
||||
j = i
|
||||
m++
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if m > 1 {
|
||||
if flags&(16|32) > 0 {
|
||||
if flags&16 > 0 {
|
||||
D = make([]int, 2*k)
|
||||
} else {
|
||||
D = SA[bo-2*k:]
|
||||
}
|
||||
B[T[j+1]]++
|
||||
for i, j = 0, 0; i < k; i++ {
|
||||
j += C[i]
|
||||
if B[i] != j {
|
||||
SA[B[i]] += n
|
||||
}
|
||||
D[i] = 0
|
||||
D[i+k] = 0
|
||||
}
|
||||
sortLMS2_{{.Type}}(T, SA, C, B, D, n, k)
|
||||
name = postProcLMS2_{{.Type}}(SA, n, m)
|
||||
} else {
|
||||
sortLMS1_{{.Type}}(T, SA, C, B, n, k)
|
||||
name = postProcLMS1_{{.Type}}(T, SA, n, m)
|
||||
}
|
||||
} else if m == 1 {
|
||||
SA[b] = j + 1
|
||||
name = 1
|
||||
} else {
|
||||
name = 0
|
||||
}
|
||||
|
||||
// Stage 2: Solve the reduced problem.
|
||||
// Recurse if names are not yet unique.
|
||||
if name < m {
|
||||
newfs = n + fs - 2*m
|
||||
if flags&(1|4|8) == 0 {
|
||||
if k+name <= newfs {
|
||||
newfs -= k
|
||||
} else {
|
||||
flags |= 8
|
||||
}
|
||||
}
|
||||
RA = SA[m+newfs:]
|
||||
for i, j = m+(n>>1)-1, m-1; m <= i; i-- {
|
||||
if SA[i] != 0 {
|
||||
RA[j] = SA[i] - 1
|
||||
j--
|
||||
}
|
||||
}
|
||||
computeSA_int(RA, SA, newfs, m, name)
|
||||
|
||||
i = n - 1
|
||||
j = m - 1
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
RA[j] = i + 1
|
||||
j--
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for i = 0; i < m; i++ {
|
||||
SA[i] = RA[SA[i]]
|
||||
}
|
||||
if flags&4 > 0 {
|
||||
B = make([]int, k)
|
||||
C = B
|
||||
}
|
||||
if flags&2 > 0 {
|
||||
B = make([]int, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Stage 3: Induce the result for the original problem.
|
||||
if flags&8 > 0 {
|
||||
getCounts_{{.Type}}(T, C, n, k)
|
||||
}
|
||||
// Put all left-most S characters into their buckets.
|
||||
if m > 1 {
|
||||
getBuckets_{{.Type}}(C, B, k, true) // Find ends of buckets
|
||||
i = m - 1
|
||||
j = n
|
||||
p = SA[m-1]
|
||||
c1 = int(T[p])
|
||||
for {
|
||||
c0 = c1
|
||||
q = B[c0]
|
||||
for q < j {
|
||||
j--
|
||||
SA[j] = 0
|
||||
}
|
||||
for {
|
||||
j--
|
||||
SA[j] = p
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
p = SA[i]
|
||||
if c1 = int(T[p]); c1 != c0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for j > 0 {
|
||||
j--
|
||||
SA[j] = 0
|
||||
}
|
||||
}
|
||||
induceSA_{{.Type}}(T, SA, C, B, n, k)
|
||||
}
|
||||
`
|
||||
661
vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go
generated
vendored
Normal file
661
vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go
generated
vendored
Normal file
@@ -0,0 +1,661 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// Code generated by sais_gen.go. DO NOT EDIT.
|
||||
|
||||
// ====================================================
|
||||
// Copyright (c) 2008-2010 Yuta Mori All Rights Reserved.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person
|
||||
// obtaining a copy of this software and associated documentation
|
||||
// files (the "Software"), to deal in the Software without
|
||||
// restriction, including without limitation the rights to use,
|
||||
// copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be
|
||||
// included in all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
// OTHER DEALINGS IN THE SOFTWARE.
|
||||
// ====================================================
|
||||
|
||||
package sais
|
||||
|
||||
func getCounts_int(T []int, C []int, n, k int) {
|
||||
var i int
|
||||
for i = 0; i < k; i++ {
|
||||
C[i] = 0
|
||||
}
|
||||
for i = 0; i < n; i++ {
|
||||
C[T[i]]++
|
||||
}
|
||||
}
|
||||
|
||||
func getBuckets_int(C, B []int, k int, end bool) {
|
||||
var i, sum int
|
||||
if end {
|
||||
for i = 0; i < k; i++ {
|
||||
sum += C[i]
|
||||
B[i] = sum
|
||||
}
|
||||
} else {
|
||||
for i = 0; i < k; i++ {
|
||||
sum += C[i]
|
||||
B[i] = sum - C[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sortLMS1_int(T []int, SA, C, B []int, n, k int) {
|
||||
var b, i, j int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_int(T, C, n, k)
|
||||
}
|
||||
getBuckets_int(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i = 0; i < n; i++ {
|
||||
if j = SA[i]; j > 0 {
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
SA[i] = 0
|
||||
} else if j < 0 {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_int(T, C, n, k)
|
||||
}
|
||||
getBuckets_int(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i = n - 1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
b--
|
||||
if int(T[j]) > c1 {
|
||||
SA[b] = ^(j + 1)
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
SA[i] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postProcLMS1_int(T []int, SA []int, n, m int) int {
|
||||
var i, j, p, q, plen, qlen, name int
|
||||
var c0, c1 int
|
||||
var diff bool
|
||||
|
||||
// Compact all the sorted substrings into the first m items of SA.
|
||||
// 2*m must be not larger than n (provable).
|
||||
for i = 0; SA[i] < 0; i++ {
|
||||
SA[i] = ^SA[i]
|
||||
}
|
||||
if i < m {
|
||||
for j, i = i, i+1; ; i++ {
|
||||
if p = SA[i]; p < 0 {
|
||||
SA[j] = ^p
|
||||
j++
|
||||
SA[i] = 0
|
||||
if j == m {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Store the length of all substrings.
|
||||
i = n - 1
|
||||
j = n - 1
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
SA[m+((i+1)>>1)] = j - i
|
||||
j = i + 1
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Find the lexicographic names of all substrings.
|
||||
name = 0
|
||||
qlen = 0
|
||||
for i, q = 0, n; i < m; i++ {
|
||||
p = SA[i]
|
||||
plen = SA[m+(p>>1)]
|
||||
diff = true
|
||||
if (plen == qlen) && ((q + plen) < n) {
|
||||
for j = 0; (j < plen) && (T[p+j] == T[q+j]); j++ {
|
||||
}
|
||||
if j == plen {
|
||||
diff = false
|
||||
}
|
||||
}
|
||||
if diff {
|
||||
name++
|
||||
q = p
|
||||
qlen = plen
|
||||
}
|
||||
SA[m+(p>>1)] = name
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func sortLMS2_int(T []int, SA, C, B, D []int, n, k int) {
|
||||
var b, i, j, t, d int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
getBuckets_int(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
j--
|
||||
if int(T[j]) < c1 {
|
||||
t = 1
|
||||
} else {
|
||||
t = 0
|
||||
}
|
||||
j += n
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i, d = 0, 0; i < n; i++ {
|
||||
if j = SA[i]; j > 0 {
|
||||
if n <= j {
|
||||
d += 1
|
||||
j -= n
|
||||
}
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
t = int(c0) << 1
|
||||
if int(T[j]) < c1 {
|
||||
t |= 1
|
||||
}
|
||||
if D[t] != d {
|
||||
j += n
|
||||
D[t] = d
|
||||
}
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
SA[i] = 0
|
||||
} else if j < 0 {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
for i = n - 1; 0 <= i; i-- {
|
||||
if SA[i] > 0 {
|
||||
if SA[i] < n {
|
||||
SA[i] += n
|
||||
for j = i - 1; SA[j] < n; j-- {
|
||||
}
|
||||
SA[j] -= n
|
||||
i = j
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
getBuckets_int(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i, d = n-1, d+1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
if n <= j {
|
||||
d += 1
|
||||
j -= n
|
||||
}
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
j--
|
||||
t = int(c0) << 1
|
||||
if int(T[j]) > c1 {
|
||||
t |= 1
|
||||
}
|
||||
if D[t] != d {
|
||||
j += n
|
||||
D[t] = d
|
||||
}
|
||||
b--
|
||||
if t&1 > 0 {
|
||||
SA[b] = ^(j + 1)
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
SA[i] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func postProcLMS2_int(SA []int, n, m int) int {
|
||||
var i, j, d, name int
|
||||
|
||||
// Compact all the sorted LMS substrings into the first m items of SA.
|
||||
name = 0
|
||||
for i = 0; SA[i] < 0; i++ {
|
||||
j = ^SA[i]
|
||||
if n <= j {
|
||||
name += 1
|
||||
}
|
||||
SA[i] = j
|
||||
}
|
||||
if i < m {
|
||||
for d, i = i, i+1; ; i++ {
|
||||
if j = SA[i]; j < 0 {
|
||||
j = ^j
|
||||
if n <= j {
|
||||
name += 1
|
||||
}
|
||||
SA[d] = j
|
||||
d++
|
||||
SA[i] = 0
|
||||
if d == m {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if name < m {
|
||||
// Store the lexicographic names.
|
||||
for i, d = m-1, name+1; 0 <= i; i-- {
|
||||
if j = SA[i]; n <= j {
|
||||
j -= n
|
||||
d--
|
||||
}
|
||||
SA[m+(j>>1)] = d
|
||||
}
|
||||
} else {
|
||||
// Unset flags.
|
||||
for i = 0; i < m; i++ {
|
||||
if j = SA[i]; n <= j {
|
||||
j -= n
|
||||
SA[i] = j
|
||||
}
|
||||
}
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func induceSA_int(T []int, SA, C, B []int, n, k int) {
|
||||
var b, i, j int
|
||||
var c0, c1 int
|
||||
|
||||
// Compute SAl.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_int(T, C, n, k)
|
||||
}
|
||||
getBuckets_int(C, B, k, false) // Find starts of buckets
|
||||
j = n - 1
|
||||
c1 = int(T[j])
|
||||
b = B[c1]
|
||||
if j > 0 && int(T[j-1]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
for i = 0; i < n; i++ {
|
||||
j = SA[i]
|
||||
SA[i] = ^j
|
||||
if j > 0 {
|
||||
j--
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
if j > 0 && int(T[j-1]) < c1 {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
b++
|
||||
}
|
||||
}
|
||||
|
||||
// Compute SAs.
|
||||
if &C[0] == &B[0] {
|
||||
getCounts_int(T, C, n, k)
|
||||
}
|
||||
getBuckets_int(C, B, k, true) // Find ends of buckets
|
||||
c1 = 0
|
||||
b = B[c1]
|
||||
for i = n - 1; i >= 0; i-- {
|
||||
if j = SA[i]; j > 0 {
|
||||
j--
|
||||
if c0 = int(T[j]); c0 != c1 {
|
||||
B[c1] = b
|
||||
c1 = c0
|
||||
b = B[c1]
|
||||
}
|
||||
b--
|
||||
if (j == 0) || (int(T[j-1]) > c1) {
|
||||
SA[b] = ^j
|
||||
} else {
|
||||
SA[b] = j
|
||||
}
|
||||
} else {
|
||||
SA[i] = ^j
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func computeSA_int(T []int, SA []int, fs, n, k int) {
|
||||
const (
|
||||
minBucketSize = 512
|
||||
sortLMS2Limit = 0x3fffffff
|
||||
)
|
||||
|
||||
var C, B, D, RA []int
|
||||
var bo int // Offset of B relative to SA
|
||||
var b, i, j, m, p, q, name, newfs int
|
||||
var c0, c1 int
|
||||
var flags uint
|
||||
|
||||
if k <= minBucketSize {
|
||||
C = make([]int, k)
|
||||
if k <= fs {
|
||||
bo = n + fs - k
|
||||
B = SA[bo:]
|
||||
flags = 1
|
||||
} else {
|
||||
B = make([]int, k)
|
||||
flags = 3
|
||||
}
|
||||
} else if k <= fs {
|
||||
C = SA[n+fs-k:]
|
||||
if k <= fs-k {
|
||||
bo = n + fs - 2*k
|
||||
B = SA[bo:]
|
||||
flags = 0
|
||||
} else if k <= 4*minBucketSize {
|
||||
B = make([]int, k)
|
||||
flags = 2
|
||||
} else {
|
||||
B = C
|
||||
flags = 8
|
||||
}
|
||||
} else {
|
||||
C = make([]int, k)
|
||||
B = C
|
||||
flags = 4 | 8
|
||||
}
|
||||
if n <= sortLMS2Limit && 2 <= (n/k) {
|
||||
if flags&1 > 0 {
|
||||
if 2*k <= fs-k {
|
||||
flags |= 32
|
||||
} else {
|
||||
flags |= 16
|
||||
}
|
||||
} else if flags == 0 && 2*k <= (fs-2*k) {
|
||||
flags |= 32
|
||||
}
|
||||
}
|
||||
|
||||
// Stage 1: Reduce the problem by at least 1/2.
|
||||
// Sort all the LMS-substrings.
|
||||
getCounts_int(T, C, n, k)
|
||||
getBuckets_int(C, B, k, true) // Find ends of buckets
|
||||
for i = 0; i < n; i++ {
|
||||
SA[i] = 0
|
||||
}
|
||||
b = -1
|
||||
i = n - 1
|
||||
j = n
|
||||
m = 0
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
if b >= 0 {
|
||||
SA[b] = j
|
||||
}
|
||||
B[c1]--
|
||||
b = B[c1]
|
||||
j = i
|
||||
m++
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if m > 1 {
|
||||
if flags&(16|32) > 0 {
|
||||
if flags&16 > 0 {
|
||||
D = make([]int, 2*k)
|
||||
} else {
|
||||
D = SA[bo-2*k:]
|
||||
}
|
||||
B[T[j+1]]++
|
||||
for i, j = 0, 0; i < k; i++ {
|
||||
j += C[i]
|
||||
if B[i] != j {
|
||||
SA[B[i]] += n
|
||||
}
|
||||
D[i] = 0
|
||||
D[i+k] = 0
|
||||
}
|
||||
sortLMS2_int(T, SA, C, B, D, n, k)
|
||||
name = postProcLMS2_int(SA, n, m)
|
||||
} else {
|
||||
sortLMS1_int(T, SA, C, B, n, k)
|
||||
name = postProcLMS1_int(T, SA, n, m)
|
||||
}
|
||||
} else if m == 1 {
|
||||
SA[b] = j + 1
|
||||
name = 1
|
||||
} else {
|
||||
name = 0
|
||||
}
|
||||
|
||||
// Stage 2: Solve the reduced problem.
|
||||
// Recurse if names are not yet unique.
|
||||
if name < m {
|
||||
newfs = n + fs - 2*m
|
||||
if flags&(1|4|8) == 0 {
|
||||
if k+name <= newfs {
|
||||
newfs -= k
|
||||
} else {
|
||||
flags |= 8
|
||||
}
|
||||
}
|
||||
RA = SA[m+newfs:]
|
||||
for i, j = m+(n>>1)-1, m-1; m <= i; i-- {
|
||||
if SA[i] != 0 {
|
||||
RA[j] = SA[i] - 1
|
||||
j--
|
||||
}
|
||||
}
|
||||
computeSA_int(RA, SA, newfs, m, name)
|
||||
|
||||
i = n - 1
|
||||
j = m - 1
|
||||
c0 = int(T[n-1])
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for i >= 0 {
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 > c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i >= 0 {
|
||||
RA[j] = i + 1
|
||||
j--
|
||||
for {
|
||||
c1 = c0
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
if c0 = int(T[i]); c0 < c1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for i = 0; i < m; i++ {
|
||||
SA[i] = RA[SA[i]]
|
||||
}
|
||||
if flags&4 > 0 {
|
||||
B = make([]int, k)
|
||||
C = B
|
||||
}
|
||||
if flags&2 > 0 {
|
||||
B = make([]int, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Stage 3: Induce the result for the original problem.
|
||||
if flags&8 > 0 {
|
||||
getCounts_int(T, C, n, k)
|
||||
}
|
||||
// Put all left-most S characters into their buckets.
|
||||
if m > 1 {
|
||||
getBuckets_int(C, B, k, true) // Find ends of buckets
|
||||
i = m - 1
|
||||
j = n
|
||||
p = SA[m-1]
|
||||
c1 = int(T[p])
|
||||
for {
|
||||
c0 = c1
|
||||
q = B[c0]
|
||||
for q < j {
|
||||
j--
|
||||
SA[j] = 0
|
||||
}
|
||||
for {
|
||||
j--
|
||||
SA[j] = p
|
||||
if i--; i < 0 {
|
||||
break
|
||||
}
|
||||
p = SA[i]
|
||||
if c1 = int(T[p]); c1 != c0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
for j > 0 {
|
||||
j--
|
||||
SA[j] = 0
|
||||
}
|
||||
}
|
||||
induceSA_int(T, SA, C, B, n, k)
|
||||
}
|
||||
131
vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go
generated
vendored
Normal file
131
vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package bzip2
|
||||
|
||||
import "github.com/dsnet/compress/internal/errors"
|
||||
|
||||
// moveToFront implements both the MTF and RLE stages of bzip2 at the same time.
|
||||
// Any runs of zeros in the encoded output will be replaced by a sequence of
|
||||
// RUNA and RUNB symbols are encode the length of the run.
|
||||
//
|
||||
// The RLE encoding used can actually be encoded to and decoded from using
|
||||
// normal two's complement arithmetic. The methodology for doing so is below.
|
||||
//
|
||||
// Assuming the following:
|
||||
// num: The value being encoded by RLE encoding.
|
||||
// run: A sequence of RUNA and RUNB symbols represented as a binary integer,
|
||||
// where RUNA is the 0 bit, RUNB is the 1 bit, and least-significant RUN
|
||||
// symbols are at the least-significant bit positions.
|
||||
// cnt: The number of RUNA and RUNB symbols.
|
||||
//
|
||||
// Then the RLE encoding used by bzip2 has this mathematical property:
|
||||
// num+1 == (1<<cnt) | run
|
||||
type moveToFront struct {
|
||||
dictBuf [256]uint8
|
||||
dictLen int
|
||||
|
||||
vals []byte
|
||||
syms []uint16
|
||||
blkSize int
|
||||
}
|
||||
|
||||
func (mtf *moveToFront) Init(dict []uint8, blkSize int) {
|
||||
if len(dict) > len(mtf.dictBuf) {
|
||||
panicf(errors.Internal, "alphabet too large")
|
||||
}
|
||||
copy(mtf.dictBuf[:], dict)
|
||||
mtf.dictLen = len(dict)
|
||||
mtf.blkSize = blkSize
|
||||
}
|
||||
|
||||
func (mtf *moveToFront) Encode(vals []byte) (syms []uint16) {
|
||||
dict := mtf.dictBuf[:mtf.dictLen]
|
||||
syms = mtf.syms[:0]
|
||||
|
||||
if len(vals) > mtf.blkSize {
|
||||
panicf(errors.Internal, "exceeded block size")
|
||||
}
|
||||
|
||||
var lastNum uint32
|
||||
for _, val := range vals {
|
||||
// Normal move-to-front transform.
|
||||
var idx uint8 // Reverse lookup idx in dict
|
||||
for di, dv := range dict {
|
||||
if dv == val {
|
||||
idx = uint8(di)
|
||||
break
|
||||
}
|
||||
}
|
||||
copy(dict[1:], dict[:idx])
|
||||
dict[0] = val
|
||||
|
||||
// Run-length encoding augmentation.
|
||||
if idx == 0 {
|
||||
lastNum++
|
||||
continue
|
||||
}
|
||||
if lastNum > 0 {
|
||||
for rc := lastNum + 1; rc != 1; rc >>= 1 {
|
||||
syms = append(syms, uint16(rc&1))
|
||||
}
|
||||
lastNum = 0
|
||||
}
|
||||
syms = append(syms, uint16(idx)+1)
|
||||
}
|
||||
if lastNum > 0 {
|
||||
for rc := lastNum + 1; rc != 1; rc >>= 1 {
|
||||
syms = append(syms, uint16(rc&1))
|
||||
}
|
||||
}
|
||||
mtf.syms = syms
|
||||
return syms
|
||||
}
|
||||
|
||||
func (mtf *moveToFront) Decode(syms []uint16) (vals []byte) {
|
||||
dict := mtf.dictBuf[:mtf.dictLen]
|
||||
vals = mtf.vals[:0]
|
||||
|
||||
var lastCnt uint
|
||||
var lastRun uint32
|
||||
for _, sym := range syms {
|
||||
// Run-length encoding augmentation.
|
||||
if sym < 2 {
|
||||
lastRun |= uint32(sym) << lastCnt
|
||||
lastCnt++
|
||||
continue
|
||||
}
|
||||
if lastCnt > 0 {
|
||||
cnt := int((1<<lastCnt)|lastRun) - 1
|
||||
if len(vals)+cnt > mtf.blkSize || lastCnt > 24 {
|
||||
panicf(errors.Corrupted, "run-length decoding exceeded block size")
|
||||
}
|
||||
for i := cnt; i > 0; i-- {
|
||||
vals = append(vals, dict[0])
|
||||
}
|
||||
lastCnt, lastRun = 0, 0
|
||||
}
|
||||
|
||||
// Normal move-to-front transform.
|
||||
val := dict[sym-1] // Forward lookup val in dict
|
||||
copy(dict[1:], dict[:sym-1])
|
||||
dict[0] = val
|
||||
|
||||
if len(vals) >= mtf.blkSize {
|
||||
panicf(errors.Corrupted, "run-length decoding exceeded block size")
|
||||
}
|
||||
vals = append(vals, val)
|
||||
}
|
||||
if lastCnt > 0 {
|
||||
cnt := int((1<<lastCnt)|lastRun) - 1
|
||||
if len(vals)+cnt > mtf.blkSize || lastCnt > 24 {
|
||||
panicf(errors.Corrupted, "run-length decoding exceeded block size")
|
||||
}
|
||||
for i := cnt; i > 0; i-- {
|
||||
vals = append(vals, dict[0])
|
||||
}
|
||||
}
|
||||
mtf.vals = vals
|
||||
return vals
|
||||
}
|
||||
374
vendor/github.com/dsnet/compress/bzip2/prefix.go
generated
vendored
Normal file
374
vendor/github.com/dsnet/compress/bzip2/prefix.go
generated
vendored
Normal file
@@ -0,0 +1,374 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package bzip2
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/dsnet/compress/internal"
|
||||
"github.com/dsnet/compress/internal/errors"
|
||||
"github.com/dsnet/compress/internal/prefix"
|
||||
)
|
||||
|
||||
const (
|
||||
minNumTrees = 2
|
||||
maxNumTrees = 6
|
||||
|
||||
maxPrefixBits = 20 // Maximum bit-width of a prefix code
|
||||
maxNumSyms = 256 + 2 // Maximum number of symbols in the alphabet
|
||||
numBlockSyms = 50 // Number of bytes in a block
|
||||
)
|
||||
|
||||
// encSel and decSel are used to handle the prefix encoding for tree selectors.
|
||||
// The prefix encoding is as follows:
|
||||
//
|
||||
// Code TreeIdx
|
||||
// 0 <=> 0
|
||||
// 10 <=> 1
|
||||
// 110 <=> 2
|
||||
// 1110 <=> 3
|
||||
// 11110 <=> 4
|
||||
// 111110 <=> 5
|
||||
// 111111 <=> 6 Invalid tree index, so should fail
|
||||
//
|
||||
var encSel, decSel = func() (e prefix.Encoder, d prefix.Decoder) {
|
||||
var selCodes [maxNumTrees + 1]prefix.PrefixCode
|
||||
for i := range selCodes {
|
||||
selCodes[i] = prefix.PrefixCode{Sym: uint32(i), Len: uint32(i + 1)}
|
||||
}
|
||||
selCodes[maxNumTrees] = prefix.PrefixCode{Sym: maxNumTrees, Len: maxNumTrees}
|
||||
prefix.GeneratePrefixes(selCodes[:])
|
||||
e.Init(selCodes[:])
|
||||
d.Init(selCodes[:])
|
||||
return
|
||||
}()
|
||||
|
||||
type prefixReader struct{ prefix.Reader }
|
||||
|
||||
func (pr *prefixReader) Init(r io.Reader) {
|
||||
pr.Reader.Init(r, true)
|
||||
}
|
||||
|
||||
func (pr *prefixReader) ReadBitsBE64(nb uint) uint64 {
|
||||
if nb <= 32 {
|
||||
v := uint32(pr.ReadBits(nb))
|
||||
return uint64(internal.ReverseUint32N(v, nb))
|
||||
}
|
||||
v0 := internal.ReverseUint32(uint32(pr.ReadBits(32)))
|
||||
v1 := internal.ReverseUint32(uint32(pr.ReadBits(nb - 32)))
|
||||
v := uint64(v0)<<32 | uint64(v1)
|
||||
return v >> (64 - nb)
|
||||
}
|
||||
|
||||
func (pr *prefixReader) ReadPrefixCodes(codes []prefix.PrefixCodes, trees []prefix.Decoder) {
|
||||
for i, pc := range codes {
|
||||
clen := int(pr.ReadBitsBE64(5))
|
||||
sum := 1 << maxPrefixBits
|
||||
for sym := range pc {
|
||||
for {
|
||||
if clen < 1 || clen > maxPrefixBits {
|
||||
panicf(errors.Corrupted, "invalid prefix bit-length: %d", clen)
|
||||
}
|
||||
|
||||
b, ok := pr.TryReadBits(1)
|
||||
if !ok {
|
||||
b = pr.ReadBits(1)
|
||||
}
|
||||
if b == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
b, ok = pr.TryReadBits(1)
|
||||
if !ok {
|
||||
b = pr.ReadBits(1)
|
||||
}
|
||||
clen -= int(b*2) - 1 // +1 or -1
|
||||
}
|
||||
pc[sym] = prefix.PrefixCode{Sym: uint32(sym), Len: uint32(clen)}
|
||||
sum -= (1 << maxPrefixBits) >> uint(clen)
|
||||
}
|
||||
|
||||
if sum == 0 {
|
||||
// Fast path, but only handles complete trees.
|
||||
if err := prefix.GeneratePrefixes(pc); err != nil {
|
||||
errors.Panic(err) // Using complete trees; should never fail
|
||||
}
|
||||
} else {
|
||||
// Slow path, but handles anything.
|
||||
pc = handleDegenerateCodes(pc) // Never fails, but may fail later
|
||||
codes[i] = pc
|
||||
}
|
||||
trees[i].Init(pc)
|
||||
}
|
||||
}
|
||||
|
||||
type prefixWriter struct{ prefix.Writer }
|
||||
|
||||
func (pw *prefixWriter) Init(w io.Writer) {
|
||||
pw.Writer.Init(w, true)
|
||||
}
|
||||
|
||||
func (pw *prefixWriter) WriteBitsBE64(v uint64, nb uint) {
|
||||
if nb <= 32 {
|
||||
v := internal.ReverseUint32N(uint32(v), nb)
|
||||
pw.WriteBits(uint(v), nb)
|
||||
return
|
||||
}
|
||||
v <<= (64 - nb)
|
||||
v0 := internal.ReverseUint32(uint32(v >> 32))
|
||||
v1 := internal.ReverseUint32(uint32(v))
|
||||
pw.WriteBits(uint(v0), 32)
|
||||
pw.WriteBits(uint(v1), nb-32)
|
||||
return
|
||||
}
|
||||
|
||||
func (pw *prefixWriter) WritePrefixCodes(codes []prefix.PrefixCodes, trees []prefix.Encoder) {
|
||||
for i, pc := range codes {
|
||||
if err := prefix.GeneratePrefixes(pc); err != nil {
|
||||
errors.Panic(err) // Using complete trees; should never fail
|
||||
}
|
||||
trees[i].Init(pc)
|
||||
|
||||
clen := int(pc[0].Len)
|
||||
pw.WriteBitsBE64(uint64(clen), 5)
|
||||
for _, c := range pc {
|
||||
for int(c.Len) < clen {
|
||||
pw.WriteBits(3, 2) // 11
|
||||
clen--
|
||||
}
|
||||
for int(c.Len) > clen {
|
||||
pw.WriteBits(1, 2) // 10
|
||||
clen++
|
||||
}
|
||||
pw.WriteBits(0, 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleDegenerateCodes converts a degenerate tree into a canonical tree.
|
||||
//
|
||||
// For example, when the input is an under-subscribed tree:
|
||||
// input: []PrefixCode{
|
||||
// {Sym: 0, Len: 3},
|
||||
// {Sym: 1, Len: 4},
|
||||
// {Sym: 2, Len: 3},
|
||||
// }
|
||||
// output: []PrefixCode{
|
||||
// {Sym: 0, Len: 3, Val: 0}, // 000
|
||||
// {Sym: 1, Len: 4, Val: 2}, // 0010
|
||||
// {Sym: 2, Len: 3, Val: 4}, // 100
|
||||
// {Sym: 258, Len: 4, Val: 10}, // 1010
|
||||
// {Sym: 259, Len: 3, Val: 6}, // 110
|
||||
// {Sym: 260, Len: 1, Val: 1}, // 1
|
||||
// }
|
||||
//
|
||||
// For example, when the input is an over-subscribed tree:
|
||||
// input: []PrefixCode{
|
||||
// {Sym: 0, Len: 1},
|
||||
// {Sym: 1, Len: 3},
|
||||
// {Sym: 2, Len: 4},
|
||||
// {Sym: 3, Len: 3},
|
||||
// {Sym: 4, Len: 2},
|
||||
// }
|
||||
// output: []PrefixCode{
|
||||
// {Sym: 0, Len: 1, Val: 0}, // 0
|
||||
// {Sym: 1, Len: 3, Val: 3}, // 011
|
||||
// {Sym: 3, Len: 3, Val: 7}, // 111
|
||||
// {Sym: 4, Len: 2, Val: 1}, // 01
|
||||
// }
|
||||
func handleDegenerateCodes(codes prefix.PrefixCodes) prefix.PrefixCodes {
|
||||
// Since there is no formal definition for the BZip2 format, there is no
|
||||
// specification that says that the code lengths must form a complete
|
||||
// prefix tree (IE: it is neither over-subscribed nor under-subscribed).
|
||||
// Thus, the original C implementation becomes the reference for how prefix
|
||||
// decoding is done in these edge cases. Unfortunately, the C version does
|
||||
// not error when an invalid tree is used, but rather allows decoding to
|
||||
// continue and only errors if some bit pattern happens to cause an error.
|
||||
// Thus, it is possible for an invalid tree to end up decoding an input
|
||||
// "properly" so long as invalid bit patterns are not present. In order to
|
||||
// replicate this non-specified behavior, we use a ported version of the
|
||||
// C code to generate the codes as a valid canonical tree by substituting
|
||||
// invalid nodes with invalid symbols.
|
||||
//
|
||||
// ====================================================
|
||||
// This program, "bzip2", the associated library "libbzip2", and all
|
||||
// documentation, are copyright (C) 1996-2010 Julian R Seward. All
|
||||
// rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions
|
||||
// are met:
|
||||
//
|
||||
// 1. Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
//
|
||||
// 2. The origin of this software must not be misrepresented; you must
|
||||
// not claim that you wrote the original software. If you use this
|
||||
// software in a product, an acknowledgment in the product
|
||||
// documentation would be appreciated but is not required.
|
||||
//
|
||||
// 3. Altered source versions must be plainly marked as such, and must
|
||||
// not be misrepresented as being the original software.
|
||||
//
|
||||
// 4. The name of the author may not be used to endorse or promote
|
||||
// products derived from this software without specific prior written
|
||||
// permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
|
||||
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
||||
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
|
||||
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
||||
// Julian Seward, jseward@bzip.org
|
||||
// bzip2/libbzip2 version 1.0.6 of 6 September 2010
|
||||
// ====================================================
|
||||
var (
|
||||
limits [maxPrefixBits + 2]int32
|
||||
bases [maxPrefixBits + 2]int32
|
||||
perms [maxNumSyms]int32
|
||||
|
||||
minLen = uint32(maxPrefixBits)
|
||||
maxLen = uint32(0)
|
||||
)
|
||||
|
||||
const (
|
||||
statusOkay = iota
|
||||
statusInvalid
|
||||
statusNeedBits
|
||||
statusMaxBits
|
||||
)
|
||||
|
||||
// createTables is the BZ2_hbCreateDecodeTables function from the C code.
|
||||
createTables := func(codes []prefix.PrefixCode) {
|
||||
for _, c := range codes {
|
||||
if c.Len > maxLen {
|
||||
maxLen = c.Len
|
||||
}
|
||||
if c.Len < minLen {
|
||||
minLen = c.Len
|
||||
}
|
||||
}
|
||||
|
||||
var pp int
|
||||
for i := minLen; i <= maxLen; i++ {
|
||||
for j, c := range codes {
|
||||
if c.Len == i {
|
||||
perms[pp] = int32(j)
|
||||
pp++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var vec int32
|
||||
for _, c := range codes {
|
||||
bases[c.Len+1]++
|
||||
}
|
||||
for i := 1; i < len(bases); i++ {
|
||||
bases[i] += bases[i-1]
|
||||
}
|
||||
for i := minLen; i <= maxLen; i++ {
|
||||
vec += bases[i+1] - bases[i]
|
||||
limits[i] = vec - 1
|
||||
vec <<= 1
|
||||
}
|
||||
for i := minLen + 1; i <= maxLen; i++ {
|
||||
bases[i] = ((limits[i-1] + 1) << 1) - bases[i]
|
||||
}
|
||||
}
|
||||
|
||||
// getSymbol is the GET_MTF_VAL macro from the C code.
|
||||
getSymbol := func(c prefix.PrefixCode) (uint32, int) {
|
||||
v := internal.ReverseUint32(c.Val)
|
||||
n := c.Len
|
||||
|
||||
zn := minLen
|
||||
if zn > n {
|
||||
return 0, statusNeedBits
|
||||
}
|
||||
zvec := int32(v >> (32 - zn))
|
||||
v <<= zn
|
||||
for {
|
||||
if zn > maxLen {
|
||||
return 0, statusMaxBits
|
||||
}
|
||||
if zvec <= limits[zn] {
|
||||
break
|
||||
}
|
||||
zn++
|
||||
if zn > n {
|
||||
return 0, statusNeedBits
|
||||
}
|
||||
zvec = (zvec << 1) | int32(v>>31)
|
||||
v <<= 1
|
||||
}
|
||||
if zvec-bases[zn] < 0 || zvec-bases[zn] >= maxNumSyms {
|
||||
return 0, statusInvalid
|
||||
}
|
||||
return uint32(perms[zvec-bases[zn]]), statusOkay
|
||||
}
|
||||
|
||||
// Step 1: Create the prefix trees using the C algorithm.
|
||||
createTables(codes)
|
||||
|
||||
// Step 2: Starting with the shortest bit pattern, explore the whole tree.
|
||||
// If tree is under-subscribed, the worst-case runtime is O(1<<maxLen).
|
||||
// If tree is over-subscribed, the worst-case runtime is O(maxNumSyms).
|
||||
var pcodesArr [2 * maxNumSyms]prefix.PrefixCode
|
||||
pcodes := pcodesArr[:maxNumSyms]
|
||||
var exploreCode func(prefix.PrefixCode) bool
|
||||
exploreCode = func(c prefix.PrefixCode) (term bool) {
|
||||
sym, status := getSymbol(c)
|
||||
switch status {
|
||||
case statusOkay:
|
||||
// This code is valid, so insert it.
|
||||
c.Sym = sym
|
||||
pcodes[sym] = c
|
||||
term = true
|
||||
case statusInvalid:
|
||||
// This code is invalid, so insert an invalid symbol.
|
||||
c.Sym = uint32(len(pcodes))
|
||||
pcodes = append(pcodes, c)
|
||||
term = true
|
||||
case statusNeedBits:
|
||||
// This code is too short, so explore both children.
|
||||
c.Len++
|
||||
c0, c1 := c, c
|
||||
c1.Val |= 1 << (c.Len - 1)
|
||||
|
||||
b0 := exploreCode(c0)
|
||||
b1 := exploreCode(c1)
|
||||
switch {
|
||||
case !b0 && b1:
|
||||
c0.Sym = uint32(len(pcodes))
|
||||
pcodes = append(pcodes, c0)
|
||||
case !b1 && b0:
|
||||
c1.Sym = uint32(len(pcodes))
|
||||
pcodes = append(pcodes, c1)
|
||||
}
|
||||
term = b0 || b1
|
||||
case statusMaxBits:
|
||||
// This code is too long, so report it upstream.
|
||||
term = false
|
||||
}
|
||||
return term // Did this code terminate?
|
||||
}
|
||||
exploreCode(prefix.PrefixCode{})
|
||||
|
||||
// Step 3: Copy new sparse codes to old output codes.
|
||||
codes = codes[:0]
|
||||
for _, c := range pcodes {
|
||||
if c.Len > 0 {
|
||||
codes = append(codes, c)
|
||||
}
|
||||
}
|
||||
return codes
|
||||
}
|
||||
274
vendor/github.com/dsnet/compress/bzip2/reader.go
generated
vendored
Normal file
274
vendor/github.com/dsnet/compress/bzip2/reader.go
generated
vendored
Normal file
@@ -0,0 +1,274 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package bzip2
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/dsnet/compress/internal"
|
||||
"github.com/dsnet/compress/internal/errors"
|
||||
"github.com/dsnet/compress/internal/prefix"
|
||||
)
|
||||
|
||||
type Reader struct {
|
||||
InputOffset int64 // Total number of bytes read from underlying io.Reader
|
||||
OutputOffset int64 // Total number of bytes emitted from Read
|
||||
|
||||
rd prefixReader
|
||||
err error
|
||||
level int // The current compression level
|
||||
rdHdrFtr int // Number of times we read the stream header and footer
|
||||
blkCRC uint32 // CRC-32 IEEE of each block (as stored)
|
||||
endCRC uint32 // Checksum of all blocks using bzip2's custom method
|
||||
|
||||
crc crc
|
||||
mtf moveToFront
|
||||
bwt burrowsWheelerTransform
|
||||
rle runLengthEncoding
|
||||
|
||||
// These fields are allocated with Reader and re-used later.
|
||||
treeSels []uint8
|
||||
codes2D [maxNumTrees][maxNumSyms]prefix.PrefixCode
|
||||
codes1D [maxNumTrees]prefix.PrefixCodes
|
||||
trees1D [maxNumTrees]prefix.Decoder
|
||||
syms []uint16
|
||||
|
||||
fuzzReader // Exported functionality when fuzz testing
|
||||
}
|
||||
|
||||
type ReaderConfig struct {
|
||||
_ struct{} // Blank field to prevent unkeyed struct literals
|
||||
}
|
||||
|
||||
func NewReader(r io.Reader, conf *ReaderConfig) (*Reader, error) {
|
||||
zr := new(Reader)
|
||||
zr.Reset(r)
|
||||
return zr, nil
|
||||
}
|
||||
|
||||
func (zr *Reader) Reset(r io.Reader) error {
|
||||
*zr = Reader{
|
||||
rd: zr.rd,
|
||||
|
||||
mtf: zr.mtf,
|
||||
bwt: zr.bwt,
|
||||
rle: zr.rle,
|
||||
|
||||
treeSels: zr.treeSels,
|
||||
trees1D: zr.trees1D,
|
||||
syms: zr.syms,
|
||||
}
|
||||
zr.rd.Init(r)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (zr *Reader) Read(buf []byte) (int, error) {
|
||||
for {
|
||||
cnt, err := zr.rle.Read(buf)
|
||||
if err != rleDone && zr.err == nil {
|
||||
zr.err = err
|
||||
}
|
||||
if cnt > 0 {
|
||||
zr.crc.update(buf[:cnt])
|
||||
zr.OutputOffset += int64(cnt)
|
||||
return cnt, nil
|
||||
}
|
||||
if zr.err != nil || len(buf) == 0 {
|
||||
return 0, zr.err
|
||||
}
|
||||
|
||||
// Read the next chunk.
|
||||
zr.rd.Offset = zr.InputOffset
|
||||
func() {
|
||||
defer errors.Recover(&zr.err)
|
||||
if zr.rdHdrFtr%2 == 0 {
|
||||
// Check if we are already at EOF.
|
||||
if err := zr.rd.PullBits(1); err != nil {
|
||||
if err == io.ErrUnexpectedEOF && zr.rdHdrFtr > 0 {
|
||||
err = io.EOF // EOF is okay if we read at least one stream
|
||||
}
|
||||
errors.Panic(err)
|
||||
}
|
||||
|
||||
// Read stream header.
|
||||
if zr.rd.ReadBitsBE64(16) != hdrMagic {
|
||||
panicf(errors.Corrupted, "invalid stream magic")
|
||||
}
|
||||
if ver := zr.rd.ReadBitsBE64(8); ver != 'h' {
|
||||
if ver == '0' {
|
||||
panicf(errors.Deprecated, "bzip1 format is not supported")
|
||||
}
|
||||
panicf(errors.Corrupted, "invalid version: %q", ver)
|
||||
}
|
||||
lvl := int(zr.rd.ReadBitsBE64(8)) - '0'
|
||||
if lvl < BestSpeed || lvl > BestCompression {
|
||||
panicf(errors.Corrupted, "invalid block size: %d", lvl*blockSize)
|
||||
}
|
||||
zr.level = lvl
|
||||
zr.rdHdrFtr++
|
||||
} else {
|
||||
// Check and update the CRC.
|
||||
if internal.GoFuzz {
|
||||
zr.updateChecksum(-1, zr.crc.val) // Update with value
|
||||
zr.blkCRC = zr.crc.val // Suppress CRC failures
|
||||
}
|
||||
if zr.blkCRC != zr.crc.val {
|
||||
panicf(errors.Corrupted, "mismatching block checksum")
|
||||
}
|
||||
zr.endCRC = (zr.endCRC<<1 | zr.endCRC>>31) ^ zr.blkCRC
|
||||
}
|
||||
buf := zr.decodeBlock()
|
||||
zr.rle.Init(buf)
|
||||
}()
|
||||
if zr.InputOffset, err = zr.rd.Flush(); zr.err == nil {
|
||||
zr.err = err
|
||||
}
|
||||
if zr.err != nil {
|
||||
zr.err = errWrap(zr.err, errors.Corrupted)
|
||||
return 0, zr.err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (zr *Reader) Close() error {
|
||||
if zr.err == io.EOF || zr.err == errClosed {
|
||||
zr.rle.Init(nil) // Make sure future reads fail
|
||||
zr.err = errClosed
|
||||
return nil
|
||||
}
|
||||
return zr.err // Return the persistent error
|
||||
}
|
||||
|
||||
func (zr *Reader) decodeBlock() []byte {
|
||||
if magic := zr.rd.ReadBitsBE64(48); magic != blkMagic {
|
||||
if magic == endMagic {
|
||||
endCRC := uint32(zr.rd.ReadBitsBE64(32))
|
||||
if internal.GoFuzz {
|
||||
zr.updateChecksum(zr.rd.BitsRead()-32, zr.endCRC)
|
||||
endCRC = zr.endCRC // Suppress CRC failures
|
||||
}
|
||||
if zr.endCRC != endCRC {
|
||||
panicf(errors.Corrupted, "mismatching stream checksum")
|
||||
}
|
||||
zr.endCRC = 0
|
||||
zr.rd.ReadPads()
|
||||
zr.rdHdrFtr++
|
||||
return nil
|
||||
}
|
||||
panicf(errors.Corrupted, "invalid block or footer magic")
|
||||
}
|
||||
|
||||
zr.crc.val = 0
|
||||
zr.blkCRC = uint32(zr.rd.ReadBitsBE64(32))
|
||||
if internal.GoFuzz {
|
||||
zr.updateChecksum(zr.rd.BitsRead()-32, 0) // Record offset only
|
||||
}
|
||||
if zr.rd.ReadBitsBE64(1) != 0 {
|
||||
panicf(errors.Deprecated, "block randomization is not supported")
|
||||
}
|
||||
|
||||
// Read BWT related fields.
|
||||
ptr := int(zr.rd.ReadBitsBE64(24)) // BWT origin pointer
|
||||
|
||||
// Read MTF related fields.
|
||||
var dictArr [256]uint8
|
||||
dict := dictArr[:0]
|
||||
bmapHi := uint16(zr.rd.ReadBits(16))
|
||||
for i := 0; i < 256; i, bmapHi = i+16, bmapHi>>1 {
|
||||
if bmapHi&1 > 0 {
|
||||
bmapLo := uint16(zr.rd.ReadBits(16))
|
||||
for j := 0; j < 16; j, bmapLo = j+1, bmapLo>>1 {
|
||||
if bmapLo&1 > 0 {
|
||||
dict = append(dict, uint8(i+j))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Step 1: Prefix encoding.
|
||||
syms := zr.decodePrefix(len(dict))
|
||||
|
||||
// Step 2: Move-to-front transform and run-length encoding.
|
||||
zr.mtf.Init(dict, zr.level*blockSize)
|
||||
buf := zr.mtf.Decode(syms)
|
||||
|
||||
// Step 3: Burrows-Wheeler transformation.
|
||||
if ptr >= len(buf) {
|
||||
panicf(errors.Corrupted, "origin pointer (0x%06x) exceeds block size: %d", ptr, len(buf))
|
||||
}
|
||||
zr.bwt.Decode(buf, ptr)
|
||||
|
||||
return buf
|
||||
}
|
||||
|
||||
func (zr *Reader) decodePrefix(numSyms int) (syms []uint16) {
|
||||
numSyms += 2 // Remove 0 symbol, add RUNA, RUNB, and EOF symbols
|
||||
if numSyms < 3 {
|
||||
panicf(errors.Corrupted, "not enough prefix symbols: %d", numSyms)
|
||||
}
|
||||
|
||||
// Read information about the trees and tree selectors.
|
||||
var mtf internal.MoveToFront
|
||||
numTrees := int(zr.rd.ReadBitsBE64(3))
|
||||
if numTrees < minNumTrees || numTrees > maxNumTrees {
|
||||
panicf(errors.Corrupted, "invalid number of prefix trees: %d", numTrees)
|
||||
}
|
||||
numSels := int(zr.rd.ReadBitsBE64(15))
|
||||
if cap(zr.treeSels) < numSels {
|
||||
zr.treeSels = make([]uint8, numSels)
|
||||
}
|
||||
treeSels := zr.treeSels[:numSels]
|
||||
for i := range treeSels {
|
||||
sym, ok := zr.rd.TryReadSymbol(&decSel)
|
||||
if !ok {
|
||||
sym = zr.rd.ReadSymbol(&decSel)
|
||||
}
|
||||
if int(sym) >= numTrees {
|
||||
panicf(errors.Corrupted, "invalid prefix tree selector: %d", sym)
|
||||
}
|
||||
treeSels[i] = uint8(sym)
|
||||
}
|
||||
mtf.Decode(treeSels)
|
||||
zr.treeSels = treeSels
|
||||
|
||||
// Initialize prefix codes.
|
||||
for i := range zr.codes2D[:numTrees] {
|
||||
zr.codes1D[i] = zr.codes2D[i][:numSyms]
|
||||
}
|
||||
zr.rd.ReadPrefixCodes(zr.codes1D[:numTrees], zr.trees1D[:numTrees])
|
||||
|
||||
// Read prefix encoded symbols of compressed data.
|
||||
var tree *prefix.Decoder
|
||||
var blkLen, selIdx int
|
||||
syms = zr.syms[:0]
|
||||
for {
|
||||
if blkLen == 0 {
|
||||
blkLen = numBlockSyms
|
||||
if selIdx >= len(treeSels) {
|
||||
panicf(errors.Corrupted, "not enough prefix tree selectors")
|
||||
}
|
||||
tree = &zr.trees1D[treeSels[selIdx]]
|
||||
selIdx++
|
||||
}
|
||||
blkLen--
|
||||
sym, ok := zr.rd.TryReadSymbol(tree)
|
||||
if !ok {
|
||||
sym = zr.rd.ReadSymbol(tree)
|
||||
}
|
||||
|
||||
if int(sym) == numSyms-1 {
|
||||
break // EOF marker
|
||||
}
|
||||
if int(sym) >= numSyms {
|
||||
panicf(errors.Corrupted, "invalid prefix symbol: %d", sym)
|
||||
}
|
||||
if len(syms) >= zr.level*blockSize {
|
||||
panicf(errors.Corrupted, "number of prefix symbols exceeds block size")
|
||||
}
|
||||
syms = append(syms, uint16(sym))
|
||||
}
|
||||
zr.syms = syms
|
||||
return syms
|
||||
}
|
||||
101
vendor/github.com/dsnet/compress/bzip2/rle1.go
generated
vendored
Normal file
101
vendor/github.com/dsnet/compress/bzip2/rle1.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package bzip2
|
||||
|
||||
import "github.com/dsnet/compress/internal/errors"
|
||||
|
||||
// rleDone is a special "error" to indicate that the RLE stage is done.
|
||||
var rleDone = errorf(errors.Unknown, "RLE1 stage is completed")
|
||||
|
||||
// runLengthEncoding implements the first RLE stage of bzip2. Every sequence
|
||||
// of 4..255 duplicated bytes is replaced by only the first 4 bytes, and a
|
||||
// single byte representing the repeat length. Similar to the C bzip2
|
||||
// implementation, the encoder will always terminate repeat sequences with a
|
||||
// count (even if it is the end of the buffer), and it will also never produce
|
||||
// run lengths of 256..259. The decoder can handle the latter case.
|
||||
//
|
||||
// For example, if the input was:
|
||||
// input: "AAAAAAABBBBCCCD"
|
||||
//
|
||||
// Then the output will be:
|
||||
// output: "AAAA\x03BBBB\x00CCCD"
|
||||
type runLengthEncoding struct {
|
||||
buf []byte
|
||||
idx int
|
||||
lastVal byte
|
||||
lastCnt int
|
||||
}
|
||||
|
||||
func (rle *runLengthEncoding) Init(buf []byte) {
|
||||
*rle = runLengthEncoding{buf: buf}
|
||||
}
|
||||
|
||||
func (rle *runLengthEncoding) Write(buf []byte) (int, error) {
|
||||
for i, b := range buf {
|
||||
if rle.lastVal != b {
|
||||
rle.lastCnt = 0
|
||||
}
|
||||
rle.lastCnt++
|
||||
switch {
|
||||
case rle.lastCnt < 4:
|
||||
if rle.idx >= len(rle.buf) {
|
||||
return i, rleDone
|
||||
}
|
||||
rle.buf[rle.idx] = b
|
||||
rle.idx++
|
||||
case rle.lastCnt == 4:
|
||||
if rle.idx+1 >= len(rle.buf) {
|
||||
return i, rleDone
|
||||
}
|
||||
rle.buf[rle.idx] = b
|
||||
rle.idx++
|
||||
rle.buf[rle.idx] = 0
|
||||
rle.idx++
|
||||
case rle.lastCnt < 256:
|
||||
rle.buf[rle.idx-1]++
|
||||
default:
|
||||
if rle.idx >= len(rle.buf) {
|
||||
return i, rleDone
|
||||
}
|
||||
rle.lastCnt = 1
|
||||
rle.buf[rle.idx] = b
|
||||
rle.idx++
|
||||
}
|
||||
rle.lastVal = b
|
||||
}
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
func (rle *runLengthEncoding) Read(buf []byte) (int, error) {
|
||||
for i := range buf {
|
||||
switch {
|
||||
case rle.lastCnt == -4:
|
||||
if rle.idx >= len(rle.buf) {
|
||||
return i, errorf(errors.Corrupted, "missing terminating run-length repeater")
|
||||
}
|
||||
rle.lastCnt = int(rle.buf[rle.idx])
|
||||
rle.idx++
|
||||
if rle.lastCnt > 0 {
|
||||
break // Break the switch
|
||||
}
|
||||
fallthrough // Count was zero, continue the work
|
||||
case rle.lastCnt <= 0:
|
||||
if rle.idx >= len(rle.buf) {
|
||||
return i, rleDone
|
||||
}
|
||||
b := rle.buf[rle.idx]
|
||||
rle.idx++
|
||||
if b != rle.lastVal {
|
||||
rle.lastCnt = 0
|
||||
rle.lastVal = b
|
||||
}
|
||||
}
|
||||
buf[i] = rle.lastVal
|
||||
rle.lastCnt--
|
||||
}
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
func (rle *runLengthEncoding) Bytes() []byte { return rle.buf[:rle.idx] }
|
||||
307
vendor/github.com/dsnet/compress/bzip2/writer.go
generated
vendored
Normal file
307
vendor/github.com/dsnet/compress/bzip2/writer.go
generated
vendored
Normal file
@@ -0,0 +1,307 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package bzip2
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/dsnet/compress/internal"
|
||||
"github.com/dsnet/compress/internal/errors"
|
||||
"github.com/dsnet/compress/internal/prefix"
|
||||
)
|
||||
|
||||
type Writer struct {
|
||||
InputOffset int64 // Total number of bytes issued to Write
|
||||
OutputOffset int64 // Total number of bytes written to underlying io.Writer
|
||||
|
||||
wr prefixWriter
|
||||
err error
|
||||
level int // The current compression level
|
||||
wrHdr bool // Have we written the stream header?
|
||||
blkCRC uint32 // CRC-32 IEEE of each block
|
||||
endCRC uint32 // Checksum of all blocks using bzip2's custom method
|
||||
|
||||
crc crc
|
||||
rle runLengthEncoding
|
||||
bwt burrowsWheelerTransform
|
||||
mtf moveToFront
|
||||
|
||||
// These fields are allocated with Writer and re-used later.
|
||||
buf []byte
|
||||
treeSels []uint8
|
||||
treeSelsMTF []uint8
|
||||
codes2D [maxNumTrees][maxNumSyms]prefix.PrefixCode
|
||||
codes1D [maxNumTrees]prefix.PrefixCodes
|
||||
trees1D [maxNumTrees]prefix.Encoder
|
||||
}
|
||||
|
||||
type WriterConfig struct {
|
||||
Level int
|
||||
|
||||
_ struct{} // Blank field to prevent unkeyed struct literals
|
||||
}
|
||||
|
||||
func NewWriter(w io.Writer, conf *WriterConfig) (*Writer, error) {
|
||||
var lvl int
|
||||
if conf != nil {
|
||||
lvl = conf.Level
|
||||
}
|
||||
if lvl == 0 {
|
||||
lvl = DefaultCompression
|
||||
}
|
||||
if lvl < BestSpeed || lvl > BestCompression {
|
||||
return nil, errorf(errors.Invalid, "compression level: %d", lvl)
|
||||
}
|
||||
zw := new(Writer)
|
||||
zw.level = lvl
|
||||
zw.Reset(w)
|
||||
return zw, nil
|
||||
}
|
||||
|
||||
func (zw *Writer) Reset(w io.Writer) error {
|
||||
*zw = Writer{
|
||||
wr: zw.wr,
|
||||
level: zw.level,
|
||||
|
||||
rle: zw.rle,
|
||||
bwt: zw.bwt,
|
||||
mtf: zw.mtf,
|
||||
|
||||
buf: zw.buf,
|
||||
treeSels: zw.treeSels,
|
||||
treeSelsMTF: zw.treeSelsMTF,
|
||||
trees1D: zw.trees1D,
|
||||
}
|
||||
zw.wr.Init(w)
|
||||
if len(zw.buf) != zw.level*blockSize {
|
||||
zw.buf = make([]byte, zw.level*blockSize)
|
||||
}
|
||||
zw.rle.Init(zw.buf)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (zw *Writer) Write(buf []byte) (int, error) {
|
||||
if zw.err != nil {
|
||||
return 0, zw.err
|
||||
}
|
||||
|
||||
cnt := len(buf)
|
||||
for {
|
||||
wrCnt, err := zw.rle.Write(buf)
|
||||
if err != rleDone && zw.err == nil {
|
||||
zw.err = err
|
||||
}
|
||||
zw.crc.update(buf[:wrCnt])
|
||||
buf = buf[wrCnt:]
|
||||
if len(buf) == 0 {
|
||||
zw.InputOffset += int64(cnt)
|
||||
return cnt, nil
|
||||
}
|
||||
if zw.err = zw.flush(); zw.err != nil {
|
||||
return 0, zw.err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// flush encodes the buffered RLE data as one complete block and writes it
// out, emitting the stream header first if it has not been written yet.
// It is a no-op when no data is buffered. Any error becomes sticky in zw.err.
func (zw *Writer) flush() error {
	vals := zw.rle.Bytes()
	if len(vals) == 0 {
		return nil
	}
	zw.wr.Offset = zw.OutputOffset
	func() {
		// Encoding errors are raised as panics (errors.Panic) and are
		// converted back into zw.err here.
		defer errors.Recover(&zw.err)
		if !zw.wrHdr {
			// Write stream header.
			zw.wr.WriteBitsBE64(hdrMagic, 16)
			zw.wr.WriteBitsBE64('h', 8)
			zw.wr.WriteBitsBE64(uint64('0'+zw.level), 8)
			zw.wrHdr = true
		}
		zw.encodeBlock(vals)
	}()
	var err error
	// Keep the first error encountered: Flush's error is only recorded if
	// no earlier error is already pending.
	if zw.OutputOffset, err = zw.wr.Flush(); zw.err == nil {
		zw.err = err
	}
	if zw.err != nil {
		zw.err = errWrap(zw.err, errors.Internal)
		return zw.err
	}
	// Fold this block's CRC into the running stream CRC
	// (rotate-left-by-1 then XOR).
	zw.endCRC = (zw.endCRC<<1 | zw.endCRC>>31) ^ zw.blkCRC
	zw.blkCRC = 0
	zw.rle.Init(zw.buf)
	return nil
}
|
||||
|
||||
// Close flushes any buffered data, writes the stream footer (end magic plus
// the combined stream CRC), and marks the Writer closed. Closing an
// already-closed Writer returns nil.
func (zw *Writer) Close() error {
	if zw.err == errClosed {
		return nil
	}

	// Flush RLE buffer if there is left-over data.
	if zw.err = zw.flush(); zw.err != nil {
		return zw.err
	}

	// Write stream footer.
	zw.wr.Offset = zw.OutputOffset
	func() {
		defer errors.Recover(&zw.err)
		if !zw.wrHdr {
			// Write stream header. Only reached here if no block was ever
			// flushed, i.e. the stream is empty.
			zw.wr.WriteBitsBE64(hdrMagic, 16)
			zw.wr.WriteBitsBE64('h', 8)
			zw.wr.WriteBitsBE64(uint64('0'+zw.level), 8)
			zw.wrHdr = true
		}
		zw.wr.WriteBitsBE64(endMagic, 48)
		zw.wr.WriteBitsBE64(uint64(zw.endCRC), 32)
		zw.wr.WritePads(0)
	}()
	var err error
	// Record Flush's error only if no earlier error is pending.
	if zw.OutputOffset, err = zw.wr.Flush(); zw.err == nil {
		zw.err = err
	}
	if zw.err != nil {
		zw.err = errWrap(zw.err, errors.Internal)
		return zw.err
	}

	// errClosed doubles as the closed-state marker checked above.
	zw.err = errClosed
	return nil
}
|
||||
|
||||
// encodeBlock writes one compressed block for buf: block magic, block CRC,
// then the pipeline stages — Burrows-Wheeler transform, dictionary bitmap,
// move-to-front, and prefix encoding.
func (zw *Writer) encodeBlock(buf []byte) {
	zw.blkCRC = zw.crc.val
	zw.wr.WriteBitsBE64(blkMagic, 48)
	zw.wr.WriteBitsBE64(uint64(zw.blkCRC), 32)
	// NOTE(review): a single always-zero flag bit — presumably the
	// deprecated "randomized" marker; confirm against the format docs.
	zw.wr.WriteBitsBE64(0, 1)
	zw.crc.val = 0

	// Step 1: Burrows-Wheeler transformation.
	ptr := zw.bwt.Encode(buf)
	zw.wr.WriteBitsBE64(uint64(ptr), 24)

	// Step 2: Move-to-front transform and run-length encoding.
	// Record which byte values actually occur in this block.
	var dictMap [256]bool
	for _, c := range buf {
		dictMap[c] = true
	}

	// Build the dense dictionary of used bytes plus a two-level bitmap:
	// bmapHi marks which 16-value groups are in use, bmapLo the values
	// within each group.
	var dictArr [256]uint8
	var bmapLo [16]uint16
	dict := dictArr[:0]
	bmapHi := uint16(0)
	for i, b := range dictMap {
		if b {
			c := uint8(i)
			dict = append(dict, c)
			bmapHi |= 1 << (c >> 4)
			bmapLo[c>>4] |= 1 << (c & 0xf)
		}
	}

	zw.wr.WriteBits(uint(bmapHi), 16)
	for _, m := range bmapLo {
		if m > 0 {
			zw.wr.WriteBits(uint(m), 16)
		}
	}

	zw.mtf.Init(dict, len(buf))
	syms := zw.mtf.Encode(buf)

	// Step 3: Prefix encoding.
	zw.encodePrefix(syms, len(dict))
}
|
||||
|
||||
// encodePrefix performs the final prefix-coding stage: it appends the EOB
// marker, chooses how many prefix trees to use, assigns a tree to each group
// of numBlockSyms symbols, derives code lengths from symbol frequencies, and
// writes the selectors, the code tables, and the symbols themselves.
func (zw *Writer) encodePrefix(syms []uint16, numSyms int) {
	numSyms += 2 // Remove 0 symbol, add RUNA, RUNB, and EOB symbols
	if numSyms < 3 {
		panicf(errors.Internal, "unable to encode EOB marker")
	}
	syms = append(syms, uint16(numSyms-1)) // EOB marker

	// Compute number of prefix trees needed.
	// Fewer trees for shorter symbol streams, up to maxNumTrees.
	numTrees := maxNumTrees
	for i, lim := range []int{200, 600, 1200, 2400} {
		if len(syms) < lim {
			numTrees = minNumTrees + i
			break
		}
	}

	// Compute number of block selectors (one per numBlockSyms symbols).
	numSels := (len(syms) + numBlockSyms - 1) / numBlockSyms
	if cap(zw.treeSels) < numSels {
		zw.treeSels = make([]uint8, numSels)
	}
	treeSels := zw.treeSels[:numSels]
	// Initial round-robin assignment of trees to symbol groups.
	for i := range treeSels {
		treeSels[i] = uint8(i % numTrees)
	}

	// Initialize prefix codes (one entry per symbol, counts start at zero).
	for i := range zw.codes2D[:numTrees] {
		pc := zw.codes2D[i][:numSyms]
		for j := range pc {
			pc[j] = prefix.PrefixCode{Sym: uint32(j)}
		}
		zw.codes1D[i] = pc
	}

	// First cut at assigning prefix trees to each group.
	// Accumulate symbol frequencies into each group's assigned tree.
	var codes prefix.PrefixCodes
	var blkLen, selIdx int
	for _, sym := range syms {
		if blkLen == 0 {
			blkLen = numBlockSyms
			codes = zw.codes2D[treeSels[selIdx]][:numSyms]
			selIdx++
		}
		blkLen--
		codes[sym].Cnt++
	}

	// TODO(dsnet): Use K-means to cluster groups to each prefix tree.

	// Generate lengths and prefixes based on symbol frequencies.
	for i := range zw.trees1D[:numTrees] {
		pc := prefix.PrefixCodes(zw.codes2D[i][:numSyms])
		pc.SortByCount()
		if err := prefix.GenerateLengths(pc, maxPrefixBits); err != nil {
			errors.Panic(err)
		}
		pc.SortBySymbol()
	}

	// Write out information about the trees and tree selectors.
	// The selectors are themselves move-to-front transformed first.
	var mtf internal.MoveToFront
	zw.wr.WriteBitsBE64(uint64(numTrees), 3)
	zw.wr.WriteBitsBE64(uint64(numSels), 15)
	zw.treeSelsMTF = append(zw.treeSelsMTF[:0], treeSels...)
	mtf.Encode(zw.treeSelsMTF)
	for _, sym := range zw.treeSelsMTF {
		zw.wr.WriteSymbol(uint(sym), &encSel)
	}
	zw.wr.WritePrefixCodes(zw.codes1D[:numTrees], zw.trees1D[:numTrees])

	// Write out prefix encoded symbols of compressed data.
	var tree *prefix.Encoder
	blkLen, selIdx = 0, 0
	for _, sym := range syms {
		if blkLen == 0 {
			// Switch to the tree selected for this group of symbols.
			blkLen = numBlockSyms
			tree = &zw.trees1D[treeSels[selIdx]]
			selIdx++
		}
		blkLen--
		// Fast path first; fall back to the general write on failure.
		ok := zw.wr.TryWriteSymbol(uint(sym), tree)
		if !ok {
			zw.wr.WriteSymbol(uint(sym), tree)
		}
	}
}
|
||||
10
vendor/github.com/dsnet/compress/go.mod
generated
vendored
Normal file
10
vendor/github.com/dsnet/compress/go.mod
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
module github.com/dsnet/compress
|
||||
|
||||
go 1.9
|
||||
|
||||
require (
|
||||
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780
|
||||
github.com/klauspost/compress v1.4.1
|
||||
github.com/klauspost/cpuid v1.2.0 // indirect
|
||||
github.com/ulikunitz/xz v0.5.6
|
||||
)
|
||||
8
vendor/github.com/dsnet/compress/go.sum
generated
vendored
Normal file
8
vendor/github.com/dsnet/compress/go.sum
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780 h1:tFh1tRc4CA31yP6qDcu+Trax5wW5GuMxvkIba07qVLY=
|
||||
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
|
||||
github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E=
|
||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=
|
||||
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
|
||||
107
vendor/github.com/dsnet/compress/internal/common.go
generated
vendored
Normal file
107
vendor/github.com/dsnet/compress/internal/common.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// Package internal is a collection of common compression algorithms.
|
||||
//
|
||||
// For performance reasons, these packages lack strong error checking and
|
||||
// require that the caller to ensure that strict invariants are kept.
|
||||
package internal
|
||||
|
||||
var (
	// IdentityLUT maps every byte value to itself.
	IdentityLUT = func() (lut [256]byte) {
		for i := range lut {
			lut[i] = byte(i)
		}
		return lut
	}()

	// ReverseLUT maps every byte value to its bit-reversed counterpart.
	ReverseLUT = func() (lut [256]byte) {
		for i := range lut {
			// Mirror the eight bits one at a time.
			var r uint8
			for bit := uint(0); bit < 8; bit++ {
				if i&(1<<bit) != 0 {
					r |= 0x80 >> bit
				}
			}
			lut[i] = r
		}
		return lut
	}()
)

// ReverseUint32 reverses all bits of v.
func ReverseUint32(v uint32) uint32 {
	// Reverse each byte via the LUT and swap the byte order.
	b0 := uint32(ReverseLUT[byte(v>>0)])
	b1 := uint32(ReverseLUT[byte(v>>8)])
	b2 := uint32(ReverseLUT[byte(v>>16)])
	b3 := uint32(ReverseLUT[byte(v>>24)])
	return b0<<24 | b1<<16 | b2<<8 | b3
}

// ReverseUint32N reverses the lower n bits of v.
func ReverseUint32N(v uint32, n uint) uint32 {
	// Shift the n interesting bits to the top, then reverse the full word.
	return ReverseUint32(v << (32 - n))
}

// ReverseUint64 reverses all bits of v.
func ReverseUint64(v uint64) (x uint64) {
	// Reverse one byte at a time, mirroring the byte position as we go.
	for sh := uint(0); sh < 64; sh += 8 {
		x |= uint64(ReverseLUT[byte(v>>sh)]) << (56 - sh)
	}
	return x
}

// ReverseUint64N reverses the lower n bits of v.
func ReverseUint64N(v uint64, n uint) uint64 {
	return ReverseUint64(v << (64 - n))
}
|
||||
|
||||
// MoveToFront is a data structure that allows for more efficient move-to-front
// transformations. This specific implementation assumes that the alphabet is
// densely packed within 0..255.
type MoveToFront struct {
	dict [256]uint8 // Mapping from indexes to values
	tail int        // Number of tail bytes that are already ordered
}

// Encode performs an in-place move-to-front transform on vals, replacing
// each value with its current index in the dynamic dictionary.
func (m *MoveToFront) Encode(vals []uint8) {
	copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity

	var max int
	for i, val := range vals {
		var idx uint8 // Reverse lookup idx in dict
		for di, dv := range m.dict {
			if dv == val {
				idx = uint8(di)
				break
			}
		}
		vals[i] = idx

		// OR-ing the indexes gives a cheap upper bound on the largest index
		// used, which bounds how much of dict must be re-initialized next.
		max |= int(idx)
		// Move the value to the front, shifting the prefix down by one.
		copy(m.dict[1:], m.dict[:idx])
		m.dict[0] = val
	}
	m.tail = 256 - max - 1
}

// Decode performs the inverse transform in place, replacing each index in
// idxs with the dictionary value it refers to.
func (m *MoveToFront) Decode(idxs []uint8) {
	copy(m.dict[:], IdentityLUT[:256-m.tail]) // Reset dict to be identity

	var max int
	for i, idx := range idxs {
		val := m.dict[idx] // Forward lookup val in dict
		idxs[i] = val

		// Same upper-bound trick as in Encode.
		max |= int(idx)
		copy(m.dict[1:], m.dict[:idx])
		m.dict[0] = val
	}
	m.tail = 256 - max - 1
}
|
||||
12
vendor/github.com/dsnet/compress/internal/debug.go
generated
vendored
Normal file
12
vendor/github.com/dsnet/compress/internal/debug.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build debug,!gofuzz
|
||||
|
||||
package internal
|
||||
|
||||
// Feature flags for the "debug,!gofuzz" build (see the build constraint
// above): extra sanity checks are enabled, fuzzing hooks are not.
const (
	Debug  = true
	GoFuzz = false
)
|
||||
120
vendor/github.com/dsnet/compress/internal/errors/errors.go
generated
vendored
Normal file
120
vendor/github.com/dsnet/compress/internal/errors/errors.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
|
||||
// Copyright 2016, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// Package errors implements functions to manipulate compression errors.
|
||||
//
|
||||
// In idiomatic Go, it is an anti-pattern to use panics as a form of error
|
||||
// reporting in the API. Instead, the expected way to transmit errors is by
|
||||
// returning an error value. Unfortunately, the checking of "err != nil" in
|
||||
// tight loops commonly found in compression causes non-negligible performance
|
||||
// degradation. While this may not be idiomatic, the internal packages of this
|
||||
// repository rely on panics as a normal means to convey errors. In order to
|
||||
// ensure that these panics do not leak across the public API, the public
|
||||
// packages must recover from these panics and present an error value.
|
||||
//
|
||||
// The Panic and Recover functions in this package provide a safe way to
|
||||
// recover from errors only generated from within this repository.
|
||||
//
|
||||
// Example usage:
|
||||
// func Foo() (err error) {
|
||||
// defer errors.Recover(&err)
|
||||
//
|
||||
// if rand.Intn(2) == 0 {
|
||||
// // Unexpected panics will not be caught by Recover.
|
||||
// io.Closer(nil).Close()
|
||||
// } else {
|
||||
// // Errors thrown by Panic will be caught by Recover.
|
||||
// errors.Panic(errors.New("whoopsie"))
|
||||
// }
|
||||
// }
|
||||
//
|
||||
package errors
|
||||
|
||||
import "strings"
|
||||
|
||||
const (
	// Unknown indicates that there is no classification for this error.
	Unknown = iota

	// Internal indicates that this error is due to an internal bug.
	// Users should file a issue report if this type of error is encountered.
	Internal

	// Invalid indicates that this error is due to the user misusing the API
	// and is indicative of a bug on the user's part.
	Invalid

	// Deprecated indicates the use of a deprecated and unsupported feature.
	Deprecated

	// Corrupted indicates that the input stream is corrupted.
	Corrupted

	// Closed indicates that the handlers are closed.
	Closed
)

// codeMap maps each classification to a short human-readable label used
// when formatting an Error.
var codeMap = map[int]string{
	Unknown:    "unknown error",
	Internal:   "internal error",
	Invalid:    "invalid argument",
	Deprecated: "deprecated format",
	Corrupted:  "corrupted input",
	Closed:     "closed handler",
}

// Error is the concrete error type used throughout this repository.
type Error struct {
	Code int    // The error type
	Pkg  string // Name of the package where the error originated
	Msg  string // Descriptive message about the error (optional)
}

// Error joins the non-empty parts (package, classification label, message)
// with ": " separators.
func (e Error) Error() string {
	var ss []string
	for _, s := range []string{e.Pkg, codeMap[e.Code], e.Msg} {
		if s != "" {
			ss = append(ss, s)
		}
	}
	return strings.Join(ss, ": ")
}

// CompressError is a marker method identifying errors from this repository.
func (e Error) CompressError()     {}
func (e Error) IsInternal() bool   { return e.Code == Internal }
func (e Error) IsInvalid() bool    { return e.Code == Invalid }
func (e Error) IsDeprecated() bool { return e.Code == Deprecated }
func (e Error) IsCorrupted() bool  { return e.Code == Corrupted }
func (e Error) IsClosed() bool     { return e.Code == Closed }

// The IsXxx helpers report whether err is an Error with the given code.
func IsInternal(err error) bool   { return isCode(err, Internal) }
func IsInvalid(err error) bool    { return isCode(err, Invalid) }
func IsDeprecated(err error) bool { return isCode(err, Deprecated) }
func IsCorrupted(err error) bool  { return isCode(err, Corrupted) }
func IsClosed(err error) bool     { return isCode(err, Closed) }

// isCode reports whether err is an Error carrying exactly this code.
// Note: this is a direct type assertion, not an errors.As-style unwrap.
func isCode(err error, code int) bool {
	if cerr, ok := err.(Error); ok && cerr.Code == code {
		return true
	}
	return false
}

// errWrap is used by Panic and Recover to ensure that only errors raised by
// Panic are recovered by Recover.
type errWrap struct{ e *error }

// Recover must be deferred. It stores an error raised via Panic into *err
// and re-raises any other panic unchanged.
func Recover(err *error) {
	switch ex := recover().(type) {
	case nil:
		// Do nothing.
	case errWrap:
		*err = *ex.e
	default:
		panic(ex)
	}
}

// Panic raises err wrapped so that only Recover will intercept it.
func Panic(err error) {
	panic(errWrap{&err})
}
|
||||
12
vendor/github.com/dsnet/compress/internal/gofuzz.go
generated
vendored
Normal file
12
vendor/github.com/dsnet/compress/internal/gofuzz.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2016, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build gofuzz
|
||||
|
||||
package internal
|
||||
|
||||
// Feature flags for the "gofuzz" build (see the build constraint above):
// both debug sanity checks and fuzzing hooks are enabled.
const (
	Debug  = true
	GoFuzz = true
)
|
||||
159
vendor/github.com/dsnet/compress/internal/prefix/debug.go
generated
vendored
Normal file
159
vendor/github.com/dsnet/compress/internal/prefix/debug.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build debug
|
||||
|
||||
package prefix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// max returns the larger of the two integers a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
||||
|
||||
// lenBase2 reports how many binary digits are needed to represent n
// (0 for n == 0).
func lenBase2(n uint) int {
	bits := math.Log2(float64(n + 1))
	return int(math.Ceil(bits))
}
|
||||
// padBase2 formats v as an n-digit binary string, left-padded with spaces
// to a total width of at least m characters.
func padBase2(v, n uint, m int) string {
	// Setting bit n produces exactly n digits after stripping the leading 1.
	digits := fmt.Sprintf("%b", 1<<n|v)[1:]
	if len(digits) >= m {
		return digits
	}
	return strings.Repeat(" ", m-len(digits)) + digits
}
|
||||
|
||||
// lenBase10 reports how many decimal digits are needed to represent n
// (0 for n == 0).
func lenBase10(n int) int {
	digits := math.Log10(float64(n + 1))
	return int(math.Ceil(digits))
}
|
||||
// padBase10 formats n in decimal, left-padded with spaces to a total width
// of at least m characters.
func padBase10(n, m int) string {
	s := fmt.Sprintf("%d", n)
	if len(s) >= m {
		return s
	}
	return strings.Repeat(" ", m-len(s)) + s
}
|
||||
|
||||
// String renders rc in a multi-line debug form, one entry per range code
// showing its length and value span.
func (rc RangeCodes) String() string {
	var maxLen, maxBase int
	for _, c := range rc {
		maxLen = max(maxLen, int(c.Len))
		maxBase = max(maxBase, int(c.Base))
	}

	var ss []string
	ss = append(ss, "{")
	for i, c := range rc {
		base := padBase10(int(c.Base), lenBase10(maxBase))
		if c.Len > 0 {
			base += fmt.Sprintf("-%d", c.End()-1)
		}
		ss = append(ss, fmt.Sprintf("\t%s: {len: %s, range: %s},",
			padBase10(int(i), lenBase10(len(rc)-1)),
			padBase10(int(c.Len), lenBase10(maxLen)),
			base,
		))
	}
	ss = append(ss, "}")
	return strings.Join(ss, "\n")
}

// String renders pc in a multi-line debug form. When counts are present,
// a '#' bar visualizes each symbol's frequency relative to the maximum.
func (pc PrefixCodes) String() string {
	var maxSym, maxLen, maxCnt int
	for _, c := range pc {
		maxSym = max(maxSym, int(c.Sym))
		maxLen = max(maxLen, int(c.Len))
		maxCnt = max(maxCnt, int(c.Cnt))
	}

	var ss []string
	ss = append(ss, "{")
	for _, c := range pc {
		var cntStr string
		if maxCnt > 0 {
			// Scale the bar to at most 32 '#' characters.
			cnt := int(32*float32(c.Cnt)/float32(maxCnt) + 0.5)
			cntStr = fmt.Sprintf("%s |%s",
				padBase10(int(c.Cnt), lenBase10(maxCnt)),
				strings.Repeat("#", cnt),
			)
		}
		ss = append(ss, fmt.Sprintf("\t%s: %s, %s",
			padBase10(int(c.Sym), lenBase10(maxSym)),
			padBase2(uint(c.Val), uint(c.Len), maxLen),
			cntStr,
		))
	}
	ss = append(ss, "}")
	return strings.Join(ss, "\n")
}

// String renders the decoder's chunk and link tables plus its scalar fields
// for debugging.
func (pd Decoder) String() string {
	var ss []string
	ss = append(ss, "{")
	if len(pd.chunks) > 0 {
		ss = append(ss, "\tchunks: {")
		for i, c := range pd.chunks {
			// A stored length greater than chunkBits means the entry is an
			// index into the links tables rather than a direct symbol.
			label := "sym"
			if uint(c&countMask) > uint(pd.chunkBits) {
				label = "idx"
			}
			ss = append(ss, fmt.Sprintf("\t\t%s: {%s: %s, len: %s}",
				padBase2(uint(i), uint(pd.chunkBits), int(pd.chunkBits)),
				label, padBase10(int(c>>countBits), 3),
				padBase10(int(c&countMask), 2),
			))
		}
		ss = append(ss, "\t},")

		for j, links := range pd.links {
			ss = append(ss, fmt.Sprintf("\tlinks[%d]: {", j))
			linkBits := lenBase2(uint(pd.linkMask))
			for i, c := range links {
				ss = append(ss, fmt.Sprintf("\t\t%s: {sym: %s, len: %s},",
					padBase2(uint(i), uint(linkBits), int(linkBits)),
					padBase10(int(c>>countBits), 3),
					padBase10(int(c&countMask), 2),
				))
			}
			ss = append(ss, "\t},")
		}
	}
	ss = append(ss, fmt.Sprintf("\tchunkMask: %b,", pd.chunkMask))
	ss = append(ss, fmt.Sprintf("\tlinkMask: %b,", pd.linkMask))
	ss = append(ss, fmt.Sprintf("\tchunkBits: %d,", pd.chunkBits))
	ss = append(ss, fmt.Sprintf("\tMinBits: %d,", pd.MinBits))
	ss = append(ss, fmt.Sprintf("\tNumSyms: %d,", pd.NumSyms))
	ss = append(ss, "}")
	return strings.Join(ss, "\n")
}

// String renders the encoder's chunk table plus its scalar fields for
// debugging.
func (pe Encoder) String() string {
	var maxLen int
	for _, c := range pe.chunks {
		maxLen = max(maxLen, int(c&countMask))
	}

	var ss []string
	ss = append(ss, "{")
	if len(pe.chunks) > 0 {
		ss = append(ss, "\tchunks: {")
		for i, c := range pe.chunks {
			ss = append(ss, fmt.Sprintf("\t\t%s: %s,",
				padBase10(i, 3),
				padBase2(uint(c>>countBits), uint(c&countMask), maxLen),
			))
		}
		ss = append(ss, "\t},")
	}
	ss = append(ss, fmt.Sprintf("\tchunkMask: %b,", pe.chunkMask))
	ss = append(ss, fmt.Sprintf("\tNumSyms: %d,", pe.NumSyms))
	ss = append(ss, "}")
	return strings.Join(ss, "\n")
}
|
||||
136
vendor/github.com/dsnet/compress/internal/prefix/decoder.go
generated
vendored
Normal file
136
vendor/github.com/dsnet/compress/internal/prefix/decoder.go
generated
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package prefix
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/dsnet/compress/internal"
|
||||
)
|
||||
|
||||
// The algorithm used to decode variable length codes is based on the lookup
|
||||
// method in zlib. If the code is less-than-or-equal to maxChunkBits,
|
||||
// then the symbol can be decoded using a single lookup into the chunks table.
|
||||
// Otherwise, the links table will be used for a second level lookup.
|
||||
//
|
||||
// The chunks slice is keyed by the contents of the bit buffer ANDed with
|
||||
// the chunkMask to avoid a out-of-bounds lookup. The value of chunks is a tuple
|
||||
// that is decoded as follow:
|
||||
//
|
||||
// var length = chunks[bitBuffer&chunkMask] & countMask
|
||||
// var symbol = chunks[bitBuffer&chunkMask] >> countBits
|
||||
//
|
||||
// If the decoded length is larger than chunkBits, then an overflow link table
|
||||
// must be used for further decoding. In this case, the symbol is actually the
|
||||
// index into the links tables. The second-level links table returned is
|
||||
// processed in the same way as the chunks table.
|
||||
//
|
||||
// if length > chunkBits {
|
||||
// var index = symbol // Previous symbol is index into links tables
|
||||
// length = links[index][bitBuffer>>chunkBits & linkMask] & countMask
|
||||
// symbol = links[index][bitBuffer>>chunkBits & linkMask] >> countBits
|
||||
// }
|
||||
//
|
||||
// See the following:
|
||||
// http://www.gzip.org/algorithm.txt
|
||||
|
||||
// Decoder decodes prefix codes using the two-level lookup scheme described
// in the comment block above.
type Decoder struct {
	chunks []uint32 // First-level lookup map
	links  [][]uint32 // Second-level lookup map

	chunkMask uint32 // Mask the length of the chunks table
	linkMask  uint32 // Mask the length of the link table
	chunkBits uint32 // Bit-length of the chunks table

	MinBits uint32 // The minimum number of bits to safely make progress
	NumSyms uint32 // Number of symbols
}

// Init initializes Decoder according to the codes provided.
// The codes must be sorted by symbol and form a complete, non-overlapping
// prefix tree (verified when internal.Debug is set).
func (pd *Decoder) Init(codes PrefixCodes) {
	// Handle special case trees.
	if len(codes) <= 1 {
		switch {
		case len(codes) == 0: // Empty tree (should error if used later)
			*pd = Decoder{chunks: pd.chunks[:0], links: pd.links[:0], NumSyms: 0}
		case len(codes) == 1 && codes[0].Len == 0: // Single code tree (bit-length of zero)
			pd.chunks = append(pd.chunks[:0], codes[0].Sym<<countBits|0)
			*pd = Decoder{chunks: pd.chunks[:1], links: pd.links[:0], NumSyms: 1}
		default:
			panic("invalid codes")
		}
		return
	}
	if internal.Debug && !sort.IsSorted(prefixCodesBySymbol(codes)) {
		panic("input codes is not sorted")
	}
	if internal.Debug && !(codes.checkLengths() && codes.checkPrefixes()) {
		panic("detected incomplete or overlapping codes")
	}

	// Find the shortest and longest code lengths.
	var minBits, maxBits uint32 = valueBits, 0
	for _, c := range codes {
		if minBits > c.Len {
			minBits = c.Len
		}
		if maxBits < c.Len {
			maxBits = c.Len
		}
	}

	// Allocate chunks table as needed.
	const maxChunkBits = 9 // This can be tuned for better performance
	pd.NumSyms = uint32(len(codes))
	pd.MinBits = minBits
	pd.chunkBits = maxBits
	if pd.chunkBits > maxChunkBits {
		pd.chunkBits = maxChunkBits
	}
	numChunks := 1 << pd.chunkBits
	pd.chunks = allocUint32s(pd.chunks, numChunks)
	pd.chunkMask = uint32(numChunks - 1)

	// Allocate links tables as needed.
	// Only required when some code is longer than the chunks table covers.
	pd.links = pd.links[:0]
	pd.linkMask = 0
	if pd.chunkBits < maxBits {
		numLinks := 1 << (maxBits - pd.chunkBits)
		pd.linkMask = uint32(numLinks - 1)

		// First pass: assign a link-table index to every chunk slot that
		// overflows, storing it where a symbol would otherwise go.
		var linkIdx uint32
		for i := range pd.chunks {
			pd.chunks[i] = 0 // Logic below relies on zero value as uninitialized
		}
		for _, c := range codes {
			if c.Len > pd.chunkBits && pd.chunks[c.Val&pd.chunkMask] == 0 {
				pd.chunks[c.Val&pd.chunkMask] = (linkIdx << countBits) | (pd.chunkBits + 1)
				linkIdx++
			}
		}

		// Back all link tables with one flat allocation.
		pd.links = extendSliceUint32s(pd.links, int(linkIdx))
		linksFlat := allocUint32s(pd.links[0], numLinks*int(linkIdx))
		for i, j := 0, 0; i < len(pd.links); i, j = i+1, j+numLinks {
			pd.links[i] = linksFlat[j : j+numLinks]
		}
	}

	// Fill out chunks and links tables with values.
	// Short codes repeat at a stride of 1<<Len so any suffix bits match.
	for _, c := range codes {
		chunk := c.Sym<<countBits | c.Len
		if c.Len <= pd.chunkBits {
			skip := 1 << uint(c.Len)
			for j := int(c.Val); j < len(pd.chunks); j += skip {
				pd.chunks[j] = chunk
			}
		} else {
			linkIdx := pd.chunks[c.Val&pd.chunkMask] >> countBits
			links := pd.links[linkIdx]
			skip := 1 << uint(c.Len-pd.chunkBits)
			for j := int(c.Val >> pd.chunkBits); j < len(links); j += skip {
				links[j] = chunk
			}
		}
	}
}
|
||||
66
vendor/github.com/dsnet/compress/internal/prefix/encoder.go
generated
vendored
Normal file
66
vendor/github.com/dsnet/compress/internal/prefix/encoder.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package prefix
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/dsnet/compress/internal"
|
||||
)
|
||||
|
||||
// Encoder maps symbols to their prefix codes via a single-level, power-of-two
// sized lookup table keyed by the masked symbol value.
type Encoder struct {
	chunks    []uint32 // First-level lookup map
	chunkMask uint32   // Mask the length of the chunks table

	NumSyms uint32 // Number of symbols
}

// Init initializes Encoder according to the codes provided.
// The codes must be sorted by symbol and form a complete, non-overlapping
// prefix tree (verified when internal.Debug is set).
func (pe *Encoder) Init(codes PrefixCodes) {
	// Handle special case trees.
	if len(codes) <= 1 {
		switch {
		case len(codes) == 0: // Empty tree (should error if used later)
			*pe = Encoder{chunks: pe.chunks[:0], NumSyms: 0}
		case len(codes) == 1 && codes[0].Len == 0: // Single code tree (bit-length of zero)
			pe.chunks = append(pe.chunks[:0], codes[0].Val<<countBits|0)
			*pe = Encoder{chunks: pe.chunks[:1], NumSyms: 1}
		default:
			panic("invalid codes")
		}
		return
	}
	if internal.Debug && !sort.IsSorted(prefixCodesBySymbol(codes)) {
		panic("input codes is not sorted")
	}
	if internal.Debug && !(codes.checkLengths() && codes.checkPrefixes()) {
		panic("detected incomplete or overlapping codes")
	}

	// Enough chunks to contain all the symbols.
	// Round the table size up to the next power of two.
	numChunks := 1
	for n := len(codes) - 1; n > 0; n >>= 1 {
		numChunks <<= 1
	}
	pe.NumSyms = uint32(len(codes))

retry:
	// Allocate and reset chunks.
	pe.chunks = allocUint32s(pe.chunks, numChunks)
	pe.chunkMask = uint32(numChunks - 1)
	for i := range pe.chunks {
		pe.chunks[i] = 0 // Logic below relies on zero value as uninitialized
	}

	// Insert each symbol, checking that there are no conflicts.
	// The masked symbol acts as a hash; on collision the table doubles.
	for _, c := range codes {
		if pe.chunks[c.Sym&pe.chunkMask] > 0 {
			// Collision found our "hash" table, so grow and try again.
			numChunks <<= 1
			goto retry
		}
		pe.chunks[c.Sym&pe.chunkMask] = c.Val<<countBits | c.Len
	}
}
|
||||
400
vendor/github.com/dsnet/compress/internal/prefix/prefix.go
generated
vendored
Normal file
400
vendor/github.com/dsnet/compress/internal/prefix/prefix.go
generated
vendored
Normal file
@@ -0,0 +1,400 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// Package prefix implements bit readers and writers that use prefix encoding.
|
||||
package prefix
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/dsnet/compress/internal"
|
||||
"github.com/dsnet/compress/internal/errors"
|
||||
)
|
||||
|
||||
// errorf constructs an errors.Error tagged with this package's name.
func errorf(c int, f string, a ...interface{}) error {
	return errors.Error{Code: c, Pkg: "prefix", Msg: fmt.Sprintf(f, a...)}
}

// panicf raises the formatted error via errors.Panic, to be caught by a
// deferred errors.Recover at the public API boundary.
func panicf(c int, f string, a ...interface{}) {
	errors.Panic(errorf(c, f, a...))
}

// Bit layout of packed table entries: the low countBits hold a code's
// bit-length and the remaining bits hold its value or symbol.
const (
	countBits = 5  // Number of bits to store the bit-length of the code
	valueBits = 27 // Number of bits to store the code value

	countMask = (1 << countBits) - 1
)
|
||||
|
||||
// PrefixCode is a representation of a prefix code, which is conceptually a
// mapping from some arbitrary symbol to some bit-string.
//
// The Sym and Cnt fields are typically provided by the user,
// while the Len and Val fields are generated by this package.
type PrefixCode struct {
	Sym uint32 // The symbol being mapped
	Cnt uint32 // The number times this symbol is used
	Len uint32 // Bit-length of the prefix code
	Val uint32 // Value of the prefix code (must be in 0..(1<<Len)-1)
}

// PrefixCodes is a list of prefix codes.
type PrefixCodes []PrefixCode

// prefixCodesBySymbol implements sort.Interface, ordering by Sym.
type prefixCodesBySymbol []PrefixCode

func (c prefixCodesBySymbol) Len() int           { return len(c) }
func (c prefixCodesBySymbol) Less(i, j int) bool { return c[i].Sym < c[j].Sym }
func (c prefixCodesBySymbol) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }

// prefixCodesByCount implements sort.Interface, ordering by Cnt and
// breaking ties by Sym.
type prefixCodesByCount []PrefixCode

func (c prefixCodesByCount) Len() int { return len(c) }
func (c prefixCodesByCount) Less(i, j int) bool {
	return c[i].Cnt < c[j].Cnt || (c[i].Cnt == c[j].Cnt && c[i].Sym < c[j].Sym)
}
func (c prefixCodesByCount) Swap(i, j int) { c[i], c[j] = c[j], c[i] }

// SortBySymbol sorts the codes in place in ascending order of Sym.
func (pc PrefixCodes) SortBySymbol() { sort.Sort(prefixCodesBySymbol(pc)) }

// SortByCount sorts the codes in place in ascending order of Cnt.
func (pc PrefixCodes) SortByCount() { sort.Sort(prefixCodesByCount(pc)) }
|
||||
|
||||
// Length computes the total bit-length using the Len and Cnt fields.
|
||||
func (pc PrefixCodes) Length() (nb uint) {
|
||||
for _, c := range pc {
|
||||
nb += uint(c.Len * c.Cnt)
|
||||
}
|
||||
return nb
|
||||
}
|
||||
|
||||
// checkLengths reports whether the codes form a complete prefix tree.
// It verifies the Kraft inequality holds with equality: the code-space
// fractions 2^-Len must sum to exactly 1 (computed in fixed point scaled
// by 1<<valueBits to avoid floating point).
func (pc PrefixCodes) checkLengths() bool {
	sum := 1 << valueBits
	for _, c := range pc {
		sum -= (1 << valueBits) >> uint(c.Len)
	}
	// An empty code set is trivially valid.
	return sum == 0 || len(pc) == 0
}
|
||||
|
||||
// checkPrefixes reports whether all codes have non-overlapping prefixes.
|
||||
func (pc PrefixCodes) checkPrefixes() bool {
|
||||
for i, c1 := range pc {
|
||||
for j, c2 := range pc {
|
||||
mask := uint32(1)<<c1.Len - 1
|
||||
if i != j && c1.Len <= c2.Len && c1.Val&mask == c2.Val&mask {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// checkCanonical reports whether all codes are canonical.
// That is, they have the following properties:
//
//	1. All codes of a given bit-length are consecutive values.
//	2. Shorter codes lexicographically precede longer codes.
//
// The codes must have unique symbols and be sorted by the symbol.
// The Len and Val fields in each code must be populated.
func (pc PrefixCodes) checkCanonical() bool {
	// Rule 1.
	var vals [valueBits + 1]PrefixCode
	for _, c := range pc {
		if c.Len > 0 {
			// Val is stored bit-reversed; undo that so consecutive codes
			// of the same length compare as consecutive integers.
			c.Val = internal.ReverseUint32N(c.Val, uint(c.Len))
			if vals[c.Len].Cnt > 0 && vals[c.Len].Val+1 != c.Val {
				return false
			}
			vals[c.Len].Val = c.Val
			vals[c.Len].Cnt++
		}
	}

	// Rule 2.
	var last PrefixCode
	for _, v := range vals {
		if v.Cnt > 0 {
			// First code value used at this bit-length.
			curVal := v.Val - v.Cnt + 1
			if last.Cnt != 0 && last.Val >= curVal {
				return false
			}
			last = v
		}
	}
	return true
}
|
||||
|
||||
// GenerateLengths assigns non-zero bit-lengths to all codes. Codes with high
// frequency counts will be assigned shorter codes to reduce bit entropy.
// This function is used primarily by compressors.
//
// The input codes must have the Cnt field populated, be sorted by count.
// Even if a code has a count of 0, a non-zero bit-length will be assigned.
//
// The result will have the Len field populated. The algorithm used guarantees
// that Len <= maxBits and that it is a complete prefix tree. The resulting
// codes will remain sorted by count.
func GenerateLengths(codes PrefixCodes, maxBits uint) error {
	// Degenerate cases: zero or one symbol needs no real prefix code.
	if len(codes) <= 1 {
		if len(codes) == 1 {
			codes[0].Len = 0
		}
		return nil
	}

	// Verify that the codes are in ascending order by count.
	cntLast := codes[0].Cnt
	for _, c := range codes[1:] {
		if c.Cnt < cntLast {
			return errorf(errors.Invalid, "non-monotonically increasing symbol counts")
		}
		cntLast = c.Cnt
	}

	// Construct a Huffman tree used to generate the bit-lengths.
	//
	// The Huffman tree is a binary tree where each symbol lies as a leaf node
	// on this tree. The length of the prefix code to assign is the depth of
	// that leaf from the root. The Huffman algorithm, which runs in O(n),
	// is used to generate the tree. It assumes that codes are sorted in
	// increasing order of frequency.
	//
	// The algorithm is as follows:
	//	1. Start with two queues, F and Q, where F contains all of the starting
	//	symbols sorted such that symbols with lowest counts come first.
	//	2. While len(F)+len(Q) > 1:
	//		2a. Dequeue the node from F or Q that has the lowest weight as N0.
	//		2b. Dequeue the node from F or Q that has the lowest weight as N1.
	//		2c. Create a new node N that has N0 and N1 as its children.
	//		2d. Enqueue N into the back of Q.
	//	3. The tree's root node is Q[0].
	type node struct {
		cnt uint32

		// n0 or c0 represent the left child of this node.
		// Since Go does not have unions, only one of these will be set.
		// Similarly, n1 or c1 represent the right child of this node.
		//
		// If n0 or n1 is set, then it represents a "pointer" to another
		// node in the Huffman tree. Since Go's pointer analysis cannot reason
		// that these node pointers do not escape (golang.org/issue/13493),
		// we use an index to a node in the nodes slice as a pseudo-pointer.
		//
		// If c0 or c1 is set, then it represents a leaf "node" in the
		// Huffman tree. The leaves are the PrefixCode values themselves.
		n0, n1 int // Index to child nodes
		c0, c1 *PrefixCode
	}
	var nodeIdx int
	var nodeArr [1024]node // Large enough to handle most cases on the stack
	nodes := nodeArr[:]
	if len(nodes) < len(codes) {
		nodes = make([]node, len(codes)) // Number of internal nodes < number of leaves
	}
	freqs, queue := codes, nodes[:0]
	for len(freqs)+len(queue) > 1 {
		// These are the two smallest nodes at the front of freqs and queue.
		var n node
		if len(queue) == 0 || (len(freqs) > 0 && freqs[0].Cnt <= queue[0].cnt) {
			n.c0, freqs = &freqs[0], freqs[1:]
			n.cnt += n.c0.Cnt
		} else {
			n.cnt += queue[0].cnt
			n.n0 = nodeIdx // nodeIdx is same as &queue[0] - &nodes[0]
			nodeIdx++
			queue = queue[1:]
		}
		if len(queue) == 0 || (len(freqs) > 0 && freqs[0].Cnt <= queue[0].cnt) {
			n.c1, freqs = &freqs[0], freqs[1:]
			n.cnt += n.c1.Cnt
		} else {
			n.cnt += queue[0].cnt
			n.n1 = nodeIdx // nodeIdx is same as &queue[0] - &nodes[0]
			nodeIdx++
			queue = queue[1:]
		}
		queue = append(queue, n)
	}
	rootIdx := nodeIdx

	// Search the whole binary tree, noting when we hit each leaf node.
	// We do not care about the exact Huffman tree structure, but rather we only
	// care about depth of each of the leaf nodes. That is, the depth determines
	// how long each symbol is in bits.
	//
	// Since the number of leaves is n, there is at most n internal nodes.
	// Thus, this algorithm runs in O(n).
	var fixBits bool
	var explore func(int, uint)
	explore = func(rootIdx int, level uint) {
		root := &nodes[rootIdx]

		// Explore left branch.
		if root.c0 == nil {
			explore(root.n0, level+1)
		} else {
			fixBits = fixBits || (level > maxBits)
			root.c0.Len = uint32(level)
		}

		// Explore right branch.
		if root.c1 == nil {
			explore(root.n1, level+1)
		} else {
			fixBits = fixBits || (level > maxBits)
			root.c1.Len = uint32(level)
		}
	}
	explore(rootIdx, 1)

	// Fix the bit-lengths if we violate the maxBits requirement.
	if fixBits {
		// Create histogram for number of symbols with each bit-length.
		var symBitsArr [valueBits + 1]uint32
		symBits := symBitsArr[:] // symBits[nb] indicates number of symbols using nb bits
		for _, c := range codes {
			for int(c.Len) >= len(symBits) {
				symBits = append(symBits, 0)
			}
			symBits[c.Len]++
		}

		// Fudge the tree such that the largest bit-length is <= maxBits.
		// This is accomplish by effectively doing a tree rotation. That is, we
		// increase the bit-length of some higher frequency code, so that the
		// bit-lengths of lower frequency codes can be decreased.
		//
		// Visually, this looks like the following transform:
		//
		//	Level     Before       After
		//	__          ___
		//	           /   \
		//	n-1      X     /   \     /\      /\
		//	n      X      /\    X  X  X  X
		//	n+1      X  X
		//
		var treeRotate func(uint)
		treeRotate = func(nb uint) {
			if symBits[nb-1] == 0 {
				treeRotate(nb - 1)
			}
			symBits[nb-1] -= 1 // Push this node to the level below
			symBits[nb] += 3   // This level gets one node from above, two from below
			symBits[nb+1] -= 2 // Push two nodes to the level above
		}
		for i := uint(len(symBits)) - 1; i > maxBits; i-- {
			for symBits[i] > 0 {
				treeRotate(i - 1)
			}
		}

		// Assign bit-lengths to each code. Since codes is sorted in increasing
		// order of frequency, that means that the most frequently used symbols
		// should have the shortest bit-lengths. Thus, we copy symbols to codes
		// from the back of codes first.
		cs := codes
		for nb, cnt := range symBits {
			if cnt > 0 {
				pos := len(cs) - int(cnt)
				cs2 := cs[pos:]
				for i := range cs2 {
					cs2[i].Len = uint32(nb)
				}
				cs = cs[:pos]
			}
		}
		if len(cs) != 0 {
			panic("not all codes were used up")
		}
	}

	if internal.Debug && !codes.checkLengths() {
		panic("incomplete prefix tree detected")
	}
	return nil
}
|
||||
|
||||
// GeneratePrefixes assigns a prefix value to all codes according to the
// bit-lengths. This function is used by both compressors and decompressors.
//
// The input codes must have the Sym and Len fields populated and be
// sorted by symbol. The bit-lengths of each code must be properly allocated,
// such that it forms a complete tree.
//
// The result will have the Val field populated and will produce a canonical
// prefix tree. The resulting codes will remain sorted by symbol.
func GeneratePrefixes(codes PrefixCodes) error {
	// Degenerate cases: a single symbol must use a zero-length code.
	if len(codes) <= 1 {
		if len(codes) == 1 {
			if codes[0].Len != 0 {
				return errorf(errors.Invalid, "degenerate prefix tree with one node")
			}
			codes[0].Val = 0
		}
		return nil
	}

	// Compute basic statistics on the symbols.
	var bitCnts [valueBits + 1]uint
	c0 := codes[0]
	bitCnts[c0.Len]++
	minBits, maxBits, symLast := c0.Len, c0.Len, c0.Sym
	for _, c := range codes[1:] {
		if c.Sym <= symLast {
			return errorf(errors.Invalid, "non-unique or non-monotonically increasing symbols")
		}
		if minBits > c.Len {
			minBits = c.Len
		}
		if maxBits < c.Len {
			maxBits = c.Len
		}
		bitCnts[c.Len]++ // Histogram of bit counts
		symLast = c.Sym  // Keep track of last symbol
	}
	if minBits == 0 {
		return errorf(errors.Invalid, "invalid prefix bit-length")
	}

	// Compute the next code for a symbol of a given bit length.
	var nextCodes [valueBits + 1]uint
	var code uint
	for i := minBits; i <= maxBits; i++ {
		code <<= 1
		nextCodes[i] = code
		code += bitCnts[i]
	}
	// A complete tree consumes the code space exactly.
	if code != 1<<maxBits {
		return errorf(errors.Invalid, "degenerate prefix tree")
	}

	// Assign the code to each symbol. Values are stored bit-reversed so the
	// decoder can mask off the low-order bits directly.
	for i, c := range codes {
		codes[i].Val = internal.ReverseUint32N(uint32(nextCodes[c.Len]), uint(c.Len))
		nextCodes[c.Len]++
	}

	if internal.Debug && !codes.checkPrefixes() {
		panic("overlapping prefixes detected")
	}
	if internal.Debug && !codes.checkCanonical() {
		panic("non-canonical prefixes detected")
	}
	return nil
}
|
||||
|
||||
// allocUint32s returns a []uint32 of length n, reusing s's backing array when
// its capacity suffices; otherwise it allocates fresh storage with 50% slack.
func allocUint32s(s []uint32, n int) []uint32 {
	if cap(s) < n {
		return make([]uint32, n, n*3/2)
	}
	return s[:n]
}
|
||||
|
||||
// extendSliceUint32s returns a [][]uint32 of length n, keeping s's backing
// array when possible. On growth it copies every existing entry (up to cap)
// into a new array with 50% slack so previously allocated rows are retained.
func extendSliceUint32s(s [][]uint32, n int) [][]uint32 {
	if n <= cap(s) {
		return s[:n]
	}
	grown := make([][]uint32, n, n*3/2)
	copy(grown, s[:cap(s)])
	return grown
}
|
||||
93
vendor/github.com/dsnet/compress/internal/prefix/range.go
generated
vendored
Normal file
93
vendor/github.com/dsnet/compress/internal/prefix/range.go
generated
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package prefix
|
||||
|
||||
// RangeCode describes one contiguous integer range: values in
// [Base, Base+1<<Len) are encoded as this range's symbol plus Len extra bits.
type RangeCode struct {
	Base uint32 // Starting base offset of the range
	Len  uint32 // Bit-length of a subsequent integer to add to base offset
}

// RangeCodes is an ordered list of ranges covering a span of integers.
type RangeCodes []RangeCode

// RangeEncoder maps an offset to the symbol of the range containing it.
type RangeEncoder struct {
	rcs     RangeCodes
	lut     [1024]uint32 // Direct offset->symbol lookup for small offsets
	minBase uint
}

// End reports the non-inclusive ending range.
func (rc RangeCode) End() uint32 { return rc.Base + (1 << rc.Len) }
|
||||
|
||||
// MakeRangeCodes creates a RangeCodes, where each region is assumed to be
|
||||
// contiguously stacked, without any gaps, with bit-lengths taken from bits.
|
||||
func MakeRangeCodes(minBase uint, bits []uint) (rc RangeCodes) {
|
||||
for _, nb := range bits {
|
||||
rc = append(rc, RangeCode{Base: uint32(minBase), Len: uint32(nb)})
|
||||
minBase += 1 << nb
|
||||
}
|
||||
return rc
|
||||
}
|
||||
|
||||
// Base reports the inclusive starting range for all ranges.
func (rcs RangeCodes) Base() uint32 { return rcs[0].Base }

// End reports the non-inclusive ending range for all ranges.
func (rcs RangeCodes) End() uint32 { return rcs[len(rcs)-1].End() }

// checkValid reports whether the RangeCodes is valid. In order to be valid,
// the following must hold true:
//	rcs[i-1].Base <= rcs[i].Base
//	rcs[i-1].End  <= rcs[i].End
//	rcs[i-1].End  >= rcs[i].Base
//
// Practically speaking, each range must be increasing and must not have any
// gaps in between. It is okay for ranges to overlap.
func (rcs RangeCodes) checkValid() bool {
	if len(rcs) == 0 {
		return false
	}
	// Walk adjacent pairs, verifying monotonic bases/ends with no gaps.
	pre := rcs[0]
	for _, cur := range rcs[1:] {
		preBase, preEnd := pre.Base, pre.End()
		curBase, curEnd := cur.Base, cur.End()
		if preBase > curBase || preEnd > curEnd || preEnd < curBase {
			return false
		}
		pre = cur
	}
	return true
}
|
||||
|
||||
// Init initializes the encoder with the given set of ranges, pre-computing a
// direct lookup table for the first len(re.lut) offsets above the minimum base.
// It panics if the ranges are not contiguous and increasing.
func (re *RangeEncoder) Init(rcs RangeCodes) {
	if !rcs.checkValid() {
		panic("invalid range codes")
	}
	*re = RangeEncoder{rcs: rcs, minBase: uint(rcs.Base())}
	for sym, rc := range rcs {
		base := int(rc.Base) - int(re.minBase)
		end := int(rc.End()) - int(re.minBase)
		if base >= len(re.lut) {
			break
		}
		if end > len(re.lut) {
			end = len(re.lut)
		}
		// Later (overlapping) ranges overwrite earlier entries, so the LUT
		// records the last symbol whose range covers each offset.
		for i := base; i < end; i++ {
			re.lut[i] = uint32(sym)
		}
	}
}

// Encode returns the symbol of the range containing offset. Offsets beyond
// the lookup table fall back to a linear scan starting from the last LUT entry.
func (re *RangeEncoder) Encode(offset uint) (sym uint) {
	if idx := int(offset - re.minBase); idx < len(re.lut) {
		return uint(re.lut[idx])
	}
	sym = uint(re.lut[len(re.lut)-1])
retry:
	if int(sym) >= len(re.rcs) || re.rcs[sym].Base > uint32(offset) {
		return sym - 1
	}
	sym++
	goto retry // Avoid for-loop so that this function can be inlined
}
|
||||
335
vendor/github.com/dsnet/compress/internal/prefix/reader.go
generated
vendored
Normal file
335
vendor/github.com/dsnet/compress/internal/prefix/reader.go
generated
vendored
Normal file
@@ -0,0 +1,335 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package prefix
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/dsnet/compress"
|
||||
"github.com/dsnet/compress/internal"
|
||||
"github.com/dsnet/compress/internal/errors"
|
||||
)
|
||||
|
||||
// Reader implements a prefix decoder. If the input io.Reader satisfies the
// compress.ByteReader or compress.BufferedReader interface, then it also
// guarantees that it will never read more bytes than is necessary.
//
// For high performance, provide an io.Reader that satisfies the
// compress.BufferedReader interface. If the input does not satisfy either
// compress.ByteReader or compress.BufferedReader, then it will be internally
// wrapped with a bufio.Reader.
type Reader struct {
	Offset int64 // Number of bytes read from the underlying io.Reader

	rd     io.Reader
	byteRd compress.ByteReader     // Set if rd is a ByteReader
	bufRd  compress.BufferedReader // Set if rd is a BufferedReader

	bufBits   uint64 // Buffer to hold some bits
	numBits   uint   // Number of valid bits in bufBits
	bigEndian bool   // Do we treat input bytes as big endian?

	// These fields are only used if rd is a compress.BufferedReader.
	bufPeek     []byte // Buffer for the Peek data
	discardBits int    // Number of bits to discard from reader
	fedBits     uint   // Number of bits fed in last call to PullBits

	// These fields are used to reduce allocations.
	bb *buffer
	br *bytesReader
	sr *stringReader
	bu *bufio.Reader
}

// Init initializes the bit Reader to read from r. If bigEndian is true, then
// bits will be read starting from the most-significant bits of a byte
// (as done in bzip2), otherwise it will read starting from the
// least-significant bits of a byte (such as for deflate and brotli).
func (pr *Reader) Init(r io.Reader, bigEndian bool) {
	// Reset all state, but carry over any previously allocated wrappers so
	// repeated Init calls do not reallocate them.
	*pr = Reader{
		rd:        r,
		bigEndian: bigEndian,

		bb: pr.bb,
		br: pr.br,
		sr: pr.sr,
		bu: pr.bu,
	}
	// Pick the fastest access path the concrete reader supports.
	switch rr := r.(type) {
	case *bytes.Buffer:
		if pr.bb == nil {
			pr.bb = new(buffer)
		}
		*pr.bb = buffer{Buffer: rr}
		pr.bufRd = pr.bb
	case *bytes.Reader:
		if pr.br == nil {
			pr.br = new(bytesReader)
		}
		*pr.br = bytesReader{Reader: rr}
		pr.bufRd = pr.br
	case *strings.Reader:
		if pr.sr == nil {
			pr.sr = new(stringReader)
		}
		*pr.sr = stringReader{Reader: rr}
		pr.bufRd = pr.sr
	case compress.BufferedReader:
		pr.bufRd = rr
	case compress.ByteReader:
		pr.byteRd = rr
	default:
		// Fall back to wrapping with a reusable bufio.Reader.
		if pr.bu == nil {
			pr.bu = bufio.NewReader(nil)
		}
		pr.bu.Reset(r)
		pr.rd, pr.bufRd = pr.bu, pr.bu
	}
}
|
||||
|
||||
// BitsRead reports the total number of bits emitted from any Read method.
func (pr *Reader) BitsRead() int64 {
	offset := 8*pr.Offset - int64(pr.numBits)
	if pr.bufRd != nil {
		// In buffered mode, Offset lags behind until Flush; account for the
		// bits consumed since the last Discard.
		discardBits := pr.discardBits + int(pr.fedBits-pr.numBits)
		offset = 8*pr.Offset + int64(discardBits)
	}
	return offset
}

// IsBufferedReader reports whether the underlying io.Reader is also a
// compress.BufferedReader.
func (pr *Reader) IsBufferedReader() bool {
	return pr.bufRd != nil
}

// ReadPads reads 0-7 bits from the bit buffer to achieve byte-alignment.
func (pr *Reader) ReadPads() uint {
	nb := pr.numBits % 8
	val := uint(pr.bufBits & uint64(1<<nb-1))
	pr.bufBits >>= nb
	pr.numBits -= nb
	return val
}

// Read reads bytes into buf.
// The bit-ordering mode does not affect this method.
func (pr *Reader) Read(buf []byte) (cnt int, err error) {
	if pr.numBits > 0 {
		if pr.numBits%8 != 0 {
			return 0, errorf(errors.Invalid, "non-aligned bit buffer")
		}
		// Drain whole bytes from the bit buffer before touching rd.
		for cnt = 0; len(buf) > cnt && pr.numBits > 0; cnt++ {
			if pr.bigEndian {
				buf[cnt] = internal.ReverseLUT[byte(pr.bufBits)]
			} else {
				buf[cnt] = byte(pr.bufBits)
			}
			pr.bufBits >>= 8
			pr.numBits -= 8
		}
		return cnt, nil
	}
	// Synchronize the underlying reader's offset before a raw read.
	if _, err := pr.Flush(); err != nil {
		return 0, err
	}
	cnt, err = pr.rd.Read(buf)
	pr.Offset += int64(cnt)
	return cnt, err
}

// ReadOffset reads an offset value using the provided RangeCodes indexed by
// the symbol read.
func (pr *Reader) ReadOffset(pd *Decoder, rcs RangeCodes) uint {
	rc := rcs[pr.ReadSymbol(pd)]
	return uint(rc.Base) + pr.ReadBits(uint(rc.Len))
}
|
||||
|
||||
// TryReadBits attempts to read nb bits using the contents of the bit buffer
// alone. It returns the value and whether it succeeded.
//
// This method is designed to be inlined for performance reasons.
func (pr *Reader) TryReadBits(nb uint) (uint, bool) {
	if pr.numBits < nb {
		return 0, false
	}
	val := uint(pr.bufBits & uint64(1<<nb-1))
	pr.bufBits >>= nb
	pr.numBits -= nb
	return val, true
}

// ReadBits reads nb bits in from the underlying reader.
// It panics (via errors.Panic) on I/O failure.
func (pr *Reader) ReadBits(nb uint) uint {
	if err := pr.PullBits(nb); err != nil {
		errors.Panic(err)
	}
	val := uint(pr.bufBits & uint64(1<<nb-1))
	pr.bufBits >>= nb
	pr.numBits -= nb
	return val
}

// TryReadSymbol attempts to decode the next symbol using the contents of the
// bit buffer alone. It returns the decoded symbol and whether it succeeded.
//
// This method is designed to be inlined for performance reasons.
func (pr *Reader) TryReadSymbol(pd *Decoder) (uint, bool) {
	if pr.numBits < uint(pd.MinBits) || len(pd.chunks) == 0 {
		return 0, false
	}
	chunk := pd.chunks[uint32(pr.bufBits)&pd.chunkMask]
	nb := uint(chunk & countMask)
	// nb > chunkBits means a second-level table lookup is needed; punt to
	// the slow path in ReadSymbol.
	if nb > pr.numBits || nb > uint(pd.chunkBits) {
		return 0, false
	}
	pr.bufBits >>= nb
	pr.numBits -= nb
	return uint(chunk >> countBits), true
}

// ReadSymbol reads the next symbol using the provided prefix Decoder.
func (pr *Reader) ReadSymbol(pd *Decoder) uint {
	if len(pd.chunks) == 0 {
		panicf(errors.Invalid, "decode with empty prefix tree")
	}

	nb := uint(pd.MinBits)
	for {
		if err := pr.PullBits(nb); err != nil {
			errors.Panic(err)
		}
		chunk := pd.chunks[uint32(pr.bufBits)&pd.chunkMask]
		nb = uint(chunk & countMask)
		if nb > uint(pd.chunkBits) {
			// The code is longer than the first-level table; follow the
			// link into the appropriate second-level table.
			linkIdx := chunk >> countBits
			chunk = pd.links[linkIdx][uint32(pr.bufBits>>pd.chunkBits)&pd.linkMask]
			nb = uint(chunk & countMask)
		}
		if nb <= pr.numBits {
			pr.bufBits >>= nb
			pr.numBits -= nb
			return uint(chunk >> countBits)
		}
		// Not enough buffered bits yet; loop to pull nb bits and retry.
	}
}
|
||||
|
||||
// Flush updates the read offset of the underlying ByteReader.
// If reader is a compress.BufferedReader, then this calls Discard to update
// the read offset.
func (pr *Reader) Flush() (int64, error) {
	if pr.bufRd == nil {
		return pr.Offset, nil
	}

	// Update the number of total bits to discard.
	pr.discardBits += int(pr.fedBits - pr.numBits)
	pr.fedBits = pr.numBits

	// Discard some bytes to update read offset.
	var err error
	nd := (pr.discardBits + 7) / 8 // Round up to nearest byte
	nd, err = pr.bufRd.Discard(nd)
	pr.discardBits -= nd * 8 // -7..0
	pr.Offset += int64(nd)

	// These are invalid after Discard.
	pr.bufPeek = nil
	return pr.Offset, err
}

// PullBits ensures that at least nb bits exist in the bit buffer.
// If the underlying reader is a compress.BufferedReader, then this will fill
// the bit buffer with as many bits as possible, relying on Peek and Discard to
// properly advance the read offset. Otherwise, it will use ReadByte to fill the
// buffer with just the right number of bits.
func (pr *Reader) PullBits(nb uint) error {
	if pr.bufRd != nil {
		pr.discardBits += int(pr.fedBits - pr.numBits)
		for {
			if len(pr.bufPeek) == 0 {
				pr.fedBits = pr.numBits // Don't discard bits just added
				if _, err := pr.Flush(); err != nil {
					return err
				}

				// Peek no more bytes than necessary.
				// The computation for cntPeek computes the minimum number of
				// bytes to Peek to fill nb bits.
				var err error
				cntPeek := int(nb+(-nb&7)) / 8
				if cntPeek < pr.bufRd.Buffered() {
					cntPeek = pr.bufRd.Buffered()
				}
				pr.bufPeek, err = pr.bufRd.Peek(cntPeek)
				pr.bufPeek = pr.bufPeek[int(pr.numBits/8):] // Skip buffered bits
				if len(pr.bufPeek) == 0 {
					if pr.numBits >= nb {
						break
					}
					if err == io.EOF {
						err = io.ErrUnexpectedEOF
					}
					return err
				}
			}

			n := int(64-pr.numBits) / 8 // Number of bytes to copy to bit buffer
			if len(pr.bufPeek) >= 8 {
				// Starting with Go 1.7, the compiler should use a wide integer
				// load here if the architecture supports it.
				u := binary.LittleEndian.Uint64(pr.bufPeek)
				if pr.bigEndian {
					// Swap all the bits within each byte.
					u = (u&0xaaaaaaaaaaaaaaaa)>>1 | (u&0x5555555555555555)<<1
					u = (u&0xcccccccccccccccc)>>2 | (u&0x3333333333333333)<<2
					u = (u&0xf0f0f0f0f0f0f0f0)>>4 | (u&0x0f0f0f0f0f0f0f0f)<<4
				}

				pr.bufBits |= u << pr.numBits
				pr.numBits += uint(n * 8)
				pr.bufPeek = pr.bufPeek[n:]
				break
			} else {
				// Fewer than 8 bytes available: feed one byte at a time.
				if n > len(pr.bufPeek) {
					n = len(pr.bufPeek)
				}
				for _, c := range pr.bufPeek[:n] {
					if pr.bigEndian {
						c = internal.ReverseLUT[c]
					}
					pr.bufBits |= uint64(c) << pr.numBits
					pr.numBits += 8
				}
				pr.bufPeek = pr.bufPeek[n:]
				if pr.numBits > 56 {
					break
				}
			}
		}
		pr.fedBits = pr.numBits
	} else {
		// ByteReader path: read exactly as many bytes as nb bits require.
		for pr.numBits < nb {
			c, err := pr.byteRd.ReadByte()
			if err != nil {
				if err == io.EOF {
					err = io.ErrUnexpectedEOF
				}
				return err
			}
			if pr.bigEndian {
				c = internal.ReverseLUT[c]
			}
			pr.bufBits |= uint64(c) << pr.numBits
			pr.numBits += 8
			pr.Offset++
		}
	}
	return nil
}
|
||||
146
vendor/github.com/dsnet/compress/internal/prefix/wrap.go
generated
vendored
Normal file
146
vendor/github.com/dsnet/compress/internal/prefix/wrap.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package prefix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// For some of the common Readers, we wrap and extend them to satisfy the
// compress.BufferedReader interface to improve performance.

// buffer adapts *bytes.Buffer with Buffered/Peek/Discard methods.
type buffer struct {
	*bytes.Buffer
}

// bytesReader adapts *bytes.Reader, caching a window of upcoming bytes in a
// fixed-size local array so Peek does not allocate.
type bytesReader struct {
	*bytes.Reader
	pos int64      // Read offset that buf was captured at
	buf []byte     // Active window into arr
	arr [512]byte  // Backing storage for peeked data
}

// stringReader adapts *strings.Reader, mirroring bytesReader.
type stringReader struct {
	*strings.Reader
	pos int64      // Read offset that buf was captured at
	buf []byte     // Active window into arr
	arr [512]byte  // Backing storage for peeked data
}
|
||||
|
||||
func (r *buffer) Buffered() int {
|
||||
return r.Len()
|
||||
}
|
||||
|
||||
func (r *buffer) Peek(n int) ([]byte, error) {
|
||||
b := r.Bytes()
|
||||
if len(b) < n {
|
||||
return b, io.EOF
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
|
||||
func (r *buffer) Discard(n int) (int, error) {
|
||||
b := r.Next(n)
|
||||
if len(b) < n {
|
||||
return len(b), io.EOF
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Buffered reports how many bytes can be served from the local peek window
// without another ReadAt (capped by the bytes actually remaining).
func (r *bytesReader) Buffered() int {
	r.update()
	if r.Len() > len(r.buf) {
		return len(r.buf)
	}
	return r.Len()
}

// Peek returns the next n bytes without advancing the read offset, refilling
// the local 512-byte window via ReadAt when needed.
func (r *bytesReader) Peek(n int) ([]byte, error) {
	if n > len(r.arr) {
		return nil, io.ErrShortBuffer
	}

	// Return sub-slice of local buffer if possible.
	r.update()
	if len(r.buf) >= n {
		return r.buf[:n], nil
	}

	// Fill entire local buffer, and return appropriate sub-slice.
	cnt, err := r.ReadAt(r.arr[:], r.pos)
	r.buf = r.arr[:cnt]
	if cnt < n {
		return r.arr[:cnt], err
	}
	return r.arr[:n], nil
}

// Discard advances the read offset by n bytes, returning io.EOF if fewer
// than n bytes remained.
func (r *bytesReader) Discard(n int) (int, error) {
	var err error
	if n > r.Len() {
		n, err = r.Len(), io.EOF
	}
	r.Seek(int64(n), io.SeekCurrent)
	return n, err
}

// update reslices the internal buffer to be consistent with the read offset.
func (r *bytesReader) update() {
	pos, _ := r.Seek(0, io.SeekCurrent)
	// If the reader advanced within the cached window, slide the window;
	// otherwise invalidate it.
	if off := pos - r.pos; off >= 0 && off < int64(len(r.buf)) {
		r.buf, r.pos = r.buf[off:], pos
	} else {
		r.buf, r.pos = nil, pos
	}
}
|
||||
|
||||
// Buffered reports how many bytes can be served from the local peek window
// without another ReadAt (capped by the bytes actually remaining).
func (r *stringReader) Buffered() int {
	r.update()
	if r.Len() > len(r.buf) {
		return len(r.buf)
	}
	return r.Len()
}

// Peek returns the next n bytes without advancing the read offset, refilling
// the local 512-byte window via ReadAt when needed.
func (r *stringReader) Peek(n int) ([]byte, error) {
	if n > len(r.arr) {
		return nil, io.ErrShortBuffer
	}

	// Return sub-slice of local buffer if possible.
	r.update()
	if len(r.buf) >= n {
		return r.buf[:n], nil
	}

	// Fill entire local buffer, and return appropriate sub-slice.
	cnt, err := r.ReadAt(r.arr[:], r.pos)
	r.buf = r.arr[:cnt]
	if cnt < n {
		return r.arr[:cnt], err
	}
	return r.arr[:n], nil
}

// Discard advances the read offset by n bytes, returning io.EOF if fewer
// than n bytes remained.
func (r *stringReader) Discard(n int) (int, error) {
	var err error
	if n > r.Len() {
		n, err = r.Len(), io.EOF
	}
	r.Seek(int64(n), io.SeekCurrent)
	return n, err
}

// update reslices the internal buffer to be consistent with the read offset.
func (r *stringReader) update() {
	pos, _ := r.Seek(0, io.SeekCurrent)
	// If the reader advanced within the cached window, slide the window;
	// otherwise invalidate it.
	if off := pos - r.pos; off >= 0 && off < int64(len(r.buf)) {
		r.buf, r.pos = r.buf[off:], pos
	} else {
		r.buf, r.pos = nil, pos
	}
}
|
||||
166
vendor/github.com/dsnet/compress/internal/prefix/writer.go
generated
vendored
Normal file
166
vendor/github.com/dsnet/compress/internal/prefix/writer.go
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
package prefix
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/dsnet/compress/internal/errors"
|
||||
)
|
||||
|
||||
// Writer implements a prefix encoder. For performance reasons, Writer will not
// write bytes immediately to the underlying stream.
type Writer struct {
	Offset int64 // Number of bytes written to the underlying io.Writer

	wr        io.Writer
	bufBits   uint64 // Buffer to hold some bits
	numBits   uint   // Number of valid bits in bufBits
	bigEndian bool   // Are bits written in big-endian order?

	// Byte staging area flushed to wr in bulk by PushBits.
	buf    [512]byte
	cntBuf int
}
|
||||
|
||||
// Init initializes the bit Writer to write to w. If bigEndian is true, then
|
||||
// bits will be written starting from the most-significant bits of a byte
|
||||
// (as done in bzip2), otherwise it will write starting from the
|
||||
// least-significant bits of a byte (such as for deflate and brotli).
|
||||
func (pw *Writer) Init(w io.Writer, bigEndian bool) {
|
||||
*pw = Writer{wr: w, bigEndian: bigEndian}
|
||||
return
|
||||
}
|
||||
|
||||
// BitsWritten reports the total number of bits issued to any Write method.
// This counts bytes already written, bytes staged in buf, and bits still in
// the bit buffer.
func (pw *Writer) BitsWritten() int64 {
	return 8*pw.Offset + 8*int64(pw.cntBuf) + int64(pw.numBits)
}

// WritePads writes 0-7 bits to the bit buffer to achieve byte-alignment.
func (pw *Writer) WritePads(v uint) {
	nb := -pw.numBits & 7 // Bits needed to reach the next byte boundary
	pw.bufBits |= uint64(v) << pw.numBits
	pw.numBits += nb
}

// Write writes bytes from buf.
// The bit-ordering mode does not affect this method.
func (pw *Writer) Write(buf []byte) (cnt int, err error) {
	if pw.numBits > 0 || pw.cntBuf > 0 {
		if pw.numBits%8 != 0 {
			return 0, errorf(errors.Invalid, "non-aligned bit buffer")
		}
		// Drain all staged bits/bytes before the raw write.
		if _, err := pw.Flush(); err != nil {
			return 0, err
		}
	}
	cnt, err = pw.wr.Write(buf)
	pw.Offset += int64(cnt)
	return cnt, err
}

// WriteOffset writes ofs in a (sym, extra) fashion using the provided prefix
// Encoder and RangeEncoder.
func (pw *Writer) WriteOffset(ofs uint, pe *Encoder, re *RangeEncoder) {
	sym := re.Encode(ofs)
	pw.WriteSymbol(sym, pe)
	rc := re.rcs[sym]
	// The extra bits encode the remainder within the chosen range.
	pw.WriteBits(ofs-uint(rc.Base), uint(rc.Len))
}

// TryWriteBits attempts to write nb bits using the contents of the bit buffer
// alone. It reports whether it succeeded.
//
// This method is designed to be inlined for performance reasons.
func (pw *Writer) TryWriteBits(v, nb uint) bool {
	if 64-pw.numBits < nb {
		return false
	}
	pw.bufBits |= uint64(v) << pw.numBits
	pw.numBits += nb
	return true
}

// WriteBits writes nb bits of v to the underlying writer.
// It panics (via errors.Panic) on I/O failure.
func (pw *Writer) WriteBits(v, nb uint) {
	if _, err := pw.PushBits(); err != nil {
		errors.Panic(err)
	}
	pw.bufBits |= uint64(v) << pw.numBits
	pw.numBits += nb
}
|
||||
|
||||
// TryWriteSymbol attempts to encode the next symbol using the contents of the
|
||||
// bit buffer alone. It reports whether it succeeded.
|
||||
//
|
||||
// This method is designed to be inlined for performance reasons.
|
||||
func (pw *Writer) TryWriteSymbol(sym uint, pe *Encoder) bool {
|
||||
chunk := pe.chunks[uint32(sym)&pe.chunkMask]
|
||||
nb := uint(chunk & countMask)
|
||||
if 64-pw.numBits < nb {
|
||||
return false
|
||||
}
|
||||
pw.bufBits |= uint64(chunk>>countBits) << pw.numBits
|
||||
pw.numBits += nb
|
||||
return true
|
||||
}
|
||||
|
||||
// WriteSymbol writes the symbol using the provided prefix Encoder.
|
||||
func (pw *Writer) WriteSymbol(sym uint, pe *Encoder) {
|
||||
if _, err := pw.PushBits(); err != nil {
|
||||
errors.Panic(err)
|
||||
}
|
||||
chunk := pe.chunks[uint32(sym)&pe.chunkMask]
|
||||
nb := uint(chunk & countMask)
|
||||
pw.bufBits |= uint64(chunk>>countBits) << pw.numBits
|
||||
pw.numBits += nb
|
||||
}
|
||||
|
||||
// Flush flushes all complete bytes from the bit buffer to the byte buffer, and
|
||||
// then flushes all bytes in the byte buffer to the underlying writer.
|
||||
// After this call, the bit Writer is will only withhold 7 bits at most.
|
||||
func (pw *Writer) Flush() (int64, error) {
|
||||
if pw.numBits < 8 && pw.cntBuf == 0 {
|
||||
return pw.Offset, nil
|
||||
}
|
||||
if _, err := pw.PushBits(); err != nil {
|
||||
return pw.Offset, err
|
||||
}
|
||||
cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf])
|
||||
pw.cntBuf -= cnt
|
||||
pw.Offset += int64(cnt)
|
||||
return pw.Offset, err
|
||||
}
|
||||
|
||||
// PushBits pushes as many bytes as possible from the bit buffer to the byte
|
||||
// buffer, reporting the number of bits pushed.
|
||||
func (pw *Writer) PushBits() (uint, error) {
|
||||
if pw.cntBuf >= len(pw.buf)-8 {
|
||||
cnt, err := pw.wr.Write(pw.buf[:pw.cntBuf])
|
||||
pw.cntBuf -= cnt
|
||||
pw.Offset += int64(cnt)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
u := pw.bufBits
|
||||
if pw.bigEndian {
|
||||
// Swap all the bits within each byte.
|
||||
u = (u&0xaaaaaaaaaaaaaaaa)>>1 | (u&0x5555555555555555)<<1
|
||||
u = (u&0xcccccccccccccccc)>>2 | (u&0x3333333333333333)<<2
|
||||
u = (u&0xf0f0f0f0f0f0f0f0)>>4 | (u&0x0f0f0f0f0f0f0f0f)<<4
|
||||
}
|
||||
// Starting with Go 1.7, the compiler should use a wide integer
|
||||
// store here if the architecture supports it.
|
||||
binary.LittleEndian.PutUint64(pw.buf[pw.cntBuf:], u)
|
||||
|
||||
nb := pw.numBits / 8 // Number of bytes to copy from bit buffer
|
||||
pw.cntBuf += int(nb)
|
||||
pw.bufBits >>= 8 * nb
|
||||
pw.numBits -= 8 * nb
|
||||
return 8 * nb, nil
|
||||
}
|
||||
21
vendor/github.com/dsnet/compress/internal/release.go
generated
vendored
Normal file
21
vendor/github.com/dsnet/compress/internal/release.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
// Copyright 2015, Joe Tsai. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE.md file.
|
||||
|
||||
// +build !debug,!gofuzz
|
||||
|
||||
package internal
|
||||
|
||||
// Debug indicates whether the debug build tag was set.
|
||||
//
|
||||
// If set, programs may choose to print with more human-readable
|
||||
// debug information and also perform sanity checks that would otherwise be too
|
||||
// expensive to run in a release build.
|
||||
const Debug = false
|
||||
|
||||
// GoFuzz indicates whether the gofuzz build tag was set.
|
||||
//
|
||||
// If set, programs may choose to disable certain checks (like checksums) that
|
||||
// would be nearly impossible for gofuzz to properly get right.
|
||||
// If GoFuzz is set, it implies that Debug is set as well.
|
||||
const GoFuzz = false
|
||||
12
vendor/github.com/dsnet/compress/zbench.sh
generated
vendored
Normal file
12
vendor/github.com/dsnet/compress/zbench.sh
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2017, Joe Tsai. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE.md file.
|
||||
|
||||
# zbench wraps internal/tool/bench and is useful for comparing benchmarks from
|
||||
# the implementations in this repository relative to other implementations.
|
||||
#
|
||||
# See internal/tool/bench/main.go for more details.
|
||||
cd $(dirname "${BASH_SOURCE[0]}")/internal/tool/bench
|
||||
go run $(go list -f '{{ join .GoFiles "\n" }}') "$@"
|
||||
10
vendor/github.com/dsnet/compress/zfuzz.sh
generated
vendored
Normal file
10
vendor/github.com/dsnet/compress/zfuzz.sh
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2017, Joe Tsai. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE.md file.
|
||||
|
||||
# zfuzz wraps internal/tool/fuzz and is useful for fuzz testing each of
|
||||
# the implementations in this repository.
|
||||
cd $(dirname "${BASH_SOURCE[0]}")/internal/tool/fuzz
|
||||
./fuzz.sh "$@"
|
||||
54
vendor/github.com/dsnet/compress/zprof.sh
generated
vendored
Normal file
54
vendor/github.com/dsnet/compress/zprof.sh
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2017, Joe Tsai. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE.md file.
|
||||
|
||||
if [ $# == 0 ]; then
|
||||
echo "Usage: $0 PKG_PATH TEST_ARGS..."
|
||||
echo ""
|
||||
echo "Runs coverage and performance benchmarks for a given package."
|
||||
echo "The results are stored in the _zprof_ directory."
|
||||
echo ""
|
||||
echo "Example:"
|
||||
echo " $0 flate -test.bench=Decode/Twain/Default"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PKG_PATH=$1
|
||||
PKG_NAME=$(basename $PKG_PATH)
|
||||
shift
|
||||
|
||||
TMPDIR=$(mktemp -d)
|
||||
trap "rm -rf $TMPDIR $PKG_PATH/$PKG_NAME.test" SIGINT SIGTERM EXIT
|
||||
|
||||
(
|
||||
cd $DIR/$PKG_PATH
|
||||
|
||||
# Print the go version.
|
||||
go version
|
||||
|
||||
# Perform coverage profiling.
|
||||
go test github.com/dsnet/compress/$PKG_PATH -coverprofile $TMPDIR/cover.profile
|
||||
if [ $? != 0 ]; then exit 1; fi
|
||||
go tool cover -html $TMPDIR/cover.profile -o cover.html
|
||||
|
||||
# Perform performance profiling.
|
||||
if [ $# != 0 ]; then
|
||||
go test -c github.com/dsnet/compress/$PKG_PATH
|
||||
if [ $? != 0 ]; then exit 1; fi
|
||||
./$PKG_NAME.test -test.cpuprofile $TMPDIR/cpu.profile -test.memprofile $TMPDIR/mem.profile -test.run - "$@"
|
||||
PPROF="go tool pprof"
|
||||
$PPROF -output=cpu.svg -web $PKG_NAME.test $TMPDIR/cpu.profile 2> /dev/null
|
||||
$PPROF -output=cpu.html -weblist=. $PKG_NAME.test $TMPDIR/cpu.profile 2> /dev/null
|
||||
$PPROF -output=mem_objects.svg -alloc_objects -web $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null
|
||||
$PPROF -output=mem_objects.html -alloc_objects -weblist=. $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null
|
||||
$PPROF -output=mem_space.svg -alloc_space -web $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null
|
||||
$PPROF -output=mem_space.html -alloc_space -weblist=. $PKG_NAME.test $TMPDIR/mem.profile 2> /dev/null
|
||||
fi
|
||||
|
||||
rm -rf $DIR/_zprof_/$PKG_NAME
|
||||
mkdir -p $DIR/_zprof_/$PKG_NAME
|
||||
mv *.html *.svg $DIR/_zprof_/$PKG_NAME 2> /dev/null
|
||||
)
|
||||
54
vendor/github.com/dsnet/compress/ztest.sh
generated
vendored
Normal file
54
vendor/github.com/dsnet/compress/ztest.sh
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2017, Joe Tsai. All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE.md file.
|
||||
|
||||
cd $(go list -f '{{ .Dir }}' github.com/dsnet/compress)
|
||||
|
||||
BOLD="\x1b[1mRunning: "
|
||||
PASS="\x1b[32mPASS"
|
||||
FAIL="\x1b[31mFAIL"
|
||||
RESET="\x1b[0m"
|
||||
|
||||
echo -e "${BOLD}fmt${RESET}"
|
||||
RET_FMT=$(find . -name "*.go" | egrep -v "/(_.*_|\..*|testdata)/" | xargs gofmt -d)
|
||||
if [[ ! -z "$RET_FMT" ]]; then echo "$RET_FMT"; echo; fi
|
||||
|
||||
echo -e "${BOLD}test${RESET}"
|
||||
RET_TEST=$(go test -race ./... | egrep -v "^(ok|[?])\s+")
|
||||
if [[ ! -z "$RET_TEST" ]]; then echo "$RET_TEST"; echo; fi
|
||||
|
||||
echo -e "${BOLD}staticcheck${RESET}"
|
||||
RET_SCHK=$(staticcheck \
|
||||
-ignore "
|
||||
github.com/dsnet/compress/brotli/*.go:SA4016
|
||||
github.com/dsnet/compress/brotli/*.go:S1023
|
||||
github.com/dsnet/compress/brotli/*.go:U1000
|
||||
github.com/dsnet/compress/bzip2/*.go:S1023
|
||||
github.com/dsnet/compress/flate/*.go:U1000
|
||||
github.com/dsnet/compress/internal/cgo/lzma/*.go:SA4000
|
||||
github.com/dsnet/compress/internal/prefix/*.go:S1004
|
||||
github.com/dsnet/compress/internal/prefix/*.go:S1023
|
||||
github.com/dsnet/compress/internal/prefix/*.go:SA4016
|
||||
github.com/dsnet/compress/internal/tool/bench/*.go:S1007
|
||||
github.com/dsnet/compress/xflate/internal/meta/*.go:S1023
|
||||
" ./... 2>&1)
|
||||
if [[ ! -z "$RET_SCHK" ]]; then echo "$RET_SCHK"; echo; fi
|
||||
|
||||
echo -e "${BOLD}lint${RESET}"
|
||||
RET_LINT=$(golint ./... 2>&1 |
|
||||
egrep -v "^vendor/" |
|
||||
egrep -v "should have comment(.*)or be unexported" |
|
||||
egrep -v "^(.*)type name will be used as(.*)by other packages" |
|
||||
egrep -v "^brotli/transform.go:(.*)replace i [+]= 1 with i[+]{2}" |
|
||||
egrep -v "^internal/prefix/prefix.go:(.*)replace symBits(.*) [-]= 1 with symBits(.*)[-]{2}" |
|
||||
egrep -v "^xflate/common.go:(.*)NoCompression should be of the form" |
|
||||
egrep -v "^exit status")
|
||||
if [[ ! -z "$RET_LINT" ]]; then echo "$RET_LINT"; echo; fi
|
||||
|
||||
if [[ ! -z "$RET_FMT" ]] || [ ! -z "$RET_TEST" ] || [[ ! -z "$RET_SCHK" ]] || [[ ! -z "$RET_LINT" ]]; then
|
||||
echo -e "${FAIL}${RESET}"; exit 1
|
||||
else
|
||||
echo -e "${PASS}${RESET}"; exit 0
|
||||
fi
|
||||
5
vendor/github.com/mholt/archiver/.gitignore
generated
vendored
Normal file
5
vendor/github.com/mholt/archiver/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
.DS_Store
|
||||
_gitignore
|
||||
builds/
|
||||
*.test
|
||||
cmd/archiver/archiver
|
||||
21
vendor/github.com/mholt/archiver/.travis.yml
generated
vendored
Normal file
21
vendor/github.com/mholt/archiver/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
|
||||
install:
|
||||
- go get -t ./...
|
||||
- go get golang.org/x/lint/golint
|
||||
- go get github.com/gordonklaus/ineffassign
|
||||
|
||||
script:
|
||||
- diff <(echo -n) <(gofmt -s -d .)
|
||||
- ineffassign .
|
||||
- go vet ./...
|
||||
- go test ./...
|
||||
|
||||
after_script:
|
||||
- golint ./...
|
||||
21
vendor/github.com/mholt/archiver/LICENSE
generated
vendored
Normal file
21
vendor/github.com/mholt/archiver/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2016 Matthew Holt
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
255
vendor/github.com/mholt/archiver/README.md
generated
vendored
Normal file
255
vendor/github.com/mholt/archiver/README.md
generated
vendored
Normal file
@@ -0,0 +1,255 @@
|
||||
archiver [](https://godoc.org/github.com/mholt/archiver) [](https://travis-ci.org/mholt/archiver) [](https://ci.appveyor.com/project/mholt/archiver)
|
||||
========
|
||||
|
||||
Introducing **Archiver 3.1** - a cross-platform, multi-format archive utility and Go library. A powerful and flexible library meets an elegant CLI in this generic replacement for several of platform-specific, format-specific archive utilities.
|
||||
|
||||
## Features
|
||||
|
||||
Package archiver makes it trivially easy to make and extract common archive formats such as zip and tarball (and its compressed variants). Simply name the input and output file(s). The `arc` command runs the same on all platforms and has no external dependencies (not even libc). It is powered by the Go standard library and several third-party, pure-Go libraries.
|
||||
|
||||
Files are put into the root of the archive; directories are recursively added, preserving structure.
|
||||
|
||||
- Make whole archives from a list of files
|
||||
- Open whole archives to a folder
|
||||
- Extract specific files/folders from archives
|
||||
- Stream files in and out of archives without needing actual files on disk
|
||||
- Traverse archive contents without loading them
|
||||
- Compress files
|
||||
- Decompress files
|
||||
- Streaming compression and decompression
|
||||
- Several archive and compression formats supported
|
||||
|
||||
### Format-dependent features
|
||||
|
||||
- Optionally create a top-level folder to avoid littering a directory or archive root with files
|
||||
- Toggle overwrite existing files
|
||||
- Adjust compression level
|
||||
- Zip: store (not compress) already-compressed files
|
||||
- Make all necessary directories
|
||||
- Open password-protected RAR archives
|
||||
- Optionally continue with other files after an error
|
||||
|
||||
### Supported archive formats
|
||||
|
||||
- .zip
|
||||
- .tar
|
||||
- .tar.gz or .tgz
|
||||
- .tar.bz2 or .tbz2
|
||||
- .tar.xz or .txz
|
||||
- .tar.lz4 or .tlz4
|
||||
- .tar.sz or .tsz
|
||||
- .rar (open only)
|
||||
|
||||
### Supported compression formats
|
||||
|
||||
- bzip2
|
||||
- gzip
|
||||
- lz4
|
||||
- snappy (sz)
|
||||
- xz
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get -u github.com/mholt/archiver/cmd/arc
|
||||
```
|
||||
|
||||
Or download binaries from the [releases](https://github.com/mholt/archiver/releases) page.
|
||||
|
||||
|
||||
## Command Use
|
||||
|
||||
### Make new archive
|
||||
|
||||
```bash
|
||||
# Syntax: arc archive [archive name] [input files...]
|
||||
|
||||
$ arc archive test.tar.gz file1.txt images/file2.jpg folder/subfolder
|
||||
```
|
||||
|
||||
(At least one input file is required.)
|
||||
|
||||
### Extract entire archive
|
||||
|
||||
```bash
|
||||
# Syntax: arc unarchive [archive name] [destination]
|
||||
|
||||
$ arc unarchive test.tar.gz
|
||||
```
|
||||
|
||||
(The destination path is optional; default is current directory.)
|
||||
|
||||
The archive name must end with a supported file extension—this is how it knows what kind of archive to make. Run `arc help` for more help.
|
||||
|
||||
### List archive contents
|
||||
|
||||
```bash
|
||||
# Syntax: arc ls [archive name]
|
||||
|
||||
$ arc ls caddy_dist.tar.gz
|
||||
drwxr-xr-x matt staff 0 2018-09-19 15:47:18 -0600 MDT dist/
|
||||
-rw-r--r-- matt staff 6148 2017-08-07 18:34:22 -0600 MDT dist/.DS_Store
|
||||
-rw-r--r-- matt staff 22481 2018-09-19 15:47:18 -0600 MDT dist/CHANGES.txt
|
||||
-rw-r--r-- matt staff 17189 2018-09-19 15:47:18 -0600 MDT dist/EULA.txt
|
||||
-rw-r--r-- matt staff 25261 2016-03-07 16:32:00 -0700 MST dist/LICENSES.txt
|
||||
-rw-r--r-- matt staff 1017 2018-09-19 15:47:18 -0600 MDT dist/README.txt
|
||||
-rw-r--r-- matt staff 288 2016-03-21 11:52:38 -0600 MDT dist/gitcookie.sh.enc
|
||||
...
|
||||
```
|
||||
|
||||
### Extract a specific file or folder from an archive
|
||||
|
||||
```bash
|
||||
# Syntax: arc extract [archive name] [path in archive] [destination on disk]
|
||||
|
||||
$ arc extract test.tar.gz foo/hello.txt extracted/hello.txt
|
||||
```
|
||||
|
||||
### Compress a single file
|
||||
|
||||
```bash
|
||||
# Syntax: arc compress [input file] [output file]
|
||||
|
||||
$ arc compress test.txt compressed_test.txt.gz
|
||||
$ arc compress test.txt gz
|
||||
```
|
||||
|
||||
For convenience, the output file (second argument) may simply be a compression format (without leading dot), in which case the output filename will be the same as the input filename but with the format extension appended, and the input file will be deleted if successful.
|
||||
|
||||
### Decompress a single file
|
||||
|
||||
```bash
|
||||
# Syntax: arc decompress [input file] [output file]
|
||||
|
||||
$ arc decompress test.txt.gz original_test.txt
|
||||
$ arc decompress test.txt.gz
|
||||
```
|
||||
|
||||
For convenience, the output file (second argument) may be omitted. In that case, the output filename will have the same name as the input filename, but with the compression extension stripped from the end; and the input file will be deleted if successful.
|
||||
|
||||
### Flags
|
||||
|
||||
Flags are specified before the subcommand. Use `arc help` or `arc -h` to get usage help and a description of flags with their default values.
|
||||
|
||||
## Library Use
|
||||
|
||||
The archiver package allows you to easily create and open archives, walk their contents, extract specific files, compress and decompress files, and even stream archives in and out using pure io.Reader and io.Writer interfaces, without ever needing to touch the disk.
|
||||
|
||||
```go
|
||||
import "github.com/mholt/archiver"
|
||||
```
|
||||
|
||||
[See the package's GoDoc](https://godoc.org/github.com/mholt/archiver) for full API documentation.
|
||||
|
||||
For example, creating or unpacking an archive file:
|
||||
|
||||
```go
|
||||
err := archiver.Archive([]string{"testdata", "other/file.txt"}, "test.zip")
|
||||
// ...
|
||||
err = archiver.Unarchive("test.tar.gz", "test")
|
||||
```
|
||||
|
||||
The archive format is determined by file extension. (There are [several functions in this package](https://godoc.org/github.com/mholt/archiver) which perform a task by inferring the format from file extension or file header, including `Archive()`, `Unarchive()`, `CompressFile()`, and `DecompressFile()`.)
|
||||
|
||||
To configure the archiver used or perform, create an instance of the format's type:
|
||||
|
||||
```go
|
||||
z := archiver.Zip{
|
||||
CompressionLevel: flate.DefaultCompression,
|
||||
MkdirAll: true,
|
||||
SelectiveCompression: true,
|
||||
ContinueOnError: false,
|
||||
OverwriteExisting: false,
|
||||
ImplicitTopLevelFolder: false,
|
||||
}
|
||||
|
||||
err := z.Archive([]string{"testdata", "other/file.txt"}, "/Users/matt/Desktop/test.zip")
|
||||
```
|
||||
|
||||
Inspecting an archive:
|
||||
|
||||
```go
|
||||
err = z.Walk("/Users/matt/Desktop/test.zip", func(f archiver.File) error {
|
||||
zfh, ok := f.Header.(zip.FileHeader)
|
||||
if ok {
|
||||
fmt.Println("Filename:", zfh.Name)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Streaming files into an archive that is being written to the HTTP response:
|
||||
|
||||
```go
|
||||
err = z.Create(responseWriter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer z.Close()
|
||||
|
||||
for _, fname := range filenames {
|
||||
info, err := os.Stat(fname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// get file's name for the inside of the archive
|
||||
internalName, err := archiver.NameInArchive(info, fname, fname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// open the file
|
||||
file, err := os.Open(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// write it to the archive
|
||||
err = z.Write(archiver.File{
|
||||
FileInfo: archiver.FileInfo{
|
||||
FileInfo: info,
|
||||
CustomName: internalName,
|
||||
},
|
||||
ReadCloser: file,
|
||||
})
|
||||
file.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The `archiver.File` type allows you to use actual files with archives, or to mimic files when you only have streams.
|
||||
|
||||
There's a lot more that can be done, too. [See the GoDoc](https://godoc.org/github.com/mholt/archiver) for full API documentation.
|
||||
|
||||
**Security note: This package does NOT attempt to mitigate zip-slip attacks.** It is [extremely difficult](https://github.com/rubyzip/rubyzip/pull/376) [to do properly](https://github.com/mholt/archiver/pull/65#issuecomment-395988244) and [seemingly impossible to mitigate effectively across platforms](https://github.com/golang/go/issues/20126). [Attempted fixes have broken processing of legitimate files in production](https://github.com/mholt/archiver/pull/70#issuecomment-423267320), rendering the program unusable. Our recommendation instead is to inspect the contents of an untrusted archive before extracting it (this package provides `Walkers`) and decide if you want to proceed with extraction.
|
||||
|
||||
|
||||
## Project Values
|
||||
|
||||
This project has a few principle-based goals that guide its development:
|
||||
|
||||
- **Do our thing really well.** Our thing is creating, opening, inspecting, compressing, and streaming archive files. It is not meant to be a replacement for specific archive format tools like tar, zip, etc. that have lots of features and customizability. (Some customizability is OK, but not to the extent that it becomes overly complicated or error-prone.)
|
||||
|
||||
- **Have good tests.** Changes should be covered by tests.
|
||||
|
||||
- **Limit dependencies.** Keep the package lightweight.
|
||||
|
||||
- **Pure Go.** This means no cgo or other external/system dependencies. This package should be able to stand on its own and cross-compile easily to any platform -- and that includes its library dependencies.
|
||||
|
||||
- **Idiomatic Go.** Keep interfaces small, variable names semantic, vet shows no errors, the linter is generally quiet, etc.
|
||||
|
||||
- **Be elegant.** This package should be elegant to use and its code should be elegant when reading and testing. If it doesn't feel good, fix it up.
|
||||
|
||||
- **Well-documented.** Use comments prudently; explain why non-obvious code is necessary (and use tests to enforce it). Keep the docs updated, and have examples where helpful.
|
||||
|
||||
- **Keep it efficient.** This often means keep it simple. Fast code is valuable.
|
||||
|
||||
- **Consensus.** Contributions should ideally be approved by multiple reviewers before being merged. Generally, avoid merging multi-chunk changes that do not go through at least one or two iterations/reviews. Except for trivial changes, PRs are seldom ready to merge right away.
|
||||
|
||||
- **Have fun contributing.** Coding is awesome!
|
||||
|
||||
We welcome contributions and appreciate your efforts! However, please open issues to discuss any changes before spending the time preparing a pull request. This will save time, reduce frustration, and help coordinate the work. Thank you!
|
||||
31
vendor/github.com/mholt/archiver/appveyor.yml
generated
vendored
Normal file
31
vendor/github.com/mholt/archiver/appveyor.yml
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
version: "{build}"
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\mholt\archiver
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
CGO_ENABLED: 0
|
||||
|
||||
stack: go 1.11
|
||||
|
||||
install:
|
||||
- go get ./...
|
||||
- go get golang.org/x/lint/golint
|
||||
- go get github.com/gordonklaus/ineffassign
|
||||
- set PATH=%GOPATH%\bin;%PATH%
|
||||
|
||||
build: off
|
||||
|
||||
before_test:
|
||||
- go version
|
||||
- go env
|
||||
|
||||
test_script:
|
||||
- go vet ./...
|
||||
- go test ./...
|
||||
- ineffassign .
|
||||
|
||||
after_test:
|
||||
- golint ./...
|
||||
|
||||
deploy: off
|
||||
498
vendor/github.com/mholt/archiver/archiver.go
generated
vendored
Normal file
498
vendor/github.com/mholt/archiver/archiver.go
generated
vendored
Normal file
@@ -0,0 +1,498 @@
|
||||
// Package archiver facilitates convenient, cross-platform, high-level archival
|
||||
// and compression operations for a variety of formats and compression algorithms.
|
||||
//
|
||||
// This package and its dependencies are written in pure Go (not cgo) and
|
||||
// have no external dependencies, so they should run on all major platforms.
|
||||
// (It also comes with a command for CLI use in the cmd/arc folder.)
|
||||
//
|
||||
// Each supported format or algorithm has a unique type definition that
|
||||
// implements the interfaces corresponding to the tasks they perform. For
|
||||
// example, the Tar type implements Reader, Writer, Archiver, Unarchiver,
|
||||
// Walker, and several other interfaces.
|
||||
//
|
||||
// The most common functions are implemented at the package level for
|
||||
// convenience: Archive, Unarchive, Walk, Extract, CompressFile, and
|
||||
// DecompressFile. With these, the format type is chosen implicitly,
|
||||
// and a sane default configuration is used.
|
||||
//
|
||||
// To customize a format's configuration, create an instance of its struct
|
||||
// with its fields set to the desired values. You can also use and customize
|
||||
// the handy Default* (replace the wildcard with the format's type name)
|
||||
// for a quick, one-off instance of the format's type.
|
||||
//
|
||||
// To obtain a new instance of a format's struct with the default config, use
|
||||
// the provided New*() functions. This is not required, however. An empty
|
||||
// struct of any type, for example &Zip{} is perfectly valid, so you may
|
||||
// create the structs manually, too. The examples on this page show how
|
||||
// either may be done.
|
||||
//
|
||||
// See the examples in this package for an idea of how to wield this package
|
||||
// for common tasks. Most of the examples which are specific to a certain
|
||||
// format type, for example Zip, can be applied to other types that implement
|
||||
// the same interfaces. For example, using Zip is very similar to using Tar
|
||||
// or TarGz (etc), and using Gz is very similar to using Sz or Xz (etc).
|
||||
//
|
||||
// When creating archives or compressing files using a specific instance of
|
||||
// the format's type, the name of the output file MUST match that of the
|
||||
// format, to prevent confusion later on. If you absolutely need a different
|
||||
// file extension, you may rename the file afterward.
|
||||
//
|
||||
// Values in this package are NOT safe for concurrent use. There is no
|
||||
// performance benefit of reusing them, and since they may contain important
|
||||
// state (especially while walking, reading, or writing), it is NOT
|
||||
// recommended to reuse values from this package or change their configuration
|
||||
// after they are in use.
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Archiver is a type that can create an archive file
|
||||
// from a list of source file names.
|
||||
type Archiver interface {
|
||||
ExtensionChecker
|
||||
|
||||
// Archive adds all the files or folders in sources
|
||||
// to an archive to be created at destination. Files
|
||||
// are added to the root of the archive, and directories
|
||||
// are walked and recursively added, preserving folder
|
||||
// structure.
|
||||
Archive(sources []string, destination string) error
|
||||
}
|
||||
|
||||
// ExtensionChecker validates file extensions
|
||||
type ExtensionChecker interface {
|
||||
CheckExt(name string) error
|
||||
}
|
||||
|
||||
// Unarchiver is a type that can extract archive files
|
||||
// into a folder.
|
||||
type Unarchiver interface {
|
||||
Unarchive(source, destination string) error
|
||||
}
|
||||
|
||||
// Writer can write discrete byte streams of files to
|
||||
// an output stream.
|
||||
type Writer interface {
|
||||
Create(out io.Writer) error
|
||||
Write(f File) error
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Reader can read discrete byte streams of files from
|
||||
// an input stream.
|
||||
type Reader interface {
|
||||
Open(in io.Reader, size int64) error
|
||||
Read() (File, error)
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Extractor can extract a specific file from a source
|
||||
// archive to a specific destination folder on disk.
|
||||
type Extractor interface {
|
||||
Extract(source, target, destination string) error
|
||||
}
|
||||
|
||||
// File provides methods for accessing information about
|
||||
// or contents of a file within an archive.
|
||||
type File struct {
|
||||
os.FileInfo
|
||||
|
||||
// The original header info; depends on
|
||||
// type of archive -- could be nil, too.
|
||||
Header interface{}
|
||||
|
||||
// Allow the file contents to be read (and closed)
|
||||
io.ReadCloser
|
||||
}
|
||||
|
||||
// FileInfo is an os.FileInfo but optionally with
|
||||
// a custom name, useful if dealing with files that
|
||||
// are not actual files on disk, or which have a
|
||||
// different name in an archive than on disk.
|
||||
type FileInfo struct {
|
||||
os.FileInfo
|
||||
CustomName string
|
||||
}
|
||||
|
||||
// Name returns fi.CustomName if not empty;
|
||||
// otherwise it returns fi.FileInfo.Name().
|
||||
func (fi FileInfo) Name() string {
|
||||
if fi.CustomName != "" {
|
||||
return fi.CustomName
|
||||
}
|
||||
return fi.FileInfo.Name()
|
||||
}
|
||||
|
||||
// ReadFakeCloser is an io.Reader that has
|
||||
// a no-op close method to satisfy the
|
||||
// io.ReadCloser interface.
|
||||
type ReadFakeCloser struct {
|
||||
io.Reader
|
||||
}
|
||||
|
||||
// Close implements io.Closer.
|
||||
func (rfc ReadFakeCloser) Close() error { return nil }
|
||||
|
||||
// Walker can walk an archive file and return information
// about each item in the archive.
type Walker interface {
	Walk(archive string, walkFn WalkFunc) error
}

// WalkFunc is called at each item visited by Walk.
// If an error is returned, the walk may continue
// if the Walker is configured to continue on error.
// The sole exception is the error value ErrStopWalk,
// which stops the walk without an actual error.
type WalkFunc func(f File) error

// ErrStopWalk signals Walk to break without error.
var ErrStopWalk = fmt.Errorf("walk stopped")
|
||||
|
||||
// Compressor compresses to out what it reads from in.
// It also ensures a compatible or matching file extension
// via the embedded ExtensionChecker.
type Compressor interface {
	ExtensionChecker
	Compress(in io.Reader, out io.Writer) error
}

// Decompressor decompresses to out what it reads from in.
type Decompressor interface {
	Decompress(in io.Reader, out io.Writer) error
}

// Matcher is a type that can return whether the given
// file appears to match the implementation's format.
// Implementations should return the file's read position
// to where it was when the method was called.
type Matcher interface {
	Match(io.ReadSeeker) (bool, error)
}
|
||||
|
||||
// Archive creates an archive of the source files to a new file at destination.
|
||||
// The archive format is chosen implicitly by file extension.
|
||||
func Archive(sources []string, destination string) error {
|
||||
aIface, err := ByExtension(destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a, ok := aIface.(Archiver)
|
||||
if !ok {
|
||||
return fmt.Errorf("format specified by destination filename is not an archive format: %s (%T)", destination, aIface)
|
||||
}
|
||||
return a.Archive(sources, destination)
|
||||
}
|
||||
|
||||
// Unarchive unarchives the given archive file into the destination folder.
|
||||
// The archive format is selected implicitly.
|
||||
func Unarchive(source, destination string) error {
|
||||
uaIface, err := ByExtension(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u, ok := uaIface.(Unarchiver)
|
||||
if !ok {
|
||||
return fmt.Errorf("format specified by source filename is not an archive format: %s (%T)", source, uaIface)
|
||||
}
|
||||
return u.Unarchive(source, destination)
|
||||
}
|
||||
|
||||
// Walk calls walkFn for each file within the given archive file.
|
||||
// The archive format is chosen implicitly.
|
||||
func Walk(archive string, walkFn WalkFunc) error {
|
||||
wIface, err := ByExtension(archive)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w, ok := wIface.(Walker)
|
||||
if !ok {
|
||||
return fmt.Errorf("format specified by archive filename is not a walker format: %s (%T)", archive, wIface)
|
||||
}
|
||||
return w.Walk(archive, walkFn)
|
||||
}
|
||||
|
||||
// Extract extracts a single file from the given source archive. If the target
|
||||
// is a directory, the entire folder will be extracted into destination. The
|
||||
// archive format is chosen implicitly.
|
||||
func Extract(source, target, destination string) error {
|
||||
eIface, err := ByExtension(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
e, ok := eIface.(Extractor)
|
||||
if !ok {
|
||||
return fmt.Errorf("format specified by source filename is not an extractor format: %s (%T)", source, eIface)
|
||||
}
|
||||
return e.Extract(source, target, destination)
|
||||
}
|
||||
|
||||
// CompressFile is a convenience function to simply compress a file.
|
||||
// The compression algorithm is selected implicitly based on the
|
||||
// destination's extension.
|
||||
func CompressFile(source, destination string) error {
|
||||
cIface, err := ByExtension(destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c, ok := cIface.(Compressor)
|
||||
if !ok {
|
||||
return fmt.Errorf("format specified by destination filename is not a recognized compression algorithm: %s", destination)
|
||||
}
|
||||
return FileCompressor{Compressor: c}.CompressFile(source, destination)
|
||||
}
|
||||
|
||||
// DecompressFile is a convenience function to simply compress a file.
|
||||
// The compression algorithm is selected implicitly based on the
|
||||
// source's extension.
|
||||
func DecompressFile(source, destination string) error {
|
||||
cIface, err := ByExtension(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c, ok := cIface.(Decompressor)
|
||||
if !ok {
|
||||
return fmt.Errorf("format specified by source filename is not a recognized compression algorithm: %s", source)
|
||||
}
|
||||
return FileCompressor{Decompressor: c}.DecompressFile(source, destination)
|
||||
}
|
||||
|
||||
func fileExists(name string) bool {
|
||||
_, err := os.Stat(name)
|
||||
return !os.IsNotExist(err)
|
||||
}
|
||||
|
||||
func mkdir(dirPath string) error {
|
||||
err := os.MkdirAll(dirPath, 0755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: making directory: %v", dirPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeNewFile(fpath string, in io.Reader, fm os.FileMode) error {
|
||||
err := os.MkdirAll(filepath.Dir(fpath), 0755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: making directory for file: %v", fpath, err)
|
||||
}
|
||||
|
||||
out, err := os.Create(fpath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: creating new file: %v", fpath, err)
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
err = out.Chmod(fm)
|
||||
if err != nil && runtime.GOOS != "windows" {
|
||||
return fmt.Errorf("%s: changing file mode: %v", fpath, err)
|
||||
}
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: writing file: %v", fpath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeNewSymbolicLink(fpath string, target string) error {
|
||||
err := os.MkdirAll(filepath.Dir(fpath), 0755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: making directory for file: %v", fpath, err)
|
||||
}
|
||||
|
||||
err = os.Symlink(target, fpath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: making symbolic link for: %v", fpath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeNewHardLink(fpath string, target string) error {
|
||||
err := os.MkdirAll(filepath.Dir(fpath), 0755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: making directory for file: %v", fpath, err)
|
||||
}
|
||||
|
||||
err = os.Link(target, fpath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: making hard link for: %v", fpath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// within returns true if sub is within or equal to parent.
// It relies on filepath.Rel: if the relative path from parent
// to sub needs a ".." component, sub escapes parent.
func within(parent, sub string) bool {
	if rel, err := filepath.Rel(parent, sub); err == nil {
		return !strings.Contains(rel, "..")
	}
	return false
}
|
||||
|
||||
// multipleTopLevels returns true if the paths do not
// share a common top-level folder.
func multipleTopLevels(paths []string) bool {
	if len(paths) < 2 {
		return false
	}
	var lastTop string
	for _, p := range paths {
		top := topLevelDir(p)
		if lastTop == "" {
			lastTop = top
		}
		if top != lastTop {
			return true
		}
	}
	return false
}

// topLevelDir returns the first path component of p, after
// normalizing backslashes to slashes and stripping a leading slash.
func topLevelDir(p string) string {
	p = strings.TrimPrefix(strings.Replace(p, `\`, "/", -1), "/")
	for {
		next := path.Dir(p)
		if next == "." {
			return p
		}
		p = next
	}
}
|
||||
|
||||
// folderNameFromFileName returns a name for a folder
// that is suitable based on the filename, which will
// be stripped of its extensions.
func folderNameFromFileName(filename string) string {
	// Everything before the first dot of the base name; the whole
	// base name if there is no dot.
	return strings.SplitN(filepath.Base(filename), ".", 2)[0]
}
|
||||
|
||||
// makeNameInArchive returns the filename for the file given by fpath to be used within
|
||||
// the archive. sourceInfo is the FileInfo obtained by calling os.Stat on source, and baseDir
|
||||
// is an optional base directory that becomes the root of the archive. fpath should be the
|
||||
// unaltered file path of the file given to a filepath.WalkFunc.
|
||||
func makeNameInArchive(sourceInfo os.FileInfo, source, baseDir, fpath string) (string, error) {
|
||||
name := filepath.Base(fpath) // start with the file or dir name
|
||||
if sourceInfo.IsDir() {
|
||||
// preserve internal directory structure; that's the path components
|
||||
// between the source directory's leaf and this file's leaf
|
||||
dir, err := filepath.Rel(filepath.Dir(source), filepath.Dir(fpath))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// prepend the internal directory structure to the leaf name,
|
||||
// and convert path separators to forward slashes as per spec
|
||||
name = path.Join(filepath.ToSlash(dir), name)
|
||||
}
|
||||
return path.Join(baseDir, name), nil // prepend the base directory
|
||||
}
|
||||
|
||||
// NameInArchive returns a name for the file at fpath suitable for
// the inside of an archive. The source and its associated sourceInfo
// is the path where walking a directory started, and if no directory
// was walked, source may == fpath. The returned name is essentially
// the components of the path between source and fpath, preserving
// the internal directory structure.
//
// It is a thin exported wrapper over makeNameInArchive with no
// base directory prepended.
func NameInArchive(sourceInfo os.FileInfo, source, fpath string) (string, error) {
	return makeNameInArchive(sourceInfo, source, "", fpath)
}
|
||||
|
||||
// ByExtension returns an archiver and unarchiver, or compressor
|
||||
// and decompressor, based on the extension of the filename.
|
||||
func ByExtension(filename string) (interface{}, error) {
|
||||
var ec interface{}
|
||||
for _, c := range extCheckers {
|
||||
if err := c.CheckExt(filename); err == nil {
|
||||
ec = c
|
||||
break
|
||||
}
|
||||
}
|
||||
switch ec.(type) {
|
||||
case *Rar:
|
||||
return NewRar(), nil
|
||||
case *Tar:
|
||||
return NewTar(), nil
|
||||
case *TarBz2:
|
||||
return NewTarBz2(), nil
|
||||
case *TarGz:
|
||||
return NewTarGz(), nil
|
||||
case *TarLz4:
|
||||
return NewTarLz4(), nil
|
||||
case *TarSz:
|
||||
return NewTarSz(), nil
|
||||
case *TarXz:
|
||||
return NewTarXz(), nil
|
||||
case *Zip:
|
||||
return NewZip(), nil
|
||||
case *Gz:
|
||||
return NewGz(), nil
|
||||
case *Bz2:
|
||||
return NewBz2(), nil
|
||||
case *Lz4:
|
||||
return NewBz2(), nil
|
||||
case *Snappy:
|
||||
return NewSnappy(), nil
|
||||
case *Xz:
|
||||
return NewXz(), nil
|
||||
}
|
||||
return nil, fmt.Errorf("format unrecognized by filename: %s", filename)
|
||||
}
|
||||
|
||||
// ByHeader returns the unarchiver value that matches the input's
|
||||
// file header. It does not affect the current read position.
|
||||
func ByHeader(input io.ReadSeeker) (Unarchiver, error) {
|
||||
var matcher Matcher
|
||||
for _, m := range matchers {
|
||||
ok, err := m.Match(input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("matching on format %s: %v", m, err)
|
||||
}
|
||||
if ok {
|
||||
matcher = m
|
||||
break
|
||||
}
|
||||
}
|
||||
switch matcher.(type) {
|
||||
case *Zip:
|
||||
return NewZip(), nil
|
||||
case *Tar:
|
||||
return NewTar(), nil
|
||||
case *Rar:
|
||||
return NewRar(), nil
|
||||
}
|
||||
return nil, fmt.Errorf("format unrecognized")
|
||||
}
|
||||
|
||||
// extCheckers is a list of the format implementations
// that can check extensions. Only to be used for
// checking extensions - not any archival operations.
// Order matters: compound extensions (e.g. .tar.gz) must
// come before their suffixes (.gz) so they match first.
var extCheckers = []ExtensionChecker{
	&TarBz2{},
	&TarGz{},
	&TarLz4{},
	&TarSz{},
	&TarXz{},
	&Rar{},
	&Tar{},
	&Zip{},
	&Gz{},
	&Bz2{},
	&Lz4{},
	&Snappy{},
	&Xz{},
}
|
||||
|
||||
// matchers lists the formats that can be identified by file
// header, in the order ByHeader tries them.
var matchers = []Matcher{
	&Rar{},
	&Tar{},
	&Zip{},
}
|
||||
17
vendor/github.com/mholt/archiver/build.bash
generated
vendored
Normal file
17
vendor/github.com/mholt/archiver/build.bash
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
#!/usr/bin/env bash
# -e: abort on the first failing command; -x: echo each command.
set -ex

# This script builds archiver for most common platforms.

# Disable cgo so all builds are statically linked and cross-compile
# without a platform C toolchain.
export CGO_ENABLED=0

cd cmd/arc
GOOS=linux GOARCH=386 go build -o ../../builds/arc_linux_386
GOOS=linux GOARCH=amd64 go build -o ../../builds/arc_linux_amd64
GOOS=linux GOARCH=arm go build -o ../../builds/arc_linux_arm7
GOOS=linux GOARCH=arm64 go build -o ../../builds/arc_linux_arm64
GOOS=darwin GOARCH=amd64 go build -o ../../builds/arc_mac_amd64
GOOS=windows GOARCH=amd64 go build -o ../../builds/arc_windows_amd64.exe
GOOS=freebsd GOARCH=amd64 go build -o ../../builds/arc_freebsd_amd64
GOOS=openbsd GOARCH=amd64 go build -o ../../builds/arc_openbsd_amd64
cd ../..
|
||||
64
vendor/github.com/mholt/archiver/bz2.go
generated
vendored
Normal file
64
vendor/github.com/mholt/archiver/bz2.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/dsnet/compress/bzip2"
|
||||
)
|
||||
|
||||
// Bz2 facilitates bzip2 compression.
|
||||
type Bz2 struct {
|
||||
CompressionLevel int
|
||||
}
|
||||
|
||||
// Compress reads in, compresses it, and writes it to out.
|
||||
func (bz *Bz2) Compress(in io.Reader, out io.Writer) error {
|
||||
w, err := bzip2.NewWriter(out, &bzip2.WriterConfig{
|
||||
Level: bz.CompressionLevel,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer w.Close()
|
||||
_, err = io.Copy(w, in)
|
||||
return err
|
||||
}
|
||||
|
||||
// Decompress reads in, decompresses it, and writes it to out.
|
||||
func (bz *Bz2) Decompress(in io.Reader, out io.Writer) error {
|
||||
r, err := bzip2.NewReader(in, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
_, err = io.Copy(out, r)
|
||||
return err
|
||||
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (bz *Bz2) CheckExt(filename string) error {
|
||||
if filepath.Ext(filename) != ".bz2" {
|
||||
return fmt.Errorf("filename must have a .bz2 extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bz *Bz2) String() string { return "bz2" }
|
||||
|
||||
// NewBz2 returns a new, default instance ready to be customized and used.
|
||||
func NewBz2() *Bz2 {
|
||||
return &Bz2{
|
||||
CompressionLevel: bzip2.DefaultCompression,
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
var (
	_ = Compressor(new(Bz2))
	_ = Decompressor(new(Bz2))
)

// DefaultBz2 is a default instance that is conveniently ready to use.
var DefaultBz2 = NewBz2()
|
||||
67
vendor/github.com/mholt/archiver/filecompressor.go
generated
vendored
Normal file
67
vendor/github.com/mholt/archiver/filecompressor.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// FileCompressor can compress and decompress single files.
// Set the embedded Compressor and/or Decompressor to the
// algorithm to use before calling CompressFile/DecompressFile.
type FileCompressor struct {
	Compressor
	Decompressor

	// Whether to overwrite existing files when creating files.
	OverwriteExisting bool
}
|
||||
|
||||
// CompressFile reads the source file and compresses it to destination.
|
||||
// The destination must have a matching extension.
|
||||
func (fc FileCompressor) CompressFile(source, destination string) error {
|
||||
if err := fc.CheckExt(destination); err != nil {
|
||||
return err
|
||||
}
|
||||
if fc.Compressor == nil {
|
||||
return fmt.Errorf("no compressor specified")
|
||||
}
|
||||
if !fc.OverwriteExisting && fileExists(destination) {
|
||||
return fmt.Errorf("file exists: %s", destination)
|
||||
}
|
||||
|
||||
in, err := os.Open(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
out, err := os.Create(destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
return fc.Compress(in, out)
|
||||
}
|
||||
|
||||
// DecompressFile reads the source file and decompresses it to destination.
|
||||
func (fc FileCompressor) DecompressFile(source, destination string) error {
|
||||
if fc.Decompressor == nil {
|
||||
return fmt.Errorf("no decompressor specified")
|
||||
}
|
||||
if !fc.OverwriteExisting && fileExists(destination) {
|
||||
return fmt.Errorf("file exists: %s", destination)
|
||||
}
|
||||
|
||||
in, err := os.Open(source)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
out, err := os.Create(destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
return fc.Decompress(in, out)
|
||||
}
|
||||
61
vendor/github.com/mholt/archiver/gz.go
generated
vendored
Normal file
61
vendor/github.com/mholt/archiver/gz.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Gz facilitates gzip compression.
|
||||
type Gz struct {
|
||||
CompressionLevel int
|
||||
}
|
||||
|
||||
// Compress reads in, compresses it, and writes it to out.
|
||||
func (gz *Gz) Compress(in io.Reader, out io.Writer) error {
|
||||
w, err := gzip.NewWriterLevel(out, gz.CompressionLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer w.Close()
|
||||
_, err = io.Copy(w, in)
|
||||
return err
|
||||
}
|
||||
|
||||
// Decompress reads in, decompresses it, and writes it to out.
|
||||
func (gz *Gz) Decompress(in io.Reader, out io.Writer) error {
|
||||
r, err := gzip.NewReader(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
_, err = io.Copy(out, r)
|
||||
return err
|
||||
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (gz *Gz) CheckExt(filename string) error {
|
||||
if filepath.Ext(filename) != ".gz" {
|
||||
return fmt.Errorf("filename must have a .gz extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gz *Gz) String() string { return "gz" }
|
||||
|
||||
// NewGz returns a new, default instance ready to be customized and used.
|
||||
func NewGz() *Gz {
|
||||
return &Gz{
|
||||
CompressionLevel: gzip.DefaultCompression,
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
var (
	_ = Compressor(new(Gz))
	_ = Decompressor(new(Gz))
)

// DefaultGz is a default instance that is conveniently ready to use.
var DefaultGz = NewGz()
|
||||
56
vendor/github.com/mholt/archiver/lz4.go
generated
vendored
Normal file
56
vendor/github.com/mholt/archiver/lz4.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/pierrec/lz4"
|
||||
)
|
||||
|
||||
// Lz4 facilitates LZ4 compression.
|
||||
type Lz4 struct {
|
||||
CompressionLevel int
|
||||
}
|
||||
|
||||
// Compress reads in, compresses it, and writes it to out.
|
||||
func (lz *Lz4) Compress(in io.Reader, out io.Writer) error {
|
||||
w := lz4.NewWriter(out)
|
||||
w.Header.CompressionLevel = lz.CompressionLevel
|
||||
defer w.Close()
|
||||
_, err := io.Copy(w, in)
|
||||
return err
|
||||
}
|
||||
|
||||
// Decompress reads in, decompresses it, and writes it to out.
|
||||
func (lz *Lz4) Decompress(in io.Reader, out io.Writer) error {
|
||||
r := lz4.NewReader(in)
|
||||
_, err := io.Copy(out, r)
|
||||
return err
|
||||
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (lz *Lz4) CheckExt(filename string) error {
|
||||
if filepath.Ext(filename) != ".lz4" {
|
||||
return fmt.Errorf("filename must have a .lz4 extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lz *Lz4) String() string { return "lz4" }
|
||||
|
||||
// NewLz4 returns a new, default instance ready to be customized and used.
|
||||
func NewLz4() *Lz4 {
|
||||
return &Lz4{
|
||||
CompressionLevel: 9, // https://github.com/lz4/lz4/blob/1b819bfd633ae285df2dfe1b0589e1ec064f2873/lib/lz4hc.h#L48
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
var (
	_ = Compressor(new(Lz4))
	_ = Decompressor(new(Lz4))
)

// DefaultLz4 is a default instance that is conveniently ready to use.
var DefaultLz4 = NewLz4()
|
||||
390
vendor/github.com/mholt/archiver/rar.go
generated
vendored
Normal file
390
vendor/github.com/mholt/archiver/rar.go
generated
vendored
Normal file
@@ -0,0 +1,390 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nwaples/rardecode"
|
||||
)
|
||||
|
||||
// Rar provides facilities for reading RAR archives.
// See https://www.rarlab.com/technote.htm.
// The zero value is usable for extraction; NewRar returns an
// instance with recommended defaults.
type Rar struct {
	// Whether to overwrite existing files; if false,
	// an error is returned if the file exists.
	OverwriteExisting bool

	// Whether to make all the directories necessary
	// to create a rar archive in the desired path.
	MkdirAll bool

	// A single top-level folder can be implicitly
	// created by the Unarchive method if the files
	// to be extracted from the archive do not all
	// have a common root. This roughly mimics the
	// behavior of archival tools integrated into OS
	// file browsers which create a subfolder to
	// avoid unexpectedly littering the destination
	// folder with potentially many files, causing a
	// problematic cleanup/organization situation.
	// This feature is available for both creation
	// and extraction of archives, but may be slightly
	// inefficient with lots and lots of files,
	// especially on extraction.
	ImplicitTopLevelFolder bool

	// If true, errors encountered during reading
	// or writing a single file will be logged and
	// the operation will continue on remaining files.
	ContinueOnError bool

	// The password to open archives (optional).
	Password string

	// Internal reader state; set by Open/OpenFile and
	// cleared by Close. A non-nil rr means "open".
	rr *rardecode.Reader     // underlying stream reader
	rc *rardecode.ReadCloser // supports multi-volume archives (files only)
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (*Rar) CheckExt(filename string) error {
|
||||
if !strings.HasSuffix(filename, ".rar") {
|
||||
return fmt.Errorf("filename must have a .rar extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unarchive unpacks the .rar file at source to destination.
// Destination will be treated as a folder name. It supports
// multi-volume archives.
func (r *Rar) Unarchive(source, destination string) error {
	// Create the destination folder first if allowed to.
	if !fileExists(destination) && r.MkdirAll {
		err := mkdir(destination)
		if err != nil {
			return fmt.Errorf("preparing destination: %v", err)
		}
	}

	// if the files in the archive do not all share a common
	// root, then make sure we extract to a single subfolder
	// rather than potentially littering the destination...
	if r.ImplicitTopLevelFolder {
		var err error
		destination, err = r.addTopLevelFolder(source, destination)
		if err != nil {
			return fmt.Errorf("scanning source archive: %v", err)
		}
	}

	err := r.OpenFile(source)
	if err != nil {
		return fmt.Errorf("opening rar archive for reading: %v", err)
	}
	defer r.Close()

	// Extract entries one at a time until EOF; per-entry errors
	// either abort or, with ContinueOnError, are logged and skipped.
	for {
		err := r.unrarNext(destination)
		if err == io.EOF {
			break
		}
		if err != nil {
			if r.ContinueOnError {
				log.Printf("[ERROR] Reading file in rar archive: %v", err)
				continue
			}
			return fmt.Errorf("reading file in rar archive: %v", err)
		}
	}

	return nil
}
|
||||
|
||||
// addTopLevelFolder scans the files contained inside
|
||||
// the tarball named sourceArchive and returns a modified
|
||||
// destination if all the files do not share the same
|
||||
// top-level folder.
|
||||
func (r *Rar) addTopLevelFolder(sourceArchive, destination string) (string, error) {
|
||||
file, err := os.Open(sourceArchive)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("opening source archive: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
rc, err := rardecode.NewReader(file, r.Password)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("creating archive reader: %v", err)
|
||||
}
|
||||
|
||||
var files []string
|
||||
for {
|
||||
hdr, err := rc.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("scanning tarball's file listing: %v", err)
|
||||
}
|
||||
files = append(files, hdr.Name)
|
||||
}
|
||||
|
||||
if multipleTopLevels(files) {
|
||||
destination = filepath.Join(destination, folderNameFromFileName(sourceArchive))
|
||||
}
|
||||
|
||||
return destination, nil
|
||||
}
|
||||
|
||||
func (r *Rar) unrarNext(to string) error {
|
||||
f, err := r.Read()
|
||||
if err != nil {
|
||||
return err // don't wrap error; calling loop must break on io.EOF
|
||||
}
|
||||
header, ok := f.Header.(*rardecode.FileHeader)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected header to be *rardecode.FileHeader but was %T", f.Header)
|
||||
}
|
||||
return r.unrarFile(f, filepath.Join(to, header.Name))
|
||||
}
|
||||
|
||||
// unrarFile writes the current archive entry f to the path to,
// reading the entry's contents from the receiver's stream reader.
// NOTE(review): directory entries are not special-cased here — they
// also flow into writeNewFile; confirm whether that is intended or
// whether dirs should be mkdir'd instead.
func (r *Rar) unrarFile(f File, to string) error {
	// do not overwrite existing files, if configured
	if !f.IsDir() && !r.OverwriteExisting && fileExists(to) {
		return fmt.Errorf("file already exists: %s", to)
	}

	hdr, ok := f.Header.(*rardecode.FileHeader)
	if !ok {
		return fmt.Errorf("expected header to be *rardecode.FileHeader but was %T", f.Header)
	}

	// if files come before their containing folders, then we must
	// create their folders before writing the file
	err := mkdir(filepath.Dir(to))
	if err != nil {
		return fmt.Errorf("making parent directories: %v", err)
	}

	return writeNewFile(to, r.rr, hdr.Mode())
}
|
||||
|
||||
// OpenFile opens filename for reading. This method supports
|
||||
// multi-volume archives, whereas Open does not (but Open
|
||||
// supports any stream, not just files).
|
||||
func (r *Rar) OpenFile(filename string) error {
|
||||
if r.rr != nil {
|
||||
return fmt.Errorf("rar archive is already open for reading")
|
||||
}
|
||||
var err error
|
||||
r.rc, err = rardecode.OpenReader(filename, r.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.rr = &r.rc.Reader
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open opens t for reading an archive from
|
||||
// in. The size parameter is not used.
|
||||
func (r *Rar) Open(in io.Reader, size int64) error {
|
||||
if r.rr != nil {
|
||||
return fmt.Errorf("rar archive is already open for reading")
|
||||
}
|
||||
var err error
|
||||
r.rr, err = rardecode.NewReader(in, r.Password)
|
||||
return err
|
||||
}
|
||||
|
||||
// Read reads the next file from t, which must have
|
||||
// already been opened for reading. If there are no
|
||||
// more files, the error is io.EOF. The File must
|
||||
// be closed when finished reading from it.
|
||||
func (r *Rar) Read() (File, error) {
|
||||
if r.rr == nil {
|
||||
return File{}, fmt.Errorf("rar archive is not open")
|
||||
}
|
||||
|
||||
hdr, err := r.rr.Next()
|
||||
if err != nil {
|
||||
return File{}, err // don't wrap error; preserve io.EOF
|
||||
}
|
||||
|
||||
file := File{
|
||||
FileInfo: rarFileInfo{hdr},
|
||||
Header: hdr,
|
||||
ReadCloser: ReadFakeCloser{r.rr},
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Close closes the rar archive(s) opened by Create and Open.
|
||||
func (r *Rar) Close() error {
|
||||
var err error
|
||||
if r.rc != nil {
|
||||
rc := r.rc
|
||||
r.rc = nil
|
||||
err = rc.Close()
|
||||
}
|
||||
if r.rr != nil {
|
||||
r.rr = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Walk calls walkFn for each visited item in archive.
// walkFn may return ErrStopWalk to end the walk early without
// error; other walkFn errors abort the walk unless
// ContinueOnError is set, in which case they are logged and
// the walk proceeds to the next entry.
func (r *Rar) Walk(archive string, walkFn WalkFunc) error {
	file, err := os.Open(archive)
	if err != nil {
		return fmt.Errorf("opening archive file: %v", err)
	}
	defer file.Close()

	err = r.Open(file, 0)
	if err != nil {
		return fmt.Errorf("opening archive: %v", err)
	}
	defer r.Close()

	for {
		f, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			if r.ContinueOnError {
				log.Printf("[ERROR] Opening next file: %v", err)
				continue
			}
			return fmt.Errorf("opening next file: %v", err)
		}
		err = walkFn(f)
		if err != nil {
			if err == ErrStopWalk {
				break
			}
			if r.ContinueOnError {
				log.Printf("[ERROR] Walking %s: %v", f.Name(), err)
				continue
			}
			return fmt.Errorf("walking %s: %v", f.Name(), err)
		}
	}

	return nil
}
|
||||
|
||||
// Extract extracts a single file from the rar archive.
// If the target is a directory, the entire folder will
// be extracted into destination.
func (r *Rar) Extract(source, target, destination string) error {
	// target refers to a path inside the archive, which should be clean also
	target = path.Clean(target)

	// if the target ends up being a directory, then
	// we will continue walking and extracting files
	// until we are no longer within that directory
	var targetDirPath string

	return r.Walk(source, func(f File) error {
		th, ok := f.Header.(*rardecode.FileHeader)
		if !ok {
			return fmt.Errorf("expected header to be *rardecode.FileHeader but was %T", f.Header)
		}

		// importantly, cleaning the path strips tailing slash,
		// which must be appended to folders within the archive
		name := path.Clean(th.Name)
		if f.IsDir() && target == name {
			// Remember the directory's parent so extracted entries
			// are relativized against it below.
			targetDirPath = path.Dir(name)
		}

		if within(target, th.Name) {
			// either this is the exact file we want, or is
			// in the directory we want to extract

			// build the filename we will extract to
			end, err := filepath.Rel(targetDirPath, th.Name)
			if err != nil {
				return fmt.Errorf("relativizing paths: %v", err)
			}
			joined := filepath.Join(destination, end)

			err = r.unrarFile(f, joined)
			if err != nil {
				return fmt.Errorf("extracting file %s: %v", th.Name, err)
			}

			// if our target was not a directory, stop walk
			if targetDirPath == "" {
				return ErrStopWalk
			}
		} else if targetDirPath != "" {
			// finished walking the entire directory
			return ErrStopWalk
		}

		return nil
	})
}
|
||||
|
||||
// Match returns true if the format of file matches this
|
||||
// type's format. It should not affect reader position.
|
||||
func (*Rar) Match(file io.ReadSeeker) (bool, error) {
|
||||
currentPos, err := file.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
_, err = file.Seek(0, 0)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer file.Seek(currentPos, io.SeekStart)
|
||||
|
||||
buf := make([]byte, 8)
|
||||
if n, err := file.Read(buf); err != nil || n < 8 {
|
||||
return false, nil
|
||||
}
|
||||
hasTarHeader := bytes.Equal(buf[:7], []byte("Rar!\x1a\x07\x00")) || // ver 1.5
|
||||
bytes.Equal(buf, []byte("Rar!\x1a\x07\x01\x00")) // ver 5.0
|
||||
return hasTarHeader, nil
|
||||
}
|
||||
|
||||
func (r *Rar) String() string { return "rar" }
|
||||
|
||||
// NewRar returns a new, default instance ready to be customized and used.
|
||||
func NewRar() *Rar {
|
||||
return &Rar{
|
||||
MkdirAll: true,
|
||||
}
|
||||
}
|
||||
|
||||
// rarFileInfo adapts a rardecode.FileHeader to the os.FileInfo interface.
type rarFileInfo struct {
	fh *rardecode.FileHeader
}

func (rfi rarFileInfo) Name() string       { return rfi.fh.Name }
func (rfi rarFileInfo) Size() int64        { return rfi.fh.UnPackedSize }
func (rfi rarFileInfo) Mode() os.FileMode  { return rfi.fh.Mode() }
func (rfi rarFileInfo) ModTime() time.Time { return rfi.fh.ModificationTime }
func (rfi rarFileInfo) IsDir() bool        { return rfi.fh.IsDir }
func (rfi rarFileInfo) Sys() interface{}   { return nil } // no underlying data source
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
|
||||
var (
|
||||
_ = Reader(new(Rar))
|
||||
_ = Unarchiver(new(Rar))
|
||||
_ = Walker(new(Rar))
|
||||
_ = Extractor(new(Rar))
|
||||
_ = Matcher(new(Rar))
|
||||
_ = ExtensionChecker(new(Rar))
|
||||
_ = os.FileInfo(rarFileInfo{})
|
||||
)
|
||||
|
||||
// DefaultRar is a default instance that is conveniently ready to use.
|
||||
var DefaultRar = NewRar()
|
||||
51
vendor/github.com/mholt/archiver/sz.go
generated
vendored
Normal file
51
vendor/github.com/mholt/archiver/sz.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
)
|
||||
|
||||
// Snappy facilitates Snappy compression.
|
||||
type Snappy struct{}
|
||||
|
||||
// Compress reads in, compresses it, and writes it to out.
|
||||
func (s *Snappy) Compress(in io.Reader, out io.Writer) error {
|
||||
w := snappy.NewWriter(out)
|
||||
defer w.Close()
|
||||
_, err := io.Copy(w, in)
|
||||
return err
|
||||
}
|
||||
|
||||
// Decompress reads in, decompresses it, and writes it to out.
|
||||
func (s *Snappy) Decompress(in io.Reader, out io.Writer) error {
|
||||
r := snappy.NewReader(in)
|
||||
_, err := io.Copy(out, r)
|
||||
return err
|
||||
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (s *Snappy) CheckExt(filename string) error {
|
||||
if filepath.Ext(filename) != ".sz" {
|
||||
return fmt.Errorf("filename must have a .sz extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Snappy) String() string { return "sz" }
|
||||
|
||||
// NewSnappy returns a new, default instance ready to be customized and used.
|
||||
func NewSnappy() *Snappy {
|
||||
return new(Snappy)
|
||||
}
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
|
||||
var (
|
||||
_ = Compressor(new(Snappy))
|
||||
_ = Decompressor(new(Snappy))
|
||||
)
|
||||
|
||||
// DefaultSnappy is a default instance that is conveniently ready to use.
|
||||
var DefaultSnappy = NewSnappy()
|
||||
605
vendor/github.com/mholt/archiver/tar.go
generated
vendored
Normal file
605
vendor/github.com/mholt/archiver/tar.go
generated
vendored
Normal file
@@ -0,0 +1,605 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Tar provides facilities for operating TAR archives.
// See http://www.gnu.org/software/tar/manual/html_node/Standard.html.
type Tar struct {
	// Whether to overwrite existing files; if false,
	// an error is returned if the file exists.
	OverwriteExisting bool

	// Whether to make all the directories necessary
	// to create a tar archive in the desired path.
	MkdirAll bool

	// A single top-level folder can be implicitly
	// created by the Archive or Unarchive methods
	// if the files to be added to the archive
	// or the files to be extracted from the archive
	// do not all have a common root. This roughly
	// mimics the behavior of archival tools integrated
	// into OS file browsers which create a subfolder
	// to avoid unexpectedly littering the destination
	// folder with potentially many files, causing a
	// problematic cleanup/organization situation.
	// This feature is available for both creation
	// and extraction of archives, but may be slightly
	// inefficient with lots and lots of files,
	// especially on extraction.
	ImplicitTopLevelFolder bool

	// If true, errors encountered during reading
	// or writing a single file will be logged and
	// the operation will continue on remaining files.
	ContinueOnError bool

	// tw and tr hold the writer/reader state between
	// Create/Write/Close and Open/Read/Close calls.
	tw *tar.Writer
	tr *tar.Reader

	// Hooks used by the compressed variants (tar.gz,
	// tar.bz2, ...) to wrap the underlying stream in a
	// compressor/decompressor; cleanupWrapFn releases
	// whatever the wrap functions allocated and runs
	// from Close after the stream is closed.
	readerWrapFn  func(io.Reader) (io.Reader, error)
	writerWrapFn  func(io.Writer) (io.Writer, error)
	cleanupWrapFn func()
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (*Tar) CheckExt(filename string) error {
|
||||
if !strings.HasSuffix(filename, ".tar") {
|
||||
return fmt.Errorf("filename must have a .tar extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Archive creates a tarball file at destination containing
// the files listed in sources. The destination must end with
// ".tar". File paths can be those of regular files or
// directories; directories will be recursively added.
func (t *Tar) Archive(sources []string, destination string) error {
	// a non-nil writerWrapFn means a compressed-tar variant
	// (e.g. tar.gz) is driving us, in which case the plain
	// ".tar" extension check does not apply
	err := t.CheckExt(destination)
	if t.writerWrapFn == nil && err != nil {
		return fmt.Errorf("checking extension: %v", err)
	}
	if !t.OverwriteExisting && fileExists(destination) {
		return fmt.Errorf("file already exists: %s", destination)
	}

	// make the folder to contain the resulting archive
	// if it does not already exist
	destDir := filepath.Dir(destination)
	if t.MkdirAll && !fileExists(destDir) {
		err := mkdir(destDir)
		if err != nil {
			return fmt.Errorf("making folder for destination: %v", err)
		}
	}

	out, err := os.Create(destination)
	if err != nil {
		return fmt.Errorf("creating %s: %v", destination, err)
	}
	defer out.Close()

	err = t.Create(out)
	if err != nil {
		return fmt.Errorf("creating tar: %v", err)
	}
	// deferred after out.Close above, so (LIFO) the tar writer is
	// flushed and closed before the underlying file is closed
	defer t.Close()

	// when sources have multiple top-level entries, optionally wrap
	// them all in a folder named after the destination file
	var topLevelFolder string
	if t.ImplicitTopLevelFolder && multipleTopLevels(sources) {
		topLevelFolder = folderNameFromFileName(destination)
	}

	for _, source := range sources {
		err := t.writeWalk(source, topLevelFolder, destination)
		if err != nil {
			return fmt.Errorf("walking %s: %v", source, err)
		}
	}

	return nil
}
|
||||
|
||||
// Unarchive unpacks the .tar file at source to destination.
// Destination will be treated as a folder name.
func (t *Tar) Unarchive(source, destination string) error {
	if !fileExists(destination) && t.MkdirAll {
		err := mkdir(destination)
		if err != nil {
			return fmt.Errorf("preparing destination: %v", err)
		}
	}

	// if the files in the archive do not all share a common
	// root, then make sure we extract to a single subfolder
	// rather than potentially littering the destination...
	if t.ImplicitTopLevelFolder {
		var err error
		destination, err = t.addTopLevelFolder(source, destination)
		if err != nil {
			return fmt.Errorf("scanning source archive: %v", err)
		}
	}

	file, err := os.Open(source)
	if err != nil {
		return fmt.Errorf("opening source archive: %v", err)
	}
	defer file.Close()

	err = t.Open(file, 0)
	if err != nil {
		return fmt.Errorf("opening tar archive for reading: %v", err)
	}
	defer t.Close()

	// extract entries one at a time until the archive is exhausted;
	// io.EOF is the normal termination signal, not an error
	for {
		err := t.untarNext(destination)
		if err == io.EOF {
			break
		}
		if err != nil {
			if t.ContinueOnError {
				log.Printf("[ERROR] Reading file in tar archive: %v", err)
				continue
			}
			return fmt.Errorf("reading file in tar archive: %v", err)
		}
	}

	return nil
}
|
||||
|
||||
// addTopLevelFolder scans the files contained inside
// the tarball named sourceArchive and returns a modified
// destination if all the files do not share the same
// top-level folder.
func (t *Tar) addTopLevelFolder(sourceArchive, destination string) (string, error) {
	file, err := os.Open(sourceArchive)
	if err != nil {
		return "", fmt.Errorf("opening source archive: %v", err)
	}
	defer file.Close()

	// if the reader is to be wrapped, ensure we do that now
	// or we will not be able to read the archive successfully
	reader := io.Reader(file)
	if t.readerWrapFn != nil {
		reader, err = t.readerWrapFn(reader)
		if err != nil {
			return "", fmt.Errorf("wrapping reader: %v", err)
		}
	}
	if t.cleanupWrapFn != nil {
		defer t.cleanupWrapFn()
	}

	// this is a separate pre-scan pass: use a throwaway tar.Reader
	// rather than the t.tr state used by Open/Read
	tr := tar.NewReader(reader)

	var files []string
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return "", fmt.Errorf("scanning tarball's file listing: %v", err)
		}
		files = append(files, hdr.Name)
	}

	if multipleTopLevels(files) {
		// no common root: nest everything in a folder named
		// after the archive file itself
		destination = filepath.Join(destination, folderNameFromFileName(sourceArchive))
	}

	return destination, nil
}
|
||||
|
||||
// untarNext reads the next entry from the open archive and
// extracts it beneath the folder to.
func (t *Tar) untarNext(to string) error {
	f, err := t.Read()
	if err != nil {
		return err // don't wrap error; calling loop must break on io.EOF
	}
	header, ok := f.Header.(*tar.Header)
	if !ok {
		return fmt.Errorf("expected header to be *tar.Header but was %T", f.Header)
	}
	// place the entry at its archive-relative path under to
	return t.untarFile(f, filepath.Join(to, header.Name))
}
|
||||
|
||||
// untarFile extracts the single entry f to the path to,
// dispatching on the tar header's type flag.
func (t *Tar) untarFile(f File, to string) error {
	// do not overwrite existing files, if configured
	if !f.IsDir() && !t.OverwriteExisting && fileExists(to) {
		return fmt.Errorf("file already exists: %s", to)
	}

	hdr, ok := f.Header.(*tar.Header)
	if !ok {
		return fmt.Errorf("expected header to be *tar.Header but was %T", f.Header)
	}

	switch hdr.Typeflag {
	case tar.TypeDir:
		return mkdir(to)
	case tar.TypeReg, tar.TypeRegA, tar.TypeChar, tar.TypeBlock, tar.TypeFifo:
		return writeNewFile(to, f, f.Mode())
	case tar.TypeSymlink:
		return writeNewSymbolicLink(to, hdr.Linkname)
	case tar.TypeLink:
		// NOTE(review): joining Linkname onto the entry's own output
		// path (rather than the extraction root) looks suspicious for
		// hard links whose target is relative to the archive root —
		// verify against upstream before relying on hard-link support
		return writeNewHardLink(to, filepath.Join(to, hdr.Linkname))
	case tar.TypeXGlobalHeader:
		return nil // ignore the pax global header from git-generated tarballs
	default:
		return fmt.Errorf("%s: unknown type flag: %c", hdr.Name, hdr.Typeflag)
	}
}
|
||||
|
||||
// writeWalk walks the file tree rooted at source and writes every
// visited file into the open tar archive, naming entries via
// makeNameInArchive (which prefixes topLevelFolder when non-empty).
// destination is the archive file itself, which is skipped so the
// output is never archived into itself.
func (t *Tar) writeWalk(source, topLevelFolder, destination string) error {
	sourceInfo, err := os.Stat(source)
	if err != nil {
		return fmt.Errorf("%s: stat: %v", source, err)
	}
	destAbs, err := filepath.Abs(destination)
	if err != nil {
		return fmt.Errorf("%s: getting absolute path of destination %s: %v", source, destination, err)
	}

	return filepath.Walk(source, func(fpath string, info os.FileInfo, err error) error {
		// handleErr implements ContinueOnError for this walk:
		// either log and keep going, or abort with the error
		handleErr := func(err error) error {
			if t.ContinueOnError {
				log.Printf("[ERROR] Walking %s: %v", fpath, err)
				return nil
			}
			return err
		}
		if err != nil {
			return handleErr(fmt.Errorf("traversing %s: %v", fpath, err))
		}
		if info == nil {
			return handleErr(fmt.Errorf("no file info"))
		}

		// make sure we do not copy our output file into itself
		fpathAbs, err := filepath.Abs(fpath)
		if err != nil {
			return handleErr(fmt.Errorf("%s: getting absolute path: %v", fpath, err))
		}
		if within(fpathAbs, destAbs) {
			return nil
		}

		// build the name to be used within the archive
		nameInArchive, err := makeNameInArchive(sourceInfo, source, topLevelFolder, fpath)
		if err != nil {
			return handleErr(err)
		}

		file, err := os.Open(fpath)
		if err != nil {
			return handleErr(fmt.Errorf("%s: opening: %v", fpath, err))
		}
		// deferred inside the walk closure, so each file is closed
		// as soon as its visit returns
		defer file.Close()

		err = t.Write(File{
			FileInfo: FileInfo{
				FileInfo:   info,
				CustomName: nameInArchive,
			},
			ReadCloser: file,
		})
		if err != nil {
			return handleErr(fmt.Errorf("%s: writing: %s", fpath, err))
		}

		return nil
	})
}
|
||||
|
||||
// Create opens t for writing a tar archive to out.
|
||||
func (t *Tar) Create(out io.Writer) error {
|
||||
if t.tw != nil {
|
||||
return fmt.Errorf("tar archive is already created for writing")
|
||||
}
|
||||
|
||||
// wrapping writers allows us to output
|
||||
// compressed tarballs, for example
|
||||
if t.writerWrapFn != nil {
|
||||
var err error
|
||||
out, err = t.writerWrapFn(out)
|
||||
if err != nil {
|
||||
return fmt.Errorf("wrapping writer: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
t.tw = tar.NewWriter(out)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes f to t, which must have been opened for writing first.
func (t *Tar) Write(f File) error {
	if t.tw == nil {
		return fmt.Errorf("tar archive was not created for writing first")
	}
	if f.FileInfo == nil {
		return fmt.Errorf("no file info")
	}
	if f.FileInfo.Name() == "" {
		return fmt.Errorf("missing file name")
	}

	hdr, err := tar.FileInfoHeader(f, f.Name())
	if err != nil {
		return fmt.Errorf("%s: making header: %v", f.Name(), err)
	}

	err = t.tw.WriteHeader(hdr)
	if err != nil {
		return fmt.Errorf("%s: writing header: %v", hdr.Name, err)
	}

	// directories have no content to copy
	if f.IsDir() {
		return nil
	}

	// only regular files carry a data payload; symlinks, devices,
	// etc. are fully described by their header
	if hdr.Typeflag == tar.TypeReg {
		if f.ReadCloser == nil {
			return fmt.Errorf("%s: no way to read file contents", f.Name())
		}
		_, err := io.Copy(t.tw, f)
		if err != nil {
			return fmt.Errorf("%s: copying contents: %v", f.Name(), err)
		}
	}

	return nil
}
|
||||
|
||||
// Open opens t for reading an archive from
|
||||
// in. The size parameter is not used.
|
||||
func (t *Tar) Open(in io.Reader, size int64) error {
|
||||
if t.tr != nil {
|
||||
return fmt.Errorf("tar archive is already open for reading")
|
||||
}
|
||||
// wrapping readers allows us to open compressed tarballs
|
||||
if t.readerWrapFn != nil {
|
||||
var err error
|
||||
in, err = t.readerWrapFn(in)
|
||||
if err != nil {
|
||||
return fmt.Errorf("wrapping file reader: %v", err)
|
||||
}
|
||||
}
|
||||
t.tr = tar.NewReader(in)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read reads the next file from t, which must have
|
||||
// already been opened for reading. If there are no
|
||||
// more files, the error is io.EOF. The File must
|
||||
// be closed when finished reading from it.
|
||||
func (t *Tar) Read() (File, error) {
|
||||
if t.tr == nil {
|
||||
return File{}, fmt.Errorf("tar archive is not open")
|
||||
}
|
||||
|
||||
hdr, err := t.tr.Next()
|
||||
if err != nil {
|
||||
return File{}, err // don't wrap error; preserve io.EOF
|
||||
}
|
||||
|
||||
file := File{
|
||||
FileInfo: hdr.FileInfo(),
|
||||
Header: hdr,
|
||||
ReadCloser: ReadFakeCloser{t.tr},
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Close closes the tar archive(s) opened by Create and Open.
func (t *Tar) Close() error {
	var err error
	if t.tr != nil {
		// tar.Reader has no Close method; just drop the reference
		t.tr = nil
	}
	if t.tw != nil {
		// clear t.tw before closing so the archiver is reusable
		// even if Close returns an error
		tw := t.tw
		t.tw = nil
		err = tw.Close()
	}
	// make sure cleanup of "Reader/Writer wrapper"
	// (say that ten times fast) happens AFTER the
	// underlying stream is closed
	if t.cleanupWrapFn != nil {
		t.cleanupWrapFn()
	}
	return err
}
|
||||
|
||||
// Walk calls walkFn for each visited item in archive.
func (t *Tar) Walk(archive string, walkFn WalkFunc) error {
	file, err := os.Open(archive)
	if err != nil {
		return fmt.Errorf("opening archive file: %v", err)
	}
	defer file.Close()

	err = t.Open(file, 0)
	if err != nil {
		return fmt.Errorf("opening archive: %v", err)
	}
	defer t.Close()

	for {
		f, err := t.Read()
		if err == io.EOF {
			// end of archive: normal termination
			break
		}
		if err != nil {
			if t.ContinueOnError {
				log.Printf("[ERROR] Opening next file: %v", err)
				continue
			}
			return fmt.Errorf("opening next file: %v", err)
		}
		err = walkFn(f)
		if err != nil {
			// ErrStopWalk is the walkFn's way of ending the walk
			// early without reporting an error to the caller
			if err == ErrStopWalk {
				break
			}
			if t.ContinueOnError {
				log.Printf("[ERROR] Walking %s: %v", f.Name(), err)
				continue
			}
			return fmt.Errorf("walking %s: %v", f.Name(), err)
		}
	}

	return nil
}
|
||||
|
||||
// Extract extracts a single file from the tar archive.
// If the target is a directory, the entire folder will
// be extracted into destination.
func (t *Tar) Extract(source, target, destination string) error {
	// target refers to a path inside the archive, which should be clean also
	target = path.Clean(target)

	// if the target ends up being a directory, then
	// we will continue walking and extracting files
	// until we are no longer within that directory
	var targetDirPath string

	return t.Walk(source, func(f File) error {
		th, ok := f.Header.(*tar.Header)
		if !ok {
			return fmt.Errorf("expected header to be *tar.Header but was %T", f.Header)
		}

		// importantly, cleaning the path strips tailing slash,
		// which must be appended to folders within the archive
		name := path.Clean(th.Name)
		if f.IsDir() && target == name {
			// remember the target directory's parent so entry names
			// can be relativized against it when building output paths
			targetDirPath = path.Dir(name)
		}

		if within(target, th.Name) {
			// either this is the exact file we want, or is
			// in the directory we want to extract

			// build the filename we will extract to
			end, err := filepath.Rel(targetDirPath, th.Name)
			if err != nil {
				return fmt.Errorf("relativizing paths: %v", err)
			}
			joined := filepath.Join(destination, end)

			err = t.untarFile(f, joined)
			if err != nil {
				return fmt.Errorf("extracting file %s: %v", th.Name, err)
			}

			// if our target was not a directory, stop walk
			if targetDirPath == "" {
				return ErrStopWalk
			}
		} else if targetDirPath != "" {
			// we previously entered the target directory and have now
			// left it — assumes a directory's entries are contiguous
			// in the archive; finished walking the entire directory
			return ErrStopWalk
		}

		return nil
	})
}
|
||||
|
||||
// Match returns true if the format of file matches this
|
||||
// type's format. It should not affect reader position.
|
||||
func (*Tar) Match(file io.ReadSeeker) (bool, error) {
|
||||
currentPos, err := file.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
_, err = file.Seek(0, 0)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer file.Seek(currentPos, io.SeekStart)
|
||||
|
||||
buf := make([]byte, tarBlockSize)
|
||||
if _, err = io.ReadFull(file, buf); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return hasTarHeader(buf), nil
|
||||
}
|
||||
|
||||
// hasTarHeader checks passed bytes has a valid tar header or not. buf must
|
||||
// contain at least 512 bytes and if not, it always returns false.
|
||||
func hasTarHeader(buf []byte) bool {
|
||||
if len(buf) < tarBlockSize {
|
||||
return false
|
||||
}
|
||||
|
||||
b := buf[148:156]
|
||||
b = bytes.Trim(b, " \x00") // clean up all spaces and null bytes
|
||||
if len(b) == 0 {
|
||||
return false // unknown format
|
||||
}
|
||||
hdrSum, err := strconv.ParseUint(string(b), 8, 64)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// According to the go official archive/tar, Sun tar uses signed byte
|
||||
// values so this calcs both signed and unsigned
|
||||
var usum uint64
|
||||
var sum int64
|
||||
for i, c := range buf {
|
||||
if 148 <= i && i < 156 {
|
||||
c = ' ' // checksum field itself is counted as branks
|
||||
}
|
||||
usum += uint64(uint8(c))
|
||||
sum += int64(int8(c))
|
||||
}
|
||||
|
||||
if hdrSum != usum && int64(hdrSum) != sum {
|
||||
return false // invalid checksum
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (t *Tar) String() string { return "tar" }
|
||||
|
||||
// NewTar returns a new, default instance ready to be customized and used.
|
||||
func NewTar() *Tar {
|
||||
return &Tar{
|
||||
MkdirAll: true,
|
||||
}
|
||||
}
|
||||
|
||||
const tarBlockSize = 512
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
|
||||
var (
|
||||
_ = Reader(new(Tar))
|
||||
_ = Writer(new(Tar))
|
||||
_ = Archiver(new(Tar))
|
||||
_ = Unarchiver(new(Tar))
|
||||
_ = Walker(new(Tar))
|
||||
_ = Extractor(new(Tar))
|
||||
_ = Matcher(new(Tar))
|
||||
_ = ExtensionChecker(new(Rar))
|
||||
)
|
||||
|
||||
// DefaultTar is a default instance that is conveniently ready to use.
// Note that it is shared package state; customize a NewTar() instance
// instead of mutating this one.
var DefaultTar = NewTar()
|
||||
126
vendor/github.com/mholt/archiver/tarbz2.go
generated
vendored
Normal file
126
vendor/github.com/mholt/archiver/tarbz2.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/dsnet/compress/bzip2"
|
||||
)
|
||||
|
||||
// TarBz2 facilitates bzip2 compression
// (https://github.com/dsnet/compress/blob/master/doc/bzip2-format.pdf)
// of tarball archives.
type TarBz2 struct {
	*Tar

	// CompressionLevel is passed to the bzip2 writer via
	// bzip2.WriterConfig; see that package for the valid range.
	CompressionLevel int
}

// CheckExt ensures the file extension matches the format.
func (*TarBz2) CheckExt(filename string) error {
	if !strings.HasSuffix(filename, ".tar.bz2") &&
		!strings.HasSuffix(filename, ".tbz2") {
		return fmt.Errorf("filename must have a .tar.bz2 or .tbz2 extension")
	}
	return nil
}
|
||||
|
||||
// Archive creates a compressed tar file at destination
// containing the files listed in sources. The destination
// must end with ".tar.bz2" or ".tbz2". File paths can be
// those of regular files or directories; directories will
// be recursively added.
func (tbz2 *TarBz2) Archive(sources []string, destination string) error {
	err := tbz2.CheckExt(destination)
	if err != nil {
		return fmt.Errorf("output %s", err.Error())
	}
	// install the bzip2 writer wrapper, then delegate to Tar
	tbz2.wrapWriter()
	return tbz2.Tar.Archive(sources, destination)
}

// Unarchive unpacks the compressed tarball at
// source to destination. Destination will be
// treated as a folder name.
func (tbz2 *TarBz2) Unarchive(source, destination string) error {
	tbz2.wrapReader()
	return tbz2.Tar.Unarchive(source, destination)
}

// Walk calls walkFn for each visited item in archive.
func (tbz2 *TarBz2) Walk(archive string, walkFn WalkFunc) error {
	tbz2.wrapReader()
	return tbz2.Tar.Walk(archive, walkFn)
}

// Create opens tbz2 for writing a compressed
// tar archive to out.
func (tbz2 *TarBz2) Create(out io.Writer) error {
	tbz2.wrapWriter()
	return tbz2.Tar.Create(out)
}

// Open opens t for reading a compressed archive from
// in. The size parameter is not used.
func (tbz2 *TarBz2) Open(in io.Reader, size int64) error {
	tbz2.wrapReader()
	return tbz2.Tar.Open(in, size)
}

// Extract extracts a single file from the tar archive.
// If the target is a directory, the entire folder will
// be extracted into destination.
func (tbz2 *TarBz2) Extract(source, target, destination string) error {
	tbz2.wrapReader()
	return tbz2.Tar.Extract(source, target, destination)
}
|
||||
|
||||
// wrapWriter makes the embedded Tar compress its output with bzip2.
func (tbz2 *TarBz2) wrapWriter() {
	var bz2w *bzip2.Writer
	tbz2.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) {
		var err error
		bz2w, err = bzip2.NewWriter(w, &bzip2.WriterConfig{
			Level: tbz2.CompressionLevel,
		})
		return bz2w, err
	}
	// NOTE(review): if Tar.Close runs without writerWrapFn ever
	// having been invoked, bz2w is still nil here — confirm callers
	// always go through Create/Archive before Close.
	tbz2.Tar.cleanupWrapFn = func() {
		bz2w.Close()
	}
}

// wrapReader makes the embedded Tar decompress its input with bzip2.
func (tbz2 *TarBz2) wrapReader() {
	var bz2r *bzip2.Reader
	tbz2.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) {
		var err error
		bz2r, err = bzip2.NewReader(r, nil)
		return bz2r, err
	}
	// NOTE(review): same nil caveat as wrapWriter if the wrap
	// function never runs before cleanup.
	tbz2.Tar.cleanupWrapFn = func() {
		bz2r.Close()
	}
}
|
||||
|
||||
// String returns the name of this format.
func (tbz2 *TarBz2) String() string { return "tar.bz2" }

// NewTarBz2 returns a new, default instance ready to be customized and used.
func NewTarBz2() *TarBz2 {
	return &TarBz2{
		CompressionLevel: bzip2.DefaultCompression,
		Tar:              NewTar(),
	}
}

// Compile-time checks to ensure type implements desired interfaces.
var (
	_ = Reader(new(TarBz2))
	_ = Writer(new(TarBz2))
	_ = Archiver(new(TarBz2))
	_ = Unarchiver(new(TarBz2))
	_ = Walker(new(TarBz2))
	_ = Extractor(new(TarBz2))
)

// DefaultTarBz2 is a convenient archiver ready to use.
var DefaultTarBz2 = NewTarBz2()
|
||||
124
vendor/github.com/mholt/archiver/targz.go
generated
vendored
Normal file
124
vendor/github.com/mholt/archiver/targz.go
generated
vendored
Normal file
@@ -0,0 +1,124 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TarGz facilitates gzip compression
// (RFC 1952) of tarball archives.
type TarGz struct {
	*Tar

	// The compression level to use, as described
	// in the compress/gzip package.
	CompressionLevel int
}

// CheckExt ensures the file extension matches the format.
func (*TarGz) CheckExt(filename string) error {
	if !strings.HasSuffix(filename, ".tar.gz") &&
		!strings.HasSuffix(filename, ".tgz") {
		return fmt.Errorf("filename must have a .tar.gz or .tgz extension")
	}
	return nil
}
|
||||
|
||||
// Archive creates a compressed tar file at destination
// containing the files listed in sources. The destination
// must end with ".tar.gz" or ".tgz". File paths can be
// those of regular files or directories; directories will
// be recursively added.
func (tgz *TarGz) Archive(sources []string, destination string) error {
	err := tgz.CheckExt(destination)
	if err != nil {
		return fmt.Errorf("output %s", err.Error())
	}
	// install the gzip writer wrapper, then delegate to Tar
	tgz.wrapWriter()
	return tgz.Tar.Archive(sources, destination)
}

// Unarchive unpacks the compressed tarball at
// source to destination. Destination will be
// treated as a folder name.
func (tgz *TarGz) Unarchive(source, destination string) error {
	tgz.wrapReader()
	return tgz.Tar.Unarchive(source, destination)
}

// Walk calls walkFn for each visited item in archive.
func (tgz *TarGz) Walk(archive string, walkFn WalkFunc) error {
	tgz.wrapReader()
	return tgz.Tar.Walk(archive, walkFn)
}

// Create opens tgz for writing a compressed
// tar archive to out.
func (tgz *TarGz) Create(out io.Writer) error {
	tgz.wrapWriter()
	return tgz.Tar.Create(out)
}

// Open opens t for reading a compressed archive from
// in. The size parameter is not used.
func (tgz *TarGz) Open(in io.Reader, size int64) error {
	tgz.wrapReader()
	return tgz.Tar.Open(in, size)
}

// Extract extracts a single file from the tar archive.
// If the target is a directory, the entire folder will
// be extracted into destination.
func (tgz *TarGz) Extract(source, target, destination string) error {
	tgz.wrapReader()
	return tgz.Tar.Extract(source, target, destination)
}
|
||||
|
||||
// wrapWriter makes the embedded Tar compress its output with gzip.
func (tgz *TarGz) wrapWriter() {
	var gzw *gzip.Writer
	tgz.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) {
		var err error
		gzw, err = gzip.NewWriterLevel(w, tgz.CompressionLevel)
		return gzw, err
	}
	// NOTE(review): if Tar.Close runs without writerWrapFn ever
	// having been invoked, gzw is still nil here — confirm callers
	// always go through Create/Archive before Close.
	tgz.Tar.cleanupWrapFn = func() {
		gzw.Close()
	}
}

// wrapReader makes the embedded Tar decompress its input with gzip.
func (tgz *TarGz) wrapReader() {
	var gzr *gzip.Reader
	tgz.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) {
		var err error
		gzr, err = gzip.NewReader(r)
		return gzr, err
	}
	// NOTE(review): same nil caveat as wrapWriter if the wrap
	// function never runs before cleanup.
	tgz.Tar.cleanupWrapFn = func() {
		gzr.Close()
	}
}
|
||||
|
||||
// String returns the name of this format.
func (tgz *TarGz) String() string { return "tar.gz" }

// NewTarGz returns a new, default instance ready to be customized and used.
func NewTarGz() *TarGz {
	return &TarGz{
		CompressionLevel: gzip.DefaultCompression,
		Tar:              NewTar(),
	}
}

// Compile-time checks to ensure type implements desired interfaces.
var (
	_ = Reader(new(TarGz))
	_ = Writer(new(TarGz))
	_ = Archiver(new(TarGz))
	_ = Unarchiver(new(TarGz))
	_ = Walker(new(TarGz))
	_ = Extractor(new(TarGz))
)

// DefaultTarGz is a convenient archiver ready to use.
var DefaultTarGz = NewTarGz()
|
||||
122
vendor/github.com/mholt/archiver/tarlz4.go
generated
vendored
Normal file
122
vendor/github.com/mholt/archiver/tarlz4.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/pierrec/lz4"
|
||||
)
|
||||
|
||||
// TarLz4 facilitates lz4 compression
// (https://github.com/lz4/lz4/tree/master/doc)
// of tarball archives.
type TarLz4 struct {
	*Tar

	// The compression level to use when writing.
	// Minimum 0 (fast compression), maximum 12
	// (most space savings).
	CompressionLevel int
}

// CheckExt ensures the file extension matches the format.
func (*TarLz4) CheckExt(filename string) error {
	if !strings.HasSuffix(filename, ".tar.lz4") &&
		!strings.HasSuffix(filename, ".tlz4") {
		return fmt.Errorf("filename must have a .tar.lz4 or .tlz4 extension")
	}
	return nil
}
|
||||
|
||||
// Archive creates a compressed tar file at destination
|
||||
// containing the files listed in sources. The destination
|
||||
// must end with ".tar.lz4" or ".tlz4". File paths can be
|
||||
// those of regular files or directories; directories will
|
||||
// be recursively added.
|
||||
func (tlz4 *TarLz4) Archive(sources []string, destination string) error {
|
||||
err := tlz4.CheckExt(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("output %s", err.Error())
|
||||
}
|
||||
tlz4.wrapWriter()
|
||||
return tlz4.Tar.Archive(sources, destination)
|
||||
}
|
||||
|
||||
// Unarchive unpacks the compressed tarball at
|
||||
// source to destination. Destination will be
|
||||
// treated as a folder name.
|
||||
func (tlz4 *TarLz4) Unarchive(source, destination string) error {
|
||||
tlz4.wrapReader()
|
||||
return tlz4.Tar.Unarchive(source, destination)
|
||||
}
|
||||
|
||||
// Walk calls walkFn for each visited item in archive.
|
||||
func (tlz4 *TarLz4) Walk(archive string, walkFn WalkFunc) error {
|
||||
tlz4.wrapReader()
|
||||
return tlz4.Tar.Walk(archive, walkFn)
|
||||
}
|
||||
|
||||
// Create opens tlz4 for writing a compressed
|
||||
// tar archive to out.
|
||||
func (tlz4 *TarLz4) Create(out io.Writer) error {
|
||||
tlz4.wrapWriter()
|
||||
return tlz4.Tar.Create(out)
|
||||
}
|
||||
|
||||
// Open opens t for reading a compressed archive from
|
||||
// in. The size parameter is not used.
|
||||
func (tlz4 *TarLz4) Open(in io.Reader, size int64) error {
|
||||
tlz4.wrapReader()
|
||||
return tlz4.Tar.Open(in, size)
|
||||
}
|
||||
|
||||
// Extract extracts a single file from the tar archive.
|
||||
// If the target is a directory, the entire folder will
|
||||
// be extracted into destination.
|
||||
func (tlz4 *TarLz4) Extract(source, target, destination string) error {
|
||||
tlz4.wrapReader()
|
||||
return tlz4.Tar.Extract(source, target, destination)
|
||||
}
|
||||
|
||||
func (tlz4 *TarLz4) wrapWriter() {
|
||||
var lz4w *lz4.Writer
|
||||
tlz4.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) {
|
||||
lz4w = lz4.NewWriter(w)
|
||||
lz4w.Header.CompressionLevel = tlz4.CompressionLevel
|
||||
return lz4w, nil
|
||||
}
|
||||
tlz4.Tar.cleanupWrapFn = func() {
|
||||
lz4w.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (tlz4 *TarLz4) wrapReader() {
|
||||
tlz4.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) {
|
||||
return lz4.NewReader(r), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (tlz4 *TarLz4) String() string { return "tar.lz4" }
|
||||
|
||||
// NewTarLz4 returns a new, default instance ready to be customized and used.
|
||||
func NewTarLz4() *TarLz4 {
|
||||
return &TarLz4{
|
||||
CompressionLevel: 9, // https://github.com/lz4/lz4/blob/1b819bfd633ae285df2dfe1b0589e1ec064f2873/lib/lz4hc.h#L48
|
||||
Tar: NewTar(),
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
|
||||
var (
|
||||
_ = Reader(new(TarLz4))
|
||||
_ = Writer(new(TarLz4))
|
||||
_ = Archiver(new(TarLz4))
|
||||
_ = Unarchiver(new(TarLz4))
|
||||
_ = Walker(new(TarLz4))
|
||||
_ = Extractor(new(TarLz4))
|
||||
)
|
||||
|
||||
// DefaultTarLz4 is a convenient archiver ready to use.
|
||||
var DefaultTarLz4 = NewTarLz4()
|
||||
114
vendor/github.com/mholt/archiver/tarsz.go
generated
vendored
Normal file
114
vendor/github.com/mholt/archiver/tarsz.go
generated
vendored
Normal file
@@ -0,0 +1,114 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
)
|
||||
|
||||
// TarSz facilitates Snappy compression
|
||||
// (https://github.com/google/snappy)
|
||||
// of tarball archives.
|
||||
type TarSz struct {
|
||||
*Tar
|
||||
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (*TarSz) CheckExt(filename string) error {
|
||||
if !strings.HasSuffix(filename, ".tar.sz") &&
|
||||
!strings.HasSuffix(filename, ".tsz") {
|
||||
return fmt.Errorf("filename must have a .tar.sz or .tsz extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Archive creates a compressed tar file at destination
|
||||
// containing the files listed in sources. The destination
|
||||
// must end with ".tar.sz" or ".tsz". File paths can be
|
||||
// those of regular files or directories; directories will
|
||||
// be recursively added.
|
||||
func (tsz *TarSz) Archive(sources []string, destination string) error {
|
||||
err := tsz.CheckExt(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("output %s", err.Error())
|
||||
}
|
||||
tsz.wrapWriter()
|
||||
return tsz.Tar.Archive(sources, destination)
|
||||
}
|
||||
|
||||
// Unarchive unpacks the compressed tarball at
|
||||
// source to destination. Destination will be
|
||||
// treated as a folder name.
|
||||
func (tsz *TarSz) Unarchive(source, destination string) error {
|
||||
tsz.wrapReader()
|
||||
return tsz.Tar.Unarchive(source, destination)
|
||||
}
|
||||
|
||||
// Walk calls walkFn for each visited item in archive.
|
||||
func (tsz *TarSz) Walk(archive string, walkFn WalkFunc) error {
|
||||
tsz.wrapReader()
|
||||
return tsz.Tar.Walk(archive, walkFn)
|
||||
}
|
||||
|
||||
// Create opens tsz for writing a compressed
|
||||
// tar archive to out.
|
||||
func (tsz *TarSz) Create(out io.Writer) error {
|
||||
tsz.wrapWriter()
|
||||
return tsz.Tar.Create(out)
|
||||
}
|
||||
|
||||
// Open opens t for reading a compressed archive from
|
||||
// in. The size parameter is not used.
|
||||
func (tsz *TarSz) Open(in io.Reader, size int64) error {
|
||||
tsz.wrapReader()
|
||||
return tsz.Tar.Open(in, size)
|
||||
}
|
||||
|
||||
// Extract extracts a single file from the tar archive.
|
||||
// If the target is a directory, the entire folder will
|
||||
// be extracted into destination.
|
||||
func (tsz *TarSz) Extract(source, target, destination string) error {
|
||||
tsz.wrapReader()
|
||||
return tsz.Tar.Extract(source, target, destination)
|
||||
}
|
||||
|
||||
func (tsz *TarSz) wrapWriter() {
|
||||
var sw *snappy.Writer
|
||||
tsz.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) {
|
||||
sw = snappy.NewWriter(w)
|
||||
return sw, nil
|
||||
}
|
||||
tsz.Tar.cleanupWrapFn = func() {
|
||||
sw.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (tsz *TarSz) wrapReader() {
|
||||
tsz.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) {
|
||||
return snappy.NewReader(r), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (tsz *TarSz) String() string { return "tar.sz" }
|
||||
|
||||
// NewTarSz returns a new, default instance ready to be customized and used.
|
||||
func NewTarSz() *TarSz {
|
||||
return &TarSz{
|
||||
Tar: NewTar(),
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
|
||||
var (
|
||||
_ = Reader(new(TarSz))
|
||||
_ = Writer(new(TarSz))
|
||||
_ = Archiver(new(TarSz))
|
||||
_ = Unarchiver(new(TarSz))
|
||||
_ = Walker(new(TarSz))
|
||||
_ = Extractor(new(TarSz))
|
||||
)
|
||||
|
||||
// DefaultTarSz is a convenient archiver ready to use.
|
||||
var DefaultTarSz = NewTarSz()
|
||||
119
vendor/github.com/mholt/archiver/tarxz.go
generated
vendored
Normal file
119
vendor/github.com/mholt/archiver/tarxz.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/ulikunitz/xz"
|
||||
fastxz "github.com/xi2/xz"
|
||||
)
|
||||
|
||||
// TarXz facilitates xz compression
|
||||
// (https://tukaani.org/xz/format.html)
|
||||
// of tarball archives.
|
||||
type TarXz struct {
|
||||
*Tar
|
||||
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (*TarXz) CheckExt(filename string) error {
|
||||
if !strings.HasSuffix(filename, ".tar.xz") &&
|
||||
!strings.HasSuffix(filename, ".txz") {
|
||||
return fmt.Errorf("filename must have a .tar.xz or .txz extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Archive creates a compressed tar file at destination
|
||||
// containing the files listed in sources. The destination
|
||||
// must end with ".tar.xz" or ".txz". File paths can be
|
||||
// those of regular files or directories; directories will
|
||||
// be recursively added.
|
||||
func (txz *TarXz) Archive(sources []string, destination string) error {
|
||||
err := txz.CheckExt(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("output %s", err.Error())
|
||||
}
|
||||
txz.wrapWriter()
|
||||
return txz.Tar.Archive(sources, destination)
|
||||
}
|
||||
|
||||
// Unarchive unpacks the compressed tarball at
|
||||
// source to destination. Destination will be
|
||||
// treated as a folder name.
|
||||
func (txz *TarXz) Unarchive(source, destination string) error {
|
||||
txz.wrapReader()
|
||||
return txz.Tar.Unarchive(source, destination)
|
||||
}
|
||||
|
||||
// Walk calls walkFn for each visited item in archive.
|
||||
func (txz *TarXz) Walk(archive string, walkFn WalkFunc) error {
|
||||
txz.wrapReader()
|
||||
return txz.Tar.Walk(archive, walkFn)
|
||||
}
|
||||
|
||||
// Create opens txz for writing a compressed
|
||||
// tar archive to out.
|
||||
func (txz *TarXz) Create(out io.Writer) error {
|
||||
txz.wrapWriter()
|
||||
return txz.Tar.Create(out)
|
||||
}
|
||||
|
||||
// Open opens t for reading a compressed archive from
|
||||
// in. The size parameter is not used.
|
||||
func (txz *TarXz) Open(in io.Reader, size int64) error {
|
||||
txz.wrapReader()
|
||||
return txz.Tar.Open(in, size)
|
||||
}
|
||||
|
||||
// Extract extracts a single file from the tar archive.
|
||||
// If the target is a directory, the entire folder will
|
||||
// be extracted into destination.
|
||||
func (txz *TarXz) Extract(source, target, destination string) error {
|
||||
txz.wrapReader()
|
||||
return txz.Tar.Extract(source, target, destination)
|
||||
}
|
||||
|
||||
func (txz *TarXz) wrapWriter() {
|
||||
var xzw *xz.Writer
|
||||
txz.Tar.writerWrapFn = func(w io.Writer) (io.Writer, error) {
|
||||
var err error
|
||||
xzw, err = xz.NewWriter(w)
|
||||
return xzw, err
|
||||
}
|
||||
txz.Tar.cleanupWrapFn = func() {
|
||||
xzw.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (txz *TarXz) wrapReader() {
|
||||
var xzr *fastxz.Reader
|
||||
txz.Tar.readerWrapFn = func(r io.Reader) (io.Reader, error) {
|
||||
var err error
|
||||
xzr, err = fastxz.NewReader(r, 0)
|
||||
return xzr, err
|
||||
}
|
||||
}
|
||||
|
||||
func (txz *TarXz) String() string { return "tar.xz" }
|
||||
|
||||
// NewTarXz returns a new, default instance ready to be customized and used.
|
||||
func NewTarXz() *TarXz {
|
||||
return &TarXz{
|
||||
Tar: NewTar(),
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
|
||||
var (
|
||||
_ = Reader(new(TarXz))
|
||||
_ = Writer(new(TarXz))
|
||||
_ = Archiver(new(TarXz))
|
||||
_ = Unarchiver(new(TarXz))
|
||||
_ = Walker(new(TarXz))
|
||||
_ = Extractor(new(TarXz))
|
||||
)
|
||||
|
||||
// DefaultTarXz is a convenient archiver ready to use.
|
||||
var DefaultTarXz = NewTarXz()
|
||||
58
vendor/github.com/mholt/archiver/xz.go
generated
vendored
Normal file
58
vendor/github.com/mholt/archiver/xz.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ulikunitz/xz"
|
||||
fastxz "github.com/xi2/xz"
|
||||
)
|
||||
|
||||
// Xz facilitates XZ compression.
|
||||
type Xz struct{}
|
||||
|
||||
// Compress reads in, compresses it, and writes it to out.
|
||||
func (x *Xz) Compress(in io.Reader, out io.Writer) error {
|
||||
w, err := xz.NewWriter(out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer w.Close()
|
||||
_, err = io.Copy(w, in)
|
||||
return err
|
||||
}
|
||||
|
||||
// Decompress reads in, decompresses it, and writes it to out.
|
||||
func (x *Xz) Decompress(in io.Reader, out io.Writer) error {
|
||||
r, err := fastxz.NewReader(in, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(out, r)
|
||||
return err
|
||||
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (x *Xz) CheckExt(filename string) error {
|
||||
if filepath.Ext(filename) != ".xz" {
|
||||
return fmt.Errorf("filename must have a .xz extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Xz) String() string { return "xz" }
|
||||
|
||||
// NewXz returns a new, default instance ready to be customized and used.
|
||||
func NewXz() *Xz {
|
||||
return new(Xz)
|
||||
}
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
|
||||
var (
|
||||
_ = Compressor(new(Xz))
|
||||
_ = Decompressor(new(Xz))
|
||||
)
|
||||
|
||||
// DefaultXz is a default instance that is conveniently ready to use.
|
||||
var DefaultXz = NewXz()
|
||||
575
vendor/github.com/mholt/archiver/zip.go
generated
vendored
Normal file
575
vendor/github.com/mholt/archiver/zip.go
generated
vendored
Normal file
@@ -0,0 +1,575 @@
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Zip provides facilities for operating ZIP archives.
// See https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT.
type Zip struct {
	// The compression level to use, as described
	// in the compress/flate package.
	CompressionLevel int

	// Whether to overwrite existing files; if false,
	// an error is returned if the file exists.
	OverwriteExisting bool

	// Whether to make all the directories necessary
	// to create a zip archive in the desired path.
	MkdirAll bool

	// If enabled, selective compression will only
	// compress files which are not already in a
	// compressed format; this is decided based
	// simply on file extension.
	SelectiveCompression bool

	// A single top-level folder can be implicitly
	// created by the Archive or Unarchive methods
	// if the files to be added to the archive
	// or the files to be extracted from the archive
	// do not all have a common root. This roughly
	// mimics the behavior of archival tools integrated
	// into OS file browsers which create a subfolder
	// to avoid unexpectedly littering the destination
	// folder with potentially many files, causing a
	// problematic cleanup/organization situation.
	// This feature is available for both creation
	// and extraction of archives, but may be slightly
	// inefficient with lots and lots of files,
	// especially on extraction.
	ImplicitTopLevelFolder bool

	// If true, errors encountered during reading
	// or writing a single file will be logged and
	// the operation will continue on remaining files.
	ContinueOnError bool

	// Internal state: the writer opened by Create, the
	// reader opened by Open, and the cursor into zr.File
	// advanced by Read.
	zw   *zip.Writer
	zr   *zip.Reader
	ridx int
}
|
||||
|
||||
// CheckExt ensures the file extension matches the format.
|
||||
func (*Zip) CheckExt(filename string) error {
|
||||
if !strings.HasSuffix(filename, ".zip") {
|
||||
return fmt.Errorf("filename must have a .zip extension")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Archive creates a .zip file at destination containing
|
||||
// the files listed in sources. The destination must end
|
||||
// with ".zip". File paths can be those of regular files
|
||||
// or directories. Regular files are stored at the 'root'
|
||||
// of the archive, and directories are recursively added.
|
||||
func (z *Zip) Archive(sources []string, destination string) error {
|
||||
err := z.CheckExt(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking extension: %v", err)
|
||||
}
|
||||
if !z.OverwriteExisting && fileExists(destination) {
|
||||
return fmt.Errorf("file already exists: %s", destination)
|
||||
}
|
||||
|
||||
// make the folder to contain the resulting archive
|
||||
// if it does not already exist
|
||||
destDir := filepath.Dir(destination)
|
||||
if z.MkdirAll && !fileExists(destDir) {
|
||||
err := mkdir(destDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("making folder for destination: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
out, err := os.Create(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating %s: %v", destination, err)
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
err = z.Create(out)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating zip: %v", err)
|
||||
}
|
||||
defer z.Close()
|
||||
|
||||
var topLevelFolder string
|
||||
if z.ImplicitTopLevelFolder && multipleTopLevels(sources) {
|
||||
topLevelFolder = folderNameFromFileName(destination)
|
||||
}
|
||||
|
||||
for _, source := range sources {
|
||||
err := z.writeWalk(source, topLevelFolder, destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("walking %s: %v", source, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unarchive unpacks the .zip file at source to destination.
|
||||
// Destination will be treated as a folder name.
|
||||
func (z *Zip) Unarchive(source, destination string) error {
|
||||
if !fileExists(destination) && z.MkdirAll {
|
||||
err := mkdir(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("preparing destination: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
file, err := os.Open(source)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening source file: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
fileInfo, err := file.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("statting source file: %v", err)
|
||||
}
|
||||
|
||||
err = z.Open(file, fileInfo.Size())
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening zip archive for reading: %v", err)
|
||||
}
|
||||
defer z.Close()
|
||||
|
||||
// if the files in the archive do not all share a common
|
||||
// root, then make sure we extract to a single subfolder
|
||||
// rather than potentially littering the destination...
|
||||
if z.ImplicitTopLevelFolder {
|
||||
files := make([]string, len(z.zr.File))
|
||||
for i := range z.zr.File {
|
||||
files[i] = z.zr.File[i].Name
|
||||
}
|
||||
if multipleTopLevels(files) {
|
||||
destination = filepath.Join(destination, folderNameFromFileName(source))
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
err := z.extractNext(destination)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
if z.ContinueOnError {
|
||||
log.Printf("[ERROR] Reading file in zip archive: %v", err)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("reading file in zip archive: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (z *Zip) extractNext(to string) error {
|
||||
f, err := z.Read()
|
||||
if err != nil {
|
||||
return err // don't wrap error; calling loop must break on io.EOF
|
||||
}
|
||||
defer f.Close()
|
||||
header, ok := f.Header.(zip.FileHeader)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected header to be zip.FileHeader but was %T", f.Header)
|
||||
}
|
||||
return z.extractFile(f, filepath.Join(to, header.Name))
|
||||
}
|
||||
|
||||
func (z *Zip) extractFile(f File, to string) error {
|
||||
// if a directory, no content; simply make the directory and return
|
||||
if f.IsDir() {
|
||||
return mkdir(to)
|
||||
}
|
||||
|
||||
// do not overwrite existing files, if configured
|
||||
if !z.OverwriteExisting && fileExists(to) {
|
||||
return fmt.Errorf("file already exists: %s", to)
|
||||
}
|
||||
|
||||
return writeNewFile(to, f, f.Mode())
|
||||
}
|
||||
|
||||
func (z *Zip) writeWalk(source, topLevelFolder, destination string) error {
|
||||
sourceInfo, err := os.Stat(source)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: stat: %v", source, err)
|
||||
}
|
||||
destAbs, err := filepath.Abs(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: getting absolute path of destination %s: %v", source, destination, err)
|
||||
}
|
||||
|
||||
return filepath.Walk(source, func(fpath string, info os.FileInfo, err error) error {
|
||||
handleErr := func(err error) error {
|
||||
if z.ContinueOnError {
|
||||
log.Printf("[ERROR] Walking %s: %v", fpath, err)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
return handleErr(fmt.Errorf("traversing %s: %v", fpath, err))
|
||||
}
|
||||
if info == nil {
|
||||
return handleErr(fmt.Errorf("%s: no file info", fpath))
|
||||
}
|
||||
|
||||
// make sure we do not copy the output file into the output
|
||||
// file; that results in an infinite loop and disk exhaustion!
|
||||
fpathAbs, err := filepath.Abs(fpath)
|
||||
if err != nil {
|
||||
return handleErr(fmt.Errorf("%s: getting absolute path: %v", fpath, err))
|
||||
}
|
||||
if within(fpathAbs, destAbs) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// build the name to be used within the archive
|
||||
nameInArchive, err := makeNameInArchive(sourceInfo, source, topLevelFolder, fpath)
|
||||
if err != nil {
|
||||
return handleErr(err)
|
||||
}
|
||||
|
||||
file, err := os.Open(fpath)
|
||||
if err != nil {
|
||||
return handleErr(fmt.Errorf("%s: opening: %v", fpath, err))
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
err = z.Write(File{
|
||||
FileInfo: FileInfo{
|
||||
FileInfo: info,
|
||||
CustomName: nameInArchive,
|
||||
},
|
||||
ReadCloser: file,
|
||||
})
|
||||
if err != nil {
|
||||
return handleErr(fmt.Errorf("%s: writing: %s", fpath, err))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Create opens z for writing a ZIP archive to out.
|
||||
func (z *Zip) Create(out io.Writer) error {
|
||||
if z.zw != nil {
|
||||
return fmt.Errorf("zip archive is already created for writing")
|
||||
}
|
||||
z.zw = zip.NewWriter(out)
|
||||
if z.CompressionLevel != flate.DefaultCompression {
|
||||
z.zw.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) {
|
||||
return flate.NewWriter(out, z.CompressionLevel)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes f to z, which must have been opened for writing first.
|
||||
func (z *Zip) Write(f File) error {
|
||||
if z.zw == nil {
|
||||
return fmt.Errorf("zip archive was not created for writing first")
|
||||
}
|
||||
if f.FileInfo == nil {
|
||||
return fmt.Errorf("no file info")
|
||||
}
|
||||
if f.FileInfo.Name() == "" {
|
||||
return fmt.Errorf("missing file name")
|
||||
}
|
||||
|
||||
header, err := zip.FileInfoHeader(f)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: getting header: %v", f.Name(), err)
|
||||
}
|
||||
|
||||
if f.IsDir() {
|
||||
header.Name += "/" // required - strangely no mention of this in zip spec? but is in godoc...
|
||||
header.Method = zip.Store
|
||||
} else {
|
||||
ext := strings.ToLower(path.Ext(header.Name))
|
||||
if _, ok := compressedFormats[ext]; ok && z.SelectiveCompression {
|
||||
header.Method = zip.Store
|
||||
} else {
|
||||
header.Method = zip.Deflate
|
||||
}
|
||||
}
|
||||
|
||||
writer, err := z.zw.CreateHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: making header: %v", f.Name(), err)
|
||||
}
|
||||
|
||||
if f.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if header.Mode().IsRegular() {
|
||||
if f.ReadCloser == nil {
|
||||
return fmt.Errorf("%s: no way to read file contents", f.Name())
|
||||
}
|
||||
_, err := io.Copy(writer, f)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: copying contents: %v", f.Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open opens z for reading an archive from in,
|
||||
// which is expected to have the given size and
|
||||
// which must be an io.ReaderAt.
|
||||
func (z *Zip) Open(in io.Reader, size int64) error {
|
||||
inRdrAt, ok := in.(io.ReaderAt)
|
||||
if !ok {
|
||||
return fmt.Errorf("reader must be io.ReaderAt")
|
||||
}
|
||||
if z.zr != nil {
|
||||
return fmt.Errorf("zip archive is already open for reading")
|
||||
}
|
||||
var err error
|
||||
z.zr, err = zip.NewReader(inRdrAt, size)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating reader: %v", err)
|
||||
}
|
||||
z.ridx = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read reads the next file from z, which must have
|
||||
// already been opened for reading. If there are no
|
||||
// more files, the error is io.EOF. The File must
|
||||
// be closed when finished reading from it.
|
||||
func (z *Zip) Read() (File, error) {
|
||||
if z.zr == nil {
|
||||
return File{}, fmt.Errorf("zip archive is not open")
|
||||
}
|
||||
if z.ridx >= len(z.zr.File) {
|
||||
return File{}, io.EOF
|
||||
}
|
||||
|
||||
// access the file and increment counter so that
|
||||
// if there is an error processing this file, the
|
||||
// caller can still iterate to the next file
|
||||
zf := z.zr.File[z.ridx]
|
||||
z.ridx++
|
||||
|
||||
file := File{
|
||||
FileInfo: zf.FileInfo(),
|
||||
Header: zf.FileHeader,
|
||||
}
|
||||
|
||||
rc, err := zf.Open()
|
||||
if err != nil {
|
||||
return file, fmt.Errorf("%s: open compressed file: %v", zf.Name, err)
|
||||
}
|
||||
file.ReadCloser = rc
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Close closes the zip archive(s) opened by Create and Open.
|
||||
func (z *Zip) Close() error {
|
||||
if z.zr != nil {
|
||||
z.zr = nil
|
||||
}
|
||||
if z.zw != nil {
|
||||
zw := z.zw
|
||||
z.zw = nil
|
||||
return zw.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Walk calls walkFn for each visited item in archive.
|
||||
func (z *Zip) Walk(archive string, walkFn WalkFunc) error {
|
||||
zr, err := zip.OpenReader(archive)
|
||||
if err != nil {
|
||||
return fmt.Errorf("opening zip reader: %v", err)
|
||||
}
|
||||
defer zr.Close()
|
||||
|
||||
for _, zf := range zr.File {
|
||||
zfrc, err := zf.Open()
|
||||
if err != nil {
|
||||
zfrc.Close()
|
||||
if z.ContinueOnError {
|
||||
log.Printf("[ERROR] Opening %s: %v", zf.Name, err)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("opening %s: %v", zf.Name, err)
|
||||
}
|
||||
|
||||
err = walkFn(File{
|
||||
FileInfo: zf.FileInfo(),
|
||||
Header: zf.FileHeader,
|
||||
ReadCloser: zfrc,
|
||||
})
|
||||
zfrc.Close()
|
||||
if err != nil {
|
||||
if err == ErrStopWalk {
|
||||
break
|
||||
}
|
||||
if z.ContinueOnError {
|
||||
log.Printf("[ERROR] Walking %s: %v", zf.Name, err)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("walking %s: %v", zf.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Extract extracts a single file from the zip archive.
|
||||
// If the target is a directory, the entire folder will
|
||||
// be extracted into destination.
|
||||
func (z *Zip) Extract(source, target, destination string) error {
|
||||
// target refers to a path inside the archive, which should be clean also
|
||||
target = path.Clean(target)
|
||||
|
||||
// if the target ends up being a directory, then
|
||||
// we will continue walking and extracting files
|
||||
// until we are no longer within that directory
|
||||
var targetDirPath string
|
||||
|
||||
return z.Walk(source, func(f File) error {
|
||||
zfh, ok := f.Header.(zip.FileHeader)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected header to be zip.FileHeader but was %T", f.Header)
|
||||
}
|
||||
|
||||
// importantly, cleaning the path strips tailing slash,
|
||||
// which must be appended to folders within the archive
|
||||
name := path.Clean(zfh.Name)
|
||||
if f.IsDir() && target == name {
|
||||
targetDirPath = path.Dir(name)
|
||||
}
|
||||
|
||||
if within(target, zfh.Name) {
|
||||
// either this is the exact file we want, or is
|
||||
// in the directory we want to extract
|
||||
|
||||
// build the filename we will extract to
|
||||
end, err := filepath.Rel(targetDirPath, zfh.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("relativizing paths: %v", err)
|
||||
}
|
||||
joined := filepath.Join(destination, end)
|
||||
|
||||
err = z.extractFile(f, joined)
|
||||
if err != nil {
|
||||
return fmt.Errorf("extracting file %s: %v", zfh.Name, err)
|
||||
}
|
||||
|
||||
// if our target was not a directory, stop walk
|
||||
if targetDirPath == "" {
|
||||
return ErrStopWalk
|
||||
}
|
||||
} else if targetDirPath != "" {
|
||||
// finished walking the entire directory
|
||||
return ErrStopWalk
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Match returns true if the format of file matches this
|
||||
// type's format. It should not affect reader position.
|
||||
func (*Zip) Match(file io.ReadSeeker) (bool, error) {
|
||||
currentPos, err := file.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
_, err = file.Seek(0, 0)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer file.Seek(currentPos, io.SeekStart)
|
||||
|
||||
buf := make([]byte, 4)
|
||||
if n, err := file.Read(buf); err != nil || n < 4 {
|
||||
return false, nil
|
||||
}
|
||||
return bytes.Equal(buf, []byte("PK\x03\x04")), nil
|
||||
}
|
||||
|
||||
func (z *Zip) String() string { return "zip" }
|
||||
|
||||
// NewZip returns a new, default instance ready to be customized and used.
|
||||
func NewZip() *Zip {
|
||||
return &Zip{
|
||||
CompressionLevel: flate.DefaultCompression,
|
||||
MkdirAll: true,
|
||||
SelectiveCompression: true,
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time checks to ensure type implements desired interfaces.
|
||||
var (
|
||||
_ = Reader(new(Zip))
|
||||
_ = Writer(new(Zip))
|
||||
_ = Archiver(new(Zip))
|
||||
_ = Unarchiver(new(Zip))
|
||||
_ = Walker(new(Zip))
|
||||
_ = Extractor(new(Zip))
|
||||
_ = Matcher(new(Zip))
|
||||
_ = ExtensionChecker(new(Zip))
|
||||
)
|
||||
|
||||
// compressedFormats is a (non-exhaustive) set of lowercased
|
||||
// file extensions for formats that are typically already
|
||||
// compressed. Compressing files that are already compressed
|
||||
// is inefficient, so use this set of extension to avoid that.
|
||||
var compressedFormats = map[string]struct{}{
|
||||
".7z": {},
|
||||
".avi": {},
|
||||
".br": {},
|
||||
".bz2": {},
|
||||
".cab": {},
|
||||
".docx": {},
|
||||
".gif": {},
|
||||
".gz": {},
|
||||
".jar": {},
|
||||
".jpeg": {},
|
||||
".jpg": {},
|
||||
".lz": {},
|
||||
".lz4": {},
|
||||
".lzma": {},
|
||||
".m4v": {},
|
||||
".mov": {},
|
||||
".mp3": {},
|
||||
".mp4": {},
|
||||
".mpeg": {},
|
||||
".mpg": {},
|
||||
".png": {},
|
||||
".pptx": {},
|
||||
".rar": {},
|
||||
".sz": {},
|
||||
".tbz2": {},
|
||||
".tgz": {},
|
||||
".tsz": {},
|
||||
".txz": {},
|
||||
".xlsx": {},
|
||||
".xz": {},
|
||||
".zip": {},
|
||||
".zipx": {},
|
||||
}
|
||||
|
||||
// DefaultZip is a default instance that is conveniently ready to use.
|
||||
var DefaultZip = NewZip()
|
||||
23
vendor/github.com/nwaples/rardecode/LICENSE
generated
vendored
Normal file
23
vendor/github.com/nwaples/rardecode/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
Copyright (c) 2015, Nicholas Waples
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
4
vendor/github.com/nwaples/rardecode/README.md
generated
vendored
Normal file
4
vendor/github.com/nwaples/rardecode/README.md
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
# rardecode
|
||||
[](https://godoc.org/github.com/nwaples/rardecode)
|
||||
|
||||
A go package for reading RAR archives.
|
||||
306
vendor/github.com/nwaples/rardecode/archive.go
generated
vendored
Normal file
306
vendor/github.com/nwaples/rardecode/archive.go
generated
vendored
Normal file
@@ -0,0 +1,306 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
maxSfxSize = 0x100000 // maximum number of bytes to read when searching for RAR signature
|
||||
sigPrefix = "Rar!\x1A\x07"
|
||||
|
||||
fileFmt15 = iota + 1 // Version 1.5 archive file format
|
||||
fileFmt50 // Version 5.0 archive file format
|
||||
)
|
||||
|
||||
var (
|
||||
errNoSig = errors.New("rardecode: RAR signature not found")
|
||||
errVerMismatch = errors.New("rardecode: volume version mistmatch")
|
||||
errCorruptHeader = errors.New("rardecode: corrupt block header")
|
||||
errCorruptFileHeader = errors.New("rardecode: corrupt file header")
|
||||
errBadHeaderCrc = errors.New("rardecode: bad header crc")
|
||||
errUnknownArc = errors.New("rardecode: unknown archive version")
|
||||
errUnknownDecoder = errors.New("rardecode: unknown decoder version")
|
||||
errUnsupportedDecoder = errors.New("rardecode: unsupported decoder version")
|
||||
errArchiveContinues = errors.New("rardecode: archive continues in next volume")
|
||||
errArchiveEnd = errors.New("rardecode: archive end reached")
|
||||
errDecoderOutOfData = errors.New("rardecode: decoder expected more data than is in packed file")
|
||||
|
||||
reDigits = regexp.MustCompile(`\d+`)
|
||||
)
|
||||
|
||||
type readBuf []byte
|
||||
|
||||
func (b *readBuf) byte() byte {
|
||||
v := (*b)[0]
|
||||
*b = (*b)[1:]
|
||||
return v
|
||||
}
|
||||
|
||||
func (b *readBuf) uint16() uint16 {
|
||||
v := uint16((*b)[0]) | uint16((*b)[1])<<8
|
||||
*b = (*b)[2:]
|
||||
return v
|
||||
}
|
||||
|
||||
func (b *readBuf) uint32() uint32 {
|
||||
v := uint32((*b)[0]) | uint32((*b)[1])<<8 | uint32((*b)[2])<<16 | uint32((*b)[3])<<24
|
||||
*b = (*b)[4:]
|
||||
return v
|
||||
}
|
||||
|
||||
func (b *readBuf) bytes(n int) []byte {
|
||||
v := (*b)[:n]
|
||||
*b = (*b)[n:]
|
||||
return v
|
||||
}
|
||||
|
||||
func (b *readBuf) uvarint() uint64 {
|
||||
var x uint64
|
||||
var s uint
|
||||
for i, n := range *b {
|
||||
if n < 0x80 {
|
||||
*b = (*b)[i+1:]
|
||||
return x | uint64(n)<<s
|
||||
}
|
||||
x |= uint64(n&0x7f) << s
|
||||
s += 7
|
||||
|
||||
}
|
||||
// if we run out of bytes, just return 0
|
||||
*b = (*b)[len(*b):]
|
||||
return 0
|
||||
}
|
||||
|
||||
// readFull wraps io.ReadFull to return io.ErrUnexpectedEOF instead
|
||||
// of io.EOF when 0 bytes are read.
|
||||
func readFull(r io.Reader, buf []byte) error {
|
||||
_, err := io.ReadFull(r, buf)
|
||||
if err == io.EOF {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// findSig searches for the RAR signature and version at the beginning of a file.
|
||||
// It searches no more than maxSfxSize bytes.
|
||||
func findSig(br *bufio.Reader) (int, error) {
|
||||
for n := 0; n <= maxSfxSize; {
|
||||
b, err := br.ReadSlice(sigPrefix[0])
|
||||
n += len(b)
|
||||
if err == bufio.ErrBufferFull {
|
||||
continue
|
||||
} else if err != nil {
|
||||
if err == io.EOF {
|
||||
err = errNoSig
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
||||
b, err = br.Peek(len(sigPrefix[1:]) + 2)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
err = errNoSig
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
if !bytes.HasPrefix(b, []byte(sigPrefix[1:])) {
|
||||
continue
|
||||
}
|
||||
b = b[len(sigPrefix)-1:]
|
||||
|
||||
var ver int
|
||||
switch {
|
||||
case b[0] == 0:
|
||||
ver = fileFmt15
|
||||
case b[0] == 1 && b[1] == 0:
|
||||
ver = fileFmt50
|
||||
default:
|
||||
continue
|
||||
}
|
||||
_, _ = br.ReadSlice('\x00')
|
||||
|
||||
return ver, nil
|
||||
}
|
||||
return 0, errNoSig
|
||||
}
|
||||
|
||||
// volume extends a fileBlockReader to be used across multiple
|
||||
// files in a multi-volume archive
|
||||
type volume struct {
|
||||
fileBlockReader
|
||||
f *os.File // current file handle
|
||||
br *bufio.Reader // buffered reader for current volume file
|
||||
dir string // volume directory
|
||||
file string // current volume file
|
||||
num int // volume number
|
||||
old bool // uses old naming scheme
|
||||
}
|
||||
|
||||
// nextVolName updates name to the next filename in the archive.
|
||||
func (v *volume) nextVolName() {
|
||||
if v.num == 0 {
|
||||
// check file extensions
|
||||
i := strings.LastIndex(v.file, ".")
|
||||
if i < 0 {
|
||||
// no file extension, add one
|
||||
i = len(v.file)
|
||||
v.file += ".rar"
|
||||
} else {
|
||||
ext := strings.ToLower(v.file[i+1:])
|
||||
// replace with .rar for empty extensions & self extracting archives
|
||||
if ext == "" || ext == "exe" || ext == "sfx" {
|
||||
v.file = v.file[:i+1] + "rar"
|
||||
}
|
||||
}
|
||||
if a, ok := v.fileBlockReader.(*archive15); ok {
|
||||
v.old = a.old
|
||||
}
|
||||
// new naming scheme must have volume number in filename
|
||||
if !v.old && reDigits.FindStringIndex(v.file) == nil {
|
||||
v.old = true
|
||||
}
|
||||
// For old style naming if 2nd and 3rd character of file extension is not a digit replace
|
||||
// with "00" and ignore any trailing characters.
|
||||
if v.old && (len(v.file) < i+4 || v.file[i+2] < '0' || v.file[i+2] > '9' || v.file[i+3] < '0' || v.file[i+3] > '9') {
|
||||
v.file = v.file[:i+2] + "00"
|
||||
return
|
||||
}
|
||||
}
|
||||
// new style volume naming
|
||||
if !v.old {
|
||||
// find all numbers in volume name
|
||||
m := reDigits.FindAllStringIndex(v.file, -1)
|
||||
if l := len(m); l > 1 {
|
||||
// More than 1 match so assume name.part###of###.rar style.
|
||||
// Take the last 2 matches where the first is the volume number.
|
||||
m = m[l-2 : l]
|
||||
if strings.Contains(v.file[m[0][1]:m[1][0]], ".") || !strings.Contains(v.file[:m[0][0]], ".") {
|
||||
// Didn't match above style as volume had '.' between the two numbers or didnt have a '.'
|
||||
// before the first match. Use the second number as volume number.
|
||||
m = m[1:]
|
||||
}
|
||||
}
|
||||
// extract and increment volume number
|
||||
lo, hi := m[0][0], m[0][1]
|
||||
n, err := strconv.Atoi(v.file[lo:hi])
|
||||
if err != nil {
|
||||
n = 0
|
||||
} else {
|
||||
n++
|
||||
}
|
||||
// volume number must use at least the same number of characters as previous volume
|
||||
vol := fmt.Sprintf("%0"+fmt.Sprint(hi-lo)+"d", n)
|
||||
v.file = v.file[:lo] + vol + v.file[hi:]
|
||||
return
|
||||
}
|
||||
// old style volume naming
|
||||
i := strings.LastIndex(v.file, ".")
|
||||
// get file extension
|
||||
b := []byte(v.file[i+1:])
|
||||
// start incrementing volume number digits from rightmost
|
||||
for j := 2; j >= 0; j-- {
|
||||
if b[j] != '9' {
|
||||
b[j]++
|
||||
break
|
||||
}
|
||||
// digit overflow
|
||||
if j == 0 {
|
||||
// last character before '.'
|
||||
b[j] = 'A'
|
||||
} else {
|
||||
// set to '0' and loop to next character
|
||||
b[j] = '0'
|
||||
}
|
||||
}
|
||||
v.file = v.file[:i+1] + string(b)
|
||||
}
|
||||
|
||||
func (v *volume) next() (*fileBlockHeader, error) {
|
||||
for {
|
||||
var atEOF bool
|
||||
|
||||
h, err := v.fileBlockReader.next()
|
||||
switch err {
|
||||
case errArchiveContinues:
|
||||
case io.EOF:
|
||||
// Read all of volume without finding an end block. The only way
|
||||
// to tell if the archive continues is to try to open the next volume.
|
||||
atEOF = true
|
||||
default:
|
||||
return h, err
|
||||
}
|
||||
|
||||
v.f.Close()
|
||||
v.nextVolName()
|
||||
v.f, err = os.Open(v.dir + v.file) // Open next volume file
|
||||
if err != nil {
|
||||
if atEOF && os.IsNotExist(err) {
|
||||
// volume not found so assume that the archive has ended
|
||||
return nil, io.EOF
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
v.num++
|
||||
v.br.Reset(v.f)
|
||||
ver, err := findSig(v.br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if v.version() != ver {
|
||||
return nil, errVerMismatch
|
||||
}
|
||||
v.reset() // reset encryption
|
||||
}
|
||||
}
|
||||
|
||||
func (v *volume) Close() error {
|
||||
// may be nil if os.Open fails in next()
|
||||
if v.f == nil {
|
||||
return nil
|
||||
}
|
||||
return v.f.Close()
|
||||
}
|
||||
|
||||
func openVolume(name, password string) (*volume, error) {
|
||||
var err error
|
||||
v := new(volume)
|
||||
v.dir, v.file = filepath.Split(name)
|
||||
v.f, err = os.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v.br = bufio.NewReader(v.f)
|
||||
v.fileBlockReader, err = newFileBlockReader(v.br, password)
|
||||
if err != nil {
|
||||
v.f.Close()
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func newFileBlockReader(br *bufio.Reader, pass string) (fileBlockReader, error) {
|
||||
runes := []rune(pass)
|
||||
if len(runes) > maxPassword {
|
||||
pass = string(runes[:maxPassword])
|
||||
}
|
||||
ver, err := findSig(br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch ver {
|
||||
case fileFmt15:
|
||||
return newArchive15(br, pass), nil
|
||||
case fileFmt50:
|
||||
return newArchive50(br, pass), nil
|
||||
}
|
||||
return nil, errUnknownArc
|
||||
}
|
||||
468
vendor/github.com/nwaples/rardecode/archive15.go
generated
vendored
Normal file
468
vendor/github.com/nwaples/rardecode/archive15.go
generated
vendored
Normal file
@@ -0,0 +1,468 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"errors"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf16"
|
||||
)
|
||||
|
||||
const (
|
||||
// block types
|
||||
blockArc = 0x73
|
||||
blockFile = 0x74
|
||||
blockService = 0x7a
|
||||
blockEnd = 0x7b
|
||||
|
||||
// block flags
|
||||
blockHasData = 0x8000
|
||||
|
||||
// archive block flags
|
||||
arcVolume = 0x0001
|
||||
arcSolid = 0x0008
|
||||
arcNewNaming = 0x0010
|
||||
arcEncrypted = 0x0080
|
||||
|
||||
// file block flags
|
||||
fileSplitBefore = 0x0001
|
||||
fileSplitAfter = 0x0002
|
||||
fileEncrypted = 0x0004
|
||||
fileSolid = 0x0010
|
||||
fileWindowMask = 0x00e0
|
||||
fileLargeData = 0x0100
|
||||
fileUnicode = 0x0200
|
||||
fileSalt = 0x0400
|
||||
fileVersion = 0x0800
|
||||
fileExtTime = 0x1000
|
||||
|
||||
// end block flags
|
||||
endArcNotLast = 0x0001
|
||||
|
||||
saltSize = 8 // size of salt for calculating AES keys
|
||||
cacheSize30 = 4 // number of AES keys to cache
|
||||
hashRounds = 0x40000
|
||||
)
|
||||
|
||||
var (
|
||||
errMultipleDecoders = errors.New("rardecode: multiple decoders in a single archive not supported")
|
||||
)
|
||||
|
||||
type blockHeader15 struct {
|
||||
htype byte // block header type
|
||||
flags uint16
|
||||
data readBuf // header data
|
||||
dataSize int64 // size of extra block data
|
||||
}
|
||||
|
||||
// fileHash32 implements fileChecksum for 32-bit hashes
|
||||
type fileHash32 struct {
|
||||
hash.Hash32 // hash to write file contents to
|
||||
sum uint32 // 32bit checksum for file
|
||||
}
|
||||
|
||||
func (h *fileHash32) valid() bool {
|
||||
return h.sum == h.Sum32()
|
||||
}
|
||||
|
||||
// archive15 implements fileBlockReader for RAR 1.5 file format archives
|
||||
type archive15 struct {
|
||||
byteReader // reader for current block data
|
||||
v *bufio.Reader // reader for current archive volume
|
||||
dec decoder // current decoder
|
||||
decVer byte // current decoder version
|
||||
multi bool // archive is multi-volume
|
||||
old bool // archive uses old naming scheme
|
||||
solid bool // archive is a solid archive
|
||||
encrypted bool
|
||||
pass []uint16 // password in UTF-16
|
||||
checksum fileHash32 // file checksum
|
||||
buf readBuf // temporary buffer
|
||||
keyCache [cacheSize30]struct { // cache of previously calculated decryption keys
|
||||
salt []byte
|
||||
key []byte
|
||||
iv []byte
|
||||
}
|
||||
}
|
||||
|
||||
// Calculates the key and iv for AES decryption given a password and salt.
|
||||
func calcAes30Params(pass []uint16, salt []byte) (key, iv []byte) {
|
||||
p := make([]byte, 0, len(pass)*2+len(salt))
|
||||
for _, v := range pass {
|
||||
p = append(p, byte(v), byte(v>>8))
|
||||
}
|
||||
p = append(p, salt...)
|
||||
|
||||
hash := sha1.New()
|
||||
iv = make([]byte, 16)
|
||||
s := make([]byte, 0, hash.Size())
|
||||
for i := 0; i < hashRounds; i++ {
|
||||
hash.Write(p)
|
||||
hash.Write([]byte{byte(i), byte(i >> 8), byte(i >> 16)})
|
||||
if i%(hashRounds/16) == 0 {
|
||||
s = hash.Sum(s[:0])
|
||||
iv[i/(hashRounds/16)] = s[4*4+3]
|
||||
}
|
||||
}
|
||||
key = hash.Sum(s[:0])
|
||||
key = key[:16]
|
||||
|
||||
for k := key; len(k) >= 4; k = k[4:] {
|
||||
k[0], k[1], k[2], k[3] = k[3], k[2], k[1], k[0]
|
||||
}
|
||||
return key, iv
|
||||
}
|
||||
|
||||
// parseDosTime converts a 32bit DOS time value to time.Time
|
||||
func parseDosTime(t uint32) time.Time {
|
||||
n := int(t)
|
||||
sec := n & 0x1f << 1
|
||||
min := n >> 5 & 0x3f
|
||||
hr := n >> 11 & 0x1f
|
||||
day := n >> 16 & 0x1f
|
||||
mon := time.Month(n >> 21 & 0x0f)
|
||||
yr := n>>25&0x7f + 1980
|
||||
return time.Date(yr, mon, day, hr, min, sec, 0, time.Local)
|
||||
}
|
||||
|
||||
// decodeName decodes a non-unicode filename from a file header.
|
||||
func decodeName(buf []byte) string {
|
||||
i := bytes.IndexByte(buf, 0)
|
||||
if i < 0 {
|
||||
return string(buf) // filename is UTF-8
|
||||
}
|
||||
|
||||
name := buf[:i]
|
||||
encName := readBuf(buf[i+1:])
|
||||
if len(encName) < 2 {
|
||||
return "" // invalid encoding
|
||||
}
|
||||
highByte := uint16(encName.byte()) << 8
|
||||
flags := encName.byte()
|
||||
flagBits := 8
|
||||
var wchars []uint16 // decoded characters are UTF-16
|
||||
for len(wchars) < len(name) && len(encName) > 0 {
|
||||
if flagBits == 0 {
|
||||
flags = encName.byte()
|
||||
flagBits = 8
|
||||
if len(encName) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
switch flags >> 6 {
|
||||
case 0:
|
||||
wchars = append(wchars, uint16(encName.byte()))
|
||||
case 1:
|
||||
wchars = append(wchars, uint16(encName.byte())|highByte)
|
||||
case 2:
|
||||
if len(encName) < 2 {
|
||||
break
|
||||
}
|
||||
wchars = append(wchars, encName.uint16())
|
||||
case 3:
|
||||
n := encName.byte()
|
||||
b := name[len(wchars):]
|
||||
if l := int(n&0x7f) + 2; l < len(b) {
|
||||
b = b[:l]
|
||||
}
|
||||
if n&0x80 > 0 {
|
||||
if len(encName) < 1 {
|
||||
break
|
||||
}
|
||||
ec := encName.byte()
|
||||
for _, c := range b {
|
||||
wchars = append(wchars, uint16(c+ec)|highByte)
|
||||
}
|
||||
} else {
|
||||
for _, c := range b {
|
||||
wchars = append(wchars, uint16(c))
|
||||
}
|
||||
}
|
||||
}
|
||||
flags <<= 2
|
||||
flagBits -= 2
|
||||
}
|
||||
return string(utf16.Decode(wchars))
|
||||
}
|
||||
|
||||
// readExtTimes reads and parses the optional extra time field from the file header.
|
||||
func readExtTimes(f *fileBlockHeader, b *readBuf) {
|
||||
if len(*b) < 2 {
|
||||
return // invalid, not enough data
|
||||
}
|
||||
flags := b.uint16()
|
||||
|
||||
ts := []*time.Time{&f.ModificationTime, &f.CreationTime, &f.AccessTime}
|
||||
|
||||
for i, t := range ts {
|
||||
n := flags >> uint((3-i)*4)
|
||||
if n&0x8 == 0 {
|
||||
continue
|
||||
}
|
||||
if i != 0 { // ModificationTime already read so skip
|
||||
if len(*b) < 4 {
|
||||
return // invalid, not enough data
|
||||
}
|
||||
*t = parseDosTime(b.uint32())
|
||||
}
|
||||
if n&0x4 > 0 {
|
||||
*t = t.Add(time.Second)
|
||||
}
|
||||
n &= 0x3
|
||||
if n == 0 {
|
||||
continue
|
||||
}
|
||||
if len(*b) < int(n) {
|
||||
return // invalid, not enough data
|
||||
}
|
||||
// add extra time data in 100's of nanoseconds
|
||||
d := time.Duration(0)
|
||||
for j := 3 - n; j < n; j++ {
|
||||
d |= time.Duration(b.byte()) << (j * 8)
|
||||
}
|
||||
d *= 100
|
||||
*t = t.Add(d)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *archive15) getKeys(salt []byte) (key, iv []byte) {
|
||||
// check cache of keys
|
||||
for _, v := range a.keyCache {
|
||||
if bytes.Equal(v.salt[:], salt) {
|
||||
return v.key, v.iv
|
||||
}
|
||||
}
|
||||
key, iv = calcAes30Params(a.pass, salt)
|
||||
|
||||
// save a copy in the cache
|
||||
copy(a.keyCache[1:], a.keyCache[:])
|
||||
a.keyCache[0].salt = append([]byte(nil), salt...) // copy so byte slice can be reused
|
||||
a.keyCache[0].key = key
|
||||
a.keyCache[0].iv = iv
|
||||
|
||||
return key, iv
|
||||
}
|
||||
|
||||
func (a *archive15) parseFileHeader(h *blockHeader15) (*fileBlockHeader, error) {
|
||||
f := new(fileBlockHeader)
|
||||
|
||||
f.first = h.flags&fileSplitBefore == 0
|
||||
f.last = h.flags&fileSplitAfter == 0
|
||||
|
||||
f.solid = h.flags&fileSolid > 0
|
||||
f.IsDir = h.flags&fileWindowMask == fileWindowMask
|
||||
if !f.IsDir {
|
||||
f.winSize = uint(h.flags&fileWindowMask)>>5 + 16
|
||||
}
|
||||
|
||||
b := h.data
|
||||
if len(b) < 21 {
|
||||
return nil, errCorruptFileHeader
|
||||
}
|
||||
|
||||
f.PackedSize = h.dataSize
|
||||
f.UnPackedSize = int64(b.uint32())
|
||||
f.HostOS = b.byte() + 1
|
||||
if f.HostOS > HostOSBeOS {
|
||||
f.HostOS = HostOSUnknown
|
||||
}
|
||||
a.checksum.sum = b.uint32()
|
||||
|
||||
f.ModificationTime = parseDosTime(b.uint32())
|
||||
unpackver := b.byte() // decoder version
|
||||
method := b.byte() - 0x30 // decryption method
|
||||
namesize := int(b.uint16())
|
||||
f.Attributes = int64(b.uint32())
|
||||
if h.flags&fileLargeData > 0 {
|
||||
if len(b) < 8 {
|
||||
return nil, errCorruptFileHeader
|
||||
}
|
||||
_ = b.uint32() // already read large PackedSize in readBlockHeader
|
||||
f.UnPackedSize |= int64(b.uint32()) << 32
|
||||
f.UnKnownSize = f.UnPackedSize == -1
|
||||
} else if int32(f.UnPackedSize) == -1 {
|
||||
f.UnKnownSize = true
|
||||
f.UnPackedSize = -1
|
||||
}
|
||||
if len(b) < namesize {
|
||||
return nil, errCorruptFileHeader
|
||||
}
|
||||
name := b.bytes(namesize)
|
||||
if h.flags&fileUnicode == 0 {
|
||||
f.Name = string(name)
|
||||
} else {
|
||||
f.Name = decodeName(name)
|
||||
}
|
||||
// Rar 4.x uses '\' as file separator
|
||||
f.Name = strings.Replace(f.Name, "\\", "/", -1)
|
||||
|
||||
if h.flags&fileVersion > 0 {
|
||||
// file version is stored as ';n' appended to file name
|
||||
i := strings.LastIndex(f.Name, ";")
|
||||
if i > 0 {
|
||||
j, err := strconv.Atoi(f.Name[i+1:])
|
||||
if err == nil && j >= 0 {
|
||||
f.Version = j
|
||||
f.Name = f.Name[:i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var salt []byte
|
||||
if h.flags&fileSalt > 0 {
|
||||
if len(b) < saltSize {
|
||||
return nil, errCorruptFileHeader
|
||||
}
|
||||
salt = b.bytes(saltSize)
|
||||
}
|
||||
if h.flags&fileExtTime > 0 {
|
||||
readExtTimes(f, &b)
|
||||
}
|
||||
|
||||
if !f.first {
|
||||
return f, nil
|
||||
}
|
||||
// fields only needed for first block in a file
|
||||
if h.flags&fileEncrypted > 0 && len(salt) == saltSize {
|
||||
f.key, f.iv = a.getKeys(salt)
|
||||
}
|
||||
a.checksum.Reset()
|
||||
f.cksum = &a.checksum
|
||||
if method == 0 {
|
||||
return f, nil
|
||||
}
|
||||
if a.dec == nil {
|
||||
switch unpackver {
|
||||
case 15, 20, 26:
|
||||
return nil, errUnsupportedDecoder
|
||||
case 29:
|
||||
a.dec = new(decoder29)
|
||||
default:
|
||||
return nil, errUnknownDecoder
|
||||
}
|
||||
a.decVer = unpackver
|
||||
} else if a.decVer != unpackver {
|
||||
return nil, errMultipleDecoders
|
||||
}
|
||||
f.decoder = a.dec
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// readBlockHeader returns the next block header in the archive.
|
||||
// It will return io.EOF if there were no bytes read.
|
||||
func (a *archive15) readBlockHeader() (*blockHeader15, error) {
|
||||
var err error
|
||||
b := a.buf[:7]
|
||||
r := io.Reader(a.v)
|
||||
if a.encrypted {
|
||||
salt := a.buf[:saltSize]
|
||||
_, err = io.ReadFull(r, salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key, iv := a.getKeys(salt)
|
||||
r = newAesDecryptReader(r, key, iv)
|
||||
err = readFull(r, b)
|
||||
} else {
|
||||
_, err = io.ReadFull(r, b)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
crc := b.uint16()
|
||||
hash := crc32.NewIEEE()
|
||||
hash.Write(b)
|
||||
h := new(blockHeader15)
|
||||
h.htype = b.byte()
|
||||
h.flags = b.uint16()
|
||||
size := b.uint16()
|
||||
if size < 7 {
|
||||
return nil, errCorruptHeader
|
||||
}
|
||||
size -= 7
|
||||
if int(size) > cap(a.buf) {
|
||||
a.buf = readBuf(make([]byte, size))
|
||||
}
|
||||
h.data = a.buf[:size]
|
||||
if err := readFull(r, h.data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash.Write(h.data)
|
||||
if crc != uint16(hash.Sum32()) {
|
||||
return nil, errBadHeaderCrc
|
||||
}
|
||||
if h.flags&blockHasData > 0 {
|
||||
if len(h.data) < 4 {
|
||||
return nil, errCorruptHeader
|
||||
}
|
||||
h.dataSize = int64(h.data.uint32())
|
||||
}
|
||||
if (h.htype == blockService || h.htype == blockFile) && h.flags&fileLargeData > 0 {
|
||||
if len(h.data) < 25 {
|
||||
return nil, errCorruptHeader
|
||||
}
|
||||
b := h.data[21:25]
|
||||
h.dataSize |= int64(b.uint32()) << 32
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// next advances to the next file block in the archive
|
||||
func (a *archive15) next() (*fileBlockHeader, error) {
|
||||
for {
|
||||
// could return an io.EOF here as 1.5 archives may not have an end block.
|
||||
h, err := a.readBlockHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.byteReader = limitByteReader(a.v, h.dataSize) // reader for block data
|
||||
|
||||
switch h.htype {
|
||||
case blockFile:
|
||||
return a.parseFileHeader(h)
|
||||
case blockArc:
|
||||
a.encrypted = h.flags&arcEncrypted > 0
|
||||
a.multi = h.flags&arcVolume > 0
|
||||
a.old = h.flags&arcNewNaming == 0
|
||||
a.solid = h.flags&arcSolid > 0
|
||||
case blockEnd:
|
||||
if h.flags&endArcNotLast == 0 || !a.multi {
|
||||
return nil, errArchiveEnd
|
||||
}
|
||||
return nil, errArchiveContinues
|
||||
default:
|
||||
_, err = io.Copy(ioutil.Discard, a.byteReader)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (a *archive15) version() int { return fileFmt15 }
|
||||
|
||||
func (a *archive15) reset() {
|
||||
a.encrypted = false // reset encryption when opening new volume file
|
||||
}
|
||||
|
||||
func (a *archive15) isSolid() bool {
|
||||
return a.solid
|
||||
}
|
||||
|
||||
// newArchive15 creates a new fileBlockReader for a Version 1.5 archive
|
||||
func newArchive15(r *bufio.Reader, password string) fileBlockReader {
|
||||
a := new(archive15)
|
||||
a.v = r
|
||||
a.pass = utf16.Encode([]rune(password)) // convert to UTF-16
|
||||
a.checksum.Hash32 = crc32.NewIEEE()
|
||||
a.buf = readBuf(make([]byte, 100))
|
||||
return a
|
||||
}
|
||||
475
vendor/github.com/nwaples/rardecode/archive50.go
generated
vendored
Normal file
475
vendor/github.com/nwaples/rardecode/archive50.go
generated
vendored
Normal file
@@ -0,0 +1,475 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// block types
|
||||
block5Arc = 1
|
||||
block5File = 2
|
||||
block5Service = 3
|
||||
block5Encrypt = 4
|
||||
block5End = 5
|
||||
|
||||
// block flags
|
||||
block5HasExtra = 0x0001
|
||||
block5HasData = 0x0002
|
||||
block5DataNotFirst = 0x0008
|
||||
block5DataNotLast = 0x0010
|
||||
|
||||
// end block flags
|
||||
endArc5NotLast = 0x0001
|
||||
|
||||
// archive encryption block flags
|
||||
enc5CheckPresent = 0x0001 // password check data is present
|
||||
|
||||
// main archive block flags
|
||||
arc5MultiVol = 0x0001
|
||||
arc5Solid = 0x0004
|
||||
|
||||
// file block flags
|
||||
file5IsDir = 0x0001
|
||||
file5HasUnixMtime = 0x0002
|
||||
file5HasCRC32 = 0x0004
|
||||
file5UnpSizeUnknown = 0x0008
|
||||
|
||||
// file encryption record flags
|
||||
file5EncCheckPresent = 0x0001 // password check data is present
|
||||
file5EncUseMac = 0x0002 // use MAC instead of plain checksum
|
||||
|
||||
cacheSize50 = 4
|
||||
maxPbkdf2Salt = 64
|
||||
pwCheckSize = 8
|
||||
maxKdfCount = 24
|
||||
|
||||
minHeaderSize = 7
|
||||
)
|
||||
|
||||
var (
|
||||
errBadPassword = errors.New("rardecode: incorrect password")
|
||||
errCorruptEncrypt = errors.New("rardecode: corrupt encryption data")
|
||||
errUnknownEncMethod = errors.New("rardecode: unknown encryption method")
|
||||
)
|
||||
|
||||
type extra struct {
|
||||
ftype uint64 // field type
|
||||
data readBuf // field data
|
||||
}
|
||||
|
||||
type blockHeader50 struct {
|
||||
htype uint64 // block type
|
||||
flags uint64
|
||||
data readBuf // block header data
|
||||
extra []extra // extra fields
|
||||
dataSize int64 // size of block data
|
||||
}
|
||||
|
||||
// leHash32 wraps a hash.Hash32 to return the result of Sum in little
|
||||
// endian format.
|
||||
type leHash32 struct {
|
||||
hash.Hash32
|
||||
}
|
||||
|
||||
func (h leHash32) Sum(b []byte) []byte {
|
||||
s := h.Sum32()
|
||||
return append(b, byte(s), byte(s>>8), byte(s>>16), byte(s>>24))
|
||||
}
|
||||
|
||||
func newLittleEndianCRC32() hash.Hash32 {
|
||||
return leHash32{crc32.NewIEEE()}
|
||||
}
|
||||
|
||||
// hash50 implements fileChecksum for RAR 5 archives
|
||||
type hash50 struct {
|
||||
hash.Hash // hash file data is written to
|
||||
sum []byte // file checksum
|
||||
key []byte // if present used with hmac in calculating checksum from hash
|
||||
}
|
||||
|
||||
func (h *hash50) valid() bool {
|
||||
sum := h.Sum(nil)
|
||||
if len(h.key) > 0 {
|
||||
mac := hmac.New(sha256.New, h.key)
|
||||
mac.Write(sum)
|
||||
sum = mac.Sum(sum[:0])
|
||||
if len(h.sum) == 4 {
|
||||
// CRC32
|
||||
for i, v := range sum[4:] {
|
||||
sum[i&3] ^= v
|
||||
}
|
||||
sum = sum[:4]
|
||||
}
|
||||
}
|
||||
return bytes.Equal(sum, h.sum)
|
||||
}
|
||||
|
||||
// archive50 implements fileBlockReader for RAR 5 file format archives
|
||||
type archive50 struct {
|
||||
byteReader // reader for current block data
|
||||
v *bufio.Reader // reader for current archive volume
|
||||
pass []byte
|
||||
blockKey []byte // key used to encrypt blocks
|
||||
multi bool // archive is multi-volume
|
||||
solid bool // is a solid archive
|
||||
checksum hash50 // file checksum
|
||||
dec decoder // optional decoder used to unpack file
|
||||
buf readBuf // temporary buffer
|
||||
keyCache [cacheSize50]struct { // encryption key cache
|
||||
kdfCount int
|
||||
salt []byte
|
||||
keys [][]byte
|
||||
}
|
||||
}
|
||||
|
||||
// calcKeys50 calculates the keys used in RAR 5 archive processing.
|
||||
// The returned slice of byte slices contains 3 keys.
|
||||
// Key 0 is used for block or file decryption.
|
||||
// Key 1 is optionally used for file checksum calculation.
|
||||
// Key 2 is optionally used for password checking.
|
||||
func calcKeys50(pass, salt []byte, kdfCount int) [][]byte {
|
||||
if len(salt) > maxPbkdf2Salt {
|
||||
salt = salt[:maxPbkdf2Salt]
|
||||
}
|
||||
keys := make([][]byte, 3)
|
||||
if len(keys) == 0 {
|
||||
return keys
|
||||
}
|
||||
|
||||
prf := hmac.New(sha256.New, pass)
|
||||
prf.Write(salt)
|
||||
prf.Write([]byte{0, 0, 0, 1})
|
||||
|
||||
t := prf.Sum(nil)
|
||||
u := append([]byte(nil), t...)
|
||||
|
||||
kdfCount--
|
||||
|
||||
for i, iter := range []int{kdfCount, 16, 16} {
|
||||
for iter > 0 {
|
||||
prf.Reset()
|
||||
prf.Write(u)
|
||||
u = prf.Sum(u[:0])
|
||||
for j := range u {
|
||||
t[j] ^= u[j]
|
||||
}
|
||||
iter--
|
||||
}
|
||||
keys[i] = append([]byte(nil), t...)
|
||||
}
|
||||
|
||||
pwcheck := keys[2]
|
||||
for i, v := range pwcheck[pwCheckSize:] {
|
||||
pwcheck[i&(pwCheckSize-1)] ^= v
|
||||
}
|
||||
keys[2] = pwcheck[:pwCheckSize]
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
// getKeys reads kdfcount and salt from b and returns the corresponding encryption keys.
|
||||
func (a *archive50) getKeys(b *readBuf) (keys [][]byte, err error) {
|
||||
if len(*b) < 17 {
|
||||
return nil, errCorruptEncrypt
|
||||
}
|
||||
// read kdf count and salt
|
||||
kdfCount := int(b.byte())
|
||||
if kdfCount > maxKdfCount {
|
||||
return nil, errCorruptEncrypt
|
||||
}
|
||||
kdfCount = 1 << uint(kdfCount)
|
||||
salt := b.bytes(16)
|
||||
|
||||
// check cache of keys for match
|
||||
for _, v := range a.keyCache {
|
||||
if kdfCount == v.kdfCount && bytes.Equal(salt, v.salt) {
|
||||
return v.keys, nil
|
||||
}
|
||||
}
|
||||
// not found, calculate keys
|
||||
keys = calcKeys50(a.pass, salt, kdfCount)
|
||||
|
||||
// store in cache
|
||||
copy(a.keyCache[1:], a.keyCache[:])
|
||||
a.keyCache[0].kdfCount = kdfCount
|
||||
a.keyCache[0].salt = append([]byte(nil), salt...)
|
||||
a.keyCache[0].keys = keys
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// checkPassword calculates if a password is correct given password check data and keys.
|
||||
func checkPassword(b *readBuf, keys [][]byte) error {
|
||||
if len(*b) < 12 {
|
||||
return nil // not enough bytes, ignore for the moment
|
||||
}
|
||||
pwcheck := b.bytes(8)
|
||||
sum := b.bytes(4)
|
||||
csum := sha256.Sum256(pwcheck)
|
||||
if bytes.Equal(sum, csum[:len(sum)]) && !bytes.Equal(pwcheck, keys[2]) {
|
||||
return errBadPassword
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseFileEncryptionRecord processes the optional file encryption record from a file header.
|
||||
func (a *archive50) parseFileEncryptionRecord(b readBuf, f *fileBlockHeader) error {
|
||||
if ver := b.uvarint(); ver != 0 {
|
||||
return errUnknownEncMethod
|
||||
}
|
||||
flags := b.uvarint()
|
||||
|
||||
keys, err := a.getKeys(&b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f.key = keys[0]
|
||||
if len(b) < 16 {
|
||||
return errCorruptEncrypt
|
||||
}
|
||||
f.iv = b.bytes(16)
|
||||
|
||||
if flags&file5EncCheckPresent > 0 {
|
||||
if err := checkPassword(&b, keys); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if flags&file5EncUseMac > 0 {
|
||||
a.checksum.key = keys[1]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseFileHeader parses a file block header (and its optional extra records)
// into a fileBlockHeader, setting up per-file checksum and decoder state on the
// archive as needed.
func (a *archive50) parseFileHeader(h *blockHeader50) (*fileBlockHeader, error) {
	// reset per-file checksum state
	a.checksum.sum = nil
	a.checksum.key = nil

	f := new(fileBlockHeader)

	// a file's data may span multiple blocks (and volumes)
	f.first = h.flags&block5DataNotFirst == 0
	f.last = h.flags&block5DataNotLast == 0

	flags := h.data.uvarint() // file flags
	f.IsDir = flags&file5IsDir > 0
	f.UnKnownSize = flags&file5UnpSizeUnknown > 0
	f.UnPackedSize = int64(h.data.uvarint())
	f.PackedSize = h.dataSize
	f.Attributes = int64(h.data.uvarint())
	if flags&file5HasUnixMtime > 0 {
		if len(h.data) < 4 {
			return nil, errCorruptFileHeader
		}
		f.ModificationTime = time.Unix(int64(h.data.uint32()), 0)
	}
	if flags&file5HasCRC32 > 0 {
		if len(h.data) < 4 {
			return nil, errCorruptFileHeader
		}
		// copy the stored CRC32 out of the shared header buffer
		a.checksum.sum = append([]byte(nil), h.data.bytes(4)...)
		if f.first {
			a.checksum.Hash = newLittleEndianCRC32()
			f.cksum = &a.checksum
		}
	}

	flags = h.data.uvarint() // compression flags
	f.solid = flags&0x0040 > 0
	f.winSize = uint(flags&0x3C00)>>10 + 17 // value in range 17..32
	method := (flags >> 7) & 7              // compression method (0 == none)
	if f.first && method != 0 {
		unpackver := flags & 0x003f
		if unpackver != 0 {
			// only unpack version 0 is supported
			return nil, errUnknownDecoder
		}
		// decoder is created lazily and shared across files
		if a.dec == nil {
			a.dec = new(decoder50)
		}
		f.decoder = a.dec
	}
	switch h.data.uvarint() {
	case 0:
		f.HostOS = HostOSWindows
	case 1:
		f.HostOS = HostOSUnix
	default:
		f.HostOS = HostOSUnknown
	}
	nlen := int(h.data.uvarint())
	if len(h.data) < nlen {
		return nil, errCorruptFileHeader
	}
	f.Name = string(h.data.bytes(nlen))

	// parse optional extra records
	for _, e := range h.extra {
		var err error
		switch e.ftype {
		case 1: // encryption
			err = a.parseFileEncryptionRecord(e.data, f)
		case 2:
			// TODO: hash
		case 3:
			// TODO: time
		case 4: // version
			_ = e.data.uvarint() // ignore flags field
			f.Version = int(e.data.uvarint())
		case 5:
			// TODO: redirection
		case 6:
			// TODO: owner
		}
		if err != nil {
			return nil, err
		}
	}
	return f, nil
}
|
||||
|
||||
// parseEncryptionBlock calculates the key for block encryption.
// Subsequent block headers read by readBlockHeader are decrypted with this key.
func (a *archive50) parseEncryptionBlock(b readBuf) error {
	// only encryption version 0 is known
	if ver := b.uvarint(); ver != 0 {
		return errUnknownEncMethod
	}
	flags := b.uvarint()
	keys, err := a.getKeys(&b)
	if err != nil {
		return err
	}
	// optional password verification data
	if flags&enc5CheckPresent > 0 {
		if err := checkPassword(&b, keys); err != nil {
			return err
		}
	}
	a.blockKey = keys[0]
	return nil
}
|
||||
|
||||
// readBlockHeader reads and parses the next block header from the volume,
// decrypting it first when header encryption is active. It verifies the
// header CRC and splits off any extra records into h.extra.
func (a *archive50) readBlockHeader() (*blockHeader50, error) {
	r := io.Reader(a.v)
	if a.blockKey != nil {
		// block is encrypted; a 16-byte IV precedes each encrypted header
		iv := a.buf[:16]
		if err := readFull(r, iv); err != nil {
			return nil, err
		}
		r = newAesDecryptReader(r, a.blockKey, iv)
	}

	// read enough to get the stored CRC and the header size varint
	b := a.buf[:minHeaderSize]
	if err := readFull(r, b); err != nil {
		return nil, err
	}
	crc := b.uint32()

	hash := crc32.NewIEEE()
	hash.Write(b) // CRC covers everything after the stored crc field

	size := int(b.uvarint()) // header size
	// grow the shared buffer if needed, otherwise reslice it
	if size > cap(a.buf) {
		a.buf = readBuf(make([]byte, size))
	} else {
		a.buf = a.buf[:size]
	}
	n := copy(a.buf, b) // copy left over bytes
	if err := readFull(r, a.buf[n:]); err != nil { // read rest of header
		return nil, err
	}

	// check header crc
	hash.Write(a.buf[n:])
	if crc != hash.Sum32() {
		return nil, errBadHeaderCrc
	}

	b = a.buf
	h := new(blockHeader50)
	h.htype = b.uvarint()
	h.flags = b.uvarint()

	var extraSize int
	if h.flags&block5HasExtra > 0 {
		extraSize = int(b.uvarint())
	}
	if h.flags&block5HasData > 0 {
		h.dataSize = int64(b.uvarint())
	}
	if len(b) < extraSize {
		return nil, errCorruptHeader
	}
	// everything before the extra area is the block's own data
	h.data = b.bytes(len(b) - extraSize)

	// read header extra records: each is a size-prefixed (type, data) pair
	for len(b) > 0 {
		size = int(b.uvarint())
		if len(b) < size {
			return nil, errCorruptHeader
		}
		data := readBuf(b.bytes(size))
		ftype := data.uvarint()
		h.extra = append(h.extra, extra{ftype, data})
	}

	return h, nil
}
|
||||
|
||||
// next advances to the next file block in the archive, processing any
// intervening non-file blocks (archive, encryption, end) along the way.
// It returns errArchiveEnd at the final end block, or errArchiveContinues
// when the archive continues in another volume.
func (a *archive50) next() (*fileBlockHeader, error) {
	for {
		h, err := a.readBlockHeader()
		if err != nil {
			return nil, err
		}
		// limit reads of block data to the size declared in the header
		a.byteReader = limitByteReader(a.v, h.dataSize)
		switch h.htype {
		case block5File:
			return a.parseFileHeader(h)
		case block5Arc:
			flags := h.data.uvarint()
			a.multi = flags&arc5MultiVol > 0
			a.solid = flags&arc5Solid > 0
		case block5Encrypt:
			err = a.parseEncryptionBlock(h.data)
		case block5End:
			flags := h.data.uvarint()
			if flags&endArc5NotLast == 0 || !a.multi {
				return nil, errArchiveEnd
			}
			return nil, errArchiveContinues
		default:
			// discard block data
			_, err = io.Copy(ioutil.Discard, a.byteReader)
		}
		if err != nil {
			return nil, err
		}
	}
}
|
||||
|
||||
// version returns the archive file format version constant.
func (a *archive50) version() int { return fileFmt50 }

// reset clears per-volume state.
func (a *archive50) reset() {
	a.blockKey = nil // reset encryption when opening new volume file
}

// isSolid reports whether the archive was flagged as a solid archive.
func (a *archive50) isSolid() bool {
	return a.solid
}
|
||||
|
||||
// newArchive50 creates a new fileBlockReader for a Version 5 archive.
|
||||
func newArchive50(r *bufio.Reader, password string) fileBlockReader {
|
||||
a := new(archive50)
|
||||
a.v = r
|
||||
a.pass = []byte(password)
|
||||
a.buf = make([]byte, 100)
|
||||
return a
|
||||
}
|
||||
119
vendor/github.com/nwaples/rardecode/bit_reader.go
generated
vendored
Normal file
119
vendor/github.com/nwaples/rardecode/bit_reader.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
package rardecode
|
||||
|
||||
import "io"
|
||||
|
||||
// bitReader provides bit-level reads with the ability to revert them.
type bitReader interface {
	readBits(n uint) (int, error) // read n bits of data
	unreadBits(n uint)            // revert the reading of the last n bits read
}

// limitedBitReader wraps a bitReader, limiting the total number of bits read.
type limitedBitReader struct {
	br  bitReader
	n   int   // bits remaining before the limit is reached
	err error // error to return if br returns EOF before all n bits have been read
}
|
||||
|
||||
// limitBitReader returns a bitReader that reads from br and stops with io.EOF after n bits.
|
||||
// If br returns an io.EOF before reading n bits, err is returned.
|
||||
func limitBitReader(br bitReader, n int, err error) bitReader {
|
||||
return &limitedBitReader{br, n, err}
|
||||
}
|
||||
|
||||
func (l *limitedBitReader) readBits(n uint) (int, error) {
|
||||
if int(n) > l.n {
|
||||
return 0, io.EOF
|
||||
}
|
||||
v, err := l.br.readBits(n)
|
||||
if err == nil {
|
||||
l.n -= int(n)
|
||||
} else if err == io.EOF {
|
||||
err = l.err
|
||||
}
|
||||
return v, err
|
||||
}
|
||||
|
||||
// unreadBits returns n bits to the limit and reverts them in the
// underlying reader.
func (l *limitedBitReader) unreadBits(n uint) {
	l.n += int(n)
	l.br.unreadBits(n)
}
|
||||
|
||||
// rarBitReader wraps an io.ByteReader to perform various bit and byte
// reading utility functions used in RAR file processing.
type rarBitReader struct {
	r io.ByteReader
	v int  // bit accumulator; low bits hold data not yet consumed
	n uint // number of valid buffered bits in v
}

// reset reinitializes the reader to consume bytes from br, discarding
// any buffered bits.
func (r *rarBitReader) reset(br io.ByteReader) {
	r.r = br
	r.n = 0
	r.v = 0
}
|
||||
|
||||
// readBits returns the next n bits of the stream (most significant
// first) as the low bits of an int, buffering whole bytes from the
// underlying reader as needed.
func (r *rarBitReader) readBits(n uint) (int, error) {
	// fill the accumulator a byte at a time until n bits are buffered
	for n > r.n {
		c, err := r.r.ReadByte()
		if err != nil {
			return 0, err
		}
		r.v = r.v<<8 | int(c)
		r.n += 8
	}
	r.n -= n
	// extract the n highest buffered bits
	return (r.v >> r.n) & ((1 << n) - 1), nil
}
|
||||
|
||||
// unreadBits makes the last n bits read available again. The bits are
// still present in the accumulator, so only the count is adjusted.
func (r *rarBitReader) unreadBits(n uint) {
	r.n += n
}

// alignByte aligns the current bit reading input to the next byte boundary.
// Buffered bits short of a full byte are discarded.
func (r *rarBitReader) alignByte() {
	r.n -= r.n % 8
}
|
||||
|
||||
// readUint32 reads a RAR V3 encoded uint32. A 2-bit tag selects the
// width: tags 0, 2 and 3 give a plain 4, 16 or 32-bit value; tag 1
// selects a short form keyed by a further 4-bit prefix.
func (r *rarBitReader) readUint32() (uint32, error) {
	n, err := r.readBits(2)
	if err != nil {
		return 0, err
	}
	if n != 1 {
		// tag 0 -> 4 bits, tag 2 -> 16 bits, tag 3 -> 32 bits
		n, err = r.readBits(4 << uint(n))
		return uint32(n), err
	}
	n, err = r.readBits(4)
	if err != nil {
		return 0, err
	}
	if n == 0 {
		// prefix 0: a byte value sign-extended via the high bits
		n, err = r.readBits(8)
		n |= -1 << 8
		return uint32(n), err
	}
	// non-zero prefix: 8-bit value, high nibble already read
	nlow, err := r.readBits(4)
	n = n<<4 | nlow
	return uint32(n), err
}
|
||||
|
||||
func (r *rarBitReader) ReadByte() (byte, error) {
|
||||
n, err := r.readBits(8)
|
||||
return byte(n), err
|
||||
}
|
||||
|
||||
// readFull reads len(p) bytes into p. If fewer bytes are read an error is returned.
|
||||
func (r *rarBitReader) readFull(p []byte) error {
|
||||
for i := range p {
|
||||
c, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p[i] = c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// newRarBitReader returns a rarBitReader reading from r with an empty
// bit buffer.
func newRarBitReader(r io.ByteReader) *rarBitReader {
	return &rarBitReader{r: r}
}
|
||||
264
vendor/github.com/nwaples/rardecode/decode29.go
generated
vendored
Normal file
264
vendor/github.com/nwaples/rardecode/decode29.go
generated
vendored
Normal file
@@ -0,0 +1,264 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
	maxCodeSize      = 0x10000 // maximum byte size of a VM filter's code block
	maxUniqueFilters = 1024    // maximum number of distinct filters accepted
)

var (
	// Errors marking the end of the decoding block and/or file
	endOfFile         = errors.New("rardecode: end of file")
	endOfBlock        = errors.New("rardecode: end of block")
	endOfBlockAndFile = errors.New("rardecode: end of block and file")
)
|
||||
|
||||
// decoder29 implements the decoder interface for RAR 3.0 compression (unpack version 29).
// Decode input is broken up into 1 or more blocks. The start of each block specifies
// the decoding algorithm (ppm or lz) and optional data to initialize with.
// Block length is not stored, it is determined only after decoding an end of file and/or
// block marker in the data.
type decoder29 struct {
	br      *rarBitReader
	eof     bool       // at file eof
	fnum    int        // current filter number (index into filters)
	flen    []int      // filter block length history
	filters []v3Filter // list of current filters used by archive encoding

	// current decode function (lz or ppm).
	// When called it should perform a single decode operation, and either apply the
	// data to the window or return the raw bytes for a filter.
	decode func(w *window) ([]byte, error)

	lz  lz29Decoder // lz decoder
	ppm ppm29Decoder // ppm decoder
}
|
||||
|
||||
// init initializes the decoder for decoding a new file.
// When reset is true, all state carried over from a previous file
// (filters, lz/ppm state, current decode function) is discarded.
func (d *decoder29) init(r io.ByteReader, reset bool) error {
	if d.br == nil {
		d.br = newRarBitReader(r)
	} else {
		d.br.reset(r)
	}
	d.eof = false
	if reset {
		d.initFilters()
		d.lz.reset()
		d.ppm.reset()
		d.decode = nil
	}
	// no decoder selected yet: read the first block header to pick one
	if d.decode == nil {
		return d.readBlockHeader()
	}
	return nil
}
|
||||
|
||||
// initFilters clears all VM filter state.
func (d *decoder29) initFilters() {
	d.fnum = 0
	d.flen = nil
	d.filters = nil
}
|
||||
|
||||
// readVMCode reads the raw bytes for the code/commands used in a vm filter.
// The first byte holds an xor checksum of the remaining code bytes.
func readVMCode(br *rarBitReader) ([]byte, error) {
	n, err := br.readUint32()
	if err != nil {
		return nil, err
	}
	// reject empty or oversized code blocks
	if n > maxCodeSize || n == 0 {
		return nil, errInvalidFilter
	}
	buf := make([]byte, n)
	err = br.readFull(buf)
	if err != nil {
		return nil, err
	}
	var x byte
	for _, c := range buf[1:] {
		x ^= c
	}
	// simple xor checksum on data
	if x != buf[0] {
		return nil, errInvalidFilter
	}
	return buf, nil
}
|
||||
|
||||
// parseVMFilter parses the raw filter data in buf (as returned by a
// decode step) into a filterBlock: filter identity, offset, length,
// initial registers, optional new VM code, and optional global data.
func (d *decoder29) parseVMFilter(buf []byte) (*filterBlock, error) {
	flags := buf[0]
	br := newRarBitReader(bytes.NewReader(buf[1:]))
	fb := new(filterBlock)

	// Find the filter number which is an index into d.filters.
	// If filter number == len(d.filters) it is a new filter to be added.
	if flags&0x80 > 0 {
		n, err := br.readUint32()
		if err != nil {
			return nil, err
		}
		if n == 0 {
			// zero means: reset all filter state
			d.initFilters()
			fb.reset = true
		} else {
			n--
			if n > maxUniqueFilters {
				return nil, errInvalidFilter
			}
			if int(n) > len(d.filters) {
				return nil, errInvalidFilter
			}
		}
		d.fnum = int(n)
	}

	// filter offset
	n, err := br.readUint32()
	if err != nil {
		return nil, err
	}
	if flags&0x40 > 0 {
		n += 258
	}
	fb.offset = int(n)

	// filter length (remembered per filter; only updated when flagged)
	if d.fnum == len(d.flen) {
		d.flen = append(d.flen, 0)
	}
	if flags&0x20 > 0 {
		n, err = br.readUint32()
		if err != nil {
			return nil, err
		}
		//fb.length = int(n)
		d.flen[d.fnum] = int(n)
	}
	fb.length = d.flen[d.fnum]

	// initial register values: a bitmap selects which registers follow
	r := make(map[int]uint32)
	if flags&0x10 > 0 {
		bits, err := br.readBits(vmRegs - 1)
		if err != nil {
			return nil, err
		}
		for i := 0; i < vmRegs-1; i++ {
			if bits&1 > 0 {
				r[i], err = br.readUint32()
				if err != nil {
					return nil, err
				}
			}
			bits >>= 1
		}
	}

	// filter is new so read the code for it
	if d.fnum == len(d.filters) {
		code, err := readVMCode(br)
		if err != nil {
			return nil, err
		}
		f, err := getV3Filter(code)
		if err != nil {
			return nil, err
		}
		d.filters = append(d.filters, f)
		d.flen = append(d.flen, fb.length)
	}

	// read global data
	var g []byte
	if flags&0x08 > 0 {
		n, err := br.readUint32()
		if err != nil {
			return nil, err
		}
		if n > vmGlobalSize-vmFixedGlobalSize {
			return nil, errInvalidFilter
		}
		g = make([]byte, n)
		err = br.readFull(g)
		if err != nil {
			return nil, err
		}
	}

	// create filter function closing over registers and global data
	f := d.filters[d.fnum]
	fb.filter = func(buf []byte, offset int64) ([]byte, error) {
		return f(r, g, buf, offset)
	}

	return fb, nil
}
|
||||
|
||||
// readBlockHeader determines and initializes the current decoder for a new decode block.
|
||||
func (d *decoder29) readBlockHeader() error {
|
||||
d.br.alignByte()
|
||||
n, err := d.br.readBits(1)
|
||||
if err == nil {
|
||||
if n > 0 {
|
||||
d.decode = d.ppm.decode
|
||||
err = d.ppm.init(d.br)
|
||||
} else {
|
||||
d.decode = d.lz.decode
|
||||
err = d.lz.init(d.br)
|
||||
}
|
||||
}
|
||||
if err == io.EOF {
|
||||
err = errDecoderOutOfData
|
||||
}
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
// fill decodes data into the window until it is full or the current file
// ends, returning any filters decoded along the way. Once eof has been
// seen, subsequent calls return io.EOF immediately.
func (d *decoder29) fill(w *window) ([]*filterBlock, error) {
	if d.eof {
		return nil, io.EOF
	}

	var fl []*filterBlock

	for w.available() > 0 {
		b, err := d.decode(w) // perform a single decode operation
		if len(b) > 0 && err == nil {
			// parse raw data for filter and add to list of filters
			var f *filterBlock
			f, err = d.parseVMFilter(b)
			if f != nil {
				// make offset relative to read index (from write index)
				f.offset += w.buffered()
				fl = append(fl, f)
			}
		}

		switch err {
		case nil:
			continue
		case endOfBlock:
			// start the next block of the same file
			err = d.readBlockHeader()
			if err == nil {
				continue
			}
		case endOfFile:
			d.eof = true
			err = io.EOF
		case endOfBlockAndFile:
			d.eof = true
			d.decode = nil // clear decoder, it will be setup by next init()
			err = io.EOF
		case io.EOF:
			// input ran out mid-stream: report as corruption
			err = errDecoderOutOfData
		}
		return fl, err
	}
	// return filters
	return fl, nil
}
|
||||
247
vendor/github.com/nwaples/rardecode/decode29_lz.go
generated
vendored
Normal file
247
vendor/github.com/nwaples/rardecode/decode29_lz.go
generated
vendored
Normal file
@@ -0,0 +1,247 @@
|
||||
package rardecode
|
||||
|
||||
const (
	// sizes of the four huffman alphabets; tableSize is the combined
	// code length table holding all of them
	mainSize      = 299
	offsetSize    = 60
	lowOffsetSize = 17
	lengthSize    = 28
	tableSize     = mainSize + offsetSize + lowOffsetSize + lengthSize
)

var (
	// base values and extra-bit counts indexed by length slot
	lengthBase = [28]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20,
		24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224}
	lengthExtraBits = [28]uint{0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
		2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5}

	// base values and extra-bit counts indexed by offset slot
	offsetBase = [60]int{0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96,
		128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096,
		6144, 8192, 12288, 16384, 24576, 32768, 49152, 65536, 98304,
		131072, 196608, 262144, 327680, 393216, 458752, 524288,
		589824, 655360, 720896, 786432, 851968, 917504, 983040,
		1048576, 1310720, 1572864, 1835008, 2097152, 2359296, 2621440,
		2883584, 3145728, 3407872, 3670016, 3932160}
	offsetExtraBits = [60]uint{0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6,
		6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14,
		15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
		18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18}

	// short offsets used for length-2 matches (symbols 263-270)
	shortOffsetBase      = [8]int{0, 4, 8, 16, 32, 64, 128, 192}
	shortOffsetExtraBits = [8]uint{2, 2, 3, 4, 5, 6, 6, 6}
)
|
||||
|
||||
// lz29Decoder decodes the LZ (sliding window) encoded blocks of the
// RAR 3.0 compression format.
type lz29Decoder struct {
	codeLength [tableSize]byte // combined code lengths for all four tables

	mainDecoder      huffmanDecoder
	offsetDecoder    huffmanDecoder
	lowOffsetDecoder huffmanDecoder
	lengthDecoder    huffmanDecoder

	offset           [4]int // history of previous offsets
	length           int    // previous length
	lowOffset        int    // cached low 4 bits of the last large offset
	lowOffsetRepeats int    // remaining times lowOffset should be reused

	br *rarBitReader
}
|
||||
|
||||
func (d *lz29Decoder) reset() {
|
||||
for i := range d.offset {
|
||||
d.offset[i] = 0
|
||||
}
|
||||
d.length = 0
|
||||
for i := range d.codeLength {
|
||||
d.codeLength[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// init reads the code length table from the bit stream and (re)builds
// the four huffman decoders for a new block.
func (d *lz29Decoder) init(br *rarBitReader) error {
	d.br = br
	d.lowOffset = 0
	d.lowOffsetRepeats = 0

	n, err := d.br.readBits(1)
	if err != nil {
		return err
	}
	// addOld selects whether new lengths update the previous table
	// (behavior defined by readCodeLengthTable, not shown here)
	addOld := n > 0

	cl := d.codeLength[:]
	if err = readCodeLengthTable(d.br, cl, addOld); err != nil {
		return err
	}

	// split the single combined table between the four decoders
	d.mainDecoder.init(cl[:mainSize])
	cl = cl[mainSize:]
	d.offsetDecoder.init(cl[:offsetSize])
	cl = cl[offsetSize:]
	d.lowOffsetDecoder.init(cl[:lowOffsetSize])
	cl = cl[lowOffsetSize:]
	d.lengthDecoder.init(cl)

	return nil
}
|
||||
|
||||
// readFilterData reads the raw bytes of a VM filter definition from the
// bit stream. The flags byte encodes the byte count in its low 3 bits
// (values 7 and 8 select longer count encodings) and is returned as the
// first byte of the result.
func (d *lz29Decoder) readFilterData() (b []byte, err error) {
	flags, err := d.br.ReadByte()
	if err != nil {
		return nil, err
	}

	n := (int(flags) & 7) + 1
	switch n {
	case 7:
		// count is 7 plus an extra byte
		n, err = d.br.readBits(8)
		n += 7
		if err != nil {
			return nil, err
		}
	case 8:
		// count is a full 16-bit value
		n, err = d.br.readBits(16)
		if err != nil {
			return nil, err
		}
	}

	// include the flags byte at the front of the returned data
	buf := make([]byte, n+1)
	buf[0] = flags
	err = d.br.readFull(buf[1:])

	return buf, err
}
|
||||
|
||||
func (d *lz29Decoder) readEndOfBlock() error {
|
||||
n, err := d.br.readBits(1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n > 0 {
|
||||
return endOfBlock
|
||||
}
|
||||
n, err = d.br.readBits(1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n > 0 {
|
||||
return endOfBlockAndFile
|
||||
}
|
||||
return endOfFile
|
||||
}
|
||||
|
||||
// decode performs a single decode operation: a literal byte, an
// end-of-block/file marker, raw filter data (returned to the caller),
// or an LZ match copied from the window history.
func (d *lz29Decoder) decode(win *window) ([]byte, error) {
	sym, err := d.mainDecoder.readSym(d.br)
	if err != nil {
		return nil, err
	}

	switch {
	case sym < 256:
		// literal
		win.writeByte(byte(sym))
		return nil, nil
	case sym == 256:
		return nil, d.readEndOfBlock()
	case sym == 257:
		return d.readFilterData()
	case sym == 258:
		// use previous offset and length
	case sym < 263:
		// reuse one of the last 4 offsets (moved to front), new length
		i := sym - 259
		offset := d.offset[i]
		copy(d.offset[1:i+1], d.offset[:i])
		d.offset[0] = offset

		i, err := d.lengthDecoder.readSym(d.br)
		if err != nil {
			return nil, err
		}
		d.length = lengthBase[i] + 2
		bits := lengthExtraBits[i]
		if bits > 0 {
			n, err := d.br.readBits(bits)
			if err != nil {
				return nil, err
			}
			d.length += n
		}
	case sym < 271:
		// short match: fixed length 2 with a small new offset
		i := sym - 263
		copy(d.offset[1:], d.offset[:])
		offset := shortOffsetBase[i] + 1
		bits := shortOffsetExtraBits[i]
		if bits > 0 {
			n, err := d.br.readBits(bits)
			if err != nil {
				return nil, err
			}
			offset += n
		}
		d.offset[0] = offset

		d.length = 2
	default:
		// new length and offset
		i := sym - 271
		d.length = lengthBase[i] + 3
		bits := lengthExtraBits[i]
		if bits > 0 {
			n, err := d.br.readBits(bits)
			if err != nil {
				return nil, err
			}
			d.length += n
		}

		i, err = d.offsetDecoder.readSym(d.br)
		if err != nil {
			return nil, err
		}
		offset := offsetBase[i] + 1
		bits = offsetExtraBits[i]

		switch {
		case bits >= 4:
			// large offsets: the extra bits above the low 4 are raw,
			// the low 4 bits have their own huffman code
			if bits > 4 {
				n, err := d.br.readBits(bits - 4)
				if err != nil {
					return nil, err
				}
				offset += n << 4
			}

			if d.lowOffsetRepeats > 0 {
				// reuse the cached low offset
				d.lowOffsetRepeats--
				offset += d.lowOffset
			} else {
				n, err := d.lowOffsetDecoder.readSym(d.br)
				if err != nil {
					return nil, err
				}
				if n == 16 {
					// symbol 16: repeat the previous low offset 15 more times
					d.lowOffsetRepeats = 15
					offset += d.lowOffset
				} else {
					offset += n
					d.lowOffset = n
				}
			}
		case bits > 0:
			n, err := d.br.readBits(bits)
			if err != nil {
				return nil, err
			}
			offset += n
		}

		// larger offsets imply a larger minimum match length
		if offset >= 0x2000 {
			d.length++
			if offset >= 0x40000 {
				d.length++
			}
		}
		copy(d.offset[1:], d.offset[:])
		d.offset[0] = offset
	}
	win.copyBytes(d.length, d.offset[0])
	return nil, nil
}
|
||||
132
vendor/github.com/nwaples/rardecode/decode29_ppm.go
generated
vendored
Normal file
132
vendor/github.com/nwaples/rardecode/decode29_ppm.go
generated
vendored
Normal file
@@ -0,0 +1,132 @@
|
||||
package rardecode
|
||||
|
||||
import "io"
|
||||
|
||||
// ppm29Decoder decodes the PPM (model based) encoded blocks of the
// RAR 3.0 compression format.
type ppm29Decoder struct {
	m   model // ppm model
	esc byte  // escape character
	br  io.ByteReader
}
|
||||
|
||||
// init reads the PPM block parameters (model order, optional memory
// size and escape character) from the bit stream and initializes the
// underlying model.
func (d *ppm29Decoder) init(br *rarBitReader) error {
	maxOrder, err := br.readBits(7)
	if err != nil {
		return err
	}
	reset := maxOrder&0x20 > 0 // bit 5: reset the model

	// Should have flushed all unread bits from bitReader by now,
	// use underlying ByteReader
	d.br = br.r

	var maxMB int
	if reset {
		// model memory size, stored as size-1
		c, err := d.br.ReadByte()
		if err != nil {
			return err
		}
		maxMB = int(c) + 1
	}

	if maxOrder&0x40 > 0 {
		// bit 6: a new escape character follows
		d.esc, err = d.br.ReadByte()
		if err != nil {
			return err
		}
	}

	// low 5 bits hold the order; values above 16 are scaled up
	maxOrder = (maxOrder & 0x1f) + 1
	if maxOrder > 16 {
		maxOrder = 16 + (maxOrder-16)*3
	}

	return d.m.init(d.br, reset, maxOrder, maxMB)
}
|
||||
|
||||
// reset restores the default escape character for a new archive.
func (d *ppm29Decoder) reset() {
	d.esc = 2 // default escape character
}
|
||||
|
||||
// readFilterData reads the raw bytes of a VM filter definition from the
// model output. The first byte holds flags and the low 3 bits of the
// length; lengths of 7 or 8 select longer count encodings. The flags
// byte is returned as the first byte of the result.
func (d *ppm29Decoder) readFilterData() ([]byte, error) {
	c, err := d.m.ReadByte()
	if err != nil {
		return nil, err
	}
	n := int(c&7) + 1
	if n == 7 {
		// count is 7 plus an extra byte
		b, err := d.m.ReadByte()
		if err != nil {
			return nil, err
		}
		n += int(b)
	} else if n == 8 {
		// count is a 16-bit big-endian value
		b, err := d.m.ReadByte()
		if err != nil {
			return nil, err
		}
		n = int(b) << 8
		b, err = d.m.ReadByte()
		if err != nil {
			return nil, err
		}
		n |= int(b)
	}

	n++
	buf := make([]byte, n)
	buf[0] = byte(c) // flags byte included in the returned data
	for i := 1; i < n; i++ {
		buf[i], err = d.m.ReadByte()
		if err != nil {
			return nil, err
		}
	}
	return buf, nil
}
|
||||
|
||||
// decode performs a single decode operation: a literal byte, or — when
// the escape character is seen — an end marker, filter data, or a copy
// instruction applied to the window.
func (d *ppm29Decoder) decode(w *window) ([]byte, error) {
	c, err := d.m.ReadByte()
	if err != nil {
		return nil, err
	}
	if c != d.esc {
		// ordinary literal
		w.writeByte(c)
		return nil, nil
	}
	// escape seen: the next byte selects the operation
	c, err = d.m.ReadByte()
	if err != nil {
		return nil, err
	}

	switch c {
	case 0:
		return nil, endOfBlock
	case 2:
		return nil, endOfBlockAndFile
	case 3:
		return d.readFilterData()
	case 4:
		// copy: 24-bit big-endian offset followed by a length byte
		offset := 0
		for i := 0; i < 3; i++ {
			c, err = d.m.ReadByte()
			if err != nil {
				return nil, err
			}
			offset = offset<<8 | int(c)
		}
		len, err := d.m.ReadByte()
		if err != nil {
			return nil, err
		}
		w.copyBytes(int(len)+32, offset+2)
	case 5:
		// run: repeat the previous byte len+4 times
		len, err := d.m.ReadByte()
		if err != nil {
			return nil, err
		}
		w.copyBytes(int(len)+4, 1)
	default:
		// any other value means the escape character itself as a literal
		w.writeByte(d.esc)
	}
	return nil, nil
}
|
||||
294
vendor/github.com/nwaples/rardecode/decode50.go
generated
vendored
Normal file
294
vendor/github.com/nwaples/rardecode/decode50.go
generated
vendored
Normal file
@@ -0,0 +1,294 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
	// sizes of the four huffman alphabets; tableSize5 is the combined
	// code length table holding all of them
	mainSize5      = 306
	offsetSize5    = 64
	lowoffsetSize5 = 16
	lengthSize5    = 44
	tableSize5     = mainSize5 + offsetSize5 + lowoffsetSize5 + lengthSize5
)

var (
	errUnknownFilter       = errors.New("rardecode: unknown V5 filter")
	errCorruptDecodeHeader = errors.New("rardecode: corrupt decode header")
)
|
||||
|
||||
// decoder50 implements the decoder interface for RAR 5 compression.
// Decode input is broken up into 1 or more blocks. Each block starts with
// a header containing block length and optional code length tables to initialize
// the huffman decoders with.
type decoder50 struct {
	r          io.ByteReader
	br         bitReader // bit reader for current data block
	codeLength [tableSize5]byte

	lastBlock bool // current block is last block in compressed file

	mainDecoder      huffmanDecoder
	offsetDecoder    huffmanDecoder
	lowoffsetDecoder huffmanDecoder
	lengthDecoder    huffmanDecoder

	offset [4]int // history of the last 4 match offsets
	length int    // last match length
}
|
||||
|
||||
func (d *decoder50) init(r io.ByteReader, reset bool) error {
|
||||
d.r = r
|
||||
d.lastBlock = false
|
||||
|
||||
if reset {
|
||||
for i := range d.offset {
|
||||
d.offset[i] = 0
|
||||
}
|
||||
d.length = 0
|
||||
for i := range d.codeLength {
|
||||
d.codeLength[i] = 0
|
||||
}
|
||||
}
|
||||
err := d.readBlockHeader()
|
||||
if err == io.EOF {
|
||||
return errDecoderOutOfData
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// readBlockHeader reads a compressed data block header: a checksummed
// block size in bits plus optional new huffman code length tables. It
// installs a bit reader limited to the block's length.
func (d *decoder50) readBlockHeader() error {
	flags, err := d.r.ReadByte()
	if err != nil {
		return err
	}

	// number of bytes used to store the block size
	bytecount := (flags>>3)&3 + 1
	if bytecount == 4 {
		return errCorruptDecodeHeader
	}

	hsum, err := d.r.ReadByte()
	if err != nil {
		return err
	}

	blockBits := int(flags)&0x07 + 1 // bits used in the final byte
	blockBytes := 0
	sum := 0x5a ^ flags // checksum seed, xor-folded with the size bytes
	for i := byte(0); i < bytecount; i++ {
		n, err := d.r.ReadByte()
		if err != nil {
			return err
		}
		sum ^= n
		blockBytes |= int(n) << (i * 8) // little-endian size
	}
	if sum != hsum { // bad header checksum
		return errCorruptDecodeHeader
	}
	blockBits += (blockBytes - 1) * 8 // total bits in the block

	// create bit reader for block
	d.br = limitBitReader(newRarBitReader(d.r), blockBits, errDecoderOutOfData)
	d.lastBlock = flags&0x40 > 0

	if flags&0x80 > 0 {
		// read new code length tables and reinitialize huffman decoders
		cl := d.codeLength[:]
		err = readCodeLengthTable(d.br, cl, false)
		if err != nil {
			return err
		}
		d.mainDecoder.init(cl[:mainSize5])
		cl = cl[mainSize5:]
		d.offsetDecoder.init(cl[:offsetSize5])
		cl = cl[offsetSize5:]
		d.lowoffsetDecoder.init(cl[:lowoffsetSize5])
		cl = cl[lowoffsetSize5:]
		d.lengthDecoder.init(cl)
	}
	return nil
}
|
||||
|
||||
func slotToLength(br bitReader, n int) (int, error) {
|
||||
if n >= 8 {
|
||||
bits := uint(n/4 - 1)
|
||||
n = (4 | (n & 3)) << bits
|
||||
if bits > 0 {
|
||||
b, err := br.readBits(bits)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n |= b
|
||||
}
|
||||
}
|
||||
n += 2
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// readFilter5Data reads an encoded integer used in V5 filters.
|
||||
func readFilter5Data(br bitReader) (int, error) {
|
||||
// TODO: should data really be uint? (for 32bit ints).
|
||||
// It will be masked later anyway by decode window mask.
|
||||
bytes, err := br.readBits(2)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
bytes++
|
||||
|
||||
var data int
|
||||
for i := 0; i < bytes; i++ {
|
||||
n, err := br.readBits(8)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
data |= n << (uint(i) * 8)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// readFilter reads a V5 filter definition from the bit stream: offset,
// length, and a 3-bit filter type selecting delta, x86 (e8 or e8/e9) or
// ARM filtering.
func readFilter(br bitReader) (*filterBlock, error) {
	fb := new(filterBlock)
	var err error

	fb.offset, err = readFilter5Data(br)
	if err != nil {
		return nil, err
	}
	fb.length, err = readFilter5Data(br)
	if err != nil {
		return nil, err
	}
	ftype, err := br.readBits(3)
	if err != nil {
		return nil, err
	}
	switch ftype {
	case 0:
		// delta filter; 5-bit parameter stored as n-1
		n, err := br.readBits(5)
		if err != nil {
			return nil, err
		}
		fb.filter = func(buf []byte, offset int64) ([]byte, error) { return filterDelta(n+1, buf) }
	case 1:
		fb.filter = func(buf []byte, offset int64) ([]byte, error) { return filterE8(0xe8, true, buf, offset) }
	case 2:
		fb.filter = func(buf []byte, offset int64) ([]byte, error) { return filterE8(0xe9, true, buf, offset) }
	case 3:
		fb.filter = filterArm
	default:
		return nil, errUnknownFilter
	}
	return fb, nil
}
|
||||
|
||||
func (d *decoder50) decodeSym(win *window, sym int) (*filterBlock, error) {
|
||||
switch {
|
||||
case sym < 256:
|
||||
// literal
|
||||
win.writeByte(byte(sym))
|
||||
return nil, nil
|
||||
case sym == 256:
|
||||
f, err := readFilter(d.br)
|
||||
f.offset += win.buffered()
|
||||
return f, err
|
||||
case sym == 257:
|
||||
// use previous offset and length
|
||||
case sym < 262:
|
||||
i := sym - 258
|
||||
offset := d.offset[i]
|
||||
copy(d.offset[1:i+1], d.offset[:i])
|
||||
d.offset[0] = offset
|
||||
|
||||
sl, err := d.lengthDecoder.readSym(d.br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d.length, err = slotToLength(d.br, sl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
length, err := slotToLength(d.br, sym-262)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
offset := 1
|
||||
slot, err := d.offsetDecoder.readSym(d.br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if slot < 4 {
|
||||
offset += slot
|
||||
} else {
|
||||
bits := uint(slot/2 - 1)
|
||||
offset += (2 | (slot & 1)) << bits
|
||||
|
||||
if bits >= 4 {
|
||||
if bits > 4 {
|
||||
n, err := d.br.readBits(bits - 4)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offset += n << 4
|
||||
}
|
||||
n, err := d.lowoffsetDecoder.readSym(d.br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offset += n
|
||||
} else {
|
||||
n, err := d.br.readBits(bits)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offset += n
|
||||
}
|
||||
}
|
||||
if offset > 0x100 {
|
||||
length++
|
||||
if offset > 0x2000 {
|
||||
length++
|
||||
if offset > 0x40000 {
|
||||
length++
|
||||
}
|
||||
}
|
||||
}
|
||||
copy(d.offset[1:], d.offset[:])
|
||||
d.offset[0] = offset
|
||||
d.length = length
|
||||
}
|
||||
win.copyBytes(d.length, d.offset[0])
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// fill decodes symbols into w until the window is full, the block ends, or
// an error occurs. It returns any filter blocks encountered while decoding.
// io.EOF from the symbol decoder marks the end of the current compressed
// block, not necessarily the end of data.
func (d *decoder50) fill(w *window) ([]*filterBlock, error) {
	var fl []*filterBlock

	for w.available() > 0 {
		sym, err := d.mainDecoder.readSym(d.br)
		if err == nil {
			var f *filterBlock
			f, err = d.decodeSym(w, sym)
			if f != nil {
				fl = append(fl, f)
			}
		} else if err == io.EOF {
			// reached end of the block
			if d.lastBlock {
				return fl, io.EOF
			}
			err = d.readBlockHeader()
		}
		if err != nil {
			if err == io.EOF {
				// ran out of input mid-block
				return fl, errDecoderOutOfData
			}
			return fl, err
		}
	}
	return fl, nil
}
|
||||
290
vendor/github.com/nwaples/rardecode/decode_reader.go
generated
vendored
Normal file
290
vendor/github.com/nwaples/rardecode/decode_reader.go
generated
vendored
Normal file
@@ -0,0 +1,290 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
	minWindowSize    = 0x40000 // smallest decode window ever allocated
	maxQueuedFilters = 8192    // cap on pending filters to bound memory use
)

var (
	errTooManyFilters = errors.New("rardecode: too many filters")
	errInvalidFilter  = errors.New("rardecode: invalid filter")
)
|
||||
|
||||
// filter functions take a byte slice, the current output offset and
// returns transformed data.
type filter func(b []byte, offset int64) ([]byte, error)

// filterBlock is a block of data to be processed by a filter.
type filterBlock struct {
	length int    // length of block
	offset int    // bytes to be read before start of block
	reset  bool   // drop all existing queued filters
	filter filter // filter function
}

// decoder is the interface for decoding compressed data
type decoder interface {
	init(r io.ByteReader, reset bool) error // initialize decoder for current file
	fill(w *window) ([]*filterBlock, error) // fill window with decoded data, returning any filters
}

// window is a sliding window buffer. The buffer length is always a power
// of two so positions can wrap using a bit mask.
type window struct {
	buf  []byte
	mask int // buf length mask
	r    int // index in buf for reads (beginning)
	w    int // index in buf for writes (end)
	l    int // length of bytes to be processed by copyBytes
	o    int // offset of bytes to be processed by copyBytes
}
|
||||
|
||||
// buffered returns the number of bytes yet to be read from window
func (w *window) buffered() int { return (w.w - w.r) & w.mask }

// available returns the number of bytes that can be written before the window is full.
// One slot is kept free so a full buffer can be distinguished from an empty one.
func (w *window) available() int { return (w.r - w.w - 1) & w.mask }
|
||||
|
||||
// reset resizes the window to 1<<log2size (but never below minWindowSize).
// When clear is false and the buffer grows, existing contents are unrolled
// into the new buffer so back-references across the resize keep working.
func (w *window) reset(log2size uint, clear bool) {
	size := 1 << log2size
	if size < minWindowSize {
		size = minWindowSize
	}
	if size > len(w.buf) {
		b := make([]byte, size)
		if clear {
			w.w = 0
		} else if len(w.buf) > 0 {
			// linearize the old circular contents into the new buffer,
			// oldest byte first
			n := copy(b, w.buf[w.w:])
			n += copy(b[n:], w.buf[:w.w])
			w.w = n
		}
		w.buf = b
		w.mask = size - 1
	} else if clear {
		for i := range w.buf {
			w.buf[i] = 0
		}
		w.w = 0
	}
	// empty the window: read index catches up with write index
	w.r = w.w
}
|
||||
|
||||
// writeByte writes c to the end of the window
func (w *window) writeByte(c byte) {
	w.buf[w.w] = c
	w.w = (w.w + 1) & w.mask
}

// copyBytes copies len bytes at off distance from the end
// to the end of the window.
func (w *window) copyBytes(len, off int) {
	len &= w.mask

	n := w.available()
	if len > n {
		// if there is not enough space availaible we copy
		// as much as we can and save the offset and length
		// of the remaining data to be copied later.
		w.l = len - n
		w.o = off
		len = n
	}

	// copy byte-by-byte: source and destination may overlap, which is how
	// LZ run-length expansion (off < len) is expressed.
	i := (w.w - off) & w.mask
	for ; len > 0; len-- {
		w.buf[w.w] = w.buf[i]
		w.w = (w.w + 1) & w.mask
		i = (i + 1) & w.mask
	}
}
|
||||
|
||||
// read reads bytes from the beginning of the window into p.
// It handles the two-segment case where the unread region wraps past the
// end of the circular buffer.
func (w *window) read(p []byte) (n int) {
	if w.r > w.w {
		// unread data wraps: first drain from r to the end of buf
		n = copy(p, w.buf[w.r:])
		w.r = (w.r + n) & w.mask
		p = p[n:]
	}
	if w.r < w.w {
		l := copy(p, w.buf[w.r:w.w])
		w.r += l
		n += l
	}
	if w.l > 0 && n > 0 {
		// if we have successfully read data, copy any
		// leftover data from a previous copyBytes.
		l := w.l
		w.l = 0
		w.copyBytes(l, w.o)
	}
	return n
}
|
||||
|
||||
// decodeReader implements io.Reader for decoding compressed data in RAR archives.
type decodeReader struct {
	win     window         // sliding window buffer used as decode dictionary
	dec     decoder        // decoder being used to unpack file
	tot     int64          // total bytes read
	buf     []byte         // filter input/output buffer
	outbuf  []byte         // filter output not yet read
	err     error          // deferred error from the last fill, returned by readErr
	filters []*filterBlock // list of filterBlock's, each with offset relative to previous in list
}
|
||||
|
||||
// init prepares the decodeReader for a new file using the given decoder and
// window size. When reset is true all state carried over from the previous
// file (queued filters, window contents) is discarded.
func (d *decodeReader) init(r io.ByteReader, dec decoder, winsize uint, reset bool) error {
	if reset {
		d.filters = nil
	}
	d.err = nil
	d.outbuf = nil
	d.tot = 0
	d.win.reset(winsize, reset)
	d.dec = dec
	return d.dec.init(r, reset)
}

// readErr returns and clears the deferred error saved by fill.
func (d *decodeReader) readErr() error {
	err := d.err
	d.err = nil
	return err
}
|
||||
|
||||
// queueFilter adds a filterBlock to the end decodeReader's filters.
// The stored offset is made relative to the filters already queued, so each
// entry records only the gap to its predecessor.
func (d *decodeReader) queueFilter(f *filterBlock) error {
	if f.reset {
		d.filters = nil
	}
	if len(d.filters) >= maxQueuedFilters {
		return errTooManyFilters
	}
	// offset & length must be < window size
	f.offset &= d.win.mask
	f.length &= d.win.mask
	// make offset relative to previous filter in list
	for _, fb := range d.filters {
		if f.offset < fb.offset {
			// filter block must not start before previous filter
			return errInvalidFilter
		}
		f.offset -= fb.offset
	}
	d.filters = append(d.filters, f)
	return nil
}
|
||||
|
||||
// processFilters processes any filters valid at the current read index
// and stores the output in outbuf. Consecutive filters covering the exact
// same region are chained: each filter's output becomes the next one's
// input.
func (d *decodeReader) processFilters() (err error) {
	f := d.filters[0]
	if f.offset > 0 {
		// first filter starts further into the stream; nothing to do yet
		return nil
	}
	d.filters = d.filters[1:]
	if d.win.buffered() < f.length {
		// fill() didn't return enough bytes
		err = d.readErr()
		if err == nil || err == io.EOF {
			return errInvalidFilter
		}
		return err
	}

	if cap(d.buf) < f.length {
		d.buf = make([]byte, f.length)
	}
	d.outbuf = d.buf[:f.length]
	n := d.win.read(d.outbuf)
	for {
		// run filter passing buffer and total bytes read so far
		d.outbuf, err = f.filter(d.outbuf, d.tot)
		if err != nil {
			return err
		}
		if cap(d.outbuf) > cap(d.buf) {
			// Filter returned a bigger buffer, save it for future filters.
			d.buf = d.outbuf
		}
		if len(d.filters) == 0 {
			return nil
		}
		f = d.filters[0]

		if f.offset != 0 {
			// next filter not at current offset
			f.offset -= n
			return nil
		}
		if f.length != len(d.outbuf) {
			// a chained filter must cover exactly the previous output
			return errInvalidFilter
		}
		d.filters = d.filters[1:]

		if cap(d.outbuf) < cap(d.buf) {
			// Filter returned a smaller buffer. Copy it back to the saved buffer
			// so the next filter can make use of the larger buffer if needed.
			d.outbuf = append(d.buf[:0], d.outbuf...)
		}
	}
}
|
||||
|
||||
// fill fills the decodeReader's window. Any error (including io.EOF) is
// deferred in d.err rather than returned, so already-decoded data can still
// be drained first.
func (d *decodeReader) fill() {
	if d.err != nil {
		return
	}
	var fl []*filterBlock
	fl, d.err = d.dec.fill(&d.win) // fill window using decoder
	for _, f := range fl {
		err := d.queueFilter(f)
		if err != nil {
			d.err = err
			return
		}
	}
}
|
||||
|
||||
// Read decodes data and stores it in p. Output comes either from pending
// filter output (outbuf) or straight from the window; reads from the window
// are truncated at the start of the next queued filter so filtered regions
// are never returned raw.
func (d *decodeReader) Read(p []byte) (n int, err error) {
	if len(d.outbuf) == 0 {
		// no filter output, see if we need to create more
		if d.win.buffered() == 0 {
			// fill empty window
			d.fill()
			if d.win.buffered() == 0 {
				return 0, d.readErr()
			}
		} else if len(d.filters) > 0 {
			f := d.filters[0]
			if f.offset == 0 && f.length > d.win.buffered() {
				d.fill() // filter at current offset needs more data
			}
		}
		if len(d.filters) > 0 {
			if err := d.processFilters(); err != nil {
				return 0, err
			}
		}
	}
	if len(d.outbuf) > 0 {
		// copy filter output into p
		n = copy(p, d.outbuf)
		d.outbuf = d.outbuf[n:]
	} else if len(d.filters) > 0 {
		f := d.filters[0]
		if f.offset < len(p) {
			// only read data up to beginning of next filter
			p = p[:f.offset]
		}
		n = d.win.read(p) // read directly from window
		f.offset -= n     // adjust first filter offset by bytes just read
	} else {
		n = d.win.read(p) // read directly from window
	}
	d.tot += int64(n)
	return n, nil
}
|
||||
126
vendor/github.com/nwaples/rardecode/decrypt_reader.go
generated
vendored
Normal file
126
vendor/github.com/nwaples/rardecode/decrypt_reader.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"io"
|
||||
)
|
||||
|
||||
// cipherBlockReader implements Block Mode decryption of an io.Reader object.
type cipherBlockReader struct {
	r      io.Reader
	mode   cipher.BlockMode
	inbuf  []byte // input buffer for partial data block
	outbuf []byte // output buffer used when output slice < block size
	n      int    // bytes read from outbuf
	err    error  // deferred error, surfaced once outbuf is drained
}
|
||||
|
||||
// read reads and decrypts one or more input blocks into p.
// len(p) must be >= cipher block size. Trailing input that does not make a
// whole block is stashed in cr.inbuf for the next call.
func (cr *cipherBlockReader) read(p []byte) (n int, err error) {
	bs := cr.mode.BlockSize()
	// round p down to a multiple of the block size
	l := len(p) - len(p)%bs
	p = p[:l]

	l = len(cr.inbuf)
	if l > 0 {
		// copy any buffered input into p
		copy(p, cr.inbuf)
		cr.inbuf = cr.inbuf[:0]
	}
	// read data for at least one block
	n, err = io.ReadAtLeast(cr.r, p[l:], bs-l)
	n += l
	p = p[:n]

	l = n % bs
	// check if p is a multiple of the cipher block size
	if l > 0 {
		n -= l
		// save trailing partial block to process later
		cr.inbuf = append(cr.inbuf, p[n:]...)
		p = p[:n]
	}

	if err != nil {
		// err here implies less than one whole block was read, so n is 0
		// after the partial-block save above and no data is lost.
		if err == io.ErrUnexpectedEOF || err == io.ErrShortBuffer {
			// ignore trailing bytes < block size length
			err = io.EOF
		}
		return 0, err
	}
	cr.mode.CryptBlocks(p, p) // decrypt block(s)
	return n, nil
}
|
||||
|
||||
// Read reads and decrypts data into p.
// If the input is not a multiple of the cipher block size,
// the trailing bytes will be ignored.
func (cr *cipherBlockReader) Read(p []byte) (n int, err error) {
	for {
		if cr.n < len(cr.outbuf) {
			// return buffered output
			n = copy(p, cr.outbuf[cr.n:])
			cr.n += n
			return n, nil
		}
		if cr.err != nil {
			err = cr.err
			cr.err = nil
			return 0, err
		}
		if len(p) >= cap(cr.outbuf) {
			// p can hold at least one whole block; decrypt into it directly
			break
		}
		// p is not large enough to process a block, use outbuf instead
		n, cr.err = cr.read(cr.outbuf[:cap(cr.outbuf)])
		cr.outbuf = cr.outbuf[:n]
		cr.n = 0
	}
	// read blocks into p
	return cr.read(p)
}
|
||||
|
||||
// ReadByte returns the next decrypted byte, refilling outbuf one block at a
// time as needed. Errors from refilling are deferred until outbuf is empty.
func (cr *cipherBlockReader) ReadByte() (byte, error) {
	for {
		if cr.n < len(cr.outbuf) {
			c := cr.outbuf[cr.n]
			cr.n++
			return c, nil
		}
		if cr.err != nil {
			err := cr.err
			cr.err = nil
			return 0, err
		}
		// refill outbuf
		var n int
		n, cr.err = cr.read(cr.outbuf[:cap(cr.outbuf)])
		cr.outbuf = cr.outbuf[:n]
		cr.n = 0
	}
}
|
||||
|
||||
// newCipherBlockReader returns a cipherBlockReader that decrypts the given io.Reader using
|
||||
// the provided block mode cipher.
|
||||
func newCipherBlockReader(r io.Reader, mode cipher.BlockMode) *cipherBlockReader {
|
||||
cr := &cipherBlockReader{r: r, mode: mode}
|
||||
cr.outbuf = make([]byte, 0, mode.BlockSize())
|
||||
cr.inbuf = make([]byte, 0, mode.BlockSize())
|
||||
return cr
|
||||
}
|
||||
|
||||
// newAesDecryptReader returns a cipherBlockReader that decrypts input from a given io.Reader using AES.
|
||||
// It will panic if the provided key is invalid.
|
||||
func newAesDecryptReader(r io.Reader, key, iv []byte) *cipherBlockReader {
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
mode := cipher.NewCBCDecrypter(block, iv)
|
||||
|
||||
return newCipherBlockReader(r, mode)
|
||||
}
|
||||
416
vendor/github.com/nwaples/rardecode/filters.go
generated
vendored
Normal file
416
vendor/github.com/nwaples/rardecode/filters.go
generated
vendored
Normal file
@@ -0,0 +1,416 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
	fileSize = 0x1000000 // address space assumed by the x86 e8/e9 filters

	vmGlobalAddr      = 0x3C000
	vmGlobalSize      = 0x02000
	vmFixedGlobalSize = 0x40

	maxUint32 = 1<<32 - 1
)

// v3Filter is the interface type for RAR V3 filters.
// v3Filter performs the same function as the filter type, except that it also takes
// the initial register values r, and global data as input for the RAR V3 VM.
type v3Filter func(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error)

var (
	// standardV3Filters is a list of known filters. We can replace the use of a vm
	// filter with a custom filter function. Filters are matched by the CRC32
	// and length of their VM byte code.
	standardV3Filters = []struct {
		crc uint32   // crc of code byte slice for filter
		len int      // length of code byte slice for filter
		f   v3Filter // replacement filter function
	}{
		{0xad576887, 53, e8FilterV3},
		{0x3cd7e57e, 57, e8e9FilterV3},
		{0x3769893f, 120, itaniumFilterV3},
		{0x0e06077d, 29, deltaFilterV3},
		{0x1c2c5dc8, 149, filterRGBV3},
		{0xbc85e701, 216, filterAudioV3},
	}

	// itanium filter byte masks
	byteMask = []int{4, 4, 6, 6, 0, 0, 7, 7, 4, 4, 0, 0, 4, 4, 0, 0}
)
|
||||
|
||||
func filterE8(c byte, v5 bool, buf []byte, offset int64) ([]byte, error) {
|
||||
off := int32(offset)
|
||||
for b := buf; len(b) >= 5; {
|
||||
ch := b[0]
|
||||
b = b[1:]
|
||||
off++
|
||||
if ch != 0xe8 && ch != c {
|
||||
continue
|
||||
}
|
||||
if v5 {
|
||||
off %= fileSize
|
||||
}
|
||||
addr := int32(binary.LittleEndian.Uint32(b))
|
||||
if addr < 0 {
|
||||
if addr+off >= 0 {
|
||||
binary.LittleEndian.PutUint32(b, uint32(addr+fileSize))
|
||||
}
|
||||
} else if addr < fileSize {
|
||||
binary.LittleEndian.PutUint32(b, uint32(addr-off))
|
||||
}
|
||||
off += 4
|
||||
b = b[4:]
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// e8FilterV3 adapts the x86 call filter to the v3Filter interface.
func e8FilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) {
	return filterE8(0xe8, false, buf, offset)
}

// e8e9FilterV3 adapts the x86 call+jump filter to the v3Filter interface.
func e8e9FilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) {
	return filterE8(0xe9, false, buf, offset)
}
|
||||
|
||||
// getBits returns count bits (count <= 25) from buf starting at bit
// position pos. Bits are read little-endian within each 32-bit word.
func getBits(buf []byte, pos, count uint) uint32 {
	n := binary.LittleEndian.Uint32(buf[pos/8:])
	n >>= pos & 7
	mask := uint32(maxUint32) >> (32 - count)
	return n & mask
}

// setBits stores the low count bits of bits into buf at bit position pos,
// leaving surrounding bits untouched.
func setBits(buf []byte, pos, count uint, bits uint32) {
	mask := uint32(maxUint32) >> (32 - count)
	mask <<= pos & 7
	bits <<= pos & 7
	n := binary.LittleEndian.Uint32(buf[pos/8:])
	n = (n & ^mask) | (bits & mask)
	binary.LittleEndian.PutUint32(buf[pos/8:], n)
}
|
||||
|
||||
// itaniumFilterV3 reverses the Itanium (IA-64) branch filter. It walks buf
// in 16-byte instruction bundles, and for slots selected by byteMask with a
// branch-type opcode (the ==5 check) rewrites the 20-bit displacement field
// back to a relative value.
func itaniumFilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) {
	fileOffset := uint32(offset) >> 4 // position counted in 16-byte bundles

	// stop at 21 so the 32-bit reads in getBits stay in bounds
	for b := buf; len(b) > 21; b = b[16:] {
		c := int(b[0]&0x1f) - 0x10
		if c >= 0 {
			mask := byteMask[c]
			if mask != 0 {
				for i := uint(0); i <= 2; i++ {
					if mask&(1<<i) == 0 {
						continue
					}
					pos := i*41 + 18
					if getBits(b, pos+24, 4) == 5 {
						n := getBits(b, pos, 20)
						n -= fileOffset
						setBits(b, pos, 20, n)
					}
				}
			}
		}
		fileOffset++
	}
	return buf, nil
}
|
||||
|
||||
// filterDelta reverses delta encoding over n interleaved channels: the
// input holds each channel's running differences back to back, the output
// re-interleaves the reconstructed byte streams.
func filterDelta(n int, buf []byte) ([]byte, error) {
	l := len(buf)
	var res []byte
	if cap(buf) >= 2*l {
		res = buf[l : 2*l] // use spare capacity of buf instead of allocating
	} else {
		res = make([]byte, l, 2*l)
	}

	src := 0
	for chn := 0; chn < n; chn++ {
		var last byte
		// channel chn occupies every n-th output byte
		for dst := chn; dst < len(res); dst += n {
			last -= buf[src]
			src++
			res[dst] = last
		}
	}
	return res, nil
}
|
||||
|
||||
// deltaFilterV3 adapts filterDelta to the v3Filter interface; register r[0]
// carries the channel count.
func deltaFilterV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) {
	return filterDelta(int(r[0]), buf)
}
|
||||
|
||||
// abs returns the absolute value of n.
func abs(n int) int {
	if n >= 0 {
		return n
	}
	return -n
}
|
||||
|
||||
// filterRGBV3 reverses the RGB image filter. r[0] carries the row width
// (plus 3) and r[1] the starting index for the final color-decorrelation
// pass. Each of the 3 color planes is reconstructed with a Paeth-style
// predictor choosing between the left, upper and upper-left neighbours.
func filterRGBV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) {
	width := int(r[0] - 3)
	posR := int(r[1])
	if posR < 0 || width < 0 {
		// nonsense parameters: pass data through unchanged
		return buf, nil
	}

	var res []byte
	l := len(buf)
	if cap(buf) >= 2*l {
		res = buf[l : 2*l] // use unused capacity
	} else {
		res = make([]byte, l, 2*l)
	}

	for c := 0; c < 3; c++ {
		var prevByte int
		for i := c; i < len(res); i += 3 {
			var predicted int
			upperPos := i - width
			if upperPos >= 3 {
				upperByte := int(res[upperPos])
				upperLeftByte := int(res[upperPos-3])
				predicted = prevByte + upperByte - upperLeftByte
				// pick the neighbour closest to the gradient prediction
				pa := abs(predicted - prevByte)
				pb := abs(predicted - upperByte)
				pc := abs(predicted - upperLeftByte)
				if pa <= pb && pa <= pc {
					predicted = prevByte
				} else if pb <= pc {
					predicted = upperByte
				} else {
					predicted = upperLeftByte
				}
			} else {
				// first row: only the left neighbour is available
				predicted = prevByte
			}
			// buf is consumed one byte per output position
			prevByte = (predicted - int(buf[0])) & 0xFF
			res[i] = uint8(prevByte)
			buf = buf[1:]
		}

	}
	// undo green-channel decorrelation of the red and blue planes
	for i := posR; i < len(res)-2; i += 3 {
		c := res[i+1]
		res[i] += c
		res[i+2] += c
	}
	return res, nil
}
|
||||
|
||||
// filterAudioV3 reverses the multichannel audio filter. r[0] carries the
// channel count. Each channel is reconstructed with an adaptive linear
// predictor over the last three deltas (weights k adjusted every 32 bytes
// toward whichever predictor variant accumulated the least error in diff).
func filterAudioV3(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) {
	var res []byte
	l := len(buf)
	if cap(buf) >= 2*l {
		res = buf[l : 2*l] // use unused capacity
	} else {
		res = make([]byte, l, 2*l)
	}

	chans := int(r[0])
	for c := 0; c < chans; c++ {
		var prevByte, byteCount int
		var diff [7]int // accumulated error for each predictor variant
		var d, k [3]int // recent deltas and their adaptive weights

		for i := c; i < len(res); i += chans {
			predicted := prevByte<<3 + k[0]*d[0] + k[1]*d[1] + k[2]*d[2]
			predicted = int(int8(predicted >> 3))

			curByte := int(int8(buf[0]))
			buf = buf[1:]
			predicted -= curByte
			res[i] = uint8(predicted)

			dd := curByte << 3
			diff[0] += abs(dd)
			diff[1] += abs(dd - d[0])
			diff[2] += abs(dd + d[0])
			diff[3] += abs(dd - d[1])
			diff[4] += abs(dd + d[1])
			diff[5] += abs(dd - d[2])
			diff[6] += abs(dd + d[2])

			prevDelta := int(int8(predicted - prevByte))
			prevByte = predicted
			d[2] = d[1]
			d[1] = prevDelta - d[0]
			d[0] = prevDelta

			// every 32 bytes, nudge the weight of the best predictor
			if byteCount&0x1f == 0 {
				min := diff[0]
				diff[0] = 0
				n := 0
				for j := 1; j < len(diff); j++ {
					if diff[j] < min {
						min = diff[j]
						n = j
					}
					diff[j] = 0
				}
				n--
				if n >= 0 {
					m := n / 2
					if n%2 == 0 {
						if k[m] >= -16 {
							k[m]--
						}
					} else {
						if k[m] < 16 {
							k[m]++
						}
					}
				}
			}
			byteCount++
		}

	}
	return res, nil
}
|
||||
|
||||
// filterArm reverses the ARM branch filter: for each 4-byte word ending in
// the BL opcode byte 0xeb, the 24-bit little-endian operand is converted
// from an absolute back to a relative branch target.
func filterArm(buf []byte, offset int64) ([]byte, error) {
	for i := 0; i+4 <= len(buf); i += 4 {
		if buf[i+3] != 0xeb { // not a BL instruction
			continue
		}
		addr := uint(buf[i]) | uint(buf[i+1])<<8 | uint(buf[i+2])<<16
		// subtract the word's position (in words) within the output
		addr -= (uint(offset) + uint(i)) / 4
		buf[i] = byte(addr)
		buf[i+1] = byte(addr >> 8)
		buf[i+2] = byte(addr >> 16)
	}
	return buf, nil
}
|
||||
|
||||
// vmFilter is a RAR V3 filter implemented as a program for the RAR VM.
type vmFilter struct {
	execCount uint32    // number of times this filter has run (exposed to the VM)
	global    []byte    // global data saved by a previous execution
	static    []byte    // static data read from the filter definition
	code      []command // decoded VM program
}
|
||||
|
||||
// execute implements v3filter type for VM based RAR 3 filters.
// It sets up VM registers and the fixed global memory area, runs the
// program, persists any global data the program requests, and returns the
// output region the program designated in global memory.
func (f *vmFilter) execute(r map[int]uint32, global, buf []byte, offset int64) ([]byte, error) {
	if len(buf) > vmGlobalAddr {
		return buf, errInvalidFilter
	}
	v := newVM(buf)

	// register setup
	v.r[3] = vmGlobalAddr
	v.r[4] = uint32(len(buf))
	v.r[5] = f.execCount
	for i, n := range r {
		v.r[i] = n
	}

	// vm global data memory block
	vg := v.m[vmGlobalAddr : vmGlobalAddr+vmGlobalSize]

	// initialize fixed global memory
	for i, n := range v.r[:vmRegs-1] {
		binary.LittleEndian.PutUint32(vg[i*4:], n)
	}
	binary.LittleEndian.PutUint32(vg[0x1c:], uint32(len(buf)))
	binary.LittleEndian.PutUint64(vg[0x24:], uint64(offset))
	binary.LittleEndian.PutUint32(vg[0x2c:], f.execCount)

	// registers
	v.r[6] = uint32(offset)

	// copy program global memory
	var n int
	if len(f.global) > 0 {
		n = copy(vg[vmFixedGlobalSize:], f.global) // use saved global instead
	} else {
		n = copy(vg[vmFixedGlobalSize:], global)
	}
	copy(vg[vmFixedGlobalSize+n:], f.static)

	v.execute(f.code)

	f.execCount++

	// keep largest global buffer
	if cap(global) > cap(f.global) {
		f.global = global[:0]
	} else if len(f.global) > 0 {
		f.global = f.global[:0]
	}

	// check for global data to be saved for next program execution
	globalSize := binary.LittleEndian.Uint32(vg[0x30:])
	if globalSize > 0 {
		if globalSize > vmGlobalSize-vmFixedGlobalSize {
			globalSize = vmGlobalSize - vmFixedGlobalSize
		}
		if cap(f.global) < int(globalSize) {
			f.global = make([]byte, globalSize)
		} else {
			f.global = f.global[:globalSize]
		}
		copy(f.global, vg[vmFixedGlobalSize:])
	}

	// find program output
	length := binary.LittleEndian.Uint32(vg[0x1c:]) & vmMask
	start := binary.LittleEndian.Uint32(vg[0x20:]) & vmMask
	if start+length > vmSize {
		// TODO: error
		start = 0
		length = 0
	}
	if start != 0 && cap(v.m) > cap(buf) {
		// Initial buffer was to small for vm.
		// Copy output to beginning of vm memory so that decodeReader
		// will re-use the newly allocated vm memory and we will not
		// have to reallocate again next time.
		copy(v.m, v.m[start:start+length])
		start = 0
	}
	return v.m[start : start+length], nil
}
|
||||
|
||||
// getV3Filter returns a V3 filter function from a code byte slice.
// Known filters (matched by CRC and length) are replaced by native Go
// implementations; anything else is decoded into a VM program.
func getV3Filter(code []byte) (v3Filter, error) {
	// check if filter is a known standard filter
	c := crc32.ChecksumIEEE(code)
	for _, f := range standardV3Filters {
		if f.crc == c && f.len == len(code) {
			return f.f, nil
		}
	}

	// create new vm filter
	f := new(vmFilter)
	r := newRarBitReader(bytes.NewReader(code[1:])) // skip first xor byte check

	// read static data
	n, err := r.readBits(1)
	if err != nil {
		return nil, err
	}
	if n > 0 {
		m, err := r.readUint32()
		if err != nil {
			return nil, err
		}
		f.static = make([]byte, m+1)
		err = r.readFull(f.static)
		if err != nil {
			return nil, err
		}
	}

	f.code, err = readCommands(r)
	if err == io.EOF {
		// EOF just marks the end of the command stream
		err = nil
	}

	return f.execute, err
}
|
||||
1
vendor/github.com/nwaples/rardecode/go.mod
generated
vendored
Normal file
1
vendor/github.com/nwaples/rardecode/go.mod
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
module github.com/nwaples/rardecode
|
||||
208
vendor/github.com/nwaples/rardecode/huffman.go
generated
vendored
Normal file
208
vendor/github.com/nwaples/rardecode/huffman.go
generated
vendored
Normal file
@@ -0,0 +1,208 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
	maxCodeLength = 15 // maximum code length in bits
	maxQuickBits  = 10
	maxQuickSize  = 1 << maxQuickBits
)

var (
	errHuffDecodeFailed   = errors.New("rardecode: huffman decode failed")
	errInvalidLengthTable = errors.New("rardecode: invalid huffman code length table")
)
|
||||
|
||||
// huffmanDecoder decodes canonical huffman codes of up to maxCodeLength
// bits, with a direct lookup table for codes of up to quickbits bits.
type huffmanDecoder struct {
	limit     [maxCodeLength + 1]int // upper bound (exclusive) of codes of each length, left-aligned to maxCodeLength bits
	pos       [maxCodeLength + 1]int // index into symbol of the first code of each length
	symbol    []int                  // symbols in canonical code order
	min       uint                   // shortest code length in use
	quickbits uint                   // number of bits covered by the quick tables
	quicklen  [maxQuickSize]uint     // code length per quick-table entry
	quicksym  [maxQuickSize]int      // decoded symbol per quick-table entry
}
|
||||
|
||||
// init builds the canonical huffman decoding tables from the per-symbol
// code lengths in codeLengths (0 meaning the symbol is unused), then
// precomputes the quick lookup tables for short codes.
func (h *huffmanDecoder) init(codeLengths []byte) {
	var count [maxCodeLength + 1]int

	for _, n := range codeLengths {
		if n == 0 {
			continue
		}
		count[n]++
	}

	h.pos[0] = 0
	h.limit[0] = 0
	h.min = 0
	for i := uint(1); i <= maxCodeLength; i++ {
		// limits are left-aligned to maxCodeLength bits so a full
		// maxCodeLength-bit read can be compared against them directly
		h.limit[i] = h.limit[i-1] + count[i]<<(maxCodeLength-i)
		h.pos[i] = h.pos[i-1] + count[i-1]
		if h.min == 0 && h.limit[i] > 0 {
			h.min = i
		}
	}

	// reuse the symbol slice when possible, zeroing old contents
	if cap(h.symbol) >= len(codeLengths) {
		h.symbol = h.symbol[:len(codeLengths)]
		for i := range h.symbol {
			h.symbol[i] = 0
		}
	} else {
		h.symbol = make([]int, len(codeLengths))
	}

	// place each symbol at its canonical position for its code length
	copy(count[:], h.pos[:])
	for i, n := range codeLengths {
		if n != 0 {
			h.symbol[count[n]] = i
			count[n]++
		}
	}

	// larger alphabets get a bigger quick table
	if len(codeLengths) >= 298 {
		h.quickbits = maxQuickBits
	} else {
		h.quickbits = maxQuickBits - 3
	}

	bits := uint(1)
	for i := 0; i < 1<<h.quickbits; i++ {
		v := i << (maxCodeLength - h.quickbits)

		for v >= h.limit[bits] && bits < maxCodeLength {
			bits++
		}
		h.quicklen[i] = bits

		dist := v - h.limit[bits-1]
		dist >>= (maxCodeLength - bits)

		pos := h.pos[bits] + dist
		if pos < len(h.symbol) {
			h.quicksym[i] = h.symbol[pos]
		} else {
			h.quicksym[i] = 0
		}
	}
}
|
||||
|
||||
func (h *huffmanDecoder) readSym(r bitReader) (int, error) {
|
||||
bits := uint(maxCodeLength)
|
||||
v, err := r.readBits(maxCodeLength)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return 0, err
|
||||
}
|
||||
// fall back to 1 bit at a time if we read past EOF
|
||||
for i := uint(1); i <= maxCodeLength; i++ {
|
||||
b, err := r.readBits(1)
|
||||
if err != nil {
|
||||
return 0, err // not enough bits return error
|
||||
}
|
||||
v |= b << (maxCodeLength - i)
|
||||
if v < h.limit[i] {
|
||||
bits = i
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if v < h.limit[h.quickbits] {
|
||||
i := v >> (maxCodeLength - h.quickbits)
|
||||
r.unreadBits(maxCodeLength - h.quicklen[i])
|
||||
return h.quicksym[i], nil
|
||||
}
|
||||
|
||||
for i, n := range h.limit[h.min:] {
|
||||
if v < n {
|
||||
bits = h.min + uint(i)
|
||||
r.unreadBits(maxCodeLength - bits)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dist := v - h.limit[bits-1]
|
||||
dist >>= maxCodeLength - bits
|
||||
|
||||
pos := h.pos[bits] + dist
|
||||
if pos > len(h.symbol) {
|
||||
return 0, errHuffDecodeFailed
|
||||
}
|
||||
|
||||
return h.symbol[pos], nil
|
||||
}
|
||||
|
||||
// readCodeLengthTable reads a new code length table into codeLength from br.
// If addOld is set the old table is added to the new one.
// The lengths are themselves huffman coded with a 20-entry pre-table;
// symbols 16-19 encode repeats of the previous value or of zero.
func readCodeLengthTable(br bitReader, codeLength []byte, addOld bool) error {
	var bitlength [20]byte
	for i := 0; i < len(bitlength); i++ {
		n, err := br.readBits(4)
		if err != nil {
			return err
		}
		if n == 0xf {
			// 0xf introduces a run of zero lengths
			cnt, err := br.readBits(4)
			if err != nil {
				return err
			}
			if cnt > 0 {
				// array already zero'd dont need to explicitly set
				i += cnt + 1
				continue
			}
		}
		bitlength[i] = byte(n)
	}

	// build the pre-table decoder for the real lengths
	var bl huffmanDecoder
	bl.init(bitlength[:])

	for i := 0; i < len(codeLength); i++ {
		l, err := bl.readSym(br)
		if err != nil {
			return err
		}

		if l < 16 {
			// literal length value
			if addOld {
				codeLength[i] = (codeLength[i] + byte(l)) & 0xf
			} else {
				codeLength[i] = byte(l)
			}
			continue
		}

		// symbols >= 16 are repeat codes
		var count int
		var value byte

		switch l {
		case 16, 18:
			count, err = br.readBits(3)
			count += 3
		default:
			count, err = br.readBits(7)
			count += 11
		}
		if err != nil {
			return err
		}
		if l < 18 {
			// 16/17 repeat the previous length; invalid as the first entry
			if i == 0 {
				return errInvalidLengthTable
			}
			value = codeLength[i-1]
		}
		for ; count > 0 && i < len(codeLength); i++ {
			codeLength[i] = value
			count--
		}
		// compensate for the loop's own i++
		i--
	}
	return nil
}
|
||||
1096
vendor/github.com/nwaples/rardecode/ppm_model.go
generated
vendored
Normal file
1096
vendor/github.com/nwaples/rardecode/ppm_model.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
369
vendor/github.com/nwaples/rardecode/reader.go
generated
vendored
Normal file
369
vendor/github.com/nwaples/rardecode/reader.go
generated
vendored
Normal file
@@ -0,0 +1,369 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileHeader HostOS types
const (
	HostOSUnknown = 0
	HostOSMSDOS   = 1
	HostOSOS2     = 2
	HostOSWindows = 3
	HostOSUnix    = 4
	HostOSMacOS   = 5
	HostOSBeOS    = 6
)

const (
	maxPassword = 128
)

var (
	errShortFile        = errors.New("rardecode: decoded file too short")
	errInvalidFileBlock = errors.New("rardecode: invalid file block")
	errUnexpectedArcEnd = errors.New("rardecode: unexpected end of archive")
	errBadFileChecksum  = errors.New("rardecode: bad file checksum")
)

// byteReader combines bulk and byte-at-a-time reading.
type byteReader interface {
	io.Reader
	io.ByteReader
}

// limitedReader reads at most n bytes from r. If r is exhausted before the
// full n bytes have been delivered, shortErr is returned in place of io.EOF
// so callers can tell a truncated stream from a normal end.
type limitedReader struct {
	r        io.Reader
	n        int64 // bytes remaining
	shortErr error // error returned when r returns io.EOF with n > 0
}

// Read reads into p, capped at the remaining byte budget.
func (l *limitedReader) Read(p []byte) (int, error) {
	if l.n <= 0 {
		return 0, io.EOF
	}
	if int64(len(p)) > l.n {
		p = p[:l.n]
	}
	n, err := l.r.Read(p)
	l.n -= int64(n)
	if err == io.EOF && l.n > 0 {
		// the underlying reader ended before all n bytes arrived
		err = l.shortErr
	}
	return n, err
}

// limitedByteReader is a limitedReader that can also read single bytes.
type limitedByteReader struct {
	limitedReader
	br io.ByteReader
}

// ReadByte reads a single byte, honouring the remaining byte budget.
func (l *limitedByteReader) ReadByte() (byte, error) {
	if l.n <= 0 {
		return 0, io.EOF
	}
	b, err := l.br.ReadByte()
	switch {
	case err == nil:
		l.n--
	case err == io.EOF && l.n > 0:
		return 0, l.shortErr
	}
	return b, err
}

// limitByteReader returns a limitedByteReader that reads from r and stops with
// io.EOF after n bytes.
// If r returns an io.EOF before reading n bytes, io.ErrUnexpectedEOF is returned.
func limitByteReader(r byteReader, n int64) *limitedByteReader {
	lr := limitedReader{r, n, io.ErrUnexpectedEOF}
	return &limitedByteReader{lr, r}
}

// fileChecksum allows file checksum validations to be performed.
// File contents must first be written to fileChecksum. Then valid is
// called to perform the file checksum calculation to determine
// if the file contents are valid or not.
type fileChecksum interface {
	io.Writer
	valid() bool
}

// FileHeader represents a single file in a RAR archive.
type FileHeader struct {
	Name             string    // file name using '/' as the directory separator
	IsDir            bool      // is a directory
	HostOS           byte      // Host OS the archive was created on
	Attributes       int64     // Host OS specific file attributes
	PackedSize       int64     // packed file size (or first block if the file spans volumes)
	UnPackedSize     int64     // unpacked file size
	UnKnownSize      bool      // unpacked file size is not known
	ModificationTime time.Time // modification time (non-zero if set)
	CreationTime     time.Time // creation time (non-zero if set)
	AccessTime       time.Time // access time (non-zero if set)
	Version          int       // file version
}

// Mode returns an os.FileMode for the file, calculated from the Attributes field.
func (f *FileHeader) Mode() os.FileMode {
	var mode os.FileMode
	if f.IsDir {
		mode = os.ModeDir
	}
	if f.HostOS == HostOSWindows {
		// Windows attributes only record a readonly bit.
		switch {
		case f.IsDir:
			mode |= 0777
		case f.Attributes&1 > 0:
			mode |= 0444 // readonly
		default:
			mode |= 0666
		}
		return mode
	}
	// assume unix perms for all remaining os types
	mode |= os.FileMode(f.Attributes) & os.ModePerm

	// only check other bits on unix host created archives
	if f.HostOS != HostOSUnix {
		return mode
	}

	// unix special permission bits
	for _, b := range []struct {
		attr int64
		mode os.FileMode
	}{
		{0x200, os.ModeSticky},
		{0x400, os.ModeSetgid},
		{0x800, os.ModeSetuid},
	} {
		if f.Attributes&b.attr != 0 {
			mode |= b.mode
		}
	}

	// Check for additional file types.
	if f.Attributes&0xF000 == 0xA000 {
		mode |= os.ModeSymlink
	}
	return mode
}
|
||||
|
||||
// fileBlockHeader represents a file block in a RAR archive.
// Files may comprise one or more file blocks.
// Solid files retain decode tables and dictionary from previous solid files in the archive.
type fileBlockHeader struct {
	first   bool         // first block in file
	last    bool         // last block in file
	solid   bool         // file is solid
	winSize uint         // log base 2 of decode window size
	cksum   fileChecksum // file checksum
	decoder decoder      // decoder to use for file
	key     []byte       // key for AES, non-empty if file encrypted
	iv      []byte       // iv for AES, non-empty if file encrypted
	FileHeader
}

// fileBlockReader provides sequential access to file blocks in a RAR archive.
type fileBlockReader interface {
	io.Reader                        // Read's read data from the current file block
	io.ByteReader                    // Read bytes from current file block
	next() (*fileBlockHeader, error) // reads the next file block header at current position
	reset()                          // resets encryption
	isSolid() bool                   // is archive solid
	version() int                    // returns current archive format version
}

// packedFileReader provides sequential access to packed files in a RAR archive.
// It presents the consecutive blocks of one file as a single stream.
type packedFileReader struct {
	r fileBlockReader
	h *fileBlockHeader // current file header
}
|
||||
|
||||
// nextBlockInFile reads the next file block in the current file at the current
|
||||
// archive file position, or returns an error if there is a problem.
|
||||
// It is invalid to call this when already at the last block in the current file.
|
||||
func (f *packedFileReader) nextBlockInFile() error {
|
||||
h, err := f.r.next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
// archive ended, but file hasn't
|
||||
return errUnexpectedArcEnd
|
||||
}
|
||||
return err
|
||||
}
|
||||
if h.first || h.Name != f.h.Name {
|
||||
return errInvalidFileBlock
|
||||
}
|
||||
f.h = h
|
||||
return nil
|
||||
}
|
||||
|
||||
// next advances to the next packed file in the RAR archive.
|
||||
func (f *packedFileReader) next() (*fileBlockHeader, error) {
|
||||
if f.h != nil {
|
||||
// skip to last block in current file
|
||||
for !f.h.last {
|
||||
// discard remaining block data
|
||||
if _, err := io.Copy(ioutil.Discard, f.r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := f.nextBlockInFile(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// discard last block data
|
||||
if _, err := io.Copy(ioutil.Discard, f.r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var err error
|
||||
f.h, err = f.r.next() // get next file block
|
||||
if err != nil {
|
||||
if err == errArchiveEnd {
|
||||
return nil, io.EOF
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if !f.h.first {
|
||||
return nil, errInvalidFileBlock
|
||||
}
|
||||
return f.h, nil
|
||||
}
|
||||
|
||||
// Read reads the packed data for the current file into p.
|
||||
func (f *packedFileReader) Read(p []byte) (int, error) {
|
||||
n, err := f.r.Read(p) // read current block data
|
||||
for err == io.EOF { // current block empty
|
||||
if n > 0 {
|
||||
return n, nil
|
||||
}
|
||||
if f.h == nil || f.h.last {
|
||||
return 0, io.EOF // last block so end of file
|
||||
}
|
||||
if err := f.nextBlockInFile(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err = f.r.Read(p) // read new block data
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (f *packedFileReader) ReadByte() (byte, error) {
|
||||
c, err := f.r.ReadByte() // read current block data
|
||||
for err == io.EOF && f.h != nil && !f.h.last { // current block empty
|
||||
if err := f.nextBlockInFile(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
c, err = f.r.ReadByte() // read new block data
|
||||
}
|
||||
return c, err
|
||||
}
|
||||
|
||||
// Reader provides sequential access to files in a RAR archive.
|
||||
type Reader struct {
|
||||
r io.Reader // reader for current unpacked file
|
||||
pr packedFileReader // reader for current packed file
|
||||
dr decodeReader // reader for decoding and filters if file is compressed
|
||||
cksum fileChecksum // current file checksum
|
||||
solidr io.Reader // reader for solid file
|
||||
}
|
||||
|
||||
// Read reads from the current file in the RAR archive.
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
n, err := r.r.Read(p)
|
||||
if err == io.EOF && r.cksum != nil && !r.cksum.valid() {
|
||||
return n, errBadFileChecksum
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Next advances to the next file in the archive, returning its header.
// It assembles the chain of readers (decryption, decompression, size
// limiting, checksumming) that subsequent Read calls will use.
func (r *Reader) Next() (*FileHeader, error) {
	if r.solidr != nil {
		// solid files must be read fully to update decoder information
		if _, err := io.Copy(ioutil.Discard, r.solidr); err != nil {
			return nil, err
		}
	}

	h, err := r.pr.next() // skip to next file
	if err != nil {
		return nil, err
	}
	// the previous solid reader is no longer valid
	r.solidr = nil

	br := byteReader(&r.pr) // start with packed file reader

	// check for encryption
	if len(h.key) > 0 && len(h.iv) > 0 {
		br = newAesDecryptReader(br, h.key, h.iv) // decrypt
	}
	r.r = br
	// check for compression
	if h.decoder != nil {
		err = r.dr.init(br, h.decoder, h.winSize, !h.solid)
		if err != nil {
			return nil, err
		}
		r.r = &r.dr
		if r.pr.r.isSolid() {
			// remember this reader so Next can drain it for solid archives
			r.solidr = r.r
		}
	}
	if h.UnPackedSize >= 0 && !h.UnKnownSize {
		// Limit reading to UnPackedSize as there may be padding
		r.r = &limitedReader{r.r, h.UnPackedSize, errShortFile}
	}
	r.cksum = h.cksum
	if r.cksum != nil {
		r.r = io.TeeReader(r.r, h.cksum) // write file data to checksum as it is read
	}
	// return a copy so callers cannot mutate the internal header
	fh := new(FileHeader)
	*fh = h.FileHeader
	return fh, nil
}
|
||||
|
||||
func (r *Reader) init(fbr fileBlockReader) {
|
||||
r.r = bytes.NewReader(nil) // initial reads will always return EOF
|
||||
r.pr.r = fbr
|
||||
}
|
||||
|
||||
// NewReader creates a Reader reading from r.
|
||||
// NewReader only supports single volume archives.
|
||||
// Multi-volume archives must use OpenReader.
|
||||
func NewReader(r io.Reader, password string) (*Reader, error) {
|
||||
br, ok := r.(*bufio.Reader)
|
||||
if !ok {
|
||||
br = bufio.NewReader(r)
|
||||
}
|
||||
fbr, err := newFileBlockReader(br, password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rr := new(Reader)
|
||||
rr.init(fbr)
|
||||
return rr, nil
|
||||
}
|
||||
|
||||
type ReadCloser struct {
|
||||
v *volume
|
||||
Reader
|
||||
}
|
||||
|
||||
// Close closes the rar file.
|
||||
func (rc *ReadCloser) Close() error {
|
||||
return rc.v.Close()
|
||||
}
|
||||
|
||||
// OpenReader opens a RAR archive specified by the name and returns a ReadCloser.
|
||||
func OpenReader(name, password string) (*ReadCloser, error) {
|
||||
v, err := openVolume(name, password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rc := new(ReadCloser)
|
||||
rc.v = v
|
||||
rc.Reader.init(v)
|
||||
return rc, nil
|
||||
}
|
||||
687
vendor/github.com/nwaples/rardecode/vm.go
generated
vendored
Normal file
687
vendor/github.com/nwaples/rardecode/vm.go
generated
vendored
Normal file
@@ -0,0 +1,687 @@
|
||||
package rardecode
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
const (
	// vm flag bits
	flagC = 1          // Carry
	flagZ = 2          // Zero
	flagS = 0x80000000 // Sign

	maxCommands = 25000000 // maximum number of commands that can be run in a program

	vmRegs = 8       // number of registers
	vmSize = 0x40000 // memory size
	vmMask = vmSize - 1
)

var (
	errInvalidVMInstruction = errors.New("rardecode: invalid vm instruction")
)
|
||||
|
||||
// vm is the RAR virtual machine used to run filter programs over
// decoded data.
type vm struct {
	ip    uint32         // instruction pointer
	ipMod bool           // ip was modified
	fl    uint32         // flag bits
	r     [vmRegs]uint32 // registers
	m     []byte         // memory
}

// setIP sets the instruction pointer and marks it as explicitly modified
// so that execute does not auto-increment it this step.
func (v *vm) setIP(ip uint32) {
	v.ip = ip
	v.ipMod = true
}
|
||||
|
||||
// execute runs a list of commands on the vm.
|
||||
func (v *vm) execute(cmd []command) {
|
||||
v.ip = 0 // reset instruction pointer
|
||||
for n := 0; n < maxCommands; n++ {
|
||||
ip := v.ip
|
||||
if ip >= uint32(len(cmd)) {
|
||||
return
|
||||
}
|
||||
ins := cmd[ip]
|
||||
ins.f(v, ins.bm, ins.op) // run cpu instruction
|
||||
if v.ipMod {
|
||||
// command modified ip, don't increment
|
||||
v.ipMod = false
|
||||
} else {
|
||||
v.ip++ // increment ip for next command
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newVM creates a new RAR virtual machine using the byte slice as memory.
|
||||
func newVM(mem []byte) *vm {
|
||||
v := new(vm)
|
||||
|
||||
if cap(mem) < vmSize+4 {
|
||||
v.m = make([]byte, vmSize+4)
|
||||
copy(v.m, mem)
|
||||
} else {
|
||||
v.m = mem[:vmSize+4]
|
||||
for i := len(mem); i < len(v.m); i++ {
|
||||
v.m[i] = 0
|
||||
}
|
||||
}
|
||||
v.r[7] = vmSize
|
||||
return v
|
||||
}
|
||||
|
||||
// operand is a read/write location used by a vm instruction: an immediate,
// a register, or one of several memory addressing modes. byteMode selects
// between 8-bit and 32-bit access.
type operand interface {
	get(v *vm, byteMode bool) uint32
	set(v *vm, byteMode bool, n uint32)
}

// Immediate Operand
type opI uint32

// get returns the immediate value itself; byte mode is ignored.
func (op opI) get(v *vm, bm bool) uint32 { return uint32(op) }

// set on an immediate is a no-op: an immediate cannot be written.
func (op opI) set(v *vm, bm bool, n uint32) {}

// Direct Operand
type opD uint32

// get reads a byte or little-endian uint32 from vm memory at the fixed
// address op.
func (op opD) get(v *vm, byteMode bool) uint32 {
	if byteMode {
		return uint32(v.m[op])
	}
	return binary.LittleEndian.Uint32(v.m[op:])
}

// set writes a byte or little-endian uint32 to vm memory at the fixed
// address op.
func (op opD) set(v *vm, byteMode bool, n uint32) {
	if byteMode {
		v.m[op] = byte(n)
	} else {
		binary.LittleEndian.PutUint32(v.m[op:], n)
	}
}

// Register Operand
type opR uint32

// get returns the register's value; in byte mode only the low 8 bits.
func (op opR) get(v *vm, byteMode bool) uint32 {
	if byteMode {
		return v.r[op] & 0xFF
	}
	return v.r[op]
}

// set stores n into the register; in byte mode only the low 8 bits are
// replaced, the upper bits are preserved.
func (op opR) set(v *vm, byteMode bool, n uint32) {
	if byteMode {
		v.r[op] = (v.r[op] & 0xFFFFFF00) | (n & 0xFF)
	} else {
		v.r[op] = n
	}
}

// Register Indirect Operand
type opRI uint32

// get reads memory at the address held in register op, masked to the vm
// address space.
func (op opRI) get(v *vm, byteMode bool) uint32 {
	i := v.r[op] & vmMask
	if byteMode {
		return uint32(v.m[i])
	}
	return binary.LittleEndian.Uint32(v.m[i:])
}

// set writes memory at the address held in register op, masked to the vm
// address space.
func (op opRI) set(v *vm, byteMode bool, n uint32) {
	i := v.r[op] & vmMask
	if byteMode {
		v.m[i] = byte(n)
	} else {
		binary.LittleEndian.PutUint32(v.m[i:], n)
	}
}

// Base Plus Index Indirect Operand
type opBI struct {
	r uint32 // base register number
	i uint32 // constant index added to the register value
}

// get reads memory at base register + index, masked to the vm address
// space.
func (op opBI) get(v *vm, byteMode bool) uint32 {
	i := (v.r[op.r] + op.i) & vmMask
	if byteMode {
		return uint32(v.m[i])
	}
	return binary.LittleEndian.Uint32(v.m[i:])
}

// set writes memory at base register + index, masked to the vm address
// space.
func (op opBI) set(v *vm, byteMode bool, n uint32) {
	i := (v.r[op.r] + op.i) & vmMask
	if byteMode {
		v.m[i] = byte(n)
	} else {
		binary.LittleEndian.PutUint32(v.m[i:], n)
	}
}
|
||||
|
||||
// commandFunc implements a single vm instruction.
type commandFunc func(v *vm, byteMode bool, op []operand)

// command is one decoded vm instruction: the function implementing it,
// whether it operates in byte mode, and its decoded operands.
type command struct {
	f  commandFunc
	bm bool // is byte mode
	op []operand
}
|
||||
|
||||
var (
|
||||
ops = []struct {
|
||||
f commandFunc
|
||||
byteMode bool // supports byte mode
|
||||
nops int // number of operands
|
||||
jop bool // is a jump op
|
||||
}{
|
||||
{mov, true, 2, false},
|
||||
{cmp, true, 2, false},
|
||||
{add, true, 2, false},
|
||||
{sub, true, 2, false},
|
||||
{jz, false, 1, true},
|
||||
{jnz, false, 1, true},
|
||||
{inc, true, 1, false},
|
||||
{dec, true, 1, false},
|
||||
{jmp, false, 1, true},
|
||||
{xor, true, 2, false},
|
||||
{and, true, 2, false},
|
||||
{or, true, 2, false},
|
||||
{test, true, 2, false},
|
||||
{js, false, 1, true},
|
||||
{jns, false, 1, true},
|
||||
{jb, false, 1, true},
|
||||
{jbe, false, 1, true},
|
||||
{ja, false, 1, true},
|
||||
{jae, false, 1, true},
|
||||
{push, false, 1, false},
|
||||
{pop, false, 1, false},
|
||||
{call, false, 1, true},
|
||||
{ret, false, 0, false},
|
||||
{not, true, 1, false},
|
||||
{shl, true, 2, false},
|
||||
{shr, true, 2, false},
|
||||
{sar, true, 2, false},
|
||||
{neg, true, 1, false},
|
||||
{pusha, false, 0, false},
|
||||
{popa, false, 0, false},
|
||||
{pushf, false, 0, false},
|
||||
{popf, false, 0, false},
|
||||
{movzx, false, 2, false},
|
||||
{movsx, false, 2, false},
|
||||
{xchg, true, 2, false},
|
||||
{mul, true, 2, false},
|
||||
{div, true, 2, false},
|
||||
{adc, true, 2, false},
|
||||
{sbb, true, 2, false},
|
||||
{print, false, 0, false},
|
||||
}
|
||||
)
|
||||
|
||||
// mov copies the value of the second operand into the first.
func mov(v *vm, bm bool, op []operand) {
	op[0].set(v, bm, op[1].get(v, bm))
}

// cmp subtracts op[1] from op[0] and sets the Z, C and S flags from the
// result without storing it.
func cmp(v *vm, bm bool, op []operand) {
	v1 := op[0].get(v, bm)
	r := v1 - op[1].get(v, bm)
	if r == 0 {
		v.fl = flagZ
	} else {
		v.fl = 0
		if r > v1 {
			// unsigned underflow occurred, set carry (borrow)
			v.fl = flagC
		}
		v.fl |= r & flagS
	}
}
|
||||
|
||||
func add(v *vm, bm bool, op []operand) {
|
||||
v1 := op[0].get(v, bm)
|
||||
r := v1 + op[1].get(v, bm)
|
||||
v.fl = 0
|
||||
signBit := uint32(flagS)
|
||||
if bm {
|
||||
r &= 0xFF
|
||||
signBit = 0x80
|
||||
}
|
||||
if r < v1 {
|
||||
v.fl |= flagC
|
||||
}
|
||||
if r == 0 {
|
||||
v.fl |= flagZ
|
||||
} else if r&signBit > 0 {
|
||||
v.fl |= flagS
|
||||
}
|
||||
op[0].set(v, bm, r)
|
||||
}
|
||||
|
||||
func sub(v *vm, bm bool, op []operand) {
|
||||
v1 := op[0].get(v, bm)
|
||||
r := v1 - op[1].get(v, bm)
|
||||
v.fl = 0
|
||||
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = 0
|
||||
if r > v1 {
|
||||
v.fl = flagC
|
||||
}
|
||||
v.fl |= r & flagS
|
||||
}
|
||||
op[0].set(v, bm, r)
|
||||
}
|
||||
|
||||
func jz(v *vm, bm bool, op []operand) {
|
||||
if v.fl&flagZ > 0 {
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
}
|
||||
|
||||
func jnz(v *vm, bm bool, op []operand) {
|
||||
if v.fl&flagZ == 0 {
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
}
|
||||
|
||||
func inc(v *vm, bm bool, op []operand) {
|
||||
r := op[0].get(v, bm) + 1
|
||||
if bm {
|
||||
r &= 0xFF
|
||||
}
|
||||
op[0].set(v, bm, r)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
}
|
||||
|
||||
func dec(v *vm, bm bool, op []operand) {
|
||||
r := op[0].get(v, bm) - 1
|
||||
op[0].set(v, bm, r)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
}
|
||||
|
||||
func jmp(v *vm, bm bool, op []operand) {
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
|
||||
func xor(v *vm, bm bool, op []operand) {
|
||||
r := op[0].get(v, bm) ^ op[1].get(v, bm)
|
||||
op[0].set(v, bm, r)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
}
|
||||
|
||||
func and(v *vm, bm bool, op []operand) {
|
||||
r := op[0].get(v, bm) & op[1].get(v, bm)
|
||||
op[0].set(v, bm, r)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
}
|
||||
|
||||
func or(v *vm, bm bool, op []operand) {
|
||||
r := op[0].get(v, bm) | op[1].get(v, bm)
|
||||
op[0].set(v, bm, r)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
}
|
||||
|
||||
func test(v *vm, bm bool, op []operand) {
|
||||
r := op[0].get(v, bm) & op[1].get(v, bm)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
}
|
||||
|
||||
func js(v *vm, bm bool, op []operand) {
|
||||
if v.fl&flagS > 0 {
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
}
|
||||
|
||||
func jns(v *vm, bm bool, op []operand) {
|
||||
if v.fl&flagS == 0 {
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
}
|
||||
|
||||
func jb(v *vm, bm bool, op []operand) {
|
||||
if v.fl&flagC > 0 {
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
}
|
||||
|
||||
func jbe(v *vm, bm bool, op []operand) {
|
||||
if v.fl&(flagC|flagZ) > 0 {
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
}
|
||||
|
||||
func ja(v *vm, bm bool, op []operand) {
|
||||
if v.fl&(flagC|flagZ) == 0 {
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
}
|
||||
|
||||
func jae(v *vm, bm bool, op []operand) {
|
||||
if v.fl&flagC == 0 {
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
}
|
||||
|
||||
func push(v *vm, bm bool, op []operand) {
|
||||
v.r[7] -= 4
|
||||
opRI(7).set(v, false, op[0].get(v, false))
|
||||
|
||||
}
|
||||
|
||||
func pop(v *vm, bm bool, op []operand) {
|
||||
op[0].set(v, false, opRI(7).get(v, false))
|
||||
v.r[7] += 4
|
||||
}
|
||||
|
||||
func call(v *vm, bm bool, op []operand) {
|
||||
v.r[7] -= 4
|
||||
opRI(7).set(v, false, v.ip+1)
|
||||
v.setIP(op[0].get(v, false))
|
||||
}
|
||||
|
||||
func ret(v *vm, bm bool, op []operand) {
|
||||
r7 := v.r[7]
|
||||
if r7 >= vmSize {
|
||||
v.setIP(0xFFFFFFFF) // trigger end of program
|
||||
} else {
|
||||
v.setIP(binary.LittleEndian.Uint32(v.m[r7:]))
|
||||
v.r[7] += 4
|
||||
}
|
||||
}
|
||||
|
||||
func not(v *vm, bm bool, op []operand) {
|
||||
op[0].set(v, bm, ^op[0].get(v, bm))
|
||||
}
|
||||
|
||||
func shl(v *vm, bm bool, op []operand) {
|
||||
v1 := op[0].get(v, bm)
|
||||
v2 := op[1].get(v, bm)
|
||||
r := v1 << v2
|
||||
op[0].set(v, bm, r)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
if (v1<<(v2-1))&0x80000000 > 0 {
|
||||
v.fl |= flagC
|
||||
}
|
||||
}
|
||||
|
||||
func shr(v *vm, bm bool, op []operand) {
|
||||
v1 := op[0].get(v, bm)
|
||||
v2 := op[1].get(v, bm)
|
||||
r := v1 >> v2
|
||||
op[0].set(v, bm, r)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
if (v1>>(v2-1))&0x1 > 0 {
|
||||
v.fl |= flagC
|
||||
}
|
||||
}
|
||||
|
||||
func sar(v *vm, bm bool, op []operand) {
|
||||
v1 := op[0].get(v, bm)
|
||||
v2 := op[1].get(v, bm)
|
||||
r := uint32(int32(v1) >> v2)
|
||||
op[0].set(v, bm, r)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
if (v1>>(v2-1))&0x1 > 0 {
|
||||
v.fl |= flagC
|
||||
}
|
||||
}
|
||||
|
||||
func neg(v *vm, bm bool, op []operand) {
|
||||
r := 0 - op[0].get(v, bm)
|
||||
op[0].set(v, bm, r)
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r&flagS | flagC
|
||||
}
|
||||
}
|
||||
|
||||
func pusha(v *vm, bm bool, op []operand) {
|
||||
sp := opD(v.r[7])
|
||||
for _, r := range v.r {
|
||||
sp = (sp - 4) & vmMask
|
||||
sp.set(v, false, r)
|
||||
}
|
||||
v.r[7] = uint32(sp)
|
||||
}
|
||||
|
||||
func popa(v *vm, bm bool, op []operand) {
|
||||
sp := opD(v.r[7])
|
||||
for i := 7; i >= 0; i-- {
|
||||
v.r[i] = sp.get(v, false)
|
||||
sp = (sp + 4) & vmMask
|
||||
}
|
||||
}
|
||||
|
||||
func pushf(v *vm, bm bool, op []operand) {
|
||||
v.r[7] -= 4
|
||||
opRI(7).set(v, false, v.fl)
|
||||
}
|
||||
|
||||
func popf(v *vm, bm bool, op []operand) {
|
||||
v.fl = opRI(7).get(v, false)
|
||||
v.r[7] += 4
|
||||
}
|
||||
|
||||
func movzx(v *vm, bm bool, op []operand) {
|
||||
op[0].set(v, false, op[1].get(v, true))
|
||||
}
|
||||
|
||||
func movsx(v *vm, bm bool, op []operand) {
|
||||
op[0].set(v, false, uint32(int8(op[1].get(v, true))))
|
||||
}
|
||||
|
||||
func xchg(v *vm, bm bool, op []operand) {
|
||||
v1 := op[0].get(v, bm)
|
||||
op[0].set(v, bm, op[1].get(v, bm))
|
||||
op[1].set(v, bm, v1)
|
||||
}
|
||||
|
||||
func mul(v *vm, bm bool, op []operand) {
|
||||
r := op[0].get(v, bm) * op[1].get(v, bm)
|
||||
op[0].set(v, bm, r)
|
||||
}
|
||||
|
||||
func div(v *vm, bm bool, op []operand) {
|
||||
div := op[1].get(v, bm)
|
||||
if div != 0 {
|
||||
r := op[0].get(v, bm) / div
|
||||
op[0].set(v, bm, r)
|
||||
}
|
||||
}
|
||||
|
||||
func adc(v *vm, bm bool, op []operand) {
|
||||
v1 := op[0].get(v, bm)
|
||||
fc := v.fl & flagC
|
||||
r := v1 + op[1].get(v, bm) + fc
|
||||
if bm {
|
||||
r &= 0xFF
|
||||
}
|
||||
op[0].set(v, bm, r)
|
||||
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
if r < v1 || (r == v1 && fc > 0) {
|
||||
v.fl |= flagC
|
||||
}
|
||||
}
|
||||
|
||||
func sbb(v *vm, bm bool, op []operand) {
|
||||
v1 := op[0].get(v, bm)
|
||||
fc := v.fl & flagC
|
||||
r := v1 - op[1].get(v, bm) - fc
|
||||
if bm {
|
||||
r &= 0xFF
|
||||
}
|
||||
op[0].set(v, bm, r)
|
||||
|
||||
if r == 0 {
|
||||
v.fl = flagZ
|
||||
} else {
|
||||
v.fl = r & flagS
|
||||
}
|
||||
if r > v1 || (r == v1 && fc > 0) {
|
||||
v.fl |= flagC
|
||||
}
|
||||
}
|
||||
|
||||
func print(v *vm, bm bool, op []operand) {
|
||||
// TODO: ignore print for the moment
|
||||
}
|
||||
|
||||
// decodeArg reads one instruction operand from br. The operand kind is
// selected by a variable-length bit prefix:
//
//	1    -> register (3-bit register number)
//	00   -> immediate (8 bits in byte mode, otherwise 32 bits)
//	010  -> register indirect (3-bit register number)
//	0110 -> base + index indirect (3-bit register + 32-bit index)
//	0111 -> direct address (32 bits, masked to the vm address space)
func decodeArg(br *rarBitReader, byteMode bool) (operand, error) {
	n, err := br.readBits(1)
	if err != nil {
		return nil, err
	}
	if n > 0 { // Register
		n, err = br.readBits(3)
		return opR(n), err
	}
	n, err = br.readBits(1)
	if err != nil {
		return nil, err
	}
	if n == 0 { // Immediate
		if byteMode {
			n, err = br.readBits(8)
		} else {
			// full 32-bit immediate
			m, err := br.readUint32()
			return opI(m), err
		}
		return opI(n), err
	}
	n, err = br.readBits(1)
	if err != nil {
		return nil, err
	}
	if n == 0 {
		// Register Indirect
		n, err = br.readBits(3)
		return opRI(n), err
	}
	n, err = br.readBits(1)
	if err != nil {
		return nil, err
	}
	if n == 0 {
		// Base + Index Indirect
		n, err = br.readBits(3)
		if err != nil {
			return nil, err
		}
		i, err := br.readUint32()
		return opBI{r: uint32(n), i: i}, err
	}
	// Direct addressing
	m, err := br.readUint32()
	return opD(m & vmMask), err
}
|
||||
|
||||
func fixJumpOp(op operand, off int) operand {
|
||||
n, ok := op.(opI)
|
||||
if !ok {
|
||||
return op
|
||||
}
|
||||
if n >= 256 {
|
||||
return n - 256
|
||||
}
|
||||
if n >= 136 {
|
||||
n -= 264
|
||||
} else if n >= 16 {
|
||||
n -= 8
|
||||
} else if n >= 8 {
|
||||
n -= 16
|
||||
}
|
||||
return n + opI(off)
|
||||
}
|
||||
|
||||
// readCommands decodes a filter program's instructions from br. Decoding
// continues until the bit reader fails (typically io.EOF at the end of the
// program); the commands decoded so far are returned along with that error.
func readCommands(br *rarBitReader) ([]command, error) {
	var cmds []command

	for {
		// opcodes are 4 bits, extended to 6 bits when the high bit is set
		code, err := br.readBits(4)
		if err != nil {
			return cmds, err
		}
		if code&0x08 > 0 {
			n, err := br.readBits(2)
			if err != nil {
				return cmds, err
			}
			code = (code<<2 | n) - 24
		}

		if code >= len(ops) {
			return cmds, errInvalidVMInstruction
		}
		ins := ops[code]

		var com command

		if ins.byteMode {
			// one extra bit selects byte mode for instructions that support it
			n, err := br.readBits(1)
			if err != nil {
				return cmds, err
			}
			com.bm = n > 0
		}
		com.f = ins.f

		if ins.nops > 0 {
			com.op = make([]operand, ins.nops)
			com.op[0], err = decodeArg(br, com.bm)
			if err != nil {
				return cmds, err
			}
			if ins.nops == 2 {
				com.op[1], err = decodeArg(br, com.bm)
				if err != nil {
					return cmds, err
				}
			} else if ins.jop {
				// rewrite encoded jump targets to absolute addresses
				com.op[0] = fixJumpOp(com.op[0], len(cmds))
			}
		}
		cmds = append(cmds, com)
	}
}
|
||||
25
vendor/github.com/ulikunitz/xz/.gitignore
generated
vendored
Normal file
25
vendor/github.com/ulikunitz/xz/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
# .gitignore
|
||||
|
||||
TODO.html
|
||||
README.html
|
||||
|
||||
lzma/writer.txt
|
||||
lzma/reader.txt
|
||||
|
||||
cmd/gxz/gxz
|
||||
cmd/xb/xb
|
||||
|
||||
# test executables
|
||||
*.test
|
||||
|
||||
# profile files
|
||||
*.out
|
||||
|
||||
# vim swap file
|
||||
.*.swp
|
||||
|
||||
# executables on windows
|
||||
*.exe
|
||||
|
||||
# default compression test file
|
||||
enwik8*
|
||||
26
vendor/github.com/ulikunitz/xz/LICENSE
generated
vendored
Normal file
26
vendor/github.com/ulikunitz/xz/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
Copyright (c) 2014-2016 Ulrich Kunitz
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* My name, Ulrich Kunitz, may not be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
73
vendor/github.com/ulikunitz/xz/README.md
generated
vendored
Normal file
73
vendor/github.com/ulikunitz/xz/README.md
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
# Package xz
|
||||
|
||||
This Go language package supports the reading and writing of xz
|
||||
compressed streams. It includes also a gxz command for compressing and
|
||||
decompressing data. The package is completely written in Go and doesn't
|
||||
have any dependency on any C code.
|
||||
|
||||
The package is currently under development. There might be bugs and APIs
|
||||
are not considered stable. At this time the package cannot compete with
|
||||
the xz tool regarding compression speed and size. The algorithms there
|
||||
have been developed over a long time and are highly optimized. However
|
||||
there are a number of improvements planned and I'm very optimistic about
|
||||
parallel compression and decompression. Stay tuned!
|
||||
|
||||
## Using the API
|
||||
|
||||
The following example program shows how to use the API.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/ulikunitz/xz"
|
||||
)
|
||||
|
||||
func main() {
|
||||
const text = "The quick brown fox jumps over the lazy dog.\n"
|
||||
var buf bytes.Buffer
|
||||
// compress text
|
||||
w, err := xz.NewWriter(&buf)
|
||||
if err != nil {
|
||||
log.Fatalf("xz.NewWriter error %s", err)
|
||||
}
|
||||
if _, err := io.WriteString(w, text); err != nil {
|
||||
log.Fatalf("WriteString error %s", err)
|
||||
}
|
||||
if err := w.Close(); err != nil {
|
||||
log.Fatalf("w.Close error %s", err)
|
||||
}
|
||||
// decompress buffer and write output to stdout
|
||||
r, err := xz.NewReader(&buf)
|
||||
if err != nil {
|
||||
log.Fatalf("NewReader error %s", err)
|
||||
}
|
||||
if _, err = io.Copy(os.Stdout, r); err != nil {
|
||||
log.Fatalf("io.Copy error %s", err)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Using the gxz compression tool
|
||||
|
||||
The package includes a gxz command line utility for compression and
|
||||
decompression.
|
||||
|
||||
Use following command for installation:
|
||||
|
||||
$ go get github.com/ulikunitz/xz/cmd/gxz
|
||||
|
||||
To test it call the following command.
|
||||
|
||||
$ gxz bigfile
|
||||
|
||||
After some time a much smaller file bigfile.xz will replace bigfile.
|
||||
To decompress it use the following command.
|
||||
|
||||
$ gxz -d bigfile.xz
|
||||
|
||||
323
vendor/github.com/ulikunitz/xz/TODO.md
generated
vendored
Normal file
323
vendor/github.com/ulikunitz/xz/TODO.md
generated
vendored
Normal file
@@ -0,0 +1,323 @@
|
||||
# TODO list
|
||||
|
||||
## Release v0.6
|
||||
|
||||
1. Review encoder and check for lzma improvements under xz.
|
||||
2. Fix binary tree matcher.
|
||||
3. Compare compression ratio with xz tool using comparable parameters
|
||||
and optimize parameters
|
||||
4. Do some optimizations
|
||||
- rename operation action and make it a simple type of size 8
|
||||
- make maxMatches, wordSize parameters
|
||||
- stop searching after a certain length is found (parameter sweetLen)
|
||||
|
||||
## Release v0.7
|
||||
|
||||
1. Optimize code
|
||||
2. Do statistical analysis to get linear presets.
|
||||
3. Test sync.Pool compatability for xz and lzma Writer and Reader
|
||||
3. Fuzz optimized code.
|
||||
|
||||
## Release v0.8
|
||||
|
||||
1. Support parallel go routines for writing and reading xz files.
|
||||
2. Support a ReaderAt interface for xz files with small block sizes.
|
||||
3. Improve compatibility between gxz and xz
|
||||
4. Provide manual page for gxz
|
||||
|
||||
## Release v0.9
|
||||
|
||||
1. Improve documentation
|
||||
2. Fuzz again
|
||||
|
||||
## Release v1.0
|
||||
|
||||
1. Full functioning gxz
|
||||
2. Add godoc URL to README.md (godoc.org)
|
||||
3. Resolve all issues.
|
||||
4. Define release candidates.
|
||||
5. Public announcement.
|
||||
|
||||
## Package lzma
|
||||
|
||||
### Release v0.6
|
||||
|
||||
- Rewrite Encoder into a simple greedy one-op-at-a-time encoder
|
||||
including
|
||||
+ simple scan at the dictionary head for the same byte
|
||||
+ use the killer byte (requiring matches to get longer, the first
|
||||
test should be the byte that would make the match longer)
|
||||
|
||||
|
||||
## Optimizations
|
||||
|
||||
- There may be a lot of false sharing in lzma.State; check whether this
|
||||
can be improved by reorganizing the internal structure of it.
|
||||
- Check whether batching encoding and decoding improves speed.
|
||||
|
||||
### DAG optimizations
|
||||
|
||||
- Use full buffer to create minimal bit-length above range encoder.
|
||||
- Might be too slow (see v0.4)
|
||||
|
||||
### Different match finders
|
||||
|
||||
- hashes with 2, 3 characters additional to 4 characters
|
||||
- binary trees with 2-7 characters (uint64 as key, use uint32 as
|
||||
pointers into a an array)
|
||||
- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
|
||||
into an array with bit-steeling for the colors)
|
||||
|
||||
## Release Procedure
|
||||
|
||||
- execute goch -l for all packages; probably with lower param like 0.5.
|
||||
- check orthography with gospell
|
||||
- Write release notes in doc/relnotes.
|
||||
- Update README.md
|
||||
- xb copyright . in xz directory to ensure all new files have Copyright
|
||||
header
|
||||
- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
|
||||
version files
|
||||
- Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
|
||||
- Update TODO.md - write short log entry
|
||||
- git checkout master && git merge dev
|
||||
- git tag -a <version>
|
||||
- git push
|
||||
|
||||
## Log
|
||||
|
||||
### 2019-02-20
|
||||
|
||||
Release v0.5.6 supports the go.mod file.
|
||||
|
||||
### 2018-10-28
|
||||
|
||||
Release v0.5.5 fixes issues #19 observing ErrLimit outputs.
|
||||
|
||||
### 2017-06-05
|
||||
|
||||
Release v0.5.4 fixes issues #15 of another problem with the padding size
|
||||
check for the xz block header. I removed the check completely.
|
||||
|
||||
### 2017-02-15
|
||||
|
||||
Release v0.5.3 fixes issue #12 regarding the decompression of an empty
|
||||
XZ stream. Many thanks to Tomasz Kłak, who reported the issue.
|
||||
|
||||
### 2016-12-02
|
||||
|
||||
Release v0.5.2 became necessary to allow the decoding of xz files with
|
||||
4-byte padding in the block header. Many thanks to Greg, who reported
|
||||
the issue.
|
||||
|
||||
### 2016-07-23
|
||||
|
||||
Release v0.5.1 became necessary to fix problems with 32-bit platforms.
|
||||
Many thanks to Bruno Brigas, who reported the issue.
|
||||
|
||||
### 2016-07-04
|
||||
|
||||
Release v0.5 provides improvements to the compressor and provides support for
|
||||
the decompression of xz files with multiple xz streams.
|
||||
|
||||
### 2016-01-31
|
||||
|
||||
Another compression rate increase by checking the byte at length of the
|
||||
best match first, before checking the whole prefix. This makes the
|
||||
compressor even faster. We have now a large time budget to beat the
|
||||
compression ratio of the xz tool. For enwik8 we have now over 40 seconds
|
||||
to reduce the compressed file size for another 7 MiB.
|
||||
|
||||
### 2016-01-30
|
||||
|
||||
I simplified the encoder. Speed and compression rate increased
|
||||
dramatically. A high compression rate affects also the decompression
|
||||
speed. The approach with the buffer and optimizing for operation
|
||||
compression rate has not been successful. Going for the maximum length
|
||||
appears to be the best approach.
|
||||
|
||||
### 2016-01-28
|
||||
|
||||
The release v0.4 is ready. It provides a working xz implementation,
|
||||
which is rather slow, but works and is interoperable with the xz tool.
|
||||
It is an important milestone.
|
||||
|
||||
### 2016-01-10
|
||||
|
||||
I have the first working implementation of an xz reader and writer. I'm
|
||||
happy about reaching this milestone.
|
||||
|
||||
### 2015-12-02
|
||||
|
||||
I'm now ready to implement xz because, I have a working LZMA2
|
||||
implementation. I decided today that v0.4 will use the slow encoder
|
||||
using the operations buffer to be able to go back, if I intend to do so.
|
||||
|
||||
### 2015-10-21
|
||||
|
||||
I have restarted the work on the library. While trying to implement
|
||||
LZMA2, I discovered that I need to resimplify the encoder and decoder
|
||||
functions. The option approach is too complicated. Using a limited byte
|
||||
writer and not caring for written bytes at all and not to try to handle
|
||||
uncompressed data simplifies the LZMA encoder and decoder much.
|
||||
Processing uncompressed data and handling limits is a feature of the
|
||||
LZMA2 format not of LZMA.
|
||||
|
||||
I learned an interesting method from the LZO format. If the last copy is
|
||||
too far away they are moving the head one 2 bytes and not 1 byte to
|
||||
reduce processing times.
|
||||
|
||||
### 2015-08-26
|
||||
|
||||
I have now reimplemented the lzma package. The code is reasonably fast,
|
||||
but can still be optimized. The next step is to implement LZMA2 and then
|
||||
xz.
|
||||
|
||||
### 2015-07-05
|
||||
|
||||
Created release v0.3. The version is the foundation for a full xz
|
||||
implementation that is the target of v0.4.
|
||||
|
||||
### 2015-06-11
|
||||
|
||||
The gflag package has been developed because I couldn't use flag and
|
||||
pflag for a fully compatible support of gzip's and lzma's options. It
|
||||
seems to work now quite nicely.
|
||||
|
||||
### 2015-06-05
|
||||
|
||||
The overflow issue was interesting to research, however Henry S. Warren
|
||||
Jr. Hacker's Delight book was very helpful as usual and had the issue
|
||||
explained perfectly. Fefe's information on his website was based on the
|
||||
C FAQ and quite bad, because it didn't address the issue of -MININT ==
|
||||
MININT.
|
||||
|
||||
### 2015-06-04
|
||||
|
||||
It has been a productive day. I improved the interface of lzma.Reader
|
||||
and lzma.Writer and fixed the error handling.
|
||||
|
||||
### 2015-06-01
|
||||
|
||||
By computing the bit length of the LZMA operations I was able to
|
||||
improve the greedy algorithm implementation. By using an 8 MByte buffer
|
||||
the compression rate was not as good as for xz but already better then
|
||||
gzip default.
|
||||
|
||||
Compression is currently slow, but this is something we will be able to
|
||||
improve over time.
|
||||
|
||||
### 2015-05-26
|
||||
|
||||
Checked the license of ogier/pflag. The binary lzmago binary should
|
||||
include the license terms for the pflag library.
|
||||
|
||||
I added the endorsement clause as used by Google for the Go sources the
|
||||
LICENSE file.
|
||||
|
||||
### 2015-05-22
|
||||
|
||||
The package lzb contains now the basic implementation for creating or
|
||||
reading LZMA byte streams. It allows the support for the implementation
|
||||
of the DAG-shortest-path algorithm for the compression function.
|
||||
|
||||
### 2015-04-23
|
||||
|
||||
Completed yesterday the lzbase classes. I'm a little bit concerned that
|
||||
using the components may require too much code, but on the other hand
|
||||
there is a lot of flexibility.
|
||||
|
||||
### 2015-04-22
|
||||
|
||||
Implemented Reader and Writer during the Bayern game against Porto. The
|
||||
second half gave me enough time.
|
||||
|
||||
### 2015-04-21
|
||||
|
||||
While showering today morning I discovered that the design for OpEncoder
|
||||
and OpDecoder doesn't work, because encoding/decoding might depend on
|
||||
the current status of the dictionary. This is not exactly the right way
|
||||
to start the day.
|
||||
|
||||
Therefore we need to keep the Reader and Writer design. This time around
|
||||
we simplify it by ignoring size limits. These can be added by wrappers
|
||||
around the Reader and Writer interfaces. The Parameters type isn't
|
||||
needed anymore.
|
||||
|
||||
However I will implement a ReaderState and WriterState type to use
|
||||
static typing to ensure the right State object is combined with the
|
||||
right lzbase.Reader and lzbase.Writer.
|
||||
|
||||
As a start I have implemented ReaderState and WriterState to ensure
|
||||
that the state for reading is only used by readers and WriterState only
|
||||
used by Writers.
|
||||
|
||||
### 2015-04-20
|
||||
|
||||
Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
|
||||
|
||||
### 2015-04-08
|
||||
|
||||
Came up with a new simplified design for lzbase. I implemented already
|
||||
the type State that replaces OpCodec.
|
||||
|
||||
### 2015-04-06
|
||||
|
||||
The new lzma package is now fully usable and lzmago is using it now. The
|
||||
old lzma package has been completely removed.
|
||||
|
||||
### 2015-04-05
|
||||
|
||||
Implemented lzma.Reader and tested it.
|
||||
|
||||
### 2015-04-04
|
||||
|
||||
Implemented baseReader by adapting code form lzma.Reader.
|
||||
|
||||
### 2015-04-03
|
||||
|
||||
The opCodec has been copied yesterday to lzma2. opCodec has a high
|
||||
number of dependencies on other files in lzma2. Therefore I had to copy
|
||||
almost all files from lzma.
|
||||
|
||||
### 2015-03-31
|
||||
|
||||
Removed only a TODO item.
|
||||
|
||||
However in Francesco Campoy's presentation "Go for Javaneros
|
||||
(Javaïstes?)" is the the idea that using an embedded field E, all the
|
||||
methods of E will be defined on T. If E is an interface T satisfies E.
|
||||
|
||||
https://talks.golang.org/2014/go4java.slide#51
|
||||
|
||||
I have never used this, but it seems to be a cool idea.
|
||||
|
||||
### 2015-03-30
|
||||
|
||||
Finished the type writerDict and wrote a simple test.
|
||||
|
||||
### 2015-03-25
|
||||
|
||||
I started to implement the writerDict.
|
||||
|
||||
### 2015-03-24
|
||||
|
||||
After thinking long about the LZMA2 code and several false starts, I
|
||||
have now a plan to create a self-sufficient lzma2 package that supports
|
||||
the classic LZMA format as well as LZMA2. The core idea is to support a
|
||||
baseReader and baseWriter type that support the basic LZMA stream
|
||||
without any headers. Both types must support the reuse of dictionaries
|
||||
and the opCodec.
|
||||
|
||||
### 2015-01-10
|
||||
|
||||
1. Implemented simple lzmago tool
|
||||
2. Tested tool against large 4.4G file
|
||||
- compression worked correctly; tested decompression with lzma
|
||||
- decompression hits a full buffer condition
|
||||
3. Fixed a bug in the compressor and wrote a test for it
|
||||
4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
|
||||
|
||||
### 2015-01-11
|
||||
|
||||
- Release v0.2 because of the working LZMA encoder and decoder
|
||||
74
vendor/github.com/ulikunitz/xz/bits.go
generated
vendored
Normal file
74
vendor/github.com/ulikunitz/xz/bits.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xz
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// putUint32LE puts the little-endian representation of x into the first
|
||||
// four bytes of p.
|
||||
func putUint32LE(p []byte, x uint32) {
|
||||
p[0] = byte(x)
|
||||
p[1] = byte(x >> 8)
|
||||
p[2] = byte(x >> 16)
|
||||
p[3] = byte(x >> 24)
|
||||
}
|
||||
|
||||
// putUint64LE puts the little-endian representation of x into the first
|
||||
// eight bytes of p.
|
||||
func putUint64LE(p []byte, x uint64) {
|
||||
p[0] = byte(x)
|
||||
p[1] = byte(x >> 8)
|
||||
p[2] = byte(x >> 16)
|
||||
p[3] = byte(x >> 24)
|
||||
p[4] = byte(x >> 32)
|
||||
p[5] = byte(x >> 40)
|
||||
p[6] = byte(x >> 48)
|
||||
p[7] = byte(x >> 56)
|
||||
}
|
||||
|
||||
// uint32LE converts a little endian representation to an uint32 value.
|
||||
func uint32LE(p []byte) uint32 {
|
||||
return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
|
||||
uint32(p[3])<<24
|
||||
}
|
||||
|
||||
// putUvarint puts a uvarint representation of x into the byte slice.
|
||||
func putUvarint(p []byte, x uint64) int {
|
||||
i := 0
|
||||
for x >= 0x80 {
|
||||
p[i] = byte(x) | 0x80
|
||||
x >>= 7
|
||||
i++
|
||||
}
|
||||
p[i] = byte(x)
|
||||
return i + 1
|
||||
}
|
||||
|
||||
// errOverflow indicates an overflow of the 64-bit unsigned integer.
|
||||
var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
|
||||
|
||||
// readUvarint reads a uvarint from the given byte reader.
|
||||
func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
|
||||
var s uint
|
||||
i := 0
|
||||
for {
|
||||
b, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return x, i, err
|
||||
}
|
||||
i++
|
||||
if b < 0x80 {
|
||||
if i > 10 || i == 10 && b > 1 {
|
||||
return x, i, errOverflowU64
|
||||
}
|
||||
return x | uint64(b)<<s, i, nil
|
||||
}
|
||||
x |= uint64(b&0x7f) << s
|
||||
s += 7
|
||||
}
|
||||
}
|
||||
54
vendor/github.com/ulikunitz/xz/crc.go
generated
vendored
Normal file
54
vendor/github.com/ulikunitz/xz/crc.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xz
|
||||
|
||||
import (
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"hash/crc64"
|
||||
)
|
||||
|
||||
// crc32Hash implements the hash.Hash32 interface with Sum returning the
|
||||
// crc32 value in little-endian encoding.
|
||||
type crc32Hash struct {
|
||||
hash.Hash32
|
||||
}
|
||||
|
||||
// Sum returns the crc32 value as little endian.
|
||||
func (h crc32Hash) Sum(b []byte) []byte {
|
||||
p := make([]byte, 4)
|
||||
putUint32LE(p, h.Hash32.Sum32())
|
||||
b = append(b, p...)
|
||||
return b
|
||||
}
|
||||
|
||||
// newCRC32 returns a CRC-32 hash that returns the 64-bit value in
|
||||
// little-endian encoding using the IEEE polynomial.
|
||||
func newCRC32() hash.Hash {
|
||||
return crc32Hash{Hash32: crc32.NewIEEE()}
|
||||
}
|
||||
|
||||
// crc64Hash implements the Hash64 interface with Sum returning the
|
||||
// CRC-64 value in little-endian encoding.
|
||||
type crc64Hash struct {
|
||||
hash.Hash64
|
||||
}
|
||||
|
||||
// Sum returns the CRC-64 value in little-endian encoding.
|
||||
func (h crc64Hash) Sum(b []byte) []byte {
|
||||
p := make([]byte, 8)
|
||||
putUint64LE(p, h.Hash64.Sum64())
|
||||
b = append(b, p...)
|
||||
return b
|
||||
}
|
||||
|
||||
// crc64Table is used to create a CRC-64 hash.
|
||||
var crc64Table = crc64.MakeTable(crc64.ECMA)
|
||||
|
||||
// newCRC64 returns a CRC-64 hash that returns the 64-bit value in
|
||||
// little-endian encoding using the ECMA polynomial.
|
||||
func newCRC64() hash.Hash {
|
||||
return crc64Hash{Hash64: crc64.New(crc64Table)}
|
||||
}
|
||||
40
vendor/github.com/ulikunitz/xz/example.go
generated
vendored
Normal file
40
vendor/github.com/ulikunitz/xz/example.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/ulikunitz/xz"
|
||||
)
|
||||
|
||||
func main() {
|
||||
const text = "The quick brown fox jumps over the lazy dog.\n"
|
||||
var buf bytes.Buffer
|
||||
// compress text
|
||||
w, err := xz.NewWriter(&buf)
|
||||
if err != nil {
|
||||
log.Fatalf("xz.NewWriter error %s", err)
|
||||
}
|
||||
if _, err := io.WriteString(w, text); err != nil {
|
||||
log.Fatalf("WriteString error %s", err)
|
||||
}
|
||||
if err := w.Close(); err != nil {
|
||||
log.Fatalf("w.Close error %s", err)
|
||||
}
|
||||
// decompress buffer and write output to stdout
|
||||
r, err := xz.NewReader(&buf)
|
||||
if err != nil {
|
||||
log.Fatalf("NewReader error %s", err)
|
||||
}
|
||||
if _, err = io.Copy(os.Stdout, r); err != nil {
|
||||
log.Fatalf("io.Copy error %s", err)
|
||||
}
|
||||
}
|
||||
728
vendor/github.com/ulikunitz/xz/format.go
generated
vendored
Normal file
728
vendor/github.com/ulikunitz/xz/format.go
generated
vendored
Normal file
@@ -0,0 +1,728 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xz
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
|
||||
"github.com/ulikunitz/xz/lzma"
|
||||
)
|
||||
|
||||
// allZeros checks whether a given byte slice has only zeros.
|
||||
func allZeros(p []byte) bool {
|
||||
for _, c := range p {
|
||||
if c != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// padLen returns the length of the padding required for the given
|
||||
// argument.
|
||||
func padLen(n int64) int {
|
||||
k := int(n % 4)
|
||||
if k > 0 {
|
||||
k = 4 - k
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
/*** Header ***/
|
||||
|
||||
// headerMagic stores the magic bytes for the header
|
||||
var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
|
||||
|
||||
// HeaderLen provides the length of the xz file header.
|
||||
const HeaderLen = 12
|
||||
|
||||
// Constants for the checksum methods supported by xz.
|
||||
const (
|
||||
CRC32 byte = 0x1
|
||||
CRC64 = 0x4
|
||||
SHA256 = 0xa
|
||||
)
|
||||
|
||||
// errInvalidFlags indicates that flags are invalid.
|
||||
var errInvalidFlags = errors.New("xz: invalid flags")
|
||||
|
||||
// verifyFlags returns the error errInvalidFlags if the value is
|
||||
// invalid.
|
||||
func verifyFlags(flags byte) error {
|
||||
switch flags {
|
||||
case CRC32, CRC64, SHA256:
|
||||
return nil
|
||||
default:
|
||||
return errInvalidFlags
|
||||
}
|
||||
}
|
||||
|
||||
// flagstrings maps flag values to strings.
|
||||
var flagstrings = map[byte]string{
|
||||
CRC32: "CRC-32",
|
||||
CRC64: "CRC-64",
|
||||
SHA256: "SHA-256",
|
||||
}
|
||||
|
||||
// flagString returns the string representation for the given flags.
|
||||
func flagString(flags byte) string {
|
||||
s, ok := flagstrings[flags]
|
||||
if !ok {
|
||||
return "invalid"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// newHashFunc returns a function that creates hash instances for the
|
||||
// hash method encoded in flags.
|
||||
func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
|
||||
switch flags {
|
||||
case CRC32:
|
||||
newHash = newCRC32
|
||||
case CRC64:
|
||||
newHash = newCRC64
|
||||
case SHA256:
|
||||
newHash = sha256.New
|
||||
default:
|
||||
err = errInvalidFlags
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// header provides the actual content of the xz file header: the flags.
|
||||
type header struct {
|
||||
flags byte
|
||||
}
|
||||
|
||||
// Errors returned by readHeader.
|
||||
var errHeaderMagic = errors.New("xz: invalid header magic bytes")
|
||||
|
||||
// ValidHeader checks whether data is a correct xz file header. The
|
||||
// length of data must be HeaderLen.
|
||||
func ValidHeader(data []byte) bool {
|
||||
var h header
|
||||
err := h.UnmarshalBinary(data)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// String returns a string representation of the flags.
|
||||
func (h header) String() string {
|
||||
return flagString(h.flags)
|
||||
}
|
||||
|
||||
// UnmarshalBinary reads header from the provided data slice.
|
||||
func (h *header) UnmarshalBinary(data []byte) error {
|
||||
// header length
|
||||
if len(data) != HeaderLen {
|
||||
return errors.New("xz: wrong file header length")
|
||||
}
|
||||
|
||||
// magic header
|
||||
if !bytes.Equal(headerMagic, data[:6]) {
|
||||
return errHeaderMagic
|
||||
}
|
||||
|
||||
// checksum
|
||||
crc := crc32.NewIEEE()
|
||||
crc.Write(data[6:8])
|
||||
if uint32LE(data[8:]) != crc.Sum32() {
|
||||
return errors.New("xz: invalid checksum for file header")
|
||||
}
|
||||
|
||||
// stream flags
|
||||
if data[6] != 0 {
|
||||
return errInvalidFlags
|
||||
}
|
||||
flags := data[7]
|
||||
if err := verifyFlags(flags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
h.flags = flags
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary generates the xz file header.
|
||||
func (h *header) MarshalBinary() (data []byte, err error) {
|
||||
if err = verifyFlags(h.flags); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data = make([]byte, 12)
|
||||
copy(data, headerMagic)
|
||||
data[7] = h.flags
|
||||
|
||||
crc := crc32.NewIEEE()
|
||||
crc.Write(data[6:8])
|
||||
putUint32LE(data[8:], crc.Sum32())
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
/*** Footer ***/
|
||||
|
||||
// footerLen defines the length of the footer.
|
||||
const footerLen = 12
|
||||
|
||||
// footerMagic contains the footer magic bytes.
|
||||
var footerMagic = []byte{'Y', 'Z'}
|
||||
|
||||
// footer represents the content of the xz file footer.
|
||||
type footer struct {
|
||||
indexSize int64
|
||||
flags byte
|
||||
}
|
||||
|
||||
// String prints a string representation of the footer structure.
|
||||
func (f footer) String() string {
|
||||
return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize)
|
||||
}
|
||||
|
||||
// Minimum and maximum for the size of the index (backward size).
|
||||
const (
|
||||
minIndexSize = 4
|
||||
maxIndexSize = (1 << 32) * 4
|
||||
)
|
||||
|
||||
// MarshalBinary converts footer values into an xz file footer. Note
|
||||
// that the footer value is checked for correctness.
|
||||
func (f *footer) MarshalBinary() (data []byte, err error) {
|
||||
if err = verifyFlags(f.flags); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) {
|
||||
return nil, errors.New("xz: index size out of range")
|
||||
}
|
||||
if f.indexSize%4 != 0 {
|
||||
return nil, errors.New(
|
||||
"xz: index size not aligned to four bytes")
|
||||
}
|
||||
|
||||
data = make([]byte, footerLen)
|
||||
|
||||
// backward size (index size)
|
||||
s := (f.indexSize / 4) - 1
|
||||
putUint32LE(data[4:], uint32(s))
|
||||
// flags
|
||||
data[9] = f.flags
|
||||
// footer magic
|
||||
copy(data[10:], footerMagic)
|
||||
|
||||
// CRC-32
|
||||
crc := crc32.NewIEEE()
|
||||
crc.Write(data[4:10])
|
||||
putUint32LE(data, crc.Sum32())
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary sets the footer value by unmarshalling an xz file
|
||||
// footer.
|
||||
func (f *footer) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != footerLen {
|
||||
return errors.New("xz: wrong footer length")
|
||||
}
|
||||
|
||||
// magic bytes
|
||||
if !bytes.Equal(data[10:], footerMagic) {
|
||||
return errors.New("xz: footer magic invalid")
|
||||
}
|
||||
|
||||
// CRC-32
|
||||
crc := crc32.NewIEEE()
|
||||
crc.Write(data[4:10])
|
||||
if uint32LE(data) != crc.Sum32() {
|
||||
return errors.New("xz: footer checksum error")
|
||||
}
|
||||
|
||||
var g footer
|
||||
// backward size (index size)
|
||||
g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4
|
||||
|
||||
// flags
|
||||
if data[8] != 0 {
|
||||
return errInvalidFlags
|
||||
}
|
||||
g.flags = data[9]
|
||||
if err := verifyFlags(g.flags); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*f = g
|
||||
return nil
|
||||
}
|
||||
|
||||
/*** Block Header ***/
|
||||
|
||||
// blockHeader represents the content of an xz block header.
|
||||
type blockHeader struct {
|
||||
compressedSize int64
|
||||
uncompressedSize int64
|
||||
filters []filter
|
||||
}
|
||||
|
||||
// String converts the block header into a string.
|
||||
func (h blockHeader) String() string {
|
||||
var buf bytes.Buffer
|
||||
first := true
|
||||
if h.compressedSize >= 0 {
|
||||
fmt.Fprintf(&buf, "compressed size %d", h.compressedSize)
|
||||
first = false
|
||||
}
|
||||
if h.uncompressedSize >= 0 {
|
||||
if !first {
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize)
|
||||
first = false
|
||||
}
|
||||
for _, f := range h.filters {
|
||||
if !first {
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
fmt.Fprintf(&buf, "filter %s", f)
|
||||
first = false
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Masks for the block flags.
|
||||
const (
|
||||
filterCountMask = 0x03
|
||||
compressedSizePresent = 0x40
|
||||
uncompressedSizePresent = 0x80
|
||||
reservedBlockFlags = 0x3C
|
||||
)
|
||||
|
||||
// errIndexIndicator signals that an index indicator (0x00) has been found
|
||||
// instead of an expected block header indicator.
|
||||
var errIndexIndicator = errors.New("xz: found index indicator")
|
||||
|
||||
// readBlockHeader reads the block header.
|
||||
func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) {
|
||||
var buf bytes.Buffer
|
||||
buf.Grow(20)
|
||||
|
||||
// block header size
|
||||
z, err := io.CopyN(&buf, r, 1)
|
||||
n = int(z)
|
||||
if err != nil {
|
||||
return nil, n, err
|
||||
}
|
||||
s := buf.Bytes()[0]
|
||||
if s == 0 {
|
||||
return nil, n, errIndexIndicator
|
||||
}
|
||||
|
||||
// read complete header
|
||||
headerLen := (int(s) + 1) * 4
|
||||
buf.Grow(headerLen - 1)
|
||||
z, err = io.CopyN(&buf, r, int64(headerLen-1))
|
||||
n += int(z)
|
||||
if err != nil {
|
||||
return nil, n, err
|
||||
}
|
||||
|
||||
// unmarshal block header
|
||||
h = new(blockHeader)
|
||||
if err = h.UnmarshalBinary(buf.Bytes()); err != nil {
|
||||
return nil, n, err
|
||||
}
|
||||
|
||||
return h, n, nil
|
||||
}
|
||||
|
||||
// readSizeInBlockHeader reads the uncompressed or compressed size
|
||||
// fields in the block header. The present value informs the function
|
||||
// whether the respective field is actually present in the header.
|
||||
func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) {
|
||||
if !present {
|
||||
return -1, nil
|
||||
}
|
||||
x, _, err := readUvarint(r)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if x >= 1<<63 {
|
||||
return 0, errors.New("xz: size overflow in block header")
|
||||
}
|
||||
return int64(x), nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary unmarshals the block header.
|
||||
func (h *blockHeader) UnmarshalBinary(data []byte) error {
|
||||
// Check header length
|
||||
s := data[0]
|
||||
if data[0] == 0 {
|
||||
return errIndexIndicator
|
||||
}
|
||||
headerLen := (int(s) + 1) * 4
|
||||
if len(data) != headerLen {
|
||||
return fmt.Errorf("xz: data length %d; want %d", len(data),
|
||||
headerLen)
|
||||
}
|
||||
n := headerLen - 4
|
||||
|
||||
// Check CRC-32
|
||||
crc := crc32.NewIEEE()
|
||||
crc.Write(data[:n])
|
||||
if crc.Sum32() != uint32LE(data[n:]) {
|
||||
return errors.New("xz: checksum error for block header")
|
||||
}
|
||||
|
||||
// Block header flags
|
||||
flags := data[1]
|
||||
if flags&reservedBlockFlags != 0 {
|
||||
return errors.New("xz: reserved block header flags set")
|
||||
}
|
||||
|
||||
r := bytes.NewReader(data[2:n])
|
||||
|
||||
// Compressed size
|
||||
var err error
|
||||
h.compressedSize, err = readSizeInBlockHeader(
|
||||
r, flags&compressedSizePresent != 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Uncompressed size
|
||||
h.uncompressedSize, err = readSizeInBlockHeader(
|
||||
r, flags&uncompressedSizePresent != 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
h.filters, err = readFilters(r, int(flags&filterCountMask)+1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check padding
|
||||
// Since headerLen is a multiple of 4 we don't need to check
|
||||
// alignment.
|
||||
k := r.Len()
|
||||
// The standard spec says that the padding should have not more
|
||||
// than 3 bytes. However we found paddings of 4 or 5 in the
|
||||
// wild. See https://github.com/ulikunitz/xz/pull/11 and
|
||||
// https://github.com/ulikunitz/xz/issues/15
|
||||
//
|
||||
// The only reasonable approach seems to be to ignore the
|
||||
// padding size. We still check that all padding bytes are zero.
|
||||
if !allZeros(data[n-k : n]) {
|
||||
return errPadding
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary marshals the binary header. The encoded header is a
// multiple of 4 bytes: size byte, flags, optional sizes, filter
// properties, zero padding and a trailing CRC32 checksum.
func (h *blockHeader) MarshalBinary() (data []byte, err error) {
	if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) {
		return nil, errors.New("xz: filter count wrong")
	}
	// The format requires the LZMA2 filter to be the last (and only
	// the last) filter in the chain.
	for i, f := range h.filters {
		if i < len(h.filters)-1 {
			if f.id() == lzmaFilterID {
				return nil, errors.New(
					"xz: LZMA2 filter is not the last")
			}
		} else {
			// last filter
			if f.id() != lzmaFilterID {
				return nil, errors.New("xz: " +
					"last filter must be the LZMA2 filter")
			}
		}
	}

	var buf bytes.Buffer
	// Reserve the first byte; the header size is only known at the
	// end and is patched into data[0] below.
	buf.WriteByte(0)

	// flags: low bits hold the filter count minus one, plus the
	// size-present bits.
	flags := byte(len(h.filters) - 1)
	if h.compressedSize >= 0 {
		flags |= compressedSizePresent
	}
	if h.uncompressedSize >= 0 {
		flags |= uncompressedSizePresent
	}
	buf.WriteByte(flags)

	// Scratch buffer; a uvarint needs at most 10 bytes.
	p := make([]byte, 10)
	if h.compressedSize >= 0 {
		k := putUvarint(p, uint64(h.compressedSize))
		buf.Write(p[:k])
	}
	if h.uncompressedSize >= 0 {
		k := putUvarint(p, uint64(h.uncompressedSize))
		buf.Write(p[:k])
	}

	for _, f := range h.filters {
		fp, err := f.MarshalBinary()
		if err != nil {
			return nil, err
		}
		buf.Write(fp)
	}

	// padding to the next multiple of 4 bytes
	for i := padLen(int64(buf.Len())); i > 0; i-- {
		buf.WriteByte(0)
	}

	// crc place holder; the real checksum is written below once the
	// rest of the header is final.
	buf.Write(p[:4])

	data = buf.Bytes()
	if len(data)%4 != 0 {
		panic("data length not aligned")
	}
	// The size byte encodes (header length / 4) - 1.
	s := len(data)/4 - 1
	if !(1 < s && s <= 255) {
		panic("wrong block header size")
	}
	data[0] = byte(s)

	// CRC32 covers everything except the checksum field itself.
	crc := crc32.NewIEEE()
	crc.Write(data[:len(data)-4])
	putUint32LE(data[len(data)-4:], crc.Sum32())

	return data, nil
}
|
||||
|
||||
// Constants used for marshalling and unmarshalling filters in the xz
// block header.
const (
	minFilters    = 1       // a block needs at least one filter
	maxFilters    = 4       // the xz format allows at most four filters
	minReservedID = 1 << 62 // filter IDs at or above this value are reserved
)
|
||||
|
||||
// filter represents a filter in the block header.
type filter interface {
	// id returns the filter identifier used in the xz format.
	id() uint64
	UnmarshalBinary(data []byte) error
	MarshalBinary() (data []byte, err error)
	// reader returns a reader applying the filter while reading from r.
	reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error)
	// writeCloser returns a write closer applying the filter to w.
	writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error)
	// last reports whether the filter must be the last filter in the
	// chain.
	last() bool
}
|
||||
|
||||
// readFilter reads a block filter from the block header. At this point
|
||||
// in time only the LZMA2 filter is supported.
|
||||
func readFilter(r io.Reader) (f filter, err error) {
|
||||
br := lzma.ByteReader(r)
|
||||
|
||||
// index
|
||||
id, _, err := readUvarint(br)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var data []byte
|
||||
switch id {
|
||||
case lzmaFilterID:
|
||||
data = make([]byte, lzmaFilterLen)
|
||||
data[0] = lzmaFilterID
|
||||
if _, err = io.ReadFull(r, data[1:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f = new(lzmaFilter)
|
||||
default:
|
||||
if id >= minReservedID {
|
||||
return nil, errors.New(
|
||||
"xz: reserved filter id in block stream header")
|
||||
}
|
||||
return nil, errors.New("xz: invalid filter id")
|
||||
}
|
||||
if err = f.UnmarshalBinary(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
|
||||
// readFilters reads count filters. At this point in time only the count
|
||||
// 1 is supported.
|
||||
func readFilters(r io.Reader, count int) (filters []filter, err error) {
|
||||
if count != 1 {
|
||||
return nil, errors.New("xz: unsupported filter count")
|
||||
}
|
||||
f, err := readFilter(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return []filter{f}, err
|
||||
}
|
||||
|
||||
// writeFilters writes the filters.
|
||||
func writeFilters(w io.Writer, filters []filter) (n int, err error) {
|
||||
for _, f := range filters {
|
||||
p, err := f.MarshalBinary()
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
k, err := w.Write(p)
|
||||
n += k
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
/*** Index ***/
|
||||
|
||||
// record describes a block in the xz file index.
type record struct {
	unpaddedSize     int64 // block size without the trailing padding
	uncompressedSize int64 // size of the uncompressed block data
}
|
||||
|
||||
// readRecord reads an index record.
|
||||
func readRecord(r io.ByteReader) (rec record, n int, err error) {
|
||||
u, k, err := readUvarint(r)
|
||||
n += k
|
||||
if err != nil {
|
||||
return rec, n, err
|
||||
}
|
||||
rec.unpaddedSize = int64(u)
|
||||
if rec.unpaddedSize < 0 {
|
||||
return rec, n, errors.New("xz: unpadded size negative")
|
||||
}
|
||||
|
||||
u, k, err = readUvarint(r)
|
||||
n += k
|
||||
if err != nil {
|
||||
return rec, n, err
|
||||
}
|
||||
rec.uncompressedSize = int64(u)
|
||||
if rec.uncompressedSize < 0 {
|
||||
return rec, n, errors.New("xz: uncompressed size negative")
|
||||
}
|
||||
|
||||
return rec, n, nil
|
||||
}
|
||||
|
||||
// MarshalBinary converts an index record in its binary encoding.
|
||||
func (rec *record) MarshalBinary() (data []byte, err error) {
|
||||
// maximum length of a uvarint is 10
|
||||
p := make([]byte, 20)
|
||||
n := putUvarint(p, uint64(rec.unpaddedSize))
|
||||
n += putUvarint(p[n:], uint64(rec.uncompressedSize))
|
||||
return p[:n], nil
|
||||
}
|
||||
|
||||
// writeIndex writes the index, a sequence of records, followed by
// padding and a CRC32 checksum. It returns the total number of bytes
// written.
func writeIndex(w io.Writer, index []record) (n int64, err error) {
	crc := crc32.NewIEEE()
	// Everything written through mw is included in the checksum.
	mw := io.MultiWriter(w, crc)

	// index indicator
	k, err := mw.Write([]byte{0})
	n += int64(k)
	if err != nil {
		return n, err
	}

	// number of records
	p := make([]byte, 10)
	k = putUvarint(p, uint64(len(index)))
	k, err = mw.Write(p[:k])
	n += int64(k)
	if err != nil {
		return n, err
	}

	// list of records
	for _, rec := range index {
		p, err := rec.MarshalBinary()
		if err != nil {
			return n, err
		}
		k, err = mw.Write(p)
		n += int64(k)
		if err != nil {
			return n, err
		}
	}

	// index padding to a multiple of 4 bytes
	k, err = mw.Write(make([]byte, padLen(int64(n))))
	n += int64(k)
	if err != nil {
		return n, err
	}

	// crc32 checksum: written directly to w (not mw) so the checksum
	// field is not fed back into the running CRC.
	putUint32LE(p, crc.Sum32())
	k, err = w.Write(p[:4])
	n += int64(k)

	return n, err
}
|
||||
|
||||
// readIndexBody reads the index from the reader. It assumes that the
// index indicator has already been read.
func readIndexBody(r io.Reader) (records []record, n int64, err error) {
	crc := crc32.NewIEEE()
	// The index indicator byte was consumed by the caller but is part
	// of the checksummed data, so feed it into the CRC here.
	crc.Write([]byte{0})

	// All bytes read from br flow through the CRC via the tee reader.
	br := lzma.ByteReader(io.TeeReader(r, crc))

	// number of records
	u, k, err := readUvarint(br)
	n += int64(k)
	if err != nil {
		return nil, n, err
	}
	recLen := int(u)
	if recLen < 0 || uint64(recLen) != u {
		return nil, n, errors.New("xz: record number overflow")
	}

	// list of records
	records = make([]record, recLen)
	for i := range records {
		records[i], k, err = readRecord(br)
		n += int64(k)
		if err != nil {
			return nil, n, err
		}
	}

	// Padding: n+1 accounts for the indicator byte that is not part
	// of n; padLen yields at most 3, so a capacity of 4 allows the
	// slice to be reused for the checksum below.
	p := make([]byte, padLen(int64(n+1)), 4)
	k, err = io.ReadFull(br.(io.Reader), p)
	n += int64(k)
	if err != nil {
		return nil, n, err
	}
	if !allZeros(p) {
		return nil, n, errors.New("xz: non-zero byte in index padding")
	}

	// crc32: capture the computed checksum before reading the stored
	// one, which would otherwise flow into the CRC as well.
	s := crc.Sum32()
	p = p[:4]
	k, err = io.ReadFull(br.(io.Reader), p)
	n += int64(k)
	if err != nil {
		return records, n, err
	}
	if uint32LE(p) != s {
		return nil, n, errors.New("xz: wrong checksum for index")
	}

	return records, n, nil
}
|
||||
BIN
vendor/github.com/ulikunitz/xz/fox.xz
generated
vendored
Normal file
BIN
vendor/github.com/ulikunitz/xz/fox.xz
generated
vendored
Normal file
Binary file not shown.
1
vendor/github.com/ulikunitz/xz/go.mod
generated
vendored
Normal file
1
vendor/github.com/ulikunitz/xz/go.mod
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
module github.com/ulikunitz/xz
|
||||
181
vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
generated
vendored
Normal file
181
vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hash
|
||||
|
||||
// CyclicPoly provides a cyclic polynomial rolling hash.
type CyclicPoly struct {
	h uint64   // current hash value
	p []uint64 // ring buffer of the hashed values in the window
	i int      // index of the oldest entry in p
}
|
||||
|
||||
// ror rotates the unsigned 64-bit integer to right. The argument s must be
// less than 64.
func ror(x uint64, s uint) uint64 {
	lower := x >> s
	upper := x << (64 - s)
	return upper | lower
}
|
||||
|
||||
// NewCyclicPoly creates a new instance of the CyclicPoly structure. The
|
||||
// argument n gives the number of bytes for which a hash will be executed.
|
||||
// This number must be positive; the method panics if this isn't the case.
|
||||
func NewCyclicPoly(n int) *CyclicPoly {
|
||||
if n < 1 {
|
||||
panic("argument n must be positive")
|
||||
}
|
||||
return &CyclicPoly{p: make([]uint64, 0, n)}
|
||||
}
|
||||
|
||||
// Len returns the length of the byte sequence for which a hash is generated.
// The window size is the fixed capacity of the ring buffer.
func (r *CyclicPoly) Len() int {
	return cap(r.p)
}
|
||||
|
||||
// RollByte hashes the next byte and returns a hash value. The complete
// hash value becomes available after at least Len() bytes have been
// hashed.
func (r *CyclicPoly) RollByte(x byte) uint64 {
	// map the byte through the random table
	y := hash[x]
	if len(r.p) < cap(r.p) {
		// warm-up phase: the window is not yet full
		r.h = ror(r.h, 1) ^ y
		r.p = append(r.p, y)
	} else {
		// remove the contribution of the oldest value, then mix in
		// the new one and replace it in the ring buffer
		r.h ^= ror(r.p[r.i], uint(cap(r.p)-1))
		r.h = ror(r.h, 1) ^ y
		r.p[r.i] = y
		r.i = (r.i + 1) % cap(r.p)
	}
	return r.h
}
|
||||
|
||||
// Stores the hash for the individual bytes.
|
||||
var hash = [256]uint64{
|
||||
0x2e4fc3f904065142, 0xc790984cfbc99527,
|
||||
0x879f95eb8c62f187, 0x3b61be86b5021ef2,
|
||||
0x65a896a04196f0a5, 0xc5b307b80470b59e,
|
||||
0xd3bff376a70df14b, 0xc332f04f0b3f1701,
|
||||
0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53,
|
||||
0x1906a10c2c1c0208, 0xfb0c712a03421c0d,
|
||||
0x38be311a65c9552b, 0xfee7ee4ca6445c7e,
|
||||
0x71aadeded184f21e, 0xd73426fccda23b2d,
|
||||
0x29773fb5fb9600b5, 0xce410261cd32981a,
|
||||
0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c,
|
||||
0xc13e35fc9c73a887, 0xf30ed5c201e76dbc,
|
||||
0xa5f10b3910482cea, 0x2945d59be02dfaad,
|
||||
0x06ee334ff70571b5, 0xbabf9d8070f44380,
|
||||
0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7,
|
||||
0x26183cb9f7b1664c, 0xea71dac7da068f21,
|
||||
0xea92eca5bd1d0bb7, 0x415595862defcd75,
|
||||
0x248a386023c60648, 0x9cf021ab284b3c8a,
|
||||
0xfc9372df02870f6c, 0x2b92d693eeb3b3fc,
|
||||
0x73e799d139dc6975, 0x7b15ae312486363c,
|
||||
0xb70e5454a2239c80, 0x208e3fb31d3b2263,
|
||||
0x01f563cabb930f44, 0x2ac4533d2a3240d8,
|
||||
0x84231ed1064f6f7c, 0xa9f020977c2a6d19,
|
||||
0x213c227271c20122, 0x09fe8a9a0a03d07a,
|
||||
0x4236dc75bcaf910c, 0x460a8b2bead8f17e,
|
||||
0xd9b27be1aa07055f, 0xd202d5dc4b11c33e,
|
||||
0x70adb010543bea12, 0xcdae938f7ea6f579,
|
||||
0x3f3d870208672f4d, 0x8e6ccbce9d349536,
|
||||
0xe4c0871a389095ae, 0xf5f2a49152bca080,
|
||||
0x9a43f9b97269934e, 0xc17b3753cb6f475c,
|
||||
0xd56d941e8e206bd4, 0xac0a4f3e525eda00,
|
||||
0xa06d5a011912a550, 0x5537ed19537ad1df,
|
||||
0xa32fe713d611449d, 0x2a1d05b47c3b579f,
|
||||
0x991d02dbd30a2a52, 0x39e91e7e28f93eb0,
|
||||
0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97,
|
||||
0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44,
|
||||
0x0b63d5d801708420, 0x8f227ca8f37ffaec,
|
||||
0x0256278670887c24, 0x107e14877dbf540b,
|
||||
0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61,
|
||||
0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001,
|
||||
0x31f601d5d31c48c4, 0x72ff3c0928bcaec7,
|
||||
0xd99264421147eb03, 0x535a2d6d38aefcfe,
|
||||
0x6ba8b4454a916237, 0xfa39366eaae4719c,
|
||||
0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4,
|
||||
0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8,
|
||||
0xd61c2503fe639144, 0x30ce625441eb92d3,
|
||||
0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5,
|
||||
0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf,
|
||||
0xc7ea4872c96b83ae, 0x6dd5d376f4392382,
|
||||
0x1be88681aaa9792f, 0xfef465ee1b6c10d9,
|
||||
0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9,
|
||||
0x7808e902b3857d0b, 0x171c9c4ea4607972,
|
||||
0x58d66274850146df, 0x42b311c10d3981d1,
|
||||
0x647fa8c621c41a4c, 0xf472771c66ddfedc,
|
||||
0x338d27e3f847b46b, 0x6402ce3da97545ce,
|
||||
0x5162db616fc38638, 0x9c83be97bc22a50e,
|
||||
0x2d3d7478a78d5e72, 0xe621a9b938fd5397,
|
||||
0x9454614eb0f81c45, 0x395fb6e742ed39b6,
|
||||
0x77dd9179d06037bf, 0xc478d0fee4d2656d,
|
||||
0x35d9d6cb772007af, 0x83a56e92c883f0f6,
|
||||
0x27937453250c00a1, 0x27bd6ebc3a46a97d,
|
||||
0x9f543bf784342d51, 0xd158f38c48b0ed52,
|
||||
0x8dd8537c045f66b4, 0x846a57230226f6d5,
|
||||
0x6b13939e0c4e7cdf, 0xfca25425d8176758,
|
||||
0x92e5fc6cd52788e6, 0x9992e13d7a739170,
|
||||
0x518246f7a199e8ea, 0xf104c2a71b9979c7,
|
||||
0x86b3ffaabea4768f, 0x6388061cf3e351ad,
|
||||
0x09d9b5295de5bbb5, 0x38bf1638c2599e92,
|
||||
0x1d759846499e148d, 0x4c0ff015e5f96ef4,
|
||||
0xa41a94cfa270f565, 0x42d76f9cb2326c0b,
|
||||
0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a,
|
||||
0x337523aabbe6cf8d, 0x646bb14001d42b12,
|
||||
0xc178729d138adc74, 0xf900ef4491f24086,
|
||||
0xee1a90d334bb5ac4, 0x9755c92247301a50,
|
||||
0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9,
|
||||
0x0fa8084cf91ac6ff, 0x10d226cf136e6189,
|
||||
0xd302057a07d4fb21, 0x5f03800e20a0fcc3,
|
||||
0x80118d4ae46bd210, 0x58ab61a522843733,
|
||||
0x51edd575c5432a4b, 0x94ee6ff67f9197f7,
|
||||
0x765669e0e5e8157b, 0xa5347830737132f0,
|
||||
0x3ba485a69f01510c, 0x0b247d7b957a01c3,
|
||||
0x1b3d63449fd807dc, 0x0fdc4721c30ad743,
|
||||
0x8b535ed3829b2b14, 0xee41d0cad65d232c,
|
||||
0xe6a99ed97a6a982f, 0x65ac6194c202003d,
|
||||
0x692accf3a70573eb, 0xcc3c02c3e200d5af,
|
||||
0x0d419e8b325914a3, 0x320f160f42c25e40,
|
||||
0x00710d647a51fe7a, 0x3c947692330aed60,
|
||||
0x9288aa280d355a7a, 0xa1806a9b791d1696,
|
||||
0x5d60e38496763da1, 0x6c69e22e613fd0f4,
|
||||
0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba,
|
||||
0x460c17992cbaece1, 0xf7822c5444d3297f,
|
||||
0x344a9790c69b74aa, 0xb80a42e6cae09dce,
|
||||
0x1b1361eaf2b1e757, 0xd84c1e758e236f01,
|
||||
0x88e0b7be347627cc, 0x45246009b7a99490,
|
||||
0x8011c6dd3fe50472, 0xc341d682bffb99d7,
|
||||
0x2511be93808e2d15, 0xd5bc13d7fd739840,
|
||||
0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157,
|
||||
0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0,
|
||||
0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc,
|
||||
0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e,
|
||||
0xa559cce0d9199aac, 0xde39d47ef3723380,
|
||||
0xe5b69d848ce42e35, 0xefa24296f8e79f52,
|
||||
0x70190b59db9a5afc, 0x26f166cdb211e7bf,
|
||||
0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017,
|
||||
0xb9059b05e9420d90, 0x2f0da855c9388754,
|
||||
0x611d5e9ab77949cc, 0x2912038ac01163f4,
|
||||
0x0231df50402b2fba, 0x45660fc4f3245f58,
|
||||
0xb91cc97c7c8dac50, 0xb72d2aafe4953427,
|
||||
0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2,
|
||||
0x1310e1c1a48d21c3, 0xad48a7810cdd8544,
|
||||
0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de,
|
||||
0xe70cfc8fe1ee9626, 0xef4711b0d8dda442,
|
||||
0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93,
|
||||
0x9b37db9d0335a39c, 0x494b6f870f5cfebc,
|
||||
0x6d1b3c1149dda943, 0x372c943a518c1093,
|
||||
0xad27af45e77c09c4, 0x3b6f92b646044604,
|
||||
0xac2917909f5fcf4f, 0x2069a60e977e5557,
|
||||
0x353a469e71014de5, 0x24be356281f55c15,
|
||||
0x2b6d710ba8e9adea, 0x404ad1751c749c29,
|
||||
0xed7311bf23d7f185, 0xba4f6976b4acc43e,
|
||||
0x32d7198d2bc39000, 0xee667019014d6e01,
|
||||
0x494ef3e128d14c83, 0x1f95a152baecd6be,
|
||||
0x201648dff1f483a5, 0x68c28550c8384af6,
|
||||
0x5fc834a6824a7f48, 0x7cd06cb7365eaf28,
|
||||
0xd82bbd95e9b30909, 0x234f0d1694c53f6d,
|
||||
0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e,
|
||||
0xf8f6b97f5585080a, 0x74236084be57b95b,
|
||||
0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b,
|
||||
0x4378ffe93e1528c5, 0x94ca92a17118e2d2,
|
||||
}
|
||||
14
vendor/github.com/ulikunitz/xz/internal/hash/doc.go
generated
vendored
Normal file
14
vendor/github.com/ulikunitz/xz/internal/hash/doc.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package hash provides rolling hashes.
|
||||
|
||||
Rolling hashes have to be used for maintaining the positions of n-byte
|
||||
sequences in the dictionary buffer.
|
||||
|
||||
The package provides currently the Rabin-Karp rolling hash and a Cyclic
|
||||
Polynomial hash. Both support the Hashes method to be used with an interface.
|
||||
*/
|
||||
package hash
|
||||
66
vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
generated
vendored
Normal file
66
vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
generated
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hash
|
||||
|
||||
// A is the default constant for the Rabin-Karp rolling hash. This is a
// random prime.
const A = 0x97b548add41d5da1
|
||||
|
||||
// RabinKarp supports the computation of a rolling hash.
type RabinKarp struct {
	A uint64 // multiplier constant
	// aOldest is A^n; used to remove the oldest byte's contribution.
	aOldest uint64
	h       uint64 // current hash value
	p       []byte // ring buffer of the bytes in the window
	i       int    // index of the oldest byte in p
}
|
||||
|
||||
// NewRabinKarp creates a new RabinKarp value. The argument n defines the
// length of the byte sequence to be hashed. The default constant A will
// be used.
func NewRabinKarp(n int) *RabinKarp {
	return NewRabinKarpConst(n, A)
}
|
||||
|
||||
// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
|
||||
// length of the byte sequence to be hashed. The argument a provides the
|
||||
// constant used to compute the hash.
|
||||
func NewRabinKarpConst(n int, a uint64) *RabinKarp {
|
||||
if n <= 0 {
|
||||
panic("number of bytes n must be positive")
|
||||
}
|
||||
aOldest := uint64(1)
|
||||
// There are faster methods. For the small n required by the LZMA
|
||||
// compressor O(n) is sufficient.
|
||||
for i := 0; i < n; i++ {
|
||||
aOldest *= a
|
||||
}
|
||||
return &RabinKarp{
|
||||
A: a, aOldest: aOldest,
|
||||
p: make([]byte, 0, n),
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the length of the byte sequence hashed, i.e. the fixed
// capacity of the ring buffer.
func (r *RabinKarp) Len() int {
	return cap(r.p)
}
|
||||
|
||||
// RollByte computes the hash after x has been added.
func (r *RabinKarp) RollByte(x byte) uint64 {
	if len(r.p) < cap(r.p) {
		// warm-up phase: the window is not yet full
		r.h += uint64(x)
		r.h *= r.A
		r.p = append(r.p, x)
	} else {
		// subtract the oldest byte scaled by A^n, then mix in the
		// new byte and replace the oldest one in the ring buffer
		r.h -= uint64(r.p[r.i]) * r.aOldest
		r.h += uint64(x)
		r.h *= r.A
		r.p[r.i] = x
		r.i = (r.i + 1) % cap(r.p)
	}
	return r.h
}
|
||||
29
vendor/github.com/ulikunitz/xz/internal/hash/roller.go
generated
vendored
Normal file
29
vendor/github.com/ulikunitz/xz/internal/hash/roller.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hash
|
||||
|
||||
// Roller provides an interface for rolling hashes. The hash value will become
// valid after hash has been called Len times.
type Roller interface {
	// Len returns the window size of the rolling hash.
	Len() int
	// RollByte feeds the next byte and returns the updated hash.
	RollByte(x byte) uint64
}
|
||||
|
||||
// Hashes computes all hash values for the array p. Note that the state of the
|
||||
// roller is changed.
|
||||
func Hashes(r Roller, p []byte) []uint64 {
|
||||
n := r.Len()
|
||||
if len(p) < n {
|
||||
return nil
|
||||
}
|
||||
h := make([]uint64, len(p)-n+1)
|
||||
for i := 0; i < n-1; i++ {
|
||||
r.RollByte(p[i])
|
||||
}
|
||||
for i := range h {
|
||||
h[i] = r.RollByte(p[i+n-1])
|
||||
}
|
||||
return h
|
||||
}
|
||||
457
vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
generated
vendored
Normal file
457
vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
generated
vendored
Normal file
@@ -0,0 +1,457 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package xlog provides a simple logging package that allows to disable
|
||||
// certain message categories. It defines a type, Logger, with multiple
|
||||
// methods for formatting output. The package has also a predefined
|
||||
// 'standard' Logger accessible through helper function Print[f|ln],
|
||||
// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln]
|
||||
// that are easier to use then creating a Logger manually. That logger
|
||||
// writes to standard error and prints the date and time of each logged
|
||||
// message, which can be configured using the function SetFlags.
|
||||
//
|
||||
// The Fatal functions call os.Exit(1) after the message is output
|
||||
// unless not suppressed by the flags. The Panic functions call panic
|
||||
// after the writing the log message unless suppressed.
|
||||
package xlog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The flags define what information is prefixed to each log entry
// generated by the Logger. The Lno* versions allow the suppression of
// specific output. The bits are or'ed together to control what will be
// printed. There is no control over the order of the items printed and
// the format. The full format is:
//
//	2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message
//
const (
	Ldate         = 1 << iota // the date: 2009-01-23
	Ltime                     // the time: 01:23:23
	Lmicroseconds             // microsecond resolution: 01:23:23.123123
	Llongfile                 // full file name and line number: /a/b/c/d.go:23
	Lshortfile                // final file name element and line number: d.go:23
	Lnopanic                  // suppresses output from Panic[f|ln] but not the panic call
	Lnofatal                  // suppresses output from Fatal[f|ln] but not the exit
	Lnowarn                   // suppresses output from Warn[f|ln]
	Lnoprint                  // suppresses output from Print[f|ln]
	Lnodebug                  // suppresses output from Debug[f|ln]
	// initial values for the standard logger
	Lstdflags = Ldate | Ltime | Lnodebug
)
|
||||
|
||||
// A Logger represents an active logging object that generates lines of
// output to an io.Writer. Each logging operation if not suppressed
// makes a single call to the Writer's Write method. A Logger can be
// used simultaneously from multiple goroutines; it guarantees to
// serialize access to the Writer.
type Logger struct {
	mu sync.Mutex // ensures atomic writes; and protects the following
	// fields
	prefix string    // prefix to write at beginning of each line
	flag   int       // properties
	out    io.Writer // destination for output
	buf    []byte    // for accumulating text to write
}
|
||||
|
||||
// New creates a new Logger. The out argument sets the destination to
// which the log output will be written. The prefix appears at the
// beginning of each log line. The flag argument defines the logging
// properties.
func New(out io.Writer, prefix string, flag int) *Logger {
	return &Logger{out: out, prefix: prefix, flag: flag}
}

// std is the standard logger used by the package scope functions. It
// writes to standard error with the default flags Lstdflags.
var std = New(os.Stderr, "", Lstdflags)
||||
|
||||
// itoa converts the integer to ASCII and appends it to *buf. A
// positive wid zero-pads the result to at least wid digits; a negative
// width avoids padding. The function supports only non-negative
// integers.
func itoa(buf *[]byte, i int, wid int) {
	u := uint(i)
	if u == 0 && wid <= 1 {
		*buf = append(*buf, '0')
		return
	}
	// Build the digits from the right end of a scratch array.
	var tmp [32]byte
	pos := len(tmp)
	for u > 0 || wid > 0 {
		pos--
		wid--
		tmp[pos] = byte(u%10) + '0'
		u /= 10
	}
	*buf = append(*buf, tmp[pos:]...)
}
|
||||
|
||||
// formatHeader puts the header into the buf field of the buffer. It
// appends the prefix, the optional date/time stamp and the optional
// file:line information according to l.flag.
func (l *Logger) formatHeader(t time.Time, file string, line int) {
	l.buf = append(l.buf, l.prefix...)
	if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
		if l.flag&Ldate != 0 {
			// date: 2009-01-23
			year, month, day := t.Date()
			itoa(&l.buf, year, 4)
			l.buf = append(l.buf, '-')
			itoa(&l.buf, int(month), 2)
			l.buf = append(l.buf, '-')
			itoa(&l.buf, day, 2)
			l.buf = append(l.buf, ' ')
		}
		if l.flag&(Ltime|Lmicroseconds) != 0 {
			// time: 01:23:23[.123123]
			hour, min, sec := t.Clock()
			itoa(&l.buf, hour, 2)
			l.buf = append(l.buf, ':')
			itoa(&l.buf, min, 2)
			l.buf = append(l.buf, ':')
			itoa(&l.buf, sec, 2)
			if l.flag&Lmicroseconds != 0 {
				l.buf = append(l.buf, '.')
				itoa(&l.buf, t.Nanosecond()/1e3, 6)
			}
			l.buf = append(l.buf, ' ')
		}
	}
	if l.flag&(Lshortfile|Llongfile) != 0 {
		if l.flag&Lshortfile != 0 {
			// keep only the final path element
			short := file
			for i := len(file) - 1; i > 0; i-- {
				if file[i] == '/' {
					short = file[i+1:]
					break
				}
			}
			file = short
		}
		l.buf = append(l.buf, file...)
		l.buf = append(l.buf, ':')
		itoa(&l.buf, line, -1)
		l.buf = append(l.buf, ": "...)
	}
}
|
||||
|
||||
// output formats the header and writes s followed by a newline to
// l.out. The caller must hold l.mu.
func (l *Logger) output(calldepth int, now time.Time, s string) error {
	var file string
	var line int
	if l.flag&(Lshortfile|Llongfile) != 0 {
		// Release the lock while runtime.Caller runs; it can be
		// expensive and needs no protected state.
		l.mu.Unlock()
		var ok bool
		_, file, line, ok = runtime.Caller(calldepth)
		if !ok {
			file = "???"
			line = 0
		}
		l.mu.Lock()
	}
	// Reuse the buffer's backing storage across calls.
	l.buf = l.buf[:0]
	l.formatHeader(now, file, line)
	l.buf = append(l.buf, s...)
	if len(s) == 0 || s[len(s)-1] != '\n' {
		l.buf = append(l.buf, '\n')
	}
	_, err := l.out.Write(l.buf)
	return err
}
|
||||
|
||||
// Output writes the string s with the header controlled by the flags to
// the l.out writer. A newline will be appended if s doesn't end in a
// newline. Calldepth is used to recover the PC, although all current
// calls of Output use the call depth 2. Access to the function is serialized.
func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error {
	now := time.Now()
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&noflag != 0 {
		// this message category is suppressed
		return nil
	}
	s := fmt.Sprint(v...)
	return l.output(calldepth+1, now, s)
}

// Outputf works like Output but formats the output like Printf.
func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error {
	now := time.Now()
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&noflag != 0 {
		// this message category is suppressed
		return nil
	}
	s := fmt.Sprintf(format, v...)
	return l.output(calldepth+1, now, s)
}

// Outputln works like Output but formats the output like Println.
func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error {
	now := time.Now()
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&noflag != 0 {
		// this message category is suppressed
		return nil
	}
	s := fmt.Sprintln(v...)
	return l.output(calldepth+1, now, s)
}
|
||||
|
||||
// Panic prints the message like Print and calls panic. The printing
// might be suppressed by the flag Lnopanic; the panic happens
// regardless.
func (l *Logger) Panic(v ...interface{}) {
	l.Output(2, Lnopanic, v...)
	s := fmt.Sprint(v...)
	panic(s)
}

// Panic prints the message like Print on the standard logger and calls
// panic. The printing might be suppressed by the flag Lnopanic.
func Panic(v ...interface{}) {
	std.Output(2, Lnopanic, v...)
	s := fmt.Sprint(v...)
	panic(s)
}

// Panicf prints the message like Printf and calls panic. The printing
// might be suppressed by the flag Lnopanic; the panic happens
// regardless.
func (l *Logger) Panicf(format string, v ...interface{}) {
	l.Outputf(2, Lnopanic, format, v...)
	s := fmt.Sprintf(format, v...)
	panic(s)
}

// Panicf prints the message like Printf on the standard logger and
// calls panic. The printing might be suppressed by the flag Lnopanic.
func Panicf(format string, v ...interface{}) {
	std.Outputf(2, Lnopanic, format, v...)
	s := fmt.Sprintf(format, v...)
	panic(s)
}

// Panicln prints the message like Println and calls panic. The printing
// might be suppressed by the flag Lnopanic; the panic happens
// regardless.
func (l *Logger) Panicln(v ...interface{}) {
	l.Outputln(2, Lnopanic, v...)
	s := fmt.Sprintln(v...)
	panic(s)
}

// Panicln prints the message like Println on the standard logger and
// calls panic. The printing might be suppressed by the flag Lnopanic.
func Panicln(v ...interface{}) {
	std.Outputln(2, Lnopanic, v...)
	s := fmt.Sprintln(v...)
	panic(s)
}
|
||||
|
||||
// Fatal prints the message like Print and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal; the exit happens
// regardless.
func (l *Logger) Fatal(v ...interface{}) {
	l.Output(2, Lnofatal, v...)
	os.Exit(1)
}

// Fatal prints the message like Print on the standard logger and calls
// os.Exit(1). The printing might be suppressed by the flag Lnofatal.
func Fatal(v ...interface{}) {
	std.Output(2, Lnofatal, v...)
	os.Exit(1)
}

// Fatalf prints the message like Printf and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal; the exit happens
// regardless.
func (l *Logger) Fatalf(format string, v ...interface{}) {
	l.Outputf(2, Lnofatal, format, v...)
	os.Exit(1)
}

// Fatalf prints the message like Printf on the standard logger and
// calls os.Exit(1). The printing might be suppressed by the flag
// Lnofatal.
func Fatalf(format string, v ...interface{}) {
	std.Outputf(2, Lnofatal, format, v...)
	os.Exit(1)
}
|
||||
|
||||
// Fatalln prints the message like Println and calls os.Exit(1). The
|
||||
// printing might be suppressed by the flag Lnofatal.
|
||||
func (l *Logger) Fatalln(format string, v ...interface{}) {
|
||||
l.Outputln(2, Lnofatal, v...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Fatalln prints the message like Println and calls os.Exit(1). The
|
||||
// printing might be suppressed by the flag Lnofatal.
|
||||
func Fatalln(format string, v ...interface{}) {
|
||||
std.Outputln(2, Lnofatal, v...)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Warn prints the message like Print. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warn(v ...interface{}) {
	l.Output(2, Lnowarn, v...)
}

// Warn prints the message like Print on the standard logger. The
// printing might be suppressed by the flag Lnowarn.
func Warn(v ...interface{}) {
	std.Output(2, Lnowarn, v...)
}

// Warnf prints the message like Printf. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warnf(format string, v ...interface{}) {
	l.Outputf(2, Lnowarn, format, v...)
}

// Warnf prints the message like Printf on the standard logger. The
// printing might be suppressed by the flag Lnowarn.
func Warnf(format string, v ...interface{}) {
	std.Outputf(2, Lnowarn, format, v...)
}

// Warnln prints the message like Println. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warnln(v ...interface{}) {
	l.Outputln(2, Lnowarn, v...)
}

// Warnln prints the message like Println on the standard logger. The
// printing might be suppressed by the flag Lnowarn.
func Warnln(v ...interface{}) {
	std.Outputln(2, Lnowarn, v...)
}
|
||||
|
||||
// Print prints the message like fmt.Print. The printing might be suppressed
|
||||
// by the flag Lnoprint.
|
||||
func (l *Logger) Print(v ...interface{}) {
|
||||
l.Output(2, Lnoprint, v...)
|
||||
}
|
||||
|
||||
// Print prints the message like fmt.Print. The printing might be suppressed
|
||||
// by the flag Lnoprint.
|
||||
func Print(v ...interface{}) {
|
||||
std.Output(2, Lnoprint, v...)
|
||||
}
|
||||
|
||||
// Printf prints the message like fmt.Printf. The printing might be suppressed
|
||||
// by the flag Lnoprint.
|
||||
func (l *Logger) Printf(format string, v ...interface{}) {
|
||||
l.Outputf(2, Lnoprint, format, v...)
|
||||
}
|
||||
|
||||
// Printf prints the message like fmt.Printf. The printing might be suppressed
|
||||
// by the flag Lnoprint.
|
||||
func Printf(format string, v ...interface{}) {
|
||||
std.Outputf(2, Lnoprint, format, v...)
|
||||
}
|
||||
|
||||
// Println prints the message like fmt.Println. The printing might be
|
||||
// suppressed by the flag Lnoprint.
|
||||
func (l *Logger) Println(v ...interface{}) {
|
||||
l.Outputln(2, Lnoprint, v...)
|
||||
}
|
||||
|
||||
// Println prints the message like fmt.Println. The printing might be
|
||||
// suppressed by the flag Lnoprint.
|
||||
func Println(v ...interface{}) {
|
||||
std.Outputln(2, Lnoprint, v...)
|
||||
}
|
||||
|
||||
// Debug prints the message like Print. The printing might be suppressed
|
||||
// by the flag Lnodebug.
|
||||
func (l *Logger) Debug(v ...interface{}) {
|
||||
l.Output(2, Lnodebug, v...)
|
||||
}
|
||||
|
||||
// Debug prints the message like Print. The printing might be suppressed
|
||||
// by the flag Lnodebug.
|
||||
func Debug(v ...interface{}) {
|
||||
std.Output(2, Lnodebug, v...)
|
||||
}
|
||||
|
||||
// Debugf prints the message like Printf. The printing might be suppressed
|
||||
// by the flag Lnodebug.
|
||||
func (l *Logger) Debugf(format string, v ...interface{}) {
|
||||
l.Outputf(2, Lnodebug, format, v...)
|
||||
}
|
||||
|
||||
// Debugf prints the message like Printf. The printing might be suppressed
|
||||
// by the flag Lnodebug.
|
||||
func Debugf(format string, v ...interface{}) {
|
||||
std.Outputf(2, Lnodebug, format, v...)
|
||||
}
|
||||
|
||||
// Debugln prints the message like Println. The printing might be suppressed
|
||||
// by the flag Lnodebug.
|
||||
func (l *Logger) Debugln(v ...interface{}) {
|
||||
l.Outputln(2, Lnodebug, v...)
|
||||
}
|
||||
|
||||
// Debugln prints the message like Println. The printing might be suppressed
|
||||
// by the flag Lnodebug.
|
||||
func Debugln(v ...interface{}) {
|
||||
std.Outputln(2, Lnodebug, v...)
|
||||
}
|
||||
|
||||
// Flags returns the current flags used by the logger.
|
||||
func (l *Logger) Flags() int {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
return l.flag
|
||||
}
|
||||
|
||||
// Flags returns the current flags used by the standard logger.
|
||||
func Flags() int {
|
||||
return std.Flags()
|
||||
}
|
||||
|
||||
// SetFlags sets the flags of the logger.
|
||||
func (l *Logger) SetFlags(flag int) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
l.flag = flag
|
||||
}
|
||||
|
||||
// SetFlags sets the flags for the standard logger.
|
||||
func SetFlags(flag int) {
|
||||
std.SetFlags(flag)
|
||||
}
|
||||
|
||||
// Prefix returns the prefix used by the logger.
|
||||
func (l *Logger) Prefix() string {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
return l.prefix
|
||||
}
|
||||
|
||||
// Prefix returns the prefix used by the standard logger of the package.
|
||||
func Prefix() string {
|
||||
return std.Prefix()
|
||||
}
|
||||
|
||||
// SetPrefix sets the prefix for the logger.
|
||||
func (l *Logger) SetPrefix(prefix string) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
l.prefix = prefix
|
||||
}
|
||||
|
||||
// SetPrefix sets the prefix of the standard logger of the package.
|
||||
func SetPrefix(prefix string) {
|
||||
std.SetPrefix(prefix)
|
||||
}
|
||||
|
||||
// SetOutput sets the output of the logger.
|
||||
func (l *Logger) SetOutput(w io.Writer) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
l.out = w
|
||||
}
|
||||
|
||||
// SetOutput sets the output for the standard logger of the package.
|
||||
func SetOutput(w io.Writer) {
|
||||
std.SetOutput(w)
|
||||
}
|
||||
523
vendor/github.com/ulikunitz/xz/lzma/bintree.go
generated
vendored
Normal file
523
vendor/github.com/ulikunitz/xz/lzma/bintree.go
generated
vendored
Normal file
@@ -0,0 +1,523 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lzma
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// node represents a node in the binary tree.
|
||||
type node struct {
|
||||
// x is the search value
|
||||
x uint32
|
||||
// p parent node
|
||||
p uint32
|
||||
// l left child
|
||||
l uint32
|
||||
// r right child
|
||||
r uint32
|
||||
}
|
||||
|
||||
// wordLen is the number of bytes represented by the v field of a node.
|
||||
const wordLen = 4
|
||||
|
||||
// binTree supports the identification of the next operation based on a
|
||||
// binary tree.
|
||||
//
|
||||
// Nodes will be identified by their index into the ring buffer.
|
||||
type binTree struct {
|
||||
dict *encoderDict
|
||||
// ring buffer of nodes
|
||||
node []node
|
||||
// absolute offset of the entry for the next node. Position 4
|
||||
// byte larger.
|
||||
hoff int64
|
||||
// front position in the node ring buffer
|
||||
front uint32
|
||||
// index of the root node
|
||||
root uint32
|
||||
// current x value
|
||||
x uint32
|
||||
// preallocated array
|
||||
data []byte
|
||||
}
|
||||
|
||||
// null represents the nonexistent index. We can't use zero because it
|
||||
// would always exist or we would need to decrease the index for each
|
||||
// reference.
|
||||
const null uint32 = 1<<32 - 1
|
||||
|
||||
// newBinTree initializes the binTree structure. The capacity defines
|
||||
// the size of the buffer and defines the maximum distance for which
|
||||
// matches will be found.
|
||||
func newBinTree(capacity int) (t *binTree, err error) {
|
||||
if capacity < 1 {
|
||||
return nil, errors.New(
|
||||
"newBinTree: capacity must be larger than zero")
|
||||
}
|
||||
if int64(capacity) >= int64(null) {
|
||||
return nil, errors.New(
|
||||
"newBinTree: capacity must less 2^{32}-1")
|
||||
}
|
||||
t = &binTree{
|
||||
node: make([]node, capacity),
|
||||
hoff: -int64(wordLen),
|
||||
root: null,
|
||||
data: make([]byte, maxMatchLen),
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (t *binTree) SetDict(d *encoderDict) { t.dict = d }
|
||||
|
||||
// WriteByte writes a single byte into the binary tree.
|
||||
func (t *binTree) WriteByte(c byte) error {
|
||||
t.x = (t.x << 8) | uint32(c)
|
||||
t.hoff++
|
||||
if t.hoff < 0 {
|
||||
return nil
|
||||
}
|
||||
v := t.front
|
||||
if int64(v) < t.hoff {
|
||||
// We are overwriting old nodes stored in the tree.
|
||||
t.remove(v)
|
||||
}
|
||||
t.node[v].x = t.x
|
||||
t.add(v)
|
||||
t.front++
|
||||
if int64(t.front) >= int64(len(t.node)) {
|
||||
t.front = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Writes writes a sequence of bytes into the binTree structure.
|
||||
func (t *binTree) Write(p []byte) (n int, err error) {
|
||||
for _, c := range p {
|
||||
t.WriteByte(c)
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// add puts the node v into the tree. The node must not be part of the
|
||||
// tree before.
|
||||
func (t *binTree) add(v uint32) {
|
||||
vn := &t.node[v]
|
||||
// Set left and right to null indices.
|
||||
vn.l, vn.r = null, null
|
||||
// If the binary tree is empty make v the root.
|
||||
if t.root == null {
|
||||
t.root = v
|
||||
vn.p = null
|
||||
return
|
||||
}
|
||||
x := vn.x
|
||||
p := t.root
|
||||
// Search for the right leave link and add the new node.
|
||||
for {
|
||||
pn := &t.node[p]
|
||||
if x <= pn.x {
|
||||
if pn.l == null {
|
||||
pn.l = v
|
||||
vn.p = p
|
||||
return
|
||||
}
|
||||
p = pn.l
|
||||
} else {
|
||||
if pn.r == null {
|
||||
pn.r = v
|
||||
vn.p = p
|
||||
return
|
||||
}
|
||||
p = pn.r
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parent returns the parent node index of v and the pointer to v value
|
||||
// in the parent.
|
||||
func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
|
||||
if t.root == v {
|
||||
return null, &t.root
|
||||
}
|
||||
p = t.node[v].p
|
||||
if t.node[p].l == v {
|
||||
ptr = &t.node[p].l
|
||||
} else {
|
||||
ptr = &t.node[p].r
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Remove node v.
|
||||
func (t *binTree) remove(v uint32) {
|
||||
vn := &t.node[v]
|
||||
p, ptr := t.parent(v)
|
||||
l, r := vn.l, vn.r
|
||||
if l == null {
|
||||
// Move the right child up.
|
||||
*ptr = r
|
||||
if r != null {
|
||||
t.node[r].p = p
|
||||
}
|
||||
return
|
||||
}
|
||||
if r == null {
|
||||
// Move the left child up.
|
||||
*ptr = l
|
||||
t.node[l].p = p
|
||||
return
|
||||
}
|
||||
|
||||
// Search the in-order predecessor u.
|
||||
un := &t.node[l]
|
||||
ur := un.r
|
||||
if ur == null {
|
||||
// In order predecessor is l. Move it up.
|
||||
un.r = r
|
||||
t.node[r].p = l
|
||||
un.p = p
|
||||
*ptr = l
|
||||
return
|
||||
}
|
||||
var u uint32
|
||||
for {
|
||||
// Look for the max value in the tree where l is root.
|
||||
u = ur
|
||||
ur = t.node[u].r
|
||||
if ur == null {
|
||||
break
|
||||
}
|
||||
}
|
||||
// replace u with ul
|
||||
un = &t.node[u]
|
||||
ul := un.l
|
||||
up := un.p
|
||||
t.node[up].r = ul
|
||||
if ul != null {
|
||||
t.node[ul].p = up
|
||||
}
|
||||
|
||||
// replace v by u
|
||||
un.l, un.r = l, r
|
||||
t.node[l].p = u
|
||||
t.node[r].p = u
|
||||
*ptr = u
|
||||
un.p = p
|
||||
}
|
||||
|
||||
// search looks for the node that have the value x or for the nodes that
|
||||
// brace it. The node highest in the tree with the value x will be
|
||||
// returned. All other nodes with the same value live in left subtree of
|
||||
// the returned node.
|
||||
func (t *binTree) search(v uint32, x uint32) (a, b uint32) {
|
||||
a, b = null, null
|
||||
if v == null {
|
||||
return
|
||||
}
|
||||
for {
|
||||
vn := &t.node[v]
|
||||
if x <= vn.x {
|
||||
if x == vn.x {
|
||||
return v, v
|
||||
}
|
||||
b = v
|
||||
if vn.l == null {
|
||||
return
|
||||
}
|
||||
v = vn.l
|
||||
} else {
|
||||
a = v
|
||||
if vn.r == null {
|
||||
return
|
||||
}
|
||||
v = vn.r
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// max returns the node with maximum value in the subtree with v as
|
||||
// root.
|
||||
func (t *binTree) max(v uint32) uint32 {
|
||||
if v == null {
|
||||
return null
|
||||
}
|
||||
for {
|
||||
r := t.node[v].r
|
||||
if r == null {
|
||||
return v
|
||||
}
|
||||
v = r
|
||||
}
|
||||
}
|
||||
|
||||
// min returns the node with the minimum value in the subtree with v as
|
||||
// root.
|
||||
func (t *binTree) min(v uint32) uint32 {
|
||||
if v == null {
|
||||
return null
|
||||
}
|
||||
for {
|
||||
l := t.node[v].l
|
||||
if l == null {
|
||||
return v
|
||||
}
|
||||
v = l
|
||||
}
|
||||
}
|
||||
|
||||
// pred returns the in-order predecessor of node v.
|
||||
func (t *binTree) pred(v uint32) uint32 {
|
||||
if v == null {
|
||||
return null
|
||||
}
|
||||
u := t.max(t.node[v].l)
|
||||
if u != null {
|
||||
return u
|
||||
}
|
||||
for {
|
||||
p := t.node[v].p
|
||||
if p == null {
|
||||
return null
|
||||
}
|
||||
if t.node[p].r == v {
|
||||
return p
|
||||
}
|
||||
v = p
|
||||
}
|
||||
}
|
||||
|
||||
// succ returns the in-order successor of node v.
|
||||
func (t *binTree) succ(v uint32) uint32 {
|
||||
if v == null {
|
||||
return null
|
||||
}
|
||||
u := t.min(t.node[v].r)
|
||||
if u != null {
|
||||
return u
|
||||
}
|
||||
for {
|
||||
p := t.node[v].p
|
||||
if p == null {
|
||||
return null
|
||||
}
|
||||
if t.node[p].l == v {
|
||||
return p
|
||||
}
|
||||
v = p
|
||||
}
|
||||
}
|
||||
|
||||
// xval converts the first four bytes of a into an 32-bit unsigned
|
||||
// integer in big-endian order.
|
||||
func xval(a []byte) uint32 {
|
||||
var x uint32
|
||||
switch len(a) {
|
||||
default:
|
||||
x |= uint32(a[3])
|
||||
fallthrough
|
||||
case 3:
|
||||
x |= uint32(a[2]) << 8
|
||||
fallthrough
|
||||
case 2:
|
||||
x |= uint32(a[1]) << 16
|
||||
fallthrough
|
||||
case 1:
|
||||
x |= uint32(a[0]) << 24
|
||||
case 0:
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// dumpX converts value x into a four-letter string.
|
||||
func dumpX(x uint32) string {
|
||||
a := make([]byte, 4)
|
||||
for i := 0; i < 4; i++ {
|
||||
c := byte(x >> uint((3-i)*8))
|
||||
if unicode.IsGraphic(rune(c)) {
|
||||
a[i] = c
|
||||
} else {
|
||||
a[i] = '.'
|
||||
}
|
||||
}
|
||||
return string(a)
|
||||
}
|
||||
|
||||
// dumpNode writes a representation of the node v into the io.Writer.
|
||||
func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
|
||||
if v == null {
|
||||
return
|
||||
}
|
||||
|
||||
vn := &t.node[v]
|
||||
|
||||
t.dumpNode(w, vn.r, indent+2)
|
||||
|
||||
for i := 0; i < indent; i++ {
|
||||
fmt.Fprint(w, " ")
|
||||
}
|
||||
if vn.p == null {
|
||||
fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x))
|
||||
} else {
|
||||
fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p)
|
||||
}
|
||||
|
||||
t.dumpNode(w, vn.l, indent+2)
|
||||
}
|
||||
|
||||
// dump prints a representation of the binary tree into the writer.
|
||||
func (t *binTree) dump(w io.Writer) error {
|
||||
bw := bufio.NewWriter(w)
|
||||
t.dumpNode(bw, t.root, 0)
|
||||
return bw.Flush()
|
||||
}
|
||||
|
||||
func (t *binTree) distance(v uint32) int {
|
||||
dist := int(t.front) - int(v)
|
||||
if dist <= 0 {
|
||||
dist += len(t.node)
|
||||
}
|
||||
return dist
|
||||
}
|
||||
|
||||
type matchParams struct {
|
||||
rep [4]uint32
|
||||
// length when match will be accepted
|
||||
nAccept int
|
||||
// nodes to check
|
||||
check int
|
||||
// finish if length get shorter
|
||||
stopShorter bool
|
||||
}
|
||||
|
||||
func (t *binTree) match(m match, distIter func() (int, bool), p matchParams,
|
||||
) (r match, checked int, accepted bool) {
|
||||
buf := &t.dict.buf
|
||||
for {
|
||||
if checked >= p.check {
|
||||
return m, checked, true
|
||||
}
|
||||
dist, ok := distIter()
|
||||
if !ok {
|
||||
return m, checked, false
|
||||
}
|
||||
checked++
|
||||
if m.n > 0 {
|
||||
i := buf.rear - dist + m.n - 1
|
||||
if i < 0 {
|
||||
i += len(buf.data)
|
||||
} else if i >= len(buf.data) {
|
||||
i -= len(buf.data)
|
||||
}
|
||||
if buf.data[i] != t.data[m.n-1] {
|
||||
if p.stopShorter {
|
||||
return m, checked, false
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
n := buf.matchLen(dist, t.data)
|
||||
switch n {
|
||||
case 0:
|
||||
if p.stopShorter {
|
||||
return m, checked, false
|
||||
}
|
||||
continue
|
||||
case 1:
|
||||
if uint32(dist-minDistance) != p.rep[0] {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if n < m.n || (n == m.n && int64(dist) >= m.distance) {
|
||||
continue
|
||||
}
|
||||
m = match{int64(dist), n}
|
||||
if n >= p.nAccept {
|
||||
return m, checked, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *binTree) NextOp(rep [4]uint32) operation {
|
||||
// retrieve maxMatchLen data
|
||||
n, _ := t.dict.buf.Peek(t.data[:maxMatchLen])
|
||||
if n == 0 {
|
||||
panic("no data in buffer")
|
||||
}
|
||||
t.data = t.data[:n]
|
||||
|
||||
var (
|
||||
m match
|
||||
x, u, v uint32
|
||||
iterPred, iterSucc func() (int, bool)
|
||||
)
|
||||
p := matchParams{
|
||||
rep: rep,
|
||||
nAccept: maxMatchLen,
|
||||
check: 32,
|
||||
}
|
||||
i := 4
|
||||
iterSmall := func() (dist int, ok bool) {
|
||||
i--
|
||||
if i <= 0 {
|
||||
return 0, false
|
||||
}
|
||||
return i, true
|
||||
}
|
||||
m, checked, accepted := t.match(m, iterSmall, p)
|
||||
if accepted {
|
||||
goto end
|
||||
}
|
||||
p.check -= checked
|
||||
x = xval(t.data)
|
||||
u, v = t.search(t.root, x)
|
||||
if u == v && len(t.data) == 4 {
|
||||
iter := func() (dist int, ok bool) {
|
||||
if u == null {
|
||||
return 0, false
|
||||
}
|
||||
dist = t.distance(u)
|
||||
u, v = t.search(t.node[u].l, x)
|
||||
if u != v {
|
||||
u = null
|
||||
}
|
||||
return dist, true
|
||||
}
|
||||
m, _, _ = t.match(m, iter, p)
|
||||
goto end
|
||||
}
|
||||
p.stopShorter = true
|
||||
iterSucc = func() (dist int, ok bool) {
|
||||
if v == null {
|
||||
return 0, false
|
||||
}
|
||||
dist = t.distance(v)
|
||||
v = t.succ(v)
|
||||
return dist, true
|
||||
}
|
||||
m, checked, accepted = t.match(m, iterSucc, p)
|
||||
if accepted {
|
||||
goto end
|
||||
}
|
||||
p.check -= checked
|
||||
iterPred = func() (dist int, ok bool) {
|
||||
if u == null {
|
||||
return 0, false
|
||||
}
|
||||
dist = t.distance(u)
|
||||
u = t.pred(u)
|
||||
return dist, true
|
||||
}
|
||||
m, _, _ = t.match(m, iterPred, p)
|
||||
end:
|
||||
if m.n == 0 {
|
||||
return lit{t.data[0]}
|
||||
}
|
||||
return m
|
||||
}
|
||||
45
vendor/github.com/ulikunitz/xz/lzma/bitops.go
generated
vendored
Normal file
45
vendor/github.com/ulikunitz/xz/lzma/bitops.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lzma
|
||||
|
||||
/* Naming conventions follows the CodeReviewComments in the Go Wiki. */
|
||||
|
||||
// ntz32Const is used by the functions NTZ and NLZ.
|
||||
const ntz32Const = 0x04d7651f
|
||||
|
||||
// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé.
|
||||
// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26.
|
||||
var ntz32Table = [32]int8{
|
||||
0, 1, 2, 24, 3, 19, 6, 25,
|
||||
22, 4, 20, 10, 16, 7, 12, 26,
|
||||
31, 23, 18, 5, 21, 9, 15, 11,
|
||||
30, 17, 8, 14, 29, 13, 28, 27,
|
||||
}
|
||||
|
||||
// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
|
||||
func ntz32(x uint32) int {
|
||||
if x == 0 {
|
||||
return 32
|
||||
}
|
||||
x = (x & -x) * ntz32Const
|
||||
return int(ntz32Table[x>>27])
|
||||
}
|
||||
|
||||
// nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
|
||||
func nlz32(x uint32) int {
|
||||
// Smear left most bit to the right
|
||||
x |= x >> 1
|
||||
x |= x >> 2
|
||||
x |= x >> 4
|
||||
x |= x >> 8
|
||||
x |= x >> 16
|
||||
// Use ntz mechanism to calculate nlz.
|
||||
x++
|
||||
if x == 0 {
|
||||
return 0
|
||||
}
|
||||
x *= ntz32Const
|
||||
return 32 - int(ntz32Table[x>>27])
|
||||
}
|
||||
39
vendor/github.com/ulikunitz/xz/lzma/breader.go
generated
vendored
Normal file
39
vendor/github.com/ulikunitz/xz/lzma/breader.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lzma
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// breader provides the ReadByte function for a Reader. It doesn't read
|
||||
// more data from the reader than absolutely necessary.
|
||||
type breader struct {
|
||||
io.Reader
|
||||
// helper slice to save allocations
|
||||
p []byte
|
||||
}
|
||||
|
||||
// ByteReader converts an io.Reader into an io.ByteReader.
|
||||
func ByteReader(r io.Reader) io.ByteReader {
|
||||
br, ok := r.(io.ByteReader)
|
||||
if !ok {
|
||||
return &breader{r, make([]byte, 1)}
|
||||
}
|
||||
return br
|
||||
}
|
||||
|
||||
// ReadByte read byte function.
|
||||
func (r *breader) ReadByte() (c byte, err error) {
|
||||
n, err := r.Reader.Read(r.p)
|
||||
if n < 1 {
|
||||
if err == nil {
|
||||
err = errors.New("breader.ReadByte: no data")
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
return r.p[0], nil
|
||||
}
|
||||
171
vendor/github.com/ulikunitz/xz/lzma/buffer.go
generated
vendored
Normal file
171
vendor/github.com/ulikunitz/xz/lzma/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lzma
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// buffer provides a circular buffer of bytes. If the front index equals
|
||||
// the rear index the buffer is empty. As a consequence front cannot be
|
||||
// equal rear for a full buffer. So a full buffer has a length that is
|
||||
// one byte less the the length of the data slice.
|
||||
type buffer struct {
|
||||
data []byte
|
||||
front int
|
||||
rear int
|
||||
}
|
||||
|
||||
// newBuffer creates a buffer with the given size.
|
||||
func newBuffer(size int) *buffer {
|
||||
return &buffer{data: make([]byte, size+1)}
|
||||
}
|
||||
|
||||
// Cap returns the capacity of the buffer.
|
||||
func (b *buffer) Cap() int {
|
||||
return len(b.data) - 1
|
||||
}
|
||||
|
||||
// Resets the buffer. The front and rear index are set to zero.
|
||||
func (b *buffer) Reset() {
|
||||
b.front = 0
|
||||
b.rear = 0
|
||||
}
|
||||
|
||||
// Buffered returns the number of bytes buffered.
|
||||
func (b *buffer) Buffered() int {
|
||||
delta := b.front - b.rear
|
||||
if delta < 0 {
|
||||
delta += len(b.data)
|
||||
}
|
||||
return delta
|
||||
}
|
||||
|
||||
// Available returns the number of bytes available for writing.
|
||||
func (b *buffer) Available() int {
|
||||
delta := b.rear - 1 - b.front
|
||||
if delta < 0 {
|
||||
delta += len(b.data)
|
||||
}
|
||||
return delta
|
||||
}
|
||||
|
||||
// addIndex adds a non-negative integer to the index i and returns the
|
||||
// resulting index. The function takes care of wrapping the index as
|
||||
// well as potential overflow situations.
|
||||
func (b *buffer) addIndex(i int, n int) int {
|
||||
// subtraction of len(b.data) prevents overflow
|
||||
i += n - len(b.data)
|
||||
if i < 0 {
|
||||
i += len(b.data)
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// Read reads bytes from the buffer into p and returns the number of
|
||||
// bytes read. The function never returns an error but might return less
|
||||
// data than requested.
|
||||
func (b *buffer) Read(p []byte) (n int, err error) {
|
||||
n, err = b.Peek(p)
|
||||
b.rear = b.addIndex(b.rear, n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Peek reads bytes from the buffer into p without changing the buffer.
|
||||
// Peek will never return an error but might return less data than
|
||||
// requested.
|
||||
func (b *buffer) Peek(p []byte) (n int, err error) {
|
||||
m := b.Buffered()
|
||||
n = len(p)
|
||||
if m < n {
|
||||
n = m
|
||||
p = p[:n]
|
||||
}
|
||||
k := copy(p, b.data[b.rear:])
|
||||
if k < n {
|
||||
copy(p[k:], b.data)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Discard skips the n next bytes to read from the buffer, returning the
|
||||
// bytes discarded.
|
||||
//
|
||||
// If Discards skips fewer than n bytes, it returns an error.
|
||||
func (b *buffer) Discard(n int) (discarded int, err error) {
|
||||
if n < 0 {
|
||||
return 0, errors.New("buffer.Discard: negative argument")
|
||||
}
|
||||
m := b.Buffered()
|
||||
if m < n {
|
||||
n = m
|
||||
err = errors.New(
|
||||
"buffer.Discard: discarded less bytes then requested")
|
||||
}
|
||||
b.rear = b.addIndex(b.rear, n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// ErrNoSpace indicates that there is insufficient space for the Write
|
||||
// operation.
|
||||
var ErrNoSpace = errors.New("insufficient space")
|
||||
|
||||
// Write puts data into the buffer. If less bytes are written than
|
||||
// requested ErrNoSpace is returned.
|
||||
func (b *buffer) Write(p []byte) (n int, err error) {
|
||||
m := b.Available()
|
||||
n = len(p)
|
||||
if m < n {
|
||||
n = m
|
||||
p = p[:m]
|
||||
err = ErrNoSpace
|
||||
}
|
||||
k := copy(b.data[b.front:], p)
|
||||
if k < n {
|
||||
copy(b.data, p[k:])
|
||||
}
|
||||
b.front = b.addIndex(b.front, n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// WriteByte writes a single byte into the buffer. The error ErrNoSpace
|
||||
// is returned if no single byte is available in the buffer for writing.
|
||||
func (b *buffer) WriteByte(c byte) error {
|
||||
if b.Available() < 1 {
|
||||
return ErrNoSpace
|
||||
}
|
||||
b.data[b.front] = c
|
||||
b.front = b.addIndex(b.front, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// prefixLen returns the length of the common prefix of a and b.
|
||||
func prefixLen(a, b []byte) int {
|
||||
if len(a) > len(b) {
|
||||
a, b = b, a
|
||||
}
|
||||
for i, c := range a {
|
||||
if b[i] != c {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(a)
|
||||
}
|
||||
|
||||
// matchLen returns the length of the common prefix for the given
|
||||
// distance from the rear and the byte slice p.
|
||||
func (b *buffer) matchLen(distance int, p []byte) int {
|
||||
var n int
|
||||
i := b.rear - distance
|
||||
if i < 0 {
|
||||
if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
|
||||
return n
|
||||
}
|
||||
p = p[n:]
|
||||
i = 0
|
||||
}
|
||||
n += prefixLen(p, b.data[i:])
|
||||
return n
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user