
Initial version

Knut Ahlers 2015-07-26 16:06:24 +02:00
commit c90c2fb87e
8 changed files with 488 additions and 0 deletions

.gitignore vendored Normal file (2 lines added)

@@ -0,0 +1,2 @@
s3sync
Godeps/_workspace/

.gobuilder.yml Normal file (5 lines added)

@@ -0,0 +1,5 @@
---
build_matrix:
  general:
    ldflags:
      - "-X main.version $(git describe --tags)"

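The `ldflags` entry above is what stamps the binary with a real version: the linker's `-X` flag (given here in the space-separated form used by the Go 1.4 toolchain) overwrites the package-level `main.version` string, which otherwise stays at its default of `"dev"` as declared in `main.go` below. A minimal standalone sketch of that mechanism; the tag `v0.1.0` is only illustrative:

```go
package main

import "fmt"

// version defaults to "dev" and is replaced at link time, e.g.
//   go build -ldflags "-X main.version=v0.1.0"   (Go 1.5+ syntax)
//   go build -ldflags "-X main.version v0.1.0"   (Go 1.4 syntax, as in .gobuilder.yml)
var version = "dev"

func main() {
	fmt.Println("s3sync", version)
}
```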
Godeps/Godeps.json generated Normal file (62 lines added)

@@ -0,0 +1,62 @@
{
"ImportPath": "github.com/Luzifer/s3sync",
"GoVersion": "go1.4.2",
"Deps": [
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v0.6.4-2-g168a70b",
"Rev": "168a70b9c21a4f60166d7925b690356605907adb"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints",
"Comment": "v0.6.4-2-g168a70b",
"Rev": "168a70b9c21a4f60166d7925b690356605907adb"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query",
"Comment": "v0.6.4-2-g168a70b",
"Rev": "168a70b9c21a4f60166d7925b690356605907adb"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest",
"Comment": "v0.6.4-2-g168a70b",
"Rev": "168a70b9c21a4f60166d7925b690356605907adb"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml",
"Comment": "v0.6.4-2-g168a70b",
"Rev": "168a70b9c21a4f60166d7925b690356605907adb"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil",
"Comment": "v0.6.4-2-g168a70b",
"Rev": "168a70b9c21a4f60166d7925b690356605907adb"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4",
"Comment": "v0.6.4-2-g168a70b",
"Rev": "168a70b9c21a4f60166d7925b690356605907adb"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v0.6.4-2-g168a70b",
"Rev": "168a70b9c21a4f60166d7925b690356605907adb"
},
{
"ImportPath": "github.com/inconshreveable/mousetrap",
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
},
{
"ImportPath": "github.com/spf13/cobra",
"Rev": "385fc87e4343efec233811d3d933509e8975d11a"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "67cbc198fd11dab704b214c1e629a97af392c085"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
}
]
}

LICENSE Normal file (13 lines added)

@@ -0,0 +1,13 @@
Copyright 2015 Knut Ahlers <knut@ahlers.me>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

README.md Normal file (55 lines added)

@@ -0,0 +1,55 @@
# Luzifer / s3sync

[![License: Apache v2.0](https://badge.luzifer.io/v1/badge?color=5d79b5&title=license&text=Apache+v2.0)](http://www.apache.org/licenses/LICENSE-2.0) [![Gobuild Download](https://badge.luzifer.io/v1/badge?color=5d79b5&title=Download&text=on+GoBuilder.me)](https://gobuilder.me/github.com/Luzifer/s3sync)

`s3sync` is a small utility to sync local directories from/to Amazon S3 without installing any dependencies. Just put the binary somewhere in your path, set three ENV variables, and you're ready to sync.

## Features

- Static binary, no dependencies required
- Sync files only if required (judged by MD5 checksum)
- Optionally delete files at the target
- Optionally make files public on sync (only if the file needs syncing)
- Sync local-to-s3, s3-to-local, local-to-local or s3-to-s3

## Usage

1. Set `AWS_ACCESS_KEY`, `AWS_SECRET_ACCESS_KEY` and `AWS_REGION`
2. Execute your sync

```bash
# s3sync help
Sync files from <from> to <to>

Usage:
  s3sync <from> <to> [flags]
  s3sync [command]

Available Commands:
  version     Returns the current version of s3sync
  help        Help about any command

Flags:
  -d, --delete=false: Delete files on remote not existing on local
  -h, --help=false: help for s3sync
  -P, --public=false: Make files public when syncing to S3

Use "s3sync [command] --help" for more information about a command.

# s3sync -d 1/ s3://knut-test-s3sync/
(1 / 3) 05/11/pwd_luzifer_io.png OK
(2 / 3) 07/26/bkm.png OK
(3 / 3) 07/26/luzifer_io.png OK
# s3sync -d 1/ s3://knut-test-s3sync/
(1 / 3) 05/11/pwd_luzifer_io.png Skip
(2 / 3) 07/26/bkm.png Skip
(3 / 3) 07/26/luzifer_io.png Skip
# rm -rf 1/05
# s3sync -d s3://knut-test-s3sync/ 1/
(1 / 3) 05/11/pwd_luzifer_io.png OK
(2 / 3) 07/26/bkm.png Skip
(3 / 3) 07/26/luzifer_io.png Skip
```

local.go Normal file (68 lines added)

@@ -0,0 +1,68 @@
package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

type localProvider struct{}

func newLocalProvider() *localProvider {
	return &localProvider{}
}
func (l *localProvider) WriteFile(path string, content io.ReadSeeker, public bool) error {
	// The public flag only has meaning for S3 targets and is ignored for local files.
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, content); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}
func (l *localProvider) ReadFile(path string) (io.ReadCloser, error) {
	return os.Open(path)
}

func (l *localProvider) ListFiles(prefix string) ([]file, error) {
	out := []file{}

	err := filepath.Walk(prefix, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !f.IsDir() {
			content, err := ioutil.ReadFile(path)
			if err != nil {
				return err
			}
			out = append(out, file{
				Filename: strings.TrimLeft(strings.Replace(path, prefix, "", 1), "/"),
				Size:     f.Size(),
				MD5:      fmt.Sprintf("%x", md5.Sum(content)),
			})
		}
		return nil
	})

	return out, err
}

func (l *localProvider) DeleteFile(path string) error {
	return os.Remove(path)
}

func (l *localProvider) GetAbsolutePath(path string) (string, error) {
	return filepath.Abs(path)
}

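`ListFiles` in local.go reads every file fully into memory just to compute its MD5 sum. For large files a streaming hash keeps memory usage flat; below is a minimal sketch under that assumption (the helper name `fileMD5` is hypothetical and not part of this commit):

```go
package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"os"
)

// fileMD5 hashes a file's contents without loading the whole file into
// memory by streaming it through the hash with io.Copy.
func fileMD5(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}
```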
main.go Normal file (148 lines added)

@@ -0,0 +1,148 @@
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"strings"

	"github.com/spf13/cobra"
)

var (
	cfg = struct {
		Delete bool
		Public bool
	}{}
	version = "dev"
)

type file struct {
	Filename string
	Size     int64
	MD5      string
}

type filesystemProvider interface {
	WriteFile(path string, content io.ReadSeeker, public bool) error
	ReadFile(path string) (io.ReadCloser, error)
	ListFiles(prefix string) ([]file, error)
	DeleteFile(path string) error
	GetAbsolutePath(path string) (string, error)
}

func main() {
	app := cobra.Command{
		Use:   "s3sync <from> <to>",
		Short: "Sync files from <from> to <to>",
		Run:   execSync,
	}

	app.AddCommand(&cobra.Command{
		Use:   "version",
		Short: "Returns the current version of s3sync",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("s3sync %s\n", version)
		},
	})

	app.Flags().BoolVarP(&cfg.Public, "public", "P", false, "Make files public when syncing to S3")
	app.Flags().BoolVarP(&cfg.Delete, "delete", "d", false, "Delete files on remote not existing on local")

	app.Execute()
}

func execSync(cmd *cobra.Command, args []string) {
	if len(args) != 2 {
		cmd.Usage()
		os.Exit(1)
	}

	local, err := getFSProvider(args[0])
	errExit(err)
	remote, err := getFSProvider(args[1])
	errExit(err)

	localPath, err := local.GetAbsolutePath(args[0])
	errExit(err)
	remotePath, err := remote.GetAbsolutePath(args[1])
	errExit(err)

	localFiles, err := local.ListFiles(localPath)
	errExit(err)
	remoteFiles, err := remote.ListFiles(remotePath)
	errExit(err)

	for i, localFile := range localFiles {
		fmt.Printf("(%d / %d) %s ", i+1, len(localFiles), localFile.Filename)

		needsCopy := true
		for _, remoteFile := range remoteFiles {
			if remoteFile.Filename == localFile.Filename && remoteFile.MD5 == localFile.MD5 {
				needsCopy = false
				break
			}
		}

		if needsCopy {
			l, err := local.ReadFile(path.Join(localPath, localFile.Filename))
			if err != nil {
				fmt.Printf("ERR: %s\n", err)
				continue
			}
			buffer, err := ioutil.ReadAll(l)
			l.Close() // close the source regardless of the read result to avoid leaking the handle
			if err != nil {
				fmt.Printf("ERR: %s\n", err)
				continue
			}
			err = remote.WriteFile(path.Join(remotePath, localFile.Filename), bytes.NewReader(buffer), cfg.Public)
			if err != nil {
				fmt.Printf("ERR: %s\n", err)
				continue
			}

			fmt.Printf("OK\n")
			continue
		}

		fmt.Printf("Skip\n")
	}

	if cfg.Delete {
		for _, remoteFile := range remoteFiles {
			needsDeletion := true
			for _, localFile := range localFiles {
				if localFile.Filename == remoteFile.Filename {
					needsDeletion = false
				}
			}

			if needsDeletion {
				fmt.Printf("delete: %s ", remoteFile.Filename)
				if err := remote.DeleteFile(path.Join(remotePath, remoteFile.Filename)); err != nil {
					fmt.Printf("ERR: %s\n", err)
					continue
				}
				fmt.Printf("OK\n")
			}
		}
	}
}

func errExit(err error) {
	if err != nil {
		fmt.Printf("ERR: %s\n", err)
		os.Exit(1)
	}
}

func getFSProvider(prefix string) (filesystemProvider, error) {
	if strings.HasPrefix(prefix, "s3://") {
		return newS3Provider()
	}
	return newLocalProvider(), nil
}

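`execSync` above decides whether a file needs copying by scanning the whole remote list for every local file, and scans again per remote file for deletions, which is quadratic in the number of files. A hedged sketch of the same checks using a map keyed by filename (the helper name `indexFiles` is illustrative and reuses the `file` type from main.go):

```go
// indexFiles builds a filename -> file lookup table so each "needs copy"
// or "needs deletion" decision becomes a single map access instead of a scan.
func indexFiles(files []file) map[string]file {
	idx := make(map[string]file, len(files))
	for _, f := range files {
		idx[f.Filename] = f
	}
	return idx
}

// Sketch of usage inside execSync:
//
//	remoteIdx := indexFiles(remoteFiles)
//	for _, localFile := range localFiles {
//		r, ok := remoteIdx[localFile.Filename]
//		needsCopy := !ok || r.MD5 != localFile.MD5
//		// copy as above when needsCopy is true
//	}
```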
s3.go Normal file (135 lines added)

@@ -0,0 +1,135 @@
package main

import (
	"fmt"
	"io"
	"mime"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

type s3Provider struct {
	conn *s3.S3
}

func newS3Provider() (*s3Provider, error) {
	return &s3Provider{
		conn: s3.New(&aws.Config{}),
	}, nil
}

func (s *s3Provider) getBucketPath(prefix string) (bucket string, path string, err error) {
	rex := regexp.MustCompile(`^s3://?([^/]+)/(.*)$`)
	matches := rex.FindStringSubmatch(prefix)
	if len(matches) != 3 {
		err = fmt.Errorf("prefix did not match requirements")
		return
	}
	bucket = matches[1]
	path = matches[2]
	return
}
func (s *s3Provider) ListFiles(prefix string) ([]file, error) {
	out := []file{}

	bucket, path, err := s.getBucketPath(prefix)
	if err != nil {
		return out, err
	}

	in := &s3.ListObjectsInput{
		Bucket: aws.String(bucket),
		Marker: nil,
		Prefix: aws.String(path),
	}

	for {
		o, err := s.conn.ListObjects(in)
		if err != nil {
			return out, err
		}

		for _, v := range o.Contents {
			out = append(out, file{
				Filename: *v.Key,
				Size:     *v.Size,
				MD5:      strings.Trim(*v.ETag, "\""), // ETag equals the MD5 sum only for single-part, non-encrypted uploads
			})
		}

		if o.NextMarker == nil {
			break
		}
		in.Marker = o.NextMarker
	}

	return out, nil
}
func (s *s3Provider) WriteFile(path string, content io.ReadSeeker, public bool) error {
	bucket, path, err := s.getBucketPath(path)
	if err != nil {
		return err
	}

	ext := filepath.Ext(path)
	mimeType := mime.TypeByExtension(ext)
	if mimeType == "" {
		mimeType = "application/octet-stream"
	}

	params := &s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(path),
		Body:        content,
		ContentType: aws.String(mimeType),
	}
	if public {
		params.ACL = aws.String("public-read")
	}

	_, err = s.conn.PutObject(params)
	return err
}

func (s *s3Provider) ReadFile(path string) (io.ReadCloser, error) {
	bucket, path, err := s.getBucketPath(path)
	if err != nil {
		return nil, err
	}

	o, err := s.conn.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(path),
	})
	if err != nil {
		return nil, err
	}

	return o.Body, nil
}

func (s *s3Provider) DeleteFile(path string) error {
	bucket, path, err := s.getBucketPath(path)
	if err != nil {
		return err
	}

	_, err = s.conn.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(path),
	})
	return err
}

func (s *s3Provider) GetAbsolutePath(path string) (string, error) {
	return path, nil
}
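A caveat on the ETag-as-MD5 trick in `ListFiles`: S3's ETag equals the object's MD5 sum only for single-part, non-encrypted uploads. Objects created via multipart upload carry an ETag of the form `<hex>-<partcount>`, which can never match a locally computed MD5, so such objects would be re-copied on every run. A small hedged check for that case (the helper name is illustrative, not part of this commit):

```go
// isMultipartETag reports whether an S3 ETag comes from a multipart upload,
// in which case it is not comparable to a plain MD5 checksum.
func isMultipartETag(etag string) bool {
	return strings.Contains(strings.Trim(etag, `"`), "-")
}
```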