mirror of https://github.com/minio/minio.git

Implement backblaze-b2 gateway support (#5002)
Fixes https://github.com/minio/minio/issues/4072

parent 3d0dced23c
commit 0c0d1e4150
@@ -70,6 +70,14 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, contentRange *h
 		w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
 	}
 
+	if objInfo.ContentType != "" {
+		w.Header().Set("Content-Type", objInfo.ContentType)
+	}
+
+	if objInfo.ContentEncoding != "" {
+		w.Header().Set("Content-Encoding", objInfo.ContentEncoding)
+	}
+
 	// Set all other user defined metadata.
 	for k, v := range objInfo.UserDefined {
 		w.Header().Set(k, v)
@@ -0,0 +1,133 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"
)

// mkRange converts offset and size into the equivalent HTTP Range header value.
func mkRange(offset, size int64) string {
	if offset == 0 && size == 0 {
		return ""
	}
	if size == 0 {
		return fmt.Sprintf("%s%d-", byteRangePrefix, offset)
	}
	return fmt.Sprintf("%s%d-%d", byteRangePrefix, offset, offset+size-1)
}
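
// Illustrative examples: with byteRangePrefix == "bytes=" (as exercised by
// TestMkRange later in this commit), mkRange(0, 0) returns "" (no Range
// header is set), mkRange(10, 0) returns "bytes=10-" (read from offset 10
// to EOF), and mkRange(10, 11) returns "bytes=10-20", since the HTTP
// byte-range end offset is inclusive.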

// AnonGetObject - performs a plain http GET request on a public resource,
// fails if the resource is not public.
func (l *b2Objects) AnonGetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
	uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return b2ToObjectError(traceError(err), bucket, object)
	}
	rng := mkRange(startOffset, length)
	if rng != "" {
		req.Header.Set("Range", rng)
	}
	resp, err := l.anonClient.Do(req)
	if err != nil {
		return b2ToObjectError(traceError(err), bucket, object)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return b2ToObjectError(traceError(errors.New(resp.Status)), bucket, object)
	}
	_, err = io.Copy(writer, resp.Body)
	return b2ToObjectError(traceError(err), bucket, object)
}
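
// For illustration (the host below is assumed, not fixed): with a
// DownloadURI of "https://f000.backblazeb2.com", bucket "photos" and
// object "2017/pic.jpg", the request above becomes
// GET https://f000.backblazeb2.com/file/photos/2017/pic.jpg.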

// headerToObjectInfo converts an http Header into ObjectInfo. It looks for
// all the standard Backblaze B2 headers to convert into ObjectInfo.
//
// Content-Length is converted to Size.
// X-Bz-Upload-Timestamp is converted to ModTime.
// X-Bz-Info-<header>:<value> is converted to <header>:<value>.
// Content-Type is converted to ContentType.
// X-Bz-File-Id is converted to ETag.
func headerToObjectInfo(bucket, object string, header http.Header) (objInfo ObjectInfo, err error) {
	clen, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64)
	if err != nil {
		return objInfo, b2ToObjectError(traceError(err), bucket, object)
	}

	// Convert the upload timestamp in milliseconds to a time.Time value for ObjectInfo.ModTime.
	timeStamp, err := strconv.ParseInt(header.Get("X-Bz-Upload-Timestamp"), 10, 64)
	if err != nil {
		return objInfo, b2ToObjectError(traceError(err), bucket, object)
	}

	// Populate user metadata by looking for all the X-Bz-Info-<name>
	// HTTP headers; ignore other headers since they have their own
	// designated meaning. For more details refer to the B2 API documentation.
	userMetadata := make(map[string]string)
	for key := range header {
		if strings.HasPrefix(key, "X-Bz-Info-") {
			var name string
			name, err = url.QueryUnescape(strings.TrimPrefix(key, "X-Bz-Info-"))
			if err != nil {
				return objInfo, b2ToObjectError(traceError(err), bucket, object)
			}
			var val string
			val, err = url.QueryUnescape(header.Get(key))
			if err != nil {
				return objInfo, b2ToObjectError(traceError(err), bucket, object)
			}
			userMetadata[name] = val
		}
	}

	return ObjectInfo{
		Bucket:      bucket,
		Name:        object,
		ContentType: header.Get("Content-Type"),
		ModTime:     time.Unix(0, 0).Add(time.Duration(timeStamp) * time.Millisecond),
		Size:        clen,
		ETag:        header.Get("X-Bz-File-Id"),
		UserDefined: userMetadata,
	}, nil
}

// AnonGetObjectInfo - performs a plain http HEAD request on a public resource,
// fails if the resource is not public.
func (l *b2Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
	uri := fmt.Sprintf("%s/file/%s/%s", l.b2Client.DownloadURI, bucket, object)
	req, err := http.NewRequest("HEAD", uri, nil)
	if err != nil {
		return objInfo, b2ToObjectError(traceError(err), bucket, object)
	}
	resp, err := l.anonClient.Do(req)
	if err != nil {
		return objInfo, b2ToObjectError(traceError(err), bucket, object)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return objInfo, b2ToObjectError(traceError(errors.New(resp.Status)), bucket, object)
	}
	return headerToObjectInfo(bucket, object, resp.Header)
}
@@ -0,0 +1,703 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"crypto/sha1"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
	"sync"
	"time"

	b2 "github.com/minio/blazer/base"
	"github.com/minio/minio-go/pkg/policy"
)

// Bucket types supported by the B2 backend.
const (
	bucketTypePrivate  = "allPrivate"
	bucketTypeReadOnly = "allPublic"
)

// b2Objects implements a gateway layer for Minio to talk to Backblaze B2
// compatible object storage servers.
type b2Objects struct {
	gatewayUnsupported
	mu         sync.Mutex
	creds      credential
	b2Client   *b2.B2
	anonClient *http.Client
	ctx        context.Context
}

// newB2Gateway returns a B2 gateway layer, implementing the GatewayLayer
// interface to talk to the B2 remote backend.
func newB2Gateway() (GatewayLayer, error) {
	ctx := context.Background()
	creds := serverConfig.GetCredential()

	client, err := b2.AuthorizeAccount(ctx, creds.AccessKey, creds.SecretKey, b2.Transport(newCustomHTTPTransport()))
	if err != nil {
		return nil, err
	}

	return &b2Objects{
		creds:    creds,
		b2Client: client,
		anonClient: &http.Client{
			Transport: newCustomHTTPTransport(),
		},
		ctx: ctx,
	}, nil
}

// b2ToObjectError converts B2 errors to minio object layer errors.
func b2ToObjectError(err error, params ...string) error {
	if err == nil {
		return nil
	}

	e, ok := err.(*Error)
	if !ok {
		// The code should be fixed if this function is called without doing
		// traceError(); else handling the different situations here becomes
		// complicated.
		errorIf(err, "Expected type *Error")
		return err
	}

	err = e.e
	bucket := ""
	object := ""
	uploadID := ""
	if len(params) >= 1 {
		bucket = params[0]
	}
	if len(params) == 2 {
		object = params[1]
	}
	if len(params) == 3 {
		uploadID = params[2]
	}

	// The following code is a non-exhaustive check to convert
	// B2 errors into S3 compatible errors.
	//
	// For more complete information see https://www.backblaze.com/b2/docs/
	statusCode, code, msg := b2.Code(err)
	if statusCode == 0 {
		// We don't interpret non-B2 errors. B2 errors have a statusCode
		// to help us convert them to S3 object errors.
		return e
	}

	switch code {
	case "duplicate_bucket_name":
		err = BucketAlreadyOwnedByYou{Bucket: bucket}
	case "bad_request":
		if object != "" {
			err = ObjectNameInvalid{bucket, object}
		} else if bucket != "" {
			err = BucketNotFound{Bucket: bucket}
		}
	case "bad_bucket_id":
		err = BucketNotFound{Bucket: bucket}
	case "file_not_present", "not_found":
		err = ObjectNotFound{bucket, object}
	case "cannot_delete_non_empty_bucket":
		err = BucketNotEmpty{bucket, ""}
	}

	// Special interpretation like this is required for multipart sessions.
	if strings.Contains(msg, "No active upload for") && uploadID != "" {
		err = InvalidUploadID{uploadID}
	}

	e.e = err
	return e
}

// Shutdown saves any gateway metadata to disk
// if necessary and reloads it upon next restart.
func (l *b2Objects) Shutdown() error {
	// TODO
	return nil
}

// StorageInfo is not relevant to the B2 backend.
func (l *b2Objects) StorageInfo() (si StorageInfo) {
	return si
}

// MakeBucketWithLocation creates a new bucket on the B2 backend.
func (l *b2Objects) MakeBucketWithLocation(bucket, location string) error {
	// location is ignored for the B2 backend.

	// All buckets are set to private by default.
	_, err := l.b2Client.CreateBucket(l.ctx, bucket, bucketTypePrivate, nil, nil)
	return b2ToObjectError(traceError(err), bucket)
}

func (l *b2Objects) reAuthorizeAccount() error {
	client, err := b2.AuthorizeAccount(l.ctx, l.creds.AccessKey, l.creds.SecretKey, b2.Transport(newCustomHTTPTransport()))
	if err != nil {
		return err
	}
	l.mu.Lock()
	l.b2Client.Update(client)
	l.mu.Unlock()
	return nil
}

// listBuckets is a wrapper similar to ListBuckets, which re-authorizes
// the account and updates the B2 client safely. Once successfully
// authorized, it performs the call again and returns the list of buckets.
// For any errors which are not actionable we return an error.
func (l *b2Objects) listBuckets(err error) ([]*b2.Bucket, error) {
	if err != nil {
		if b2.Action(err) != b2.ReAuthenticate {
			return nil, err
		}
		if rerr := l.reAuthorizeAccount(); rerr != nil {
			return nil, rerr
		}
	}
	bktList, lerr := l.b2Client.ListBuckets(l.ctx)
	if lerr != nil {
		return l.listBuckets(lerr)
	}
	return bktList, nil
}

// Bucket is a helper which provides a *b2.Bucket instance
// for performing an API operation. The B2 API doesn't
// provide a direct way to access a bucket, so we need
// to employ the following technique.
func (l *b2Objects) Bucket(bucket string) (*b2.Bucket, error) {
	bktList, err := l.listBuckets(nil)
	if err != nil {
		return nil, b2ToObjectError(traceError(err), bucket)
	}
	for _, bkt := range bktList {
		if bkt.Name == bucket {
			return bkt, nil
		}
	}
	return nil, traceError(BucketNotFound{Bucket: bucket})
}

// GetBucketInfo gets bucket metadata.
func (l *b2Objects) GetBucketInfo(bucket string) (bi BucketInfo, err error) {
	if _, err = l.Bucket(bucket); err != nil {
		return bi, err
	}
	return BucketInfo{
		Name:    bucket,
		Created: time.Unix(0, 0),
	}, nil
}

// ListBuckets lists all B2 buckets.
func (l *b2Objects) ListBuckets() ([]BucketInfo, error) {
	bktList, err := l.listBuckets(nil)
	if err != nil {
		return nil, err
	}
	var bktInfo []BucketInfo
	for _, bkt := range bktList {
		bktInfo = append(bktInfo, BucketInfo{
			Name:    bkt.Name,
			Created: time.Unix(0, 0),
		})
	}
	return bktInfo, nil
}

// DeleteBucket deletes a bucket on B2.
func (l *b2Objects) DeleteBucket(bucket string) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	err = bkt.DeleteBucket(l.ctx)
	return b2ToObjectError(traceError(err), bucket)
}

// ListObjects lists all objects in a B2 bucket filtered by prefix; returns up to 1000 entries at a time.
func (l *b2Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return loi, err
	}
	loi = ListObjectsInfo{}
	files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, marker, prefix, delimiter)
	if lerr != nil {
		return loi, b2ToObjectError(traceError(lerr), bucket)
	}
	loi.IsTruncated = next != ""
	loi.NextMarker = next
	for _, file := range files {
		switch file.Status {
		case "folder":
			loi.Prefixes = append(loi.Prefixes, file.Name)
		case "upload":
			loi.Objects = append(loi.Objects, ObjectInfo{
				Bucket:      bucket,
				Name:        file.Name,
				ModTime:     file.Timestamp,
				Size:        file.Size,
				ETag:        file.Info.ID,
				ContentType: file.Info.ContentType,
				UserDefined: file.Info.Info,
			})
		}
	}
	return loi, nil
}

// ListObjectsV2 lists all objects in a B2 bucket filtered by prefix; returns up to 1000 entries at a time.
func (l *b2Objects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int,
	fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) {
	// fetchOwner and startAfter are not supported and unused.
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return loi, err
	}
	loi = ListObjectsV2Info{}
	files, next, lerr := bkt.ListFileNames(l.ctx, maxKeys, continuationToken, prefix, delimiter)
	if lerr != nil {
		return loi, b2ToObjectError(traceError(lerr), bucket)
	}
	loi.IsTruncated = next != ""
	loi.ContinuationToken = continuationToken
	loi.NextContinuationToken = next
	for _, file := range files {
		switch file.Status {
		case "folder":
			loi.Prefixes = append(loi.Prefixes, file.Name)
		case "upload":
			loi.Objects = append(loi.Objects, ObjectInfo{
				Bucket:      bucket,
				Name:        file.Name,
				ModTime:     file.Timestamp,
				Size:        file.Size,
				ETag:        file.Info.ID,
				ContentType: file.Info.ContentType,
				UserDefined: file.Info.Info,
			})
		}
	}
	return loi, nil
}

// GetObject reads an object from B2. Supports additional
// parameters like offset and length which are synonymous with
// HTTP Range requests.
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (l *b2Objects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	reader, err := bkt.DownloadFileByName(l.ctx, object, startOffset, length)
	if err != nil {
		return b2ToObjectError(traceError(err), bucket, object)
	}
	defer reader.Close()
	_, err = io.Copy(writer, reader)
	return b2ToObjectError(traceError(err), bucket, object)
}

// GetObjectInfo reads object info and replies back ObjectInfo.
func (l *b2Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return objInfo, err
	}
	// Download only the first byte of the object; the returned reader
	// carries the B2 file ID needed to look up the full file info.
	f, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
	if err != nil {
		return objInfo, b2ToObjectError(traceError(err), bucket, object)
	}
	f.Close()
	fi, err := bkt.File(f.ID, object).GetFileInfo(l.ctx)
	if err != nil {
		return objInfo, b2ToObjectError(traceError(err), bucket, object)
	}
	objInfo = ObjectInfo{
		Bucket:      bucket,
		Name:        object,
		ETag:        fi.ID,
		Size:        fi.Size,
		ModTime:     fi.Timestamp,
		ContentType: fi.ContentType,
		UserDefined: fi.Info,
	}
	return objInfo, nil
}

// In B2 you must always include the X-Bz-Content-Sha1 header with
// your upload request. The value you provide can be:
// (1) the 40-character hex checksum of the file,
// (2) the string hex_digits_at_end, or
// (3) the string do_not_verify.
// For more reference - https://www.backblaze.com/b2/docs/uploading.html
const (
	sha1NoVerify = "do_not_verify"
	sha1AtEOF    = "hex_digits_at_end"
)

// With the second option mentioned above, you append the 40-character hex sha1
// to the end of the request body, immediately after the contents of the file
// being uploaded. Note that the content length is then the original size of
// the reader plus 40 bytes.
//
// newB2Reader implements a B2 compatible reader by wrapping the HashReader into
// a new io.Reader which will emit the sha1 hex digits at io.EOF. This means
// that the overall content size is now the original size + 40 bytes.
// Additionally this reader also verifies the Hash encapsulated inside the
// HashReader at io.EOF; if the verification fails we return an error and do
// not send the content to the server.
func newB2Reader(r *HashReader, size int64) *B2Reader {
	return &B2Reader{
		r:        r,
		size:     size,
		sha1Hash: sha1.New(),
	}
}

// B2Reader is a Reader wrapping the HashReader which will emit the sha1
// hex digits at io.EOF. This means that the overall content size is
// now the original size + 40 bytes. Additionally this reader also verifies
// the Hash encapsulated inside the HashReader at io.EOF; if the verification
// fails we return an error and do not send the content to the server.
type B2Reader struct {
	r        *HashReader
	size     int64
	sha1Hash hash.Hash

	isEOF bool
	buf   *strings.Reader
}

// Size - returns the total size of the Reader.
func (nb *B2Reader) Size() int64 { return nb.size + 40 }

func (nb *B2Reader) Read(p []byte) (int, error) {
	if nb.isEOF {
		return nb.buf.Read(p)
	}
	// Read through the hash to update the ongoing checksum.
	n, err := io.TeeReader(nb.r, nb.sha1Hash).Read(p)
	if err == io.EOF {
		// Verify the checksum at io.EOF.
		if err = nb.r.Verify(); err != nil {
			return n, err
		}
		// The stream is not corrupted on this end;
		// now fill in the last 40 bytes of sha1 hex
		// so that the server can verify the stream on
		// its end.
		err = nil
		nb.isEOF = true
		nb.buf = strings.NewReader(fmt.Sprintf("%x", nb.sha1Hash.Sum(nil)))
	}
	return n, err
}
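
// Minimal usage sketch (illustrative, not part of the gateway API): for a
// payload of N bytes wrapped in a HashReader, reading the B2Reader to
// completion yields N+40 bytes, the last 40 being the lowercase sha1 hex:
//
//	hr := newB2Reader(data, data.Size()) // data is a *HashReader
//	buf, err := ioutil.ReadAll(hr)       // len(buf) == data.Size()+40 when err == nil
//	payload, sum := buf[:len(buf)-40], buf[len(buf)-40:]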

// PutObject uploads a single object to the B2 backend using B2's
// b2_upload_file API; supports uploads of up to 5GiB.
func (l *b2Objects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (ObjectInfo, error) {
	var objInfo ObjectInfo
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return objInfo, err
	}
	contentType := metadata["content-type"]
	delete(metadata, "content-type")
	delete(metadata, "etag")

	var u *b2.URL
	u, err = bkt.GetUploadURL(l.ctx)
	if err != nil {
		return objInfo, b2ToObjectError(traceError(err), bucket, object)
	}

	hr := newB2Reader(data, data.Size())
	var f *b2.File
	f, err = u.UploadFile(l.ctx, hr, int(hr.Size()), object, contentType, sha1AtEOF, metadata)
	if err != nil {
		return objInfo, b2ToObjectError(traceError(err), bucket, object)
	}

	var fi *b2.FileInfo
	fi, err = f.GetFileInfo(l.ctx)
	if err != nil {
		return objInfo, b2ToObjectError(traceError(err), bucket, object)
	}

	return ObjectInfo{
		Bucket:      bucket,
		Name:        object,
		ETag:        fi.ID,
		Size:        fi.Size,
		ModTime:     fi.Timestamp,
		ContentType: fi.ContentType,
		UserDefined: fi.Info,
	}, nil
}

// CopyObject copies a blob from a source container to a destination container.
func (l *b2Objects) CopyObject(srcBucket string, srcObject string, dstBucket string,
	dstObject string, metadata map[string]string) (objInfo ObjectInfo, err error) {
	return objInfo, traceError(NotImplemented{})
}

// DeleteObject deletes a blob in a bucket.
func (l *b2Objects) DeleteObject(bucket string, object string) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	// B2 deletes by file ID; read the first byte of the object to
	// discover its ID before deleting the file version.
	reader, err := bkt.DownloadFileByName(l.ctx, object, 0, 1)
	if err != nil {
		return b2ToObjectError(traceError(err), bucket, object)
	}
	io.Copy(ioutil.Discard, reader)
	reader.Close()
	err = bkt.File(reader.ID, object).DeleteFileVersion(l.ctx)
	return b2ToObjectError(traceError(err), bucket, object)
}

// ListMultipartUploads lists all multipart uploads.
func (l *b2Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string,
	delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
	// keyMarker, prefix and delimiter are ignored; Backblaze B2 does not
	// support any of these parameters. The only equivalent parameter is
	// uploadIDMarker.
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return lmi, err
	}
	// The maximum number of files to return from this call.
	// The default value is 100, and the maximum allowed is 100.
	if maxUploads > 100 {
		maxUploads = 100
	}
	largeFiles, nextMarker, err := bkt.ListUnfinishedLargeFiles(l.ctx, uploadIDMarker, maxUploads)
	if err != nil {
		return lmi, b2ToObjectError(traceError(err), bucket)
	}
	lmi = ListMultipartsInfo{
		MaxUploads: maxUploads,
	}
	if nextMarker != "" {
		lmi.IsTruncated = true
		lmi.NextUploadIDMarker = nextMarker
	}
	for _, largeFile := range largeFiles {
		lmi.Uploads = append(lmi.Uploads, uploadMetadata{
			Object:    largeFile.Name,
			UploadID:  largeFile.ID,
			Initiated: largeFile.Timestamp,
		})
	}
	return lmi, nil
}

// NewMultipartUpload uploads an object in multiple parts using B2's large file upload API.
// Large files can range in size from 5MB to 10TB.
// Each large file must consist of at least 2 parts, and all of the parts except the
// last one must be at least 5MB in size. The last part must contain at least one byte.
// For more information - https://www.backblaze.com/b2/docs/large_files.html
func (l *b2Objects) NewMultipartUpload(bucket string, object string, metadata map[string]string) (string, error) {
	var uploadID string
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return uploadID, err
	}

	contentType := metadata["content-type"]
	delete(metadata, "content-type")
	lf, err := bkt.StartLargeFile(l.ctx, object, contentType, metadata)
	if err != nil {
		return uploadID, b2ToObjectError(traceError(err), bucket, object)
	}

	return lf.ID, nil
}
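
// Illustrative gateway-side call sequence for a multipart upload (a sketch
// using the methods defined in this file; part1/part2 are *HashReader values
// and parts is a []completePart, sized per the B2 limits described above):
//
//	uploadID, err := l.NewMultipartUpload(bucket, object, metadata)
//	pi1, err := l.PutObjectPart(bucket, object, uploadID, 1, part1) // every part except the last: >= 5MB
//	pi2, err := l.PutObjectPart(bucket, object, uploadID, 2, part2) // last part: at least one byte
//	oi, err := l.CompleteMultipartUpload(bucket, object, uploadID, parts)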

// CopyObjectPart copies a part of an object to another bucket and object.
func (l *b2Objects) CopyObjectPart(srcBucket string, srcObject string, destBucket string, destObject string,
	uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error) {
	return PartInfo{}, traceError(NotImplemented{})
}

// PutObjectPart puts a part of an object in a bucket using B2's large file upload API.
func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *HashReader) (pi PartInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return pi, err
	}

	fc, err := bkt.File(uploadID, object).CompileParts(0, nil).GetUploadPartURL(l.ctx)
	if err != nil {
		return pi, b2ToObjectError(traceError(err), bucket, object, uploadID)
	}

	hr := newB2Reader(data, data.Size())
	sha1, err := fc.UploadPart(l.ctx, hr, sha1AtEOF, int(hr.Size()), partID)
	if err != nil {
		return pi, b2ToObjectError(traceError(err), bucket, object, uploadID)
	}

	return PartInfo{
		PartNumber:   partID,
		LastModified: UTCNow(),
		ETag:         sha1,
		Size:         data.Size(),
	}, nil
}

// ListObjectParts returns all object parts for the specified object in the specified bucket, using B2's large file upload API.
func (l *b2Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return lpi, err
	}
	lpi = ListPartsInfo{
		Bucket:           bucket,
		Object:           object,
		UploadID:         uploadID,
		MaxParts:         maxParts,
		PartNumberMarker: partNumberMarker,
	}
	// startPartNumber must be in the range 1 - 10000 for B2.
	partNumberMarker++
	partsList, next, err := bkt.File(uploadID, object).ListParts(l.ctx, partNumberMarker, maxParts)
	if err != nil {
		return lpi, b2ToObjectError(traceError(err), bucket, object, uploadID)
	}
	if next != 0 {
		lpi.IsTruncated = true
		lpi.NextPartNumberMarker = next
	}
	for _, part := range partsList {
		lpi.Parts = append(lpi.Parts, PartInfo{
			PartNumber: part.Number,
			ETag:       part.SHA1,
			Size:       part.Size,
		})
	}
	return lpi, nil
}

// AbortMultipartUpload aborts an ongoing multipart upload, using B2's large file upload API.
func (l *b2Objects) AbortMultipartUpload(bucket string, object string, uploadID string) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	err = bkt.File(uploadID, object).CompileParts(0, nil).CancelLargeFile(l.ctx)
	return b2ToObjectError(traceError(err), bucket, object, uploadID)
}

// CompleteMultipartUpload completes an ongoing multipart upload and finalizes the object, using B2's large file upload API.
func (l *b2Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (oi ObjectInfo, err error) {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return oi, err
	}
	hashes := make(map[int]string)
	for i, uploadedPart := range uploadedParts {
		// B2 requires contiguous part numbers starting with 1; it does not
		// support hand-picking part numbers, so we return an S3 compatible
		// error instead.
		if i+1 != uploadedPart.PartNumber {
			return oi, b2ToObjectError(traceError(InvalidPart{}), bucket, object, uploadID)
		}
		hashes[uploadedPart.PartNumber] = uploadedPart.ETag
	}

	if _, err = bkt.File(uploadID, object).CompileParts(0, hashes).FinishLargeFile(l.ctx); err != nil {
		return oi, b2ToObjectError(traceError(err), bucket, object, uploadID)
	}

	return l.GetObjectInfo(bucket, object)
}

// SetBucketPolicies - B2 supports two types of bucket policies:
// bucketTypeReadOnly ("allPublic"): anybody can download the files in the bucket;
// bucketTypePrivate ("allPrivate"): an authorization token is needed to download them.
// The default is allPrivate for all buckets.
func (l *b2Objects) SetBucketPolicies(bucket string, policyInfo policy.BucketAccessPolicy) error {
	var policies []BucketAccessPolicy

	for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket) {
		policies = append(policies, BucketAccessPolicy{
			Prefix: prefix,
			Policy: policy,
		})
	}
	prefix := bucket + "/*" // For all objects inside the bucket.
	if len(policies) != 1 {
		return traceError(NotImplemented{})
	}
	if policies[0].Prefix != prefix {
		return traceError(NotImplemented{})
	}
	if policies[0].Policy != policy.BucketPolicyReadOnly {
		return traceError(NotImplemented{})
	}
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	bkt.Type = bucketTypeReadOnly
	_, err = bkt.Update(l.ctx)
	return b2ToObjectError(traceError(err))
}
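
// Illustrative note (a sketch of the only accepted input, per the checks
// above): SetBucketPolicies succeeds only for a policy whose statements
// reduce to a single read-only rule on "<bucket>/*", e.g. one built as:
//
//	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
//	policyInfo.Statements = policy.SetPolicy(nil, policy.BucketPolicyReadOnly, bucket, "")
//	err := l.SetBucketPolicies(bucket, policyInfo)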

// GetBucketPolicies returns the current bucketType from the B2 backend and
// converts it into an S3 compatible bucket policy info.
func (l *b2Objects) GetBucketPolicies(bucket string) (policy.BucketAccessPolicy, error) {
	policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return policyInfo, err
	}
	if bkt.Type == bucketTypeReadOnly {
		policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, policy.BucketPolicyReadOnly, bucket, "")
		return policyInfo, nil
	}
	// bkt.Type can also be "snapshot", but that is only allowed through the
	// B2 browser console; just return policy not found for all other cases.
	// CreateBucket always sets the value to allPrivate by default.
	return policy.BucketAccessPolicy{}, traceError(PolicyNotFound{Bucket: bucket})
}

// DeleteBucketPolicies resets the bucketType of the bucket on B2 to 'allPrivate'.
func (l *b2Objects) DeleteBucketPolicies(bucket string) error {
	bkt, err := l.Bucket(bucket)
	if err != nil {
		return err
	}
	bkt.Type = bucketTypePrivate
	_, err = bkt.Update(l.ctx)
	return b2ToObjectError(traceError(err))
}
@@ -0,0 +1,104 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"net/http"
	"testing"
)

// Tests headerToObjectInfo.
func TestHeaderToObjectInfo(t *testing.T) {
	testCases := []struct {
		bucket, object string
		header         http.Header
		objInfo        ObjectInfo
	}{
		{
			bucket: "bucket",
			object: "object",
			header: http.Header{
				"Content-Length":         []string{"10"},
				"Content-Type":           []string{"application/javascript"},
				"X-Bz-Upload-Timestamp":  []string{"1000"},
				"X-Bz-Info-X-Amz-Meta-1": []string{"test1"},
				"X-Bz-File-Id":           []string{"xxxxx"},
			},
			objInfo: ObjectInfo{
				Bucket:      "bucket",
				Name:        "object",
				ContentType: "application/javascript",
				Size:        10,
				UserDefined: map[string]string{
					"X-Amz-Meta-1": "test1",
				},
				ETag: "xxxxx",
			},
		},
	}
	for i, testCase := range testCases {
		gotObjInfo, err := headerToObjectInfo(testCase.bucket, testCase.object, testCase.header)
		if err != nil {
			t.Fatalf("Test %d: %s", i+1, err)
		}
		if gotObjInfo.Bucket != testCase.objInfo.Bucket {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.Bucket, gotObjInfo.Bucket)
		}
		if gotObjInfo.Name != testCase.objInfo.Name {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.Name, gotObjInfo.Name)
		}
		if gotObjInfo.ContentType != testCase.objInfo.ContentType {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.ContentType, gotObjInfo.ContentType)
		}
		if gotObjInfo.ETag != testCase.objInfo.ETag {
			t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.objInfo.ETag, gotObjInfo.ETag)
		}
	}
}

// Tests mkRange.
func TestMkRange(t *testing.T) {
	testCases := []struct {
		offset, size int64
		expectedRng  string
	}{
		// Neither offset nor size set.
		{
			offset:      0,
			size:        0,
			expectedRng: "",
		},
		// Offset set, size not set.
		{
			offset:      10,
			size:        0,
			expectedRng: "bytes=10-",
		},
		// Offset set, size set.
		{
			offset:      10,
			size:        11,
			expectedRng: "bytes=10-20",
		},
	}
	for i, testCase := range testCases {
		gotRng := mkRange(testCase.offset, testCase.size)
		if gotRng != testCase.expectedRng {
			t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedRng, gotRng)
		}
	}
}
@@ -1,27 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import "errors"

var (
	// Project ID format is not valid.
	errGCSInvalidProjectID = errors.New("GCS project id is either empty or invalid")

	// Project ID not found.
	errGCSProjectIDNotFound = errors.New("unknown project id")
)
@@ -21,6 +21,7 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -39,6 +40,14 @@ import (
 	"github.com/minio/minio-go/pkg/policy"
 )
 
+var (
+	// Project ID format is not valid.
+	errGCSInvalidProjectID = errors.New("GCS project id is either empty or invalid")
+
+	// Project ID not found.
+	errGCSProjectIDNotFound = errors.New("unknown project id")
+)
+
 const (
 	// Path where multipart objects are saved.
 	// If we change the backend format we will use a different url path like /multipart/v2
@@ -125,6 +125,31 @@ EXAMPLES:
 
 `
 
+const b2GatewayTemplate = `NAME:
+  {{.HelpName}} - {{.Usage}}
+
+USAGE:
+  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}}
+{{if .VisibleFlags}}
+FLAGS:
+  {{range .VisibleFlags}}{{.}}
+  {{end}}{{end}}
+ENVIRONMENT VARIABLES:
+  ACCESS:
+     MINIO_ACCESS_KEY: B2 account id.
+     MINIO_SECRET_KEY: B2 application key.
+
+  BROWSER:
+     MINIO_BROWSER: To disable web browser access, set this value to "off".
+
+EXAMPLES:
+  1. Start minio gateway server for B2 backend.
+      $ export MINIO_ACCESS_KEY=accountID
+      $ export MINIO_SECRET_KEY=applicationKey
+      $ {{.HelpName}}
+
+`
+
 var (
 	azureBackendCmd = cli.Command{
 		Name:               "azure",
@@ -143,6 +168,7 @@ var (
 		Flags:              append(serverFlags, globalFlags...),
 		HideHelpCommand:    true,
 	}
+
 	gcsBackendCmd = cli.Command{
 		Name:               "gcs",
 		Usage:              "Google Cloud Storage.",
@@ -152,12 +178,21 @@ var (
 		Flags:              append(serverFlags, globalFlags...),
 		HideHelpCommand:    true,
 	}
 
+	b2BackendCmd = cli.Command{
+		Name:               "b2",
+		Usage:              "Backblaze B2.",
+		Action:             b2GatewayMain,
+		CustomHelpTemplate: b2GatewayTemplate,
+		Flags:              append(serverFlags, globalFlags...),
+		HideHelpCommand:    true,
+	}
+
 	gatewayCmd = cli.Command{
 		Name:            "gateway",
 		Usage:           "Start object storage gateway.",
 		Flags:           append(serverFlags, globalFlags...),
 		HideHelpCommand: true,
-		Subcommands:     []cli.Command{azureBackendCmd, s3BackendCmd, gcsBackendCmd},
+		Subcommands:     []cli.Command{azureBackendCmd, s3BackendCmd, gcsBackendCmd, b2BackendCmd},
 	}
 )
@@ -168,6 +203,7 @@ const (
 	azureBackend gatewayBackend = "azure"
 	s3Backend    gatewayBackend = "s3"
 	gcsBackend   gatewayBackend = "gcs"
+	b2Backend    gatewayBackend = "b2"
 	// Add more backends here.
 )
@@ -177,6 +213,7 @@ const (
 // - Azure Blob Storage.
 // - AWS S3.
 // - Google Cloud Storage.
+// - Backblaze B2.
 // - Add your favorite backend here.
 func newGatewayLayer(backendType gatewayBackend, arg string) (GatewayLayer, error) {
 	switch backendType {
@@ -189,6 +226,11 @@ func newGatewayLayer(backendType gatewayBackend, arg string) (GatewayLayer, erro
 		// will be removed when gcs is ready for production use.
 		log.Println(colorYellow("\n               *** Warning: Not Ready for Production ***"))
 		return newGCSGateway(arg)
+	case b2Backend:
+		// FIXME: The following print command is temporary and
+		// will be removed when B2 is ready for production use.
+		log.Println(colorYellow("\n               *** Warning: Not Ready for Production ***"))
+		return newB2Gateway()
 	}
 
 	return nil, fmt.Errorf("Unrecognized backend type %s", backendType)
@@ -285,6 +327,17 @@ func gcsGatewayMain(ctx *cli.Context) {
 	gatewayMain(ctx, gcsBackend)
 }
 
+func b2GatewayMain(ctx *cli.Context) {
+	if ctx.Args().Present() && ctx.Args().First() == "help" {
+		cli.ShowCommandHelpAndExit(ctx, "b2", 1)
+	}
+
+	// Validate gateway arguments.
+	fatalIf(validateGatewayArguments(ctx.GlobalString("address"), ctx.Args().First()), "Invalid argument")
+
+	gatewayMain(ctx, b2Backend)
+}
+
 // Handler for 'minio gateway'.
 func gatewayMain(ctx *cli.Context, backendType gatewayBackend) {
 	// Get quiet flag from command line argument.
@@ -393,6 +446,8 @@ func gatewayMain(ctx *cli.Context, backendType gatewayBackend) {
 			mode = globalMinioModeGatewayGCS
 		case s3Backend:
 			mode = globalMinioModeGatewayS3
+		case b2Backend:
+			mode = globalMinioModeGatewayB2
 		}
 
 		// Check update mode.
@@ -26,12 +26,6 @@ func (a gatewayUnsupported) CopyObjectPart(srcBucket, srcObject, destBucket, des
 	return info, traceError(NotImplemented{})
 }
 
-// AnonPutObject creates a new object anonymously with the incoming data.
-func (a gatewayUnsupported) AnonPutObject(bucket, object string, size int64, data io.Reader,
-	metadata map[string]string, sha256sum string) (ObjectInfo, error) {
-	return ObjectInfo{}, traceError(NotImplemented{})
-}
-
 // HealBucket - Not relevant.
 func (a gatewayUnsupported) HealBucket(bucket string) error {
 	return traceError(NotImplemented{})
@@ -57,3 +51,26 @@ func (a gatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMark
 	delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
 	return lmi, traceError(NotImplemented{})
 }
+
+// AnonListObjects - List objects anonymously.
+func (a gatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string,
+	maxKeys int) (loi ListObjectsInfo, err error) {
+	return loi, traceError(NotImplemented{})
+}
+
+// AnonListObjectsV2 - List objects in V2 mode, anonymously.
+func (a gatewayUnsupported) AnonListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int,
+	fetchOwner bool, startAfter string) (loi ListObjectsV2Info, err error) {
+	return loi, traceError(NotImplemented{})
+}
+
+// AnonGetBucketInfo - Get bucket metadata anonymously.
+func (a gatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err error) {
+	return bi, traceError(NotImplemented{})
+}
+
+// AnonPutObject creates a new object anonymously with the incoming data.
+func (a gatewayUnsupported) AnonPutObject(bucket, object string, size int64, data io.Reader,
+	metadata map[string]string, sha256sum string) (ObjectInfo, error) {
+	return ObjectInfo{}, traceError(NotImplemented{})
+}
@@ -53,6 +53,7 @@ const (
 	globalMinioModeGatewayAzure    = "mode-gateway-azure"
 	globalMinioModeGatewayS3       = "mode-gateway-s3"
 	globalMinioModeGatewayGCS      = "mode-gateway-gcs"
+	globalMinioModeGatewayB2       = "mode-gateway-b2"
 
 	// globalMinioSysTmp prefix is used in the Azure/GCS gateway for saving metadata sent by the Initialize Multipart Upload API.
 	globalMinioSysTmp = "minio.sys.tmp/"
@@ -2,8 +2,8 @@
 Minio Gateway adds Amazon S3 compatibility to third party cloud storage providers.
 - [Microsoft Azure Blob Storage](https://github.com/minio/minio/blob/master/docs/gateway/azure.md)
 - [Google Cloud Storage](https://github.com/minio/minio/blob/master/docs/gateway/gcs.md) _Alpha release_
+- [Backblaze B2](https://github.com/minio/minio/blob/master/docs/gateway/b2.md) _Alpha release_
 
 ## Roadmap
 * Minio & AWS S3
 * Edge Caching - Disk based proxy caching support
 
@@ -0,0 +1,48 @@
# Minio B2 Gateway [Slack](https://slack.minio.io)
Minio Gateway adds Amazon S3 compatibility to Backblaze B2 Cloud Storage.

## Run Minio Gateway for Backblaze B2 Cloud Storage
Please follow this [guide](https://www.backblaze.com/b2/docs/quick_account.html) to create an account on backblaze.com and obtain your access credentials for B2 Cloud Storage.

### Using Binary
Please download the test binary for gateway B2 support from https://data.minio.io:10000/minio-b2/linux-amd64/minio-b2

```
export MINIO_ACCESS_KEY=b2_account_id
export MINIO_SECRET_KEY=b2_application_key
minio gateway b2
```

## Test using Minio Browser
Minio Gateway comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure your server has started successfully.

## Test using Minio Client `mc`
`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services.

### Configure `mc`
```
mc config host add myb2 http://gateway-ip:9000 b2_account_id b2_application_key
```

### List buckets on Backblaze B2
```
mc ls myb2
[2017-02-22 01:50:43 PST]     0B ferenginar/
[2017-02-26 21:43:51 PST]     0B my-bucket/
[2017-02-26 22:10:11 PST]     0B test-bucket1/
```

### Known limitations
Gateway inherits the following B2 limitations:
- No support for the CopyObject S3 API (there is no equivalent API available on Backblaze B2).
- No support for the CopyObjectPart S3 API (there is no equivalent API available on Backblaze B2).
- Only the read-only bucket policy is supported at the bucket level; all other variations will return an API NotImplemented error.

Other limitations:
- Bucket notification APIs are not supported on Gateway.

## Explore Further
- [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide)
- [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio)
- [`minio-go` Go SDK](https://docs.minio.io/docs/golang-client-quickstart-guide)
@@ -0,0 +1,13 @@
Copyright 2016, Google

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
File diff suppressed because it is too large
@@ -0,0 +1,81 @@
// Copyright 2017, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package base

import (
	"bytes"
	"errors"
	"fmt"
)

func noEscape(c byte) bool {
	switch c {
	case '.', '_', '-', '/', '~', '!', '$', '\'', '(', ')', '*', ';', '=', ':', '@':
		return true
	}
	return false
}

func escape(s string) string {
	// cribbed from url.go, kinda
	b := &bytes.Buffer{}
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c == '/':
			b.WriteByte(c)
		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9':
			b.WriteByte(c)
		case noEscape(c):
			b.WriteByte(c)
		default:
			fmt.Fprintf(b, "%%%X", c)
		}
	}
	return b.String()
}

func unescape(s string) (string, error) {
	b := &bytes.Buffer{}
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch c {
		case '/':
			b.WriteString("/")
		case '+':
			b.WriteString(" ")
		case '%':
			if len(s)-i < 3 {
				return "", errors.New("unescape: bad encoding")
			}
			b.WriteByte(unhex(s[i+1])<<4 | unhex(s[i+2]))
			i += 2
		default:
			b.WriteByte(c)
		}
	}
	return b.String(), nil
}

func unhex(c byte) byte {
	switch {
	case '0' <= c && c <= '9':
		return c - '0'
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10
	}
	return 0
}
@@ -0,0 +1,255 @@
// Copyright 2016, Google
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package b2types implements internal types common to the B2 API.
package b2types

// You know what would be amazing?  If I could autogen this from like a JSON
// file.  Wouldn't that be amazing?  That would be amazing.

const (
	V1api = "/b2api/v1/"
)

type ErrorMessage struct {
	Status int    `json:"status"`
	Code   string `json:"code"`
	Msg    string `json:"message"`
}

type AuthorizeAccountResponse struct {
	AccountID   string `json:"accountId"`
	AuthToken   string `json:"authorizationToken"`
	URI         string `json:"apiUrl"`
	DownloadURI string `json:"downloadUrl"`
	MinPartSize int    `json:"minimumPartSize"`
}

type LifecycleRule struct {
	DaysHiddenUntilDeleted int    `json:"daysFromHidingToDeleting,omitempty"`
	DaysNewUntilHidden     int    `json:"daysFromUploadingToHiding,omitempty"`
	Prefix                 string `json:"fileNamePrefix"`
}

type CreateBucketRequest struct {
	AccountID      string            `json:"accountId"`
	Name           string            `json:"bucketName"`
	Type           string            `json:"bucketType"`
	Info           map[string]string `json:"bucketInfo"`
	LifecycleRules []LifecycleRule   `json:"lifecycleRules"`
}

type CreateBucketResponse struct {
	BucketID       string            `json:"bucketId"`
	Name           string            `json:"bucketName"`
	Type           string            `json:"bucketType"`
	Info           map[string]string `json:"bucketInfo"`
	LifecycleRules []LifecycleRule   `json:"lifecycleRules"`
	Revision       int               `json:"revision"`
}

type DeleteBucketRequest struct {
	AccountID string `json:"accountId"`
	BucketID  string `json:"bucketId"`
}

type ListBucketsRequest struct {
	AccountID string `json:"accountId"`
}

type ListBucketsResponse struct {
	Buckets []CreateBucketResponse `json:"buckets"`
}

type UpdateBucketRequest struct {
	AccountID string `json:"accountId"`
	BucketID  string `json:"bucketId"`
	// bucketName is a required field according to
	// https://www.backblaze.com/b2/docs/b2_update_bucket.html.
	//
	// However, actually setting it returns 400: unknown field in
	// com.backblaze.modules.b2.data.UpdateBucketRequest: bucketName
	//
	//Name           string            `json:"bucketName"`
	Type           string            `json:"bucketType,omitempty"`
	Info           map[string]string `json:"bucketInfo,omitempty"`
	LifecycleRules []LifecycleRule   `json:"lifecycleRules,omitempty"`
	IfRevisionIs   int               `json:"ifRevisionIs,omitempty"`
}

type UpdateBucketResponse CreateBucketResponse

type GetUploadURLRequest struct {
	BucketID string `json:"bucketId"`
}

type GetUploadURLResponse struct {
	URI   string `json:"uploadUrl"`
	Token string `json:"authorizationToken"`
}

type UploadFileResponse struct {
	FileID    string `json:"fileId"`
	Timestamp int64  `json:"uploadTimestamp"`
	Action    string `json:"action"`
}

type DeleteFileVersionRequest struct {
	Name   string `json:"fileName"`
	FileID string `json:"fileId"`
}

type StartLargeFileRequest struct {
	BucketID    string            `json:"bucketId"`
	Name        string            `json:"fileName"`
	ContentType string            `json:"contentType"`
	Info        map[string]string `json:"fileInfo,omitempty"`
}

type StartLargeFileResponse struct {
	ID string `json:"fileId"`
}

type CancelLargeFileRequest struct {
	ID string `json:"fileId"`
}

type ListUnfinishedLargeFilesRequest struct {
	BucketID     string `json:"bucketId"`
	Continuation string `json:"startFileId,omitempty"`
	Count        int    `json:"maxFileCount,omitempty"`
}

type ListUnfinishedLargeFilesResponse struct {
	NextID string `json:"nextFileId"`
	Files  []struct {
		AccountID   string            `json:"accountId"`
		BucketID    string            `json:"bucketId"`
		Name        string            `json:"fileName"`
		ID          string            `json:"fileId"`
		Timestamp   int64             `json:"uploadTimestamp"`
		ContentType string            `json:"contentType"`
| 		Info        map[string]string `json:"fileInfo,omitempty"` | ||||
| 	} `json:"files"` | ||||
| } | ||||
| 
 | ||||
| type ListPartsRequest struct { | ||||
| 	ID    string `json:"fileId"` | ||||
| 	Start int    `json:"startPartNumber"` | ||||
| 	Count int    `json:"maxPartCount"` | ||||
| } | ||||
| 
 | ||||
| type ListPartsResponse struct { | ||||
| 	Next  int `json:"nextPartNumber"` | ||||
| 	Parts []struct { | ||||
| 		ID     string `json:"fileId"` | ||||
| 		Number int    `json:"partNumber"` | ||||
| 		SHA1   string `json:"contentSha1"` | ||||
| 		Size   int64  `json:"contentLength"` | ||||
| 	} `json:"parts"` | ||||
| } | ||||
| 
 | ||||
| type getUploadPartURLRequest struct { | ||||
| 	ID string `json:"fileId"` | ||||
| } | ||||
| 
 | ||||
| type getUploadPartURLResponse struct { | ||||
| 	URL   string `json:"uploadUrl"` | ||||
| 	Token string `json:"authorizationToken"` | ||||
| } | ||||
| 
 | ||||
| type UploadPartResponse struct { | ||||
| 	ID         string `json:"fileId"` | ||||
| 	PartNumber int    `json:"partNumber"` | ||||
| 	Size       int64  `json:"contentLength"` | ||||
| 	SHA1       string `json:"contentSha1"` | ||||
| } | ||||
| 
 | ||||
| type FinishLargeFileRequest struct { | ||||
| 	ID     string   `json:"fileId"` | ||||
| 	Hashes []string `json:"partSha1Array"` | ||||
| } | ||||
| 
 | ||||
| type FinishLargeFileResponse struct { | ||||
| 	Name      string `json:"fileName"` | ||||
| 	FileID    string `json:"fileId"` | ||||
| 	Timestamp int64  `json:"uploadTimestamp"` | ||||
| 	Action    string `json:"action"` | ||||
| } | ||||
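
The request/response pairs above track B2's large-file call sequence: b2_start_large_file, per-part uploads, then b2_finish_large_file. A hedged sketch of the flow; sendJSON is a hypothetical helper that POSTs its request as JSON to the named endpoint and decodes the response into the last argument:

// uploadLarge sketches the large-file sequence using the types above.
func uploadLarge(bucketID, name string, partSHA1s []string) error {
	var start StartLargeFileResponse
	err := sendJSON("b2_start_large_file", StartLargeFileRequest{
		BucketID:    bucketID,
		Name:        name,
		ContentType: "application/octet-stream",
	}, &start)
	if err != nil {
		return err
	}
	// ... each part is uploaded via b2_get_upload_part_url and
	// b2_upload_part, collecting one SHA1 per part ...
	var fin FinishLargeFileResponse
	return sendJSON("b2_finish_large_file", FinishLargeFileRequest{
		ID:     start.ID,
		Hashes: partSHA1s, // SHA1s in part order, as the API requires
	}, &fin)
}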
| 
 | ||||
| type ListFileNamesRequest struct { | ||||
| 	BucketID     string `json:"bucketId"` | ||||
| 	Count        int    `json:"maxFileCount"` | ||||
| 	Continuation string `json:"startFileName,omitempty"` | ||||
| 	Prefix       string `json:"prefix,omitempty"` | ||||
| 	Delimiter    string `json:"delimiter,omitempty"` | ||||
| } | ||||
| 
 | ||||
| type ListFileNamesResponse struct { | ||||
| 	Continuation string                `json:"nextFileName"` | ||||
| 	Files        []GetFileInfoResponse `json:"files"` | ||||
| } | ||||
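
Listing is paginated: the nextFileName from one response is fed back as startFileName until it comes back empty. A minimal sketch, reusing the hypothetical sendJSON helper from above:

// listAllNames drains a bucket's file names page by page.
func listAllNames(bucketID string) ([]string, error) {
	var names []string
	cont := ""
	for {
		var resp ListFileNamesResponse
		err := sendJSON("b2_list_file_names", ListFileNamesRequest{
			BucketID:     bucketID,
			Count:        1000,
			Continuation: cont,
		}, &resp)
		if err != nil {
			return nil, err
		}
		for _, f := range resp.Files {
			names = append(names, f.Name)
		}
		if resp.Continuation == "" {
			return names, nil
		}
		cont = resp.Continuation
	}
}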
| 
 | ||||
| type ListFileVersionsRequest struct { | ||||
| 	BucketID  string `json:"bucketId"` | ||||
| 	Count     int    `json:"maxFileCount"` | ||||
| 	StartName string `json:"startFileName,omitempty"` | ||||
| 	StartID   string `json:"startFileId,omitempty"` | ||||
| 	Prefix    string `json:"prefix,omitempty"` | ||||
| 	Delimiter string `json:"delimiter,omitempty"` | ||||
| } | ||||
| 
 | ||||
| type ListFileVersionsResponse struct { | ||||
| 	NextName string                `json:"nextFileName"` | ||||
| 	NextID   string                `json:"nextFileId"` | ||||
| 	Files    []GetFileInfoResponse `json:"files"` | ||||
| } | ||||
| 
 | ||||
| type HideFileRequest struct { | ||||
| 	BucketID string `json:"bucketId"` | ||||
| 	File     string `json:"fileName"` | ||||
| } | ||||
| 
 | ||||
| type HideFileResponse struct { | ||||
| 	ID        string `json:"fileId"` | ||||
| 	Timestamp int64  `json:"uploadTimestamp"` | ||||
| 	Action    string `json:"action"` | ||||
| } | ||||
| 
 | ||||
| type GetFileInfoRequest struct { | ||||
| 	ID string `json:"fileId"` | ||||
| } | ||||
| 
 | ||||
| type GetFileInfoResponse struct { | ||||
| 	FileID      string            `json:"fileId"` | ||||
| 	Name        string            `json:"fileName"` | ||||
| 	SHA1        string            `json:"contentSha1"` | ||||
| 	Size        int64             `json:"contentLength"` | ||||
| 	ContentType string            `json:"contentType"` | ||||
| 	Info        map[string]string `json:"fileInfo"` | ||||
| 	Action      string            `json:"action"` | ||||
| 	Timestamp   int64             `json:"uploadTimestamp"` | ||||
| } | ||||
| 
 | ||||
| type GetDownloadAuthorizationRequest struct { | ||||
| 	BucketID string `json:"bucketId"` | ||||
| 	Prefix   string `json:"fileNamePrefix"` | ||||
| 	Valid    int    `json:"validDurationInSeconds"` | ||||
| } | ||||
| 
 | ||||
| type GetDownloadAuthorizationResponse struct { | ||||
| 	BucketID string `json:"bucketId"` | ||||
| 	Prefix   string `json:"fileNamePrefix"` | ||||
| 	Token    string `json:"authorizationToken"` | ||||
| } | ||||
|  | @ -0,0 +1,54 @@ | |||
| // Copyright 2017, Google
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| // Package blog implements a private logger, in the manner of glog, without
 | ||||
| // polluting the flag namespace or leaving files all over /tmp.
 | ||||
| //
 | ||||
| // It has almost no features, and a bunch of global state.
 | ||||
| package blog | ||||
| 
 | ||||
| import ( | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"strconv" | ||||
| ) | ||||
| 
 | ||||
| var level int32 | ||||
| 
 | ||||
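| // Verbose gates log output; its methods print only when it is true.
 | ||||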
| type Verbose bool | ||||
| 
 | ||||
| func init() { | ||||
| 	lvl := os.Getenv("B2_LOG_LEVEL") | ||||
| 	i, err := strconv.ParseInt(lvl, 10, 32) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	level = int32(i) | ||||
| } | ||||
| 
 | ||||
| func (v Verbose) Info(a ...interface{}) { | ||||
| 	if v { | ||||
| 		log.Print(a...) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (v Verbose) Infof(format string, a ...interface{}) { | ||||
| 	if v { | ||||
| 		log.Printf(format, a...) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
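| // V returns a Verbose that is true, and therefore prints, when target is
 | ||||
| // at or below the level read from B2_LOG_LEVEL.
 | ||||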
| func V(target int32) Verbose { | ||||
| 	return Verbose(target <= level) | ||||
| } | ||||
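
In use, call sites gate their output on a verbosity threshold, with the level read once from B2_LOG_LEVEL at init. A minimal sketch (the vendored import path is not shown here, so the blog qualifier is an assumption):

func doWork() {
	blog.V(1).Info("starting work")          // printed when B2_LOG_LEVEL >= 1
	blog.V(3).Infof("processed %d items", 7) // printed when B2_LOG_LEVEL >= 3
}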
|  | @ -1,351 +0,0 @@ | |||
| // Copyright 2015 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| package gen | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/gob" | ||||
| 	"fmt" | ||||
| 	"hash" | ||||
| 	"hash/fnv" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
| 
 | ||||
| // This file contains utilities for generating code.
 | ||||
| 
 | ||||
| // TODO: other write methods like:
 | ||||
| // - slices, maps, types, etc.
 | ||||
| 
 | ||||
| // CodeWriter is a utility for writing structured code. It computes the content
 | ||||
| // hash and size of written content. It ensures there are newlines between
 | ||||
| // written code blocks.
 | ||||
| type CodeWriter struct { | ||||
| 	buf  bytes.Buffer | ||||
| 	Size int | ||||
| 	Hash hash.Hash32 // content hash
 | ||||
| 	gob  *gob.Encoder | ||||
| 	// For comments we skip the usual one-line separator if they are followed by
 | ||||
| 	// a code block.
 | ||||
| 	skipSep bool | ||||
| } | ||||
| 
 | ||||
| func (w *CodeWriter) Write(p []byte) (n int, err error) { | ||||
| 	return w.buf.Write(p) | ||||
| } | ||||
| 
 | ||||
| // NewCodeWriter returns a new CodeWriter.
 | ||||
| func NewCodeWriter() *CodeWriter { | ||||
| 	h := fnv.New32() | ||||
| 	return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} | ||||
| } | ||||
| 
 | ||||
| // WriteGoFile appends the total size of all created structures to the buffer
 | ||||
| // and writes it as a Go file to the given file with the given package name.
 | ||||
| func (w *CodeWriter) WriteGoFile(filename, pkg string) { | ||||
| 	f, err := os.Create(filename) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("Could not create file %s: %v", filename, err) | ||||
| 	} | ||||
| 	defer f.Close() | ||||
| 	if _, err = w.WriteGo(f, pkg); err != nil { | ||||
| 		log.Fatalf("Error writing file %s: %v", filename, err) | ||||
| 	} | ||||
| } | ||||
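
A typical generator accumulates comments, constants, and tables on one CodeWriter and flushes them with a single WriteGoFile call. A minimal sketch, assuming the caller imports this package as gen; the names written out are invented:

func writeTables() {
	w := gen.NewCodeWriter()
	w.WriteComment("Tables for the hypothetical foo package.")
	w.WriteConst("fooVersion", "1.0")
	w.WriteVar("fooTable", []uint16{1, 2, 3})
	// Appends the size/checksum trailer, then writes the gofmt-ed result.
	w.WriteGoFile("tables.go", "foo")
}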
| 
 | ||||
| // WriteGo appends the total size of all created structures to the buffer and
 | ||||
| // writes it as a Go file to the given writer with the given package name.
 | ||||
| func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) { | ||||
| 	sz := w.Size | ||||
| 	w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) | ||||
| 	defer w.buf.Reset() | ||||
| 	return WriteGo(out, pkg, w.buf.Bytes()) | ||||
| } | ||||
| 
 | ||||
| func (w *CodeWriter) printf(f string, x ...interface{}) { | ||||
| 	fmt.Fprintf(w, f, x...) | ||||
| } | ||||
| 
 | ||||
| func (w *CodeWriter) insertSep() { | ||||
| 	if w.skipSep { | ||||
| 		w.skipSep = false | ||||
| 		return | ||||
| 	} | ||||
| 	// Use at least two newlines to ensure a blank line after the previous
 | ||||
| 	// block. WriteGoFile will remove extraneous newlines.
 | ||||
| 	w.printf("\n\n") | ||||
| } | ||||
| 
 | ||||
| // WriteComment writes a comment block. All line starts are prefixed with "//".
 | ||||
| // Initial empty lines are gobbled. The indentation for the first line is
 | ||||
| // stripped from consecutive lines.
 | ||||
| func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { | ||||
| 	s := fmt.Sprintf(comment, args...) | ||||
| 	s = strings.Trim(s, "\n") | ||||
| 
 | ||||
| 	// Use at least two newlines to ensure a blank line after the previous
 | ||||
| 	// block. WriteGoFile will remove extraneous newlines.
 | ||||
| 	w.printf("\n\n// ") | ||||
| 	w.skipSep = true | ||||
| 
 | ||||
| 	// strip first indent level.
 | ||||
| 	sep := "\n" | ||||
| 	for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { | ||||
| 		sep += s[:1] | ||||
| 	} | ||||
| 
 | ||||
| 	strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) | ||||
| 
 | ||||
| 	w.printf("\n") | ||||
| } | ||||
| 
 | ||||
| func (w *CodeWriter) writeSizeInfo(size int) { | ||||
| 	w.printf("// Size: %d bytes\n", size) | ||||
| } | ||||
| 
 | ||||
| // WriteConst writes a constant of the given name and value.
 | ||||
| func (w *CodeWriter) WriteConst(name string, x interface{}) { | ||||
| 	w.insertSep() | ||||
| 	v := reflect.ValueOf(x) | ||||
| 
 | ||||
| 	switch v.Type().Kind() { | ||||
| 	case reflect.String: | ||||
| 		w.printf("const %s %s = ", name, typeName(x)) | ||||
| 		w.WriteString(v.String()) | ||||
| 		w.printf("\n") | ||||
| 	default: | ||||
| 		w.printf("const %s = %#v\n", name, x) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // WriteVar writes a variable of the given name and value.
 | ||||
| func (w *CodeWriter) WriteVar(name string, x interface{}) { | ||||
| 	w.insertSep() | ||||
| 	v := reflect.ValueOf(x) | ||||
| 	oldSize := w.Size | ||||
| 	sz := int(v.Type().Size()) | ||||
| 	w.Size += sz | ||||
| 
 | ||||
| 	switch v.Type().Kind() { | ||||
| 	case reflect.String: | ||||
| 		w.printf("var %s %s = ", name, typeName(x)) | ||||
| 		w.WriteString(v.String()) | ||||
| 	case reflect.Struct: | ||||
| 		w.gob.Encode(x) | ||||
| 		fallthrough | ||||
| 	case reflect.Slice, reflect.Array: | ||||
| 		w.printf("var %s = ", name) | ||||
| 		w.writeValue(v) | ||||
| 		w.writeSizeInfo(w.Size - oldSize) | ||||
| 	default: | ||||
| 		w.printf("var %s %s = ", name, typeName(x)) | ||||
| 		w.gob.Encode(x) | ||||
| 		w.writeValue(v) | ||||
| 		w.writeSizeInfo(w.Size - oldSize) | ||||
| 	} | ||||
| 	w.printf("\n") | ||||
| } | ||||
| 
 | ||||
| func (w *CodeWriter) writeValue(v reflect.Value) { | ||||
| 	x := v.Interface() | ||||
| 	switch v.Kind() { | ||||
| 	case reflect.String: | ||||
| 		w.WriteString(v.String()) | ||||
| 	case reflect.Array: | ||||
| 		// Don't double count: callers of WriteArray count on the size being
 | ||||
| 		// added, so we need to discount it here.
 | ||||
| 		w.Size -= int(v.Type().Size()) | ||||
| 		w.writeSlice(x, true) | ||||
| 	case reflect.Slice: | ||||
| 		w.writeSlice(x, false) | ||||
| 	case reflect.Struct: | ||||
| 		w.printf("%s{\n", typeName(v.Interface())) | ||||
| 		t := v.Type() | ||||
| 		for i := 0; i < v.NumField(); i++ { | ||||
| 			w.printf("%s: ", t.Field(i).Name) | ||||
| 			w.writeValue(v.Field(i)) | ||||
| 			w.printf(",\n") | ||||
| 		} | ||||
| 		w.printf("}") | ||||
| 	default: | ||||
| 		w.printf("%#v", x) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // WriteString writes a string literal.
 | ||||
| func (w *CodeWriter) WriteString(s string) { | ||||
| 	s = strings.Replace(s, `\`, `\\`, -1) | ||||
| 	io.WriteString(w.Hash, s) // content hash
 | ||||
| 	w.Size += len(s) | ||||
| 
 | ||||
| 	const maxInline = 40 | ||||
| 	if len(s) <= maxInline { | ||||
| 		w.printf("%q", s) | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	// We will render the string as a multi-line string.
 | ||||
| 	const maxWidth = 80 - 4 - len(`"`) - len(`" +`) | ||||
| 
 | ||||
| 	// When starting on its own line, go fmt indents line 2+ an extra level.
 | ||||
| 	n, max := maxWidth, maxWidth-4 | ||||
| 
 | ||||
| 	// As per https://golang.org/issue/18078, the compiler has trouble
 | ||||
| 	// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
 | ||||
| 	// for large N. We insert redundant, explicit parentheses to work around
 | ||||
| 	// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
 | ||||
| 	// ... + s127) + etc + (etc + ... + sN).
 | ||||
| 	explicitParens, extraComment := len(s) > 128*1024, "" | ||||
| 	if explicitParens { | ||||
| 		w.printf(`(`) | ||||
| 		extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" | ||||
| 	} | ||||
| 
 | ||||
| 	// Print "" +\n, if a string does not start on its own line.
 | ||||
| 	b := w.buf.Bytes() | ||||
| 	if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { | ||||
| 		w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) | ||||
| 		n, max = maxWidth, maxWidth | ||||
| 	} | ||||
| 
 | ||||
| 	w.printf(`"`) | ||||
| 
 | ||||
| 	for sz, p, nLines := 0, 0, 0; p < len(s); { | ||||
| 		var r rune | ||||
| 		r, sz = utf8.DecodeRuneInString(s[p:]) | ||||
| 		out := s[p : p+sz] | ||||
| 		chars := 1 | ||||
| 		if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { | ||||
| 			switch sz { | ||||
| 			case 1: | ||||
| 				out = fmt.Sprintf("\\x%02x", s[p]) | ||||
| 			case 2, 3: | ||||
| 				out = fmt.Sprintf("\\u%04x", r) | ||||
| 			case 4: | ||||
| 				out = fmt.Sprintf("\\U%08x", r) | ||||
| 			} | ||||
| 			chars = len(out) | ||||
| 		} | ||||
| 		if n -= chars; n < 0 { | ||||
| 			nLines++ | ||||
| 			if explicitParens && nLines&63 == 63 { | ||||
| 				w.printf("\") + (\"") | ||||
| 			} | ||||
| 			w.printf("\" +\n\"") | ||||
| 			n = max - len(out) | ||||
| 		} | ||||
| 		w.printf("%s", out) | ||||
| 		p += sz | ||||
| 	} | ||||
| 	w.printf(`"`) | ||||
| 	if explicitParens { | ||||
| 		w.printf(`)`) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // WriteSlice writes a slice value.
 | ||||
| func (w *CodeWriter) WriteSlice(x interface{}) { | ||||
| 	w.writeSlice(x, false) | ||||
| } | ||||
| 
 | ||||
| // WriteArray writes an array value.
 | ||||
| func (w *CodeWriter) WriteArray(x interface{}) { | ||||
| 	w.writeSlice(x, true) | ||||
| } | ||||
| 
 | ||||
| func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { | ||||
| 	v := reflect.ValueOf(x) | ||||
| 	w.gob.Encode(v.Len()) | ||||
| 	w.Size += v.Len() * int(v.Type().Elem().Size()) | ||||
| 	name := typeName(x) | ||||
| 	if isArray { | ||||
| 		name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) | ||||
| 	} | ||||
| 	if isArray { | ||||
| 		w.printf("%s{\n", name) | ||||
| 	} else { | ||||
| 		w.printf("%s{ // %d elements\n", name, v.Len()) | ||||
| 	} | ||||
| 
 | ||||
| 	switch kind := v.Type().Elem().Kind(); kind { | ||||
| 	case reflect.String: | ||||
| 		for _, s := range x.([]string) { | ||||
| 			w.WriteString(s) | ||||
| 			w.printf(",\n") | ||||
| 		} | ||||
| 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, | ||||
| 		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: | ||||
| 		// nLine and nBlock are the number of elements per line and block.
 | ||||
| 		nLine, nBlock, format := 8, 64, "%d," | ||||
| 		switch kind { | ||||
| 		case reflect.Uint8: | ||||
| 			format = "%#02x," | ||||
| 		case reflect.Uint16: | ||||
| 			format = "%#04x," | ||||
| 		case reflect.Uint32: | ||||
| 			nLine, nBlock, format = 4, 32, "%#08x," | ||||
| 		case reflect.Uint, reflect.Uint64: | ||||
| 			nLine, nBlock, format = 4, 32, "%#016x," | ||||
| 		case reflect.Int8: | ||||
| 			nLine = 16 | ||||
| 		} | ||||
| 		n := nLine | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			if i%nBlock == 0 && v.Len() > nBlock { | ||||
| 				w.printf("// Entry %X - %X\n", i, i+nBlock-1) | ||||
| 			} | ||||
| 			x := v.Index(i).Interface() | ||||
| 			w.gob.Encode(x) | ||||
| 			w.printf(format, x) | ||||
| 			if n--; n == 0 { | ||||
| 				n = nLine | ||||
| 				w.printf("\n") | ||||
| 			} | ||||
| 		} | ||||
| 		w.printf("\n") | ||||
| 	case reflect.Struct: | ||||
| 		zero := reflect.Zero(v.Type().Elem()).Interface() | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			x := v.Index(i).Interface() | ||||
| 			w.gob.EncodeValue(v) | ||||
| 			if !reflect.DeepEqual(zero, x) { | ||||
| 				line := fmt.Sprintf("%#v,\n", x) | ||||
| 				line = line[strings.IndexByte(line, '{'):] | ||||
| 				w.printf("%d: ", i) | ||||
| 				w.printf(line) | ||||
| 			} | ||||
| 		} | ||||
| 	case reflect.Array: | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			w.printf("%d: %#v,\n", i, v.Index(i).Interface()) | ||||
| 		} | ||||
| 	default: | ||||
| 		panic("gen: slice elem type not supported") | ||||
| 	} | ||||
| 	w.printf("}") | ||||
| } | ||||
| 
 | ||||
| // WriteType writes a definition of the type of the given value and returns the
 | ||||
| // type name.
 | ||||
| func (w *CodeWriter) WriteType(x interface{}) string { | ||||
| 	t := reflect.TypeOf(x) | ||||
| 	w.printf("type %s struct {\n", t.Name()) | ||||
| 	for i := 0; i < t.NumField(); i++ { | ||||
| 		w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) | ||||
| 	} | ||||
| 	w.printf("}\n") | ||||
| 	return t.Name() | ||||
| } | ||||
| 
 | ||||
| // typeName returns the name of the go type of x.
 | ||||
| func typeName(x interface{}) string { | ||||
| 	t := reflect.ValueOf(x).Type() | ||||
| 	return strings.Replace(fmt.Sprint(t), "main.", "", 1) | ||||
| } | ||||
|  | @ -1,281 +0,0 @@ | |||
| // Copyright 2015 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| // Package gen contains common code for the various code generation tools in the
 | ||||
| // text repository. Its usage ensures consistency between tools.
 | ||||
| //
 | ||||
| // This package defines command line flags that are common to most generation
 | ||||
| // tools. The flags allow for specifying specific Unicode and CLDR versions
 | ||||
| // in the public Unicode data repository (http://www.unicode.org/Public).
 | ||||
| //
 | ||||
| // A local Unicode data mirror can be set through the flag -local or the
 | ||||
| // environment variable UNICODE_DIR. The former takes precedence. The local
 | ||||
| // directory should follow the same structure as the public repository.
 | ||||
| //
 | ||||
| // IANA data can also optionally be mirrored by putting it in the iana directory
 | ||||
| // rooted at the top of the local mirror. Beware, though, that IANA data is not
 | ||||
| // versioned. So it is up to the developer to use the right version.
 | ||||
| package gen // import "golang.org/x/text/internal/gen"
 | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"go/build" | ||||
| 	"go/format" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"path" | ||||
| 	"path/filepath" | ||||
| 	"sync" | ||||
| 	"unicode" | ||||
| 
 | ||||
| 	"golang.org/x/text/unicode/cldr" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	url = flag.String("url", | ||||
| 		"http://www.unicode.org/Public", | ||||
| 		"URL of Unicode database directory") | ||||
| 	iana = flag.String("iana", | ||||
| 		"http://www.iana.org", | ||||
| 		"URL of the IANA repository") | ||||
| 	unicodeVersion = flag.String("unicode", | ||||
| 		getEnv("UNICODE_VERSION", unicode.Version), | ||||
| 		"unicode version to use") | ||||
| 	cldrVersion = flag.String("cldr", | ||||
| 		getEnv("CLDR_VERSION", cldr.Version), | ||||
| 		"cldr version to use") | ||||
| ) | ||||
| 
 | ||||
| func getEnv(name, def string) string { | ||||
| 	if v := os.Getenv(name); v != "" { | ||||
| 		return v | ||||
| 	} | ||||
| 	return def | ||||
| } | ||||
| 
 | ||||
| // Init performs common initialization for a gen command. It parses the flags
 | ||||
| // and sets up the standard logging parameters.
 | ||||
| func Init() { | ||||
| 	log.SetPrefix("") | ||||
| 	log.SetFlags(log.Lshortfile) | ||||
| 	flag.Parse() | ||||
| } | ||||
| 
 | ||||
| const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
 | ||||
| 
 | ||||
| package %s | ||||
| 
 | ||||
| ` | ||||
| 
 | ||||
| // UnicodeVersion reports the requested Unicode version.
 | ||||
| func UnicodeVersion() string { | ||||
| 	return *unicodeVersion | ||||
| } | ||||
| 
 | ||||
| // CLDRVersion reports the requested CLDR version.
 | ||||
| func CLDRVersion() string { | ||||
| 	return *cldrVersion | ||||
| } | ||||
| 
 | ||||
| // IsLocal reports whether data files are available locally.
 | ||||
| func IsLocal() bool { | ||||
| 	dir, err := localReadmeFile() | ||||
| 	if err != nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	if _, err = os.Stat(dir); err != nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // OpenUCDFile opens the requested UCD file. The file is specified relative to
 | ||||
| // the public Unicode root directory. It will call log.Fatal if there are any
 | ||||
| // errors.
 | ||||
| func OpenUCDFile(file string) io.ReadCloser { | ||||
| 	return openUnicode(path.Join(*unicodeVersion, "ucd", file)) | ||||
| } | ||||
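
Generation tools built on this package tend to share one shape: Init, open the input data, build tables, emit a Go file. A hedged sketch (the file and package names are invented):

func main() {
	gen.Init() // parses the common flags and sets up logging

	r := gen.OpenUCDFile("UnicodeData.txt")
	defer r.Close()
	// ... parse r and build the tables ...

	var buf bytes.Buffer
	gen.WriteUnicodeVersion(&buf)
	// ... emit the generated tables into buf ...
	gen.WriteGoFile("tables.go", "foo", buf.Bytes())
}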
| 
 | ||||
| // OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
 | ||||
| // are any errors.
 | ||||
| func OpenCLDRCoreZip() io.ReadCloser { | ||||
| 	return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") | ||||
| } | ||||
| 
 | ||||
| // OpenUnicodeFile opens the requested file of the requested category from the
 | ||||
| // root of the Unicode data archive. The file is specified relative to the
 | ||||
| // public Unicode root directory. If version is "", it will use the default
 | ||||
| // Unicode version. It will call log.Fatal if there are any errors.
 | ||||
| func OpenUnicodeFile(category, version, file string) io.ReadCloser { | ||||
| 	if version == "" { | ||||
| 		version = UnicodeVersion() | ||||
| 	} | ||||
| 	return openUnicode(path.Join(category, version, file)) | ||||
| } | ||||
| 
 | ||||
| // OpenIANAFile opens the requested IANA file. The file is specified relative
 | ||||
| // to the IANA root, which is typically either http://www.iana.org or the
 | ||||
| // iana directory in the local mirror. It will call log.Fatal if there are any
 | ||||
| // errors.
 | ||||
| func OpenIANAFile(path string) io.ReadCloser { | ||||
| 	return Open(*iana, "iana", path) | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	dirMutex sync.Mutex | ||||
| 	localDir string | ||||
| ) | ||||
| 
 | ||||
| const permissions = 0755 | ||||
| 
 | ||||
| func localReadmeFile() (string, error) { | ||||
| 	p, err := build.Import("golang.org/x/text", "", build.FindOnly) | ||||
| 	if err != nil { | ||||
| 		return "", fmt.Errorf("Could not locate package: %v", err) | ||||
| 	} | ||||
| 	return filepath.Join(p.Dir, "DATA", "README"), nil | ||||
| } | ||||
| 
 | ||||
| func getLocalDir() string { | ||||
| 	dirMutex.Lock() | ||||
| 	defer dirMutex.Unlock() | ||||
| 
 | ||||
| 	readme, err := localReadmeFile() | ||||
| 	if err != nil { | ||||
| 		log.Fatal(err) | ||||
| 	} | ||||
| 	dir := filepath.Dir(readme) | ||||
| 	if _, err := os.Stat(readme); err != nil { | ||||
| 		if err := os.MkdirAll(dir, permissions); err != nil { | ||||
| 			log.Fatalf("Could not create directory: %v", err) | ||||
| 		} | ||||
| 		ioutil.WriteFile(readme, []byte(readmeTxt), permissions) | ||||
| 	} | ||||
| 	return dir | ||||
| } | ||||
| 
 | ||||
| const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT. | ||||
| 
 | ||||
| This directory contains downloaded files used to generate the various tables | ||||
| in the golang.org/x/text subrepo. | ||||
| 
 | ||||
| Note that the language subtag repo (iana/assignments/language-subtag-registry) | ||||
| and all other files in the iana subdirectory are not versioned and will need | ||||
| to be periodically manually updated. The easiest way to do this is to remove | ||||
| the entire iana directory. This is mostly of concern when updating the language | ||||
| package. | ||||
| ` | ||||
| 
 | ||||
| // Open opens subdir/path if a local directory is specified and the file exists,
 | ||||
| // where subdir is a directory relative to the local root, or fetches it from
 | ||||
| // urlRoot/path otherwise. It will call log.Fatal if there are any errors.
 | ||||
| func Open(urlRoot, subdir, path string) io.ReadCloser { | ||||
| 	file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) | ||||
| 	return open(file, urlRoot, path) | ||||
| } | ||||
| 
 | ||||
| func openUnicode(path string) io.ReadCloser { | ||||
| 	file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) | ||||
| 	return open(file, *url, path) | ||||
| } | ||||
| 
 | ||||
| // TODO: automatically periodically update non-versioned files.
 | ||||
| 
 | ||||
| func open(file, urlRoot, path string) io.ReadCloser { | ||||
| 	if f, err := os.Open(file); err == nil { | ||||
| 		return f | ||||
| 	} | ||||
| 	r := get(urlRoot, path) | ||||
| 	defer r.Close() | ||||
| 	b, err := ioutil.ReadAll(r) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("Could not download file: %v", err) | ||||
| 	} | ||||
| 	os.MkdirAll(filepath.Dir(file), permissions) | ||||
| 	if err := ioutil.WriteFile(file, b, permissions); err != nil { | ||||
| 		log.Fatalf("Could not create file: %v", err) | ||||
| 	} | ||||
| 	return ioutil.NopCloser(bytes.NewReader(b)) | ||||
| } | ||||
| 
 | ||||
| func get(root, path string) io.ReadCloser { | ||||
| 	url := root + "/" + path | ||||
| 	fmt.Printf("Fetching %s...", url) | ||||
| 	defer fmt.Println(" done.") | ||||
| 	resp, err := http.Get(url) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("HTTP GET: %v", err) | ||||
| 	} | ||||
| 	if resp.StatusCode != 200 { | ||||
| 		log.Fatalf("Bad GET status for %q: %q", url, resp.Status) | ||||
| 	} | ||||
| 	return resp.Body | ||||
| } | ||||
| 
 | ||||
| // TODO: use Write*Version in all applicable packages.
 | ||||
| 
 | ||||
| // WriteUnicodeVersion writes a constant for the Unicode version from which the
 | ||||
| // tables are generated.
 | ||||
| func WriteUnicodeVersion(w io.Writer) { | ||||
| 	fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") | ||||
| 	fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) | ||||
| } | ||||
| 
 | ||||
| // WriteCLDRVersion writes a constant for the CLDR version from which the
 | ||||
| // tables are generated.
 | ||||
| func WriteCLDRVersion(w io.Writer) { | ||||
| 	fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") | ||||
| 	fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) | ||||
| } | ||||
| 
 | ||||
| // WriteGoFile prepends a standard file comment and package statement to the
 | ||||
| // given bytes, applies gofmt, and writes them to a file with the given name.
 | ||||
| // It will call log.Fatal if there are any errors.
 | ||||
| func WriteGoFile(filename, pkg string, b []byte) { | ||||
| 	w, err := os.Create(filename) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("Could not create file %s: %v", filename, err) | ||||
| 	} | ||||
| 	defer w.Close() | ||||
| 	if _, err = WriteGo(w, pkg, b); err != nil { | ||||
| 		log.Fatalf("Error writing file %s: %v", filename, err) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // WriteGo prepends a standard file comment and package statement to the given
 | ||||
| // bytes, applies gofmt, and writes them to w.
 | ||||
| func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { | ||||
| 	src := []byte(fmt.Sprintf(header, pkg)) | ||||
| 	src = append(src, b...) | ||||
| 	formatted, err := format.Source(src) | ||||
| 	if err != nil { | ||||
| 		// Print the generated code even in case of an error so that the
 | ||||
| 		// returned error can be meaningfully interpreted.
 | ||||
| 		n, _ = w.Write(src) | ||||
| 		return n, err | ||||
| 	} | ||||
| 	return w.Write(formatted) | ||||
| } | ||||
| 
 | ||||
| // Repackage rewrites a Go file from belonging to package main to belonging to
 | ||||
| // the given package.
 | ||||
| func Repackage(inFile, outFile, pkg string) { | ||||
| 	src, err := ioutil.ReadFile(inFile) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("reading %s: %v", inFile, err) | ||||
| 	} | ||||
| 	const toDelete = "package main\n\n" | ||||
| 	i := bytes.Index(src, []byte(toDelete)) | ||||
| 	if i < 0 { | ||||
| 		log.Fatalf("Could not find %q in %s.", toDelete, inFile) | ||||
| 	} | ||||
| 	w := &bytes.Buffer{} | ||||
| 	w.Write(src[i+len(toDelete):]) | ||||
| 	WriteGoFile(outFile, pkg, w.Bytes()) | ||||
| } | ||||
|  | @ -1,58 +0,0 @@ | |||
| // Copyright 2014 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| package triegen | ||||
| 
 | ||||
| // This file defines Compacter and its implementations.
 | ||||
| 
 | ||||
| import "io" | ||||
| 
 | ||||
| // A Compacter generates an alternative, more space-efficient way to store a
 | ||||
| // trie value block. A trie value block holds all possible values for the last
 | ||||
| // byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block
 | ||||
| // always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0).
 | ||||
| type Compacter interface { | ||||
| 	// Size returns the size the given block would occupy when encoded by this
 | ||||
| 	// Compacter, and whether the Compacter can encode it. len(v) is always 64.
 | ||||
| 	Size(v []uint64) (sz int, ok bool) | ||||
| 
 | ||||
| 	// Store stores the block using the Compacter's compression method.
 | ||||
| 	// It returns a handle with which the block can be retrieved.
 | ||||
| 	// len(v) is always 64.
 | ||||
| 	Store(v []uint64) uint32 | ||||
| 
 | ||||
| 	// Print writes the data structures associated with the given store to w.
 | ||||
| 	Print(w io.Writer) error | ||||
| 
 | ||||
| 	// Handler returns the name of a function that gets called during trie
 | ||||
| 	// lookup for blocks generated by the Compacter. The function should be of
 | ||||
| 	// the form func (n uint32, b byte) uint64, where n is the index returned by
 | ||||
| 	// the Compacter's Store method and b is the last byte of the UTF-8
 | ||||
| 	// encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the
 | ||||
| 	// block.
 | ||||
| 	Handler() string | ||||
| } | ||||
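
To make the contract concrete, here is a hypothetical Compacter that recognizes blocks whose 64 values are all identical and stores each as a single value; uniformCompacter and lookupUniform are invented for illustration:

// uniformCompacter stores all-identical value blocks as one value each.
type uniformCompacter struct {
	vals []uint64 // one entry per stored block
}

func (u *uniformCompacter) Size(v []uint64) (sz int, ok bool) {
	for _, x := range v[1:] {
		if x != v[0] {
			return 0, false
		}
	}
	return 8, true // a single uint64 per block
}

func (u *uniformCompacter) Store(v []uint64) uint32 {
	u.vals = append(u.vals, v[0])
	return uint32(len(u.vals) - 1)
}

func (u *uniformCompacter) Print(w io.Writer) error {
	// Emit both the table and the lookup function named by Handler.
	_, err := fmt.Fprintf(w, "var uniformValues = %#v\n\n"+
		"func lookupUniform(n uint32, b byte) uint64 { return uniformValues[n] }\n",
		u.vals)
	return err
}

func (u *uniformCompacter) Handler() string {
	return "lookupUniform" // the generator appends "(n, b)" at the call site
}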
| 
 | ||||
| // simpleCompacter is the default Compacter used by builder. It implements a
 | ||||
| // normal trie block.
 | ||||
| type simpleCompacter builder | ||||
| 
 | ||||
| func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { | ||||
| 	return blockSize * b.ValueSize, true | ||||
| } | ||||
| 
 | ||||
| func (b *simpleCompacter) Store(v []uint64) uint32 { | ||||
| 	h := uint32(len(b.ValueBlocks) - blockOffset) | ||||
| 	b.ValueBlocks = append(b.ValueBlocks, v) | ||||
| 	return h | ||||
| } | ||||
| 
 | ||||
| func (b *simpleCompacter) Print(io.Writer) error { | ||||
| 	// Structures are printed in print.go.
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func (b *simpleCompacter) Handler() string { | ||||
| 	panic("Handler should be special-cased for this Compacter") | ||||
| } | ||||
|  | @ -1,251 +0,0 @@ | |||
| // Copyright 2014 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| package triegen | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| 	"text/template" | ||||
| ) | ||||
| 
 | ||||
| // print writes to w all the data structures as well as the code necessary to
 | ||||
| // use the trie.
 | ||||
| func (b *builder) print(w io.Writer) error { | ||||
| 	b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize | ||||
| 	b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize | ||||
| 	b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize | ||||
| 	b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize | ||||
| 	b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize | ||||
| 
 | ||||
| 	// If we only have one root trie, all starter blocks are at position 0 and
 | ||||
| 	// we can access the arrays directly.
 | ||||
| 	if len(b.Trie) == 1 { | ||||
| 		// At this point we cannot refer to the generated tables directly.
 | ||||
| 		b.ASCIIBlock = b.Name + "Values" | ||||
| 		b.StarterBlock = b.Name + "Index" | ||||
| 	} else { | ||||
| 		// Otherwise we need to have explicit starter indexes in the trie
 | ||||
| 		// structure.
 | ||||
| 		b.ASCIIBlock = "t.ascii" | ||||
| 		b.StarterBlock = "t.utf8Start" | ||||
| 	} | ||||
| 
 | ||||
| 	b.SourceType = "[]byte" | ||||
| 	if err := lookupGen.Execute(w, b); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	b.SourceType = "string" | ||||
| 	if err := lookupGen.Execute(w, b); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := trieGen.Execute(w, b); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	for _, c := range b.Compactions { | ||||
| 		if err := c.c.Print(w); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func printValues(n int, values []uint64) string { | ||||
| 	w := &bytes.Buffer{} | ||||
| 	boff := n * blockSize | ||||
| 	fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) | ||||
| 	var newline bool | ||||
| 	for i, v := range values { | ||||
| 		if i%6 == 0 { | ||||
| 			newline = true | ||||
| 		} | ||||
| 		if v != 0 { | ||||
| 			if newline { | ||||
| 				fmt.Fprintf(w, "\n") | ||||
| 				newline = false | ||||
| 			} | ||||
| 			fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) | ||||
| 		} | ||||
| 	} | ||||
| 	return w.String() | ||||
| } | ||||
| 
 | ||||
| func printIndex(b *builder, nr int, n *node) string { | ||||
| 	w := &bytes.Buffer{} | ||||
| 	boff := nr * blockSize | ||||
| 	fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) | ||||
| 	var newline bool | ||||
| 	for i, c := range n.children { | ||||
| 		if i%8 == 0 { | ||||
| 			newline = true | ||||
| 		} | ||||
| 		if c != nil { | ||||
| 			v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) | ||||
| 			if v != 0 { | ||||
| 				if newline { | ||||
| 					fmt.Fprintf(w, "\n") | ||||
| 					newline = false | ||||
| 				} | ||||
| 				fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return w.String() | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ | ||||
| 		"printValues": printValues, | ||||
| 		"printIndex":  printIndex, | ||||
| 		"title":       strings.Title, | ||||
| 		"dec":         func(x int) int { return x - 1 }, | ||||
| 		"psize": func(n int) string { | ||||
| 			return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) | ||||
| 		}, | ||||
| 	}).Parse(trieTemplate)) | ||||
| 	lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) | ||||
| ) | ||||
| 
 | ||||
| // TODO: consider the return type of lookup. It could be uint64, even if the
 | ||||
| // internal value type is smaller. We will have to verify this with the
 | ||||
| // performance of unicode/norm, which is very sensitive to such changes.
 | ||||
| const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} | ||||
| // {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}.
 | ||||
| type {{.Name}}Trie struct { {{if $multi}} | ||||
| 	ascii []{{.ValueType}} // index for ASCII bytes
 | ||||
| 	utf8Start  []{{.IndexType}} // index for UTF-8 bytes >= 0xC0
 | ||||
| {{end}}} | ||||
| 
 | ||||
| func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} | ||||
| 	h := {{.Name}}TrieHandles[i] | ||||
| 	return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } | ||||
| } | ||||
| 
 | ||||
| type {{.Name}}TrieHandle struct { | ||||
| 	ascii, multi {{.IndexType}} | ||||
| } | ||||
| 
 | ||||
| // {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes
 | ||||
| var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ | ||||
| {{range .Trie}}	{ {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}}
 | ||||
| {{end}}}{{else}} | ||||
| 	return &{{.Name}}Trie{} | ||||
| } | ||||
| {{end}} | ||||
| // lookupValue determines the type of block n and looks up the value for b.
 | ||||
| func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { | ||||
| 	switch { {{range $i, $c := .Compactions}} | ||||
| 		{{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} | ||||
| 			n -= {{$c.Offset}}{{end}} | ||||
| 			return {{print $b.ValueType}}({{$c.Handler}}){{end}} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes
 | ||||
| // The third block is the zero block.
 | ||||
| var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { | ||||
| {{range $i, $v := .ValueBlocks}}{{printValues $i $v}} | ||||
| {{end}}} | ||||
| 
 | ||||
| // {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes
 | ||||
| // Block 0 is the zero block.
 | ||||
| var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { | ||||
| {{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} | ||||
| {{end}}} | ||||
| ` | ||||
| 
 | ||||
| // TODO: consider allowing zero-length strings after evaluating performance with
 | ||||
| // unicode/norm.
 | ||||
| const lookupTemplate = ` | ||||
| // lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and
 | ||||
| // the width in bytes of this encoding. The size will be 0 if s does not
 | ||||
| // hold enough bytes to complete the encoding. len(s) must be greater than 0.
 | ||||
| func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { | ||||
| 	c0 := s[0] | ||||
| 	switch { | ||||
| 	case c0 < 0x80: // is ASCII
 | ||||
| 		return {{.ASCIIBlock}}[c0], 1 | ||||
| 	case c0 < 0xC2: | ||||
| 		return 0, 1  // Illegal UTF-8: not a starter, not ASCII.
 | ||||
| 	case c0 < 0xE0: // 2-byte UTF-8
 | ||||
| 		if len(s) < 2 { | ||||
| 			return 0, 0 | ||||
| 		} | ||||
| 		i := {{.StarterBlock}}[c0] | ||||
| 		c1 := s[1] | ||||
| 		if c1 < 0x80 || 0xC0 <= c1 { | ||||
| 			return 0, 1 // Illegal UTF-8: not a continuation byte.
 | ||||
| 		} | ||||
| 		return t.lookupValue(uint32(i), c1), 2 | ||||
| 	case c0 < 0xF0: // 3-byte UTF-8
 | ||||
| 		if len(s) < 3 { | ||||
| 			return 0, 0 | ||||
| 		} | ||||
| 		i := {{.StarterBlock}}[c0] | ||||
| 		c1 := s[1] | ||||
| 		if c1 < 0x80 || 0xC0 <= c1 { | ||||
| 			return 0, 1 // Illegal UTF-8: not a continuation byte.
 | ||||
| 		} | ||||
| 		o := uint32(i)<<6 + uint32(c1) | ||||
| 		i = {{.Name}}Index[o] | ||||
| 		c2 := s[2] | ||||
| 		if c2 < 0x80 || 0xC0 <= c2 { | ||||
| 			return 0, 2 // Illegal UTF-8: not a continuation byte.
 | ||||
| 		} | ||||
| 		return t.lookupValue(uint32(i), c2), 3 | ||||
| 	case c0 < 0xF8: // 4-byte UTF-8
 | ||||
| 		if len(s) < 4 { | ||||
| 			return 0, 0 | ||||
| 		} | ||||
| 		i := {{.StarterBlock}}[c0] | ||||
| 		c1 := s[1] | ||||
| 		if c1 < 0x80 || 0xC0 <= c1 { | ||||
| 			return 0, 1 // Illegal UTF-8: not a continuation byte.
 | ||||
| 		} | ||||
| 		o := uint32(i)<<6 + uint32(c1) | ||||
| 		i = {{.Name}}Index[o] | ||||
| 		c2 := s[2] | ||||
| 		if c2 < 0x80 || 0xC0 <= c2 { | ||||
| 			return 0, 2 // Illegal UTF-8: not a continuation byte.
 | ||||
| 		} | ||||
| 		o = uint32(i)<<6 + uint32(c2) | ||||
| 		i = {{.Name}}Index[o] | ||||
| 		c3 := s[3] | ||||
| 		if c3 < 0x80 || 0xC0 <= c3 { | ||||
| 			return 0, 3 // Illegal UTF-8: not a continuation byte.
 | ||||
| 		} | ||||
| 		return t.lookupValue(uint32(i), c3), 4 | ||||
| 	} | ||||
| 	// Illegal rune
 | ||||
| 	return 0, 1 | ||||
| } | ||||
| 
 | ||||
| // lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s.
 | ||||
| // s must start with a full and valid UTF-8 encoded rune.
 | ||||
| func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { | ||||
| 	c0 := s[0] | ||||
| 	if c0 < 0x80 { // is ASCII
 | ||||
| 		return {{.ASCIIBlock}}[c0] | ||||
| 	} | ||||
| 	i := {{.StarterBlock}}[c0] | ||||
| 	if c0 < 0xE0 { // 2-byte UTF-8
 | ||||
| 		return t.lookupValue(uint32(i), s[1]) | ||||
| 	} | ||||
| 	i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] | ||||
| 	if c0 < 0xF0 { // 3-byte UTF-8
 | ||||
| 		return t.lookupValue(uint32(i), s[2]) | ||||
| 	} | ||||
| 	i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] | ||||
| 	if c0 < 0xF8 { // 4-byte UTF-8
 | ||||
| 		return t.lookupValue(uint32(i), s[3]) | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
| ` | ||||
|  | @ -1,494 +0,0 @@ | |||
| // Copyright 2014 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| // Package triegen implements a code generator for a trie for associating
 | ||||
| // unsigned integer values with UTF-8 encoded runes.
 | ||||
| //
 | ||||
| // Many of the go.text packages use tries for storing per-rune information.  A
 | ||||
| // trie is especially useful if many of the runes have the same value. If this
 | ||||
| // is the case, many blocks can be expected to be shared, allowing
 | ||||
| // information on many runes to be stored in little space.
 | ||||
| //
 | ||||
| // As most of the lookups are done directly on []byte slices, the tries use the
 | ||||
| // UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to
 | ||||
| // runes and contributes a little bit to better performance. It also naturally
 | ||||
| // provides a fast path for ASCII.
 | ||||
| //
 | ||||
| // Space is also an issue. There are many code points defined in Unicode and as
 | ||||
| // a result tables can get quite large. So every byte counts. The triegen
 | ||||
| // package automatically chooses the smallest integer values to represent the
 | ||||
| // tables. Compacters allow further compression of the trie by allowing for
 | ||||
| // alternative representations of individual trie blocks.
 | ||||
| //
 | ||||
| // triegen allows generating multiple tries as a single structure. This is
 | ||||
| // useful when, for example, one wants to generate tries for several languages
 | ||||
| // that have a lot of values in common. Some existing libraries for
 | ||||
| // internationalization store all per-language data as a dynamically loadable
 | ||||
| // chunk. The go.text packages are designed with the assumption that the user
 | ||||
| // typically wants to compile in support for all supported languages, in line
 | ||||
| // with the approach common to Go to create a single standalone binary. The
 | ||||
| // multi-root trie approach can give significant storage savings in this
 | ||||
| // scenario.
 | ||||
| //
 | ||||
| // triegen generates both tables and code. The code is optimized to use the
 | ||||
| // automatically chosen data types. The following code is generated for a Trie
 | ||||
| // or multiple Tries named "foo":
 | ||||
| //	- type fooTrie
 | ||||
| //		The trie type.
 | ||||
| //
 | ||||
| //	- func newFooTrie(x int) *fooTrie
 | ||||
| //		Trie constructor, where x is the index of the trie passed to Gen.
 | ||||
| //
 | ||||
| //	- func (t *fooTrie) lookup(s []byte) (v uintX, sz int)
 | ||||
| //		The lookup method, where uintX is automatically chosen.
 | ||||
| //
 | ||||
| //	- func lookupString, lookupUnsafe and lookupStringUnsafe
 | ||||
| //		Variants of the above.
 | ||||
| //
 | ||||
| //	- var fooValues and fooIndex and any tables generated by Compacters.
 | ||||
| //		The core trie data.
 | ||||
| //
 | ||||
| //	- var fooTrieHandles
 | ||||
| //		Indexes of starter blocks in case of multiple trie roots.
 | ||||
| //
 | ||||
| // It is recommended that users test the generated trie by checking the returned
 | ||||
| // value for every rune. Such exhaustive tests are possible as the number of
 | ||||
| // runes in Unicode is limited.
 | ||||
| package triegen // import "golang.org/x/text/internal/triegen"
 | ||||
| 
 | ||||
| // TODO: Arguably, the internally optimized data types would not have to be
 | ||||
| // exposed in the generated API. We could also investigate not generating the
 | ||||
| // code, but using it through a package. We would have to investigate the impact
 | ||||
| // on performance of making such change, though. For packages like unicode/norm,
 | ||||
| // small changes like this could tank performance.
 | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/binary" | ||||
| 	"fmt" | ||||
| 	"hash/crc64" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
| 
 | ||||
| // builder builds a set of tries for associating values with runes. The set of
 | ||||
| // tries can share common index and value blocks.
 | ||||
| type builder struct { | ||||
| 	Name string | ||||
| 
 | ||||
| 	// ValueType is the type of the trie values looked up.
 | ||||
| 	ValueType string | ||||
| 
 | ||||
| 	// ValueSize is the byte size of the ValueType.
 | ||||
| 	ValueSize int | ||||
| 
 | ||||
| 	// IndexType is the type of trie index values used for all UTF-8 bytes of
 | ||||
| 	// a rune except the last one.
 | ||||
| 	IndexType string | ||||
| 
 | ||||
| 	// IndexSize is the byte size of the IndexType.
 | ||||
| 	IndexSize int | ||||
| 
 | ||||
| 	// SourceType is used when generating the lookup functions. If the user
 | ||||
| 	// requests StringSupport, all lookup functions will be generated for
 | ||||
| 	// string input as well.
 | ||||
| 	SourceType string | ||||
| 
 | ||||
| 	Trie []*Trie | ||||
| 
 | ||||
| 	IndexBlocks []*node | ||||
| 	ValueBlocks [][]uint64 | ||||
| 	Compactions []compaction | ||||
| 	Checksum    uint64 | ||||
| 
 | ||||
| 	ASCIIBlock   string | ||||
| 	StarterBlock string | ||||
| 
 | ||||
| 	indexBlockIdx map[uint64]int | ||||
| 	valueBlockIdx map[uint64]nodeIndex | ||||
| 	asciiBlockIdx map[uint64]int | ||||
| 
 | ||||
| 	// Stats are used to fill out the template.
 | ||||
| 	Stats struct { | ||||
| 		NValueEntries int | ||||
| 		NValueBytes   int | ||||
| 		NIndexEntries int | ||||
| 		NIndexBytes   int | ||||
| 		NHandleBytes  int | ||||
| 	} | ||||
| 
 | ||||
| 	err error | ||||
| } | ||||
| 
 | ||||
| // A nodeIndex encodes the index of a node, which is defined by the compaction
 | ||||
| // which stores it and an index within the compaction. For internal nodes, the
 | ||||
| // compaction is always 0.
 | ||||
| type nodeIndex struct { | ||||
| 	compaction int | ||||
| 	index      int | ||||
| } | ||||
| 
 | ||||
| // compaction keeps track of stats used for the compaction.
 | ||||
| type compaction struct { | ||||
| 	c         Compacter | ||||
| 	blocks    []*node | ||||
| 	maxHandle uint32 | ||||
| 	totalSize int | ||||
| 
 | ||||
| 	// Used by template-based generator and thus exported.
 | ||||
| 	Cutoff  uint32 | ||||
| 	Offset  uint32 | ||||
| 	Handler string | ||||
| } | ||||
| 
 | ||||
| func (b *builder) setError(err error) { | ||||
| 	if b.err == nil { | ||||
| 		b.err = err | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // An Option can be passed to Gen.
 | ||||
| type Option func(b *builder) error | ||||
| 
 | ||||
| // Compact configures the trie generator to use the given Compacter.
 | ||||
| func Compact(c Compacter) Option { | ||||
| 	return func(b *builder) error { | ||||
| 		b.Compactions = append(b.Compactions, compaction{ | ||||
| 			c:       c, | ||||
| 			Handler: c.Handler() + "(n, b)"}) | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Gen writes Go code for a shared trie lookup structure to w for the given
 | ||||
| // Tries. The generated trie type will be called nameTrie. newNameTrie(x) will
 | ||||
| // return the *nameTrie for tries[x]. A value can be looked up by using one of
 | ||||
| // the various lookup methods defined on nameTrie. It returns the table size of
 | ||||
| // the generated trie.
 | ||||
| func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { | ||||
| 	// The index contains two dummy blocks, followed by the zero block. The zero
 | ||||
| 	// block is at offset 0x80, so that the offset for the zero block for
 | ||||
| 	// continuation bytes is 0.
 | ||||
| 	b := &builder{ | ||||
| 		Name:        name, | ||||
| 		Trie:        tries, | ||||
| 		IndexBlocks: []*node{{}, {}, {}}, | ||||
| 		Compactions: []compaction{{ | ||||
| 			Handler: name + "Values[n<<6+uint32(b)]", | ||||
| 		}}, | ||||
| 		// The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero
 | ||||
| 		// block.
 | ||||
| 		indexBlockIdx: map[uint64]int{0: 0}, | ||||
| 		valueBlockIdx: map[uint64]nodeIndex{0: {}}, | ||||
| 		asciiBlockIdx: map[uint64]int{}, | ||||
| 	} | ||||
| 	b.Compactions[0].c = (*simpleCompacter)(b) | ||||
| 
 | ||||
| 	for _, f := range opts { | ||||
| 		if err := f(b); err != nil { | ||||
| 			return 0, err | ||||
| 		} | ||||
| 	} | ||||
| 	b.build() | ||||
| 	if b.err != nil { | ||||
| 		return 0, b.err | ||||
| 	} | ||||
| 	if err = b.print(w); err != nil { | ||||
| 		return 0, err | ||||
| 	} | ||||
| 	return b.Size(), nil | ||||
| } | ||||
| 
 | ||||
| // A Trie represents a single root node of a trie. A builder may build several
 | ||||
| // overlapping tries at once.
 | ||||
| type Trie struct { | ||||
| 	root *node | ||||
| 
 | ||||
| 	hiddenTrie | ||||
| } | ||||
| 
 | ||||
| // hiddenTrie contains values we want to be visible to the template generator,
 | ||||
| // but hidden from the API documentation.
 | ||||
| type hiddenTrie struct { | ||||
| 	Name         string | ||||
| 	Checksum     uint64 | ||||
| 	ASCIIIndex   int | ||||
| 	StarterIndex int | ||||
| } | ||||
| 
 | ||||
| // NewTrie returns a new trie root.
 | ||||
| func NewTrie(name string) *Trie { | ||||
| 	return &Trie{ | ||||
| 		&node{ | ||||
| 			children: make([]*node, blockSize), | ||||
| 			values:   make([]uint64, utf8.RuneSelf), | ||||
| 		}, | ||||
| 		hiddenTrie{Name: name}, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // Gen is a convenience wrapper around the Gen func; it passes t as the only
 | ||||
| // trie and uses the name passed to NewTrie. It returns the size of the
 | ||||
| // generated tables.
 | ||||
| func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) { | ||||
| 	return Gen(w, t.Name, []*Trie{t}, opts...) | ||||
| } | ||||
| 
 | ||||
| // node is a node of the intermediate trie structure.
 | ||||
| type node struct { | ||||
| 	// children holds this node's children. It is always of length 64.
 | ||||
| 	// A child node may be nil.
 | ||||
| 	children []*node | ||||
| 
 | ||||
| 	// values contains the values of this node. If it is non-nil, this node is
 | ||||
| 	// either a root or leaf node:
 | ||||
| 	// For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F].
 | ||||
| 	// For leaf nodes, len(values) ==  64 and it maps the bytes in [0x80, 0xBF].
 | ||||
| 	values []uint64 | ||||
| 
 | ||||
| 	index nodeIndex | ||||
| } | ||||
| 
 | ||||
| // Insert associates value with the given rune. Insert will panic if a non-zero
 | ||||
| // value is passed for an invalid rune.
 | ||||
| func (t *Trie) Insert(r rune, value uint64) { | ||||
| 	if value == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	s := string(r) | ||||
| 	if []rune(s)[0] != r && value != 0 { | ||||
| 		// Note: The UCD tables will always assign what amounts to a zero value
 | ||||
| 		// to a surrogate. Allowing a zero value for an illegal rune allows
 | ||||
| 		// users to iterate over [0..MaxRune] without having to explicitly
 | ||||
| 		// exclude surrogates, which would be tedious.
 | ||||
| 		panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r)) | ||||
| 	} | ||||
| 	if len(s) == 1 { | ||||
| 		// It is a root node value (ASCII).
 | ||||
| 		t.root.values[s[0]] = value | ||||
| 		return | ||||
| 	} | ||||
| 
 | ||||
| 	n := t.root | ||||
| 	for ; len(s) > 1; s = s[1:] { | ||||
| 		if n.children == nil { | ||||
| 			n.children = make([]*node, blockSize) | ||||
| 		} | ||||
| 		p := s[0] % blockSize | ||||
| 		c := n.children[p] | ||||
| 		if c == nil { | ||||
| 			c = &node{} | ||||
| 			n.children[p] = c | ||||
| 		} | ||||
| 		if len(s) > 2 && c.values != nil { | ||||
| 			log.Fatalf("triegen: insert(%U): found internal node with values", r) | ||||
| 		} | ||||
| 		n = c | ||||
| 	} | ||||
| 	if n.values == nil { | ||||
| 		n.values = make([]uint64, blockSize) | ||||
| 	} | ||||
| 	if n.children != nil { | ||||
| 		log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r) | ||||
| 	} | ||||
| 	n.values[s[0]-0x80] = value | ||||
| } | ||||
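| 
 | ||||
| // A minimal usage sketch (illustrative only, not part of the original
 | ||||
| // source): build a trie, insert a few rune/value pairs, and generate the
 | ||||
| // lookup tables. The trie name and values are hypothetical.
 | ||||
| //
 | ||||
| //	t := NewTrie("example")
 | ||||
| //	t.Insert('a', 1)  // ASCII: stored in the root value block
 | ||||
| //	t.Insert('é', 2)  // multi-byte rune: stored in a leaf block
 | ||||
| //	var buf bytes.Buffer
 | ||||
| //	sz, err := t.Gen(&buf) // writes Go source for the tables to buf
 | ||||
| //	if err == nil {
 | ||||
| //		fmt.Println("table size in bytes:", sz)
 | ||||
| //	}
 | ||||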
| 
 | ||||
| // Size returns the number of bytes the generated trie will take to store. It
 | ||||
| // needs to be exported as it is used in the templates.
 | ||||
| func (b *builder) Size() int { | ||||
| 	// Index blocks.
 | ||||
| 	sz := len(b.IndexBlocks) * blockSize * b.IndexSize | ||||
| 
 | ||||
| 	// Skip the first compaction, which represents the normal value blocks, as
 | ||||
| 	// its totalSize does not account for the ASCII blocks, which are managed
 | ||||
| 	// separately.
 | ||||
| 	sz += len(b.ValueBlocks) * blockSize * b.ValueSize | ||||
| 	for _, c := range b.Compactions[1:] { | ||||
| 		sz += c.totalSize | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO: this computation does not account for the fixed overhead of using
 | ||||
| 	// a compaction, either code or data. For data, though, the typical
 | ||||
| 	// overhead is on the order of bytes (2 bytes for cases). Further,
 | ||||
| 	// the savings of using a compaction should anyway be substantial for it to
 | ||||
| 	// be worth it.
 | ||||
| 
 | ||||
| 	// For multi-root tries, we also need to account for the handles.
 | ||||
| 	if len(b.Trie) > 1 { | ||||
| 		sz += 2 * b.IndexSize * len(b.Trie) | ||||
| 	} | ||||
| 	return sz | ||||
| } | ||||
| 
 | ||||
| func (b *builder) build() { | ||||
| 	// Compute the sizes of the values.
 | ||||
| 	var vmax uint64 | ||||
| 	for _, t := range b.Trie { | ||||
| 		vmax = maxValue(t.root, vmax) | ||||
| 	} | ||||
| 	b.ValueType, b.ValueSize = getIntType(vmax) | ||||
| 
 | ||||
| 	// Compute all block allocations.
 | ||||
| 	// TODO: first compute the ASCII blocks for all tries and then the other
 | ||||
| 	// nodes. ASCII blocks are more restricted in placement, as they require two
 | ||||
| 	// blocks to be placed consecutively. Processing them first may improve
 | ||||
| 	// sharing (at least one zero block can be expected to be saved.)
 | ||||
| 	for _, t := range b.Trie { | ||||
| 		b.Checksum += b.buildTrie(t) | ||||
| 	} | ||||
| 
 | ||||
| 	// Compute the offsets for all the Compacters.
 | ||||
| 	offset := uint32(0) | ||||
| 	for i := range b.Compactions { | ||||
| 		c := &b.Compactions[i] | ||||
| 		c.Offset = offset | ||||
| 		offset += c.maxHandle + 1 | ||||
| 		c.Cutoff = offset | ||||
| 	} | ||||
| 
 | ||||
| 	// Compute the sizes of indexes.
 | ||||
| 	// TODO: different byte positions could have different sizes. So far we have
 | ||||
| 	// not found a case where this is beneficial.
 | ||||
| 	imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff) | ||||
| 	for _, ib := range b.IndexBlocks { | ||||
| 		if x := uint64(ib.index.index); x > imax { | ||||
| 			imax = x | ||||
| 		} | ||||
| 	} | ||||
| 	b.IndexType, b.IndexSize = getIntType(imax) | ||||
| } | ||||
| 
 | ||||
| func maxValue(n *node, max uint64) uint64 { | ||||
| 	if n == nil { | ||||
| 		return max | ||||
| 	} | ||||
| 	for _, c := range n.children { | ||||
| 		max = maxValue(c, max) | ||||
| 	} | ||||
| 	for _, v := range n.values { | ||||
| 		if max < v { | ||||
| 			max = v | ||||
| 		} | ||||
| 	} | ||||
| 	return max | ||||
| } | ||||
| 
 | ||||
| func getIntType(v uint64) (string, int) { | ||||
| 	switch { | ||||
| 	case v < 1<<8: | ||||
| 		return "uint8", 1 | ||||
| 	case v < 1<<16: | ||||
| 		return "uint16", 2 | ||||
| 	case v < 1<<32: | ||||
| 		return "uint32", 4 | ||||
| 	} | ||||
| 	return "uint64", 8 | ||||
| } | ||||
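| 
 | ||||
| // For example (illustrative): getIntType(300) returns ("uint16", 2), as 300
 | ||||
| // needs more than 8 but at most 16 bits.
 | ||||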
| 
 | ||||
| const ( | ||||
| 	blockSize = 64 | ||||
| 
 | ||||
| 	// Subtract two blocks to offset 0x80, the first continuation byte.
 | ||||
| 	blockOffset = 2 | ||||
| 
 | ||||
| 	// Subtract three blocks to offset 0xC0, the first non-ASCII starter.
 | ||||
| 	rootBlockOffset = 3 | ||||
| ) | ||||
| 
 | ||||
| var crcTable = crc64.MakeTable(crc64.ISO) | ||||
| 
 | ||||
| func (b *builder) buildTrie(t *Trie) uint64 { | ||||
| 	n := t.root | ||||
| 
 | ||||
| 	// Get the ASCII offset. For the first trie, the ASCII block will be at
 | ||||
| 	// position 0.
 | ||||
| 	hasher := crc64.New(crcTable) | ||||
| 	binary.Write(hasher, binary.BigEndian, n.values) | ||||
| 	hash := hasher.Sum64() | ||||
| 
 | ||||
| 	v, ok := b.asciiBlockIdx[hash] | ||||
| 	if !ok { | ||||
| 		v = len(b.ValueBlocks) | ||||
| 		b.asciiBlockIdx[hash] = v | ||||
| 
 | ||||
| 		b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:]) | ||||
| 		if v == 0 { | ||||
| 			// Add the zero block at position 2 so that it will be assigned a
 | ||||
| 			// zero reference in the lookup blocks.
 | ||||
| 			// TODO: always do this? This would allow us to remove a check from
 | ||||
| 			// the trie lookup, but at the expense of extra space. Analyze
 | ||||
| 			// performance for unicode/norm.
 | ||||
| 			b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize)) | ||||
| 		} | ||||
| 	} | ||||
| 	t.ASCIIIndex = v | ||||
| 
 | ||||
| 	// Compute remaining offsets.
 | ||||
| 	t.Checksum = b.computeOffsets(n, true) | ||||
| 	// We already subtracted the normal blockOffset from the index. Subtract the
 | ||||
| 	// difference for starter bytes.
 | ||||
| 	t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset) | ||||
| 	return t.Checksum | ||||
| } | ||||
| 
 | ||||
| func (b *builder) computeOffsets(n *node, root bool) uint64 { | ||||
| 	// For the first trie, the root lookup block will be at position 3, which is
 | ||||
| 	// the offset for UTF-8 non-ASCII starter bytes.
 | ||||
| 	first := len(b.IndexBlocks) == rootBlockOffset | ||||
| 	if first { | ||||
| 		b.IndexBlocks = append(b.IndexBlocks, n) | ||||
| 	} | ||||
| 
 | ||||
| 	// We special-case the cases where all values recursively are 0. This allows
 | ||||
| 	// for the use of a zero block to which all such values can be directed.
 | ||||
| 	hash := uint64(0) | ||||
| 	if n.children != nil || n.values != nil { | ||||
| 		hasher := crc64.New(crcTable) | ||||
| 		for _, c := range n.children { | ||||
| 			var v uint64 | ||||
| 			if c != nil { | ||||
| 				v = b.computeOffsets(c, false) | ||||
| 			} | ||||
| 			binary.Write(hasher, binary.BigEndian, v) | ||||
| 		} | ||||
| 		binary.Write(hasher, binary.BigEndian, n.values) | ||||
| 		hash = hasher.Sum64() | ||||
| 	} | ||||
| 
 | ||||
| 	if first { | ||||
| 		b.indexBlockIdx[hash] = rootBlockOffset - blockOffset | ||||
| 	} | ||||
| 
 | ||||
| 	// Compacters don't apply to internal nodes.
 | ||||
| 	if n.children != nil { | ||||
| 		v, ok := b.indexBlockIdx[hash] | ||||
| 		if !ok { | ||||
| 			v = len(b.IndexBlocks) - blockOffset | ||||
| 			b.IndexBlocks = append(b.IndexBlocks, n) | ||||
| 			b.indexBlockIdx[hash] = v | ||||
| 		} | ||||
| 		n.index = nodeIndex{0, v} | ||||
| 	} else { | ||||
| 		h, ok := b.valueBlockIdx[hash] | ||||
| 		if !ok { | ||||
| 			bestI, bestSize := 0, blockSize*b.ValueSize | ||||
| 			for i, c := range b.Compactions[1:] { | ||||
| 				if sz, ok := c.c.Size(n.values); ok && bestSize > sz { | ||||
| 					bestI, bestSize = i+1, sz | ||||
| 				} | ||||
| 			} | ||||
| 			c := &b.Compactions[bestI] | ||||
| 			c.totalSize += bestSize | ||||
| 			v := c.c.Store(n.values) | ||||
| 			if c.maxHandle < v { | ||||
| 				c.maxHandle = v | ||||
| 			} | ||||
| 			h = nodeIndex{bestI, int(v)} | ||||
| 			b.valueBlockIdx[hash] = h | ||||
| 		} | ||||
| 		n.index = h | ||||
| 	} | ||||
| 	return hash | ||||
| } | ||||
|  | @ -1,376 +0,0 @@ | |||
| // Copyright 2014 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| // Package ucd provides a parser for Unicode Character Database files, the
 | ||||
| // format of which is defined in http://www.unicode.org/reports/tr44/. See
 | ||||
| // http://www.unicode.org/Public/UCD/latest/ucd/ for example files.
 | ||||
| //
 | ||||
| // It currently does not support substitutions of missing fields.
 | ||||
| package ucd // import "golang.org/x/text/internal/ucd"
 | ||||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // UnicodeData.txt fields.
 | ||||
| const ( | ||||
| 	CodePoint = iota | ||||
| 	Name | ||||
| 	GeneralCategory | ||||
| 	CanonicalCombiningClass | ||||
| 	BidiClass | ||||
| 	DecompMapping | ||||
| 	DecimalValue | ||||
| 	DigitValue | ||||
| 	NumericValue | ||||
| 	BidiMirrored | ||||
| 	Unicode1Name | ||||
| 	ISOComment | ||||
| 	SimpleUppercaseMapping | ||||
| 	SimpleLowercaseMapping | ||||
| 	SimpleTitlecaseMapping | ||||
| ) | ||||
| 
 | ||||
| // Parse calls f for each entry in the given reader of a UCD file. It will close
 | ||||
| // the reader upon return. It will call log.Fatal if any error occurred.
 | ||||
| //
 | ||||
| // This implements the most common usage pattern for a Parser.
 | ||||
| func Parse(r io.ReadCloser, f func(p *Parser)) { | ||||
| 	defer r.Close() | ||||
| 
 | ||||
| 	p := New(r) | ||||
| 	for p.Next() { | ||||
| 		f(p) | ||||
| 	} | ||||
| 	if err := p.Err(); err != nil { | ||||
| 		r.Close() // os.Exit will cause defers not to be called.
 | ||||
| 		log.Fatal(err) | ||||
| 	} | ||||
| } | ||||
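| 
 | ||||
| // A usage sketch (illustrative, not part of the original source): print the
 | ||||
| // general category of every code point in a UnicodeData.txt file opened by
 | ||||
| // the caller. Parse closes the file when it is done.
 | ||||
| //
 | ||||
| //	f, err := os.Open("UnicodeData.txt")
 | ||||
| //	if err != nil {
 | ||||
| //		log.Fatal(err)
 | ||||
| //	}
 | ||||
| //	Parse(f, func(p *Parser) {
 | ||||
| //		fmt.Printf("%U: %s\n", p.Rune(CodePoint), p.String(GeneralCategory))
 | ||||
| //	})
 | ||||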
| 
 | ||||
| // An Option is used to configure a Parser.
 | ||||
| type Option func(p *Parser) | ||||
| 
 | ||||
| func keepRanges(p *Parser) { | ||||
| 	p.keepRanges = true | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	// KeepRanges prevents the expansion of ranges. The raw ranges can be
 | ||||
| 	// obtained by calling Range(0) on the parser.
 | ||||
| 	KeepRanges Option = keepRanges | ||||
| ) | ||||
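| 
 | ||||
| // For example (a sketch): with KeepRanges, a line such as "0000..001F;..."
 | ||||
| // yields a single entry whose raw bounds can be read via Range(0):
 | ||||
| //
 | ||||
| //	p := New(r, KeepRanges)
 | ||||
| //	for p.Next() {
 | ||||
| //		first, last := p.Range(0)
 | ||||
| //		_, _ = first, last
 | ||||
| //	}
 | ||||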
| 
 | ||||
| // The Part option registers a handler for lines starting with a '@'. The text
 | ||||
| // after a '@' is available as the first field. Comments are handled as usual.
 | ||||
| func Part(f func(p *Parser)) Option { | ||||
| 	return func(p *Parser) { | ||||
| 		p.partHandler = f | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // The CommentHandler option passes comments that are on a line by themselves
 | ||||
| // to a given handler.
 | ||||
| func CommentHandler(f func(s string)) Option { | ||||
| 	return func(p *Parser) { | ||||
| 		p.commentHandler = f | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // A Parser parses Unicode Character Database (UCD) files.
 | ||||
| type Parser struct { | ||||
| 	scanner *bufio.Scanner | ||||
| 
 | ||||
| 	keepRanges bool // Don't expand rune ranges in field 0.
 | ||||
| 
 | ||||
| 	err     error | ||||
| 	comment []byte | ||||
| 	field   [][]byte | ||||
| 	// parsedRange is needed in case Range(0) is called more than once for one
 | ||||
| 	// field. In some cases this requires scanning ahead.
 | ||||
| 	parsedRange          bool | ||||
| 	rangeStart, rangeEnd rune | ||||
| 
 | ||||
| 	partHandler    func(p *Parser) | ||||
| 	commentHandler func(s string) | ||||
| } | ||||
| 
 | ||||
| func (p *Parser) setError(err error) { | ||||
| 	if p.err == nil { | ||||
| 		p.err = err | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (p *Parser) getField(i int) []byte { | ||||
| 	if i >= len(p.field) { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return p.field[i] | ||||
| } | ||||
| 
 | ||||
| // Err returns a non-nil error if any error occurred during parsing.
 | ||||
| func (p *Parser) Err() error { | ||||
| 	return p.err | ||||
| } | ||||
| 
 | ||||
| // New returns a Parser for the given Reader.
 | ||||
| func New(r io.Reader, o ...Option) *Parser { | ||||
| 	p := &Parser{ | ||||
| 		scanner: bufio.NewScanner(r), | ||||
| 	} | ||||
| 	for _, f := range o { | ||||
| 		f(p) | ||||
| 	} | ||||
| 	return p | ||||
| } | ||||
| 
 | ||||
| // Next parses the next line in the file. It returns true if a line was parsed
 | ||||
| // and false if it reached the end of the file.
 | ||||
| func (p *Parser) Next() bool { | ||||
| 	if !p.keepRanges && p.rangeStart < p.rangeEnd { | ||||
| 		p.rangeStart++ | ||||
| 		return true | ||||
| 	} | ||||
| 	p.comment = nil | ||||
| 	p.field = p.field[:0] | ||||
| 	p.parsedRange = false | ||||
| 
 | ||||
| 	for p.scanner.Scan() { | ||||
| 		b := p.scanner.Bytes() | ||||
| 		if len(b) == 0 { | ||||
| 			continue | ||||
| 		} | ||||
| 		if b[0] == '#' { | ||||
| 			if p.commentHandler != nil { | ||||
| 				p.commentHandler(strings.TrimSpace(string(b[1:]))) | ||||
| 			} | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		// Parse line
 | ||||
| 		if i := bytes.IndexByte(b, '#'); i != -1 { | ||||
| 			p.comment = bytes.TrimSpace(b[i+1:]) | ||||
| 			b = b[:i] | ||||
| 		} | ||||
| 		if b[0] == '@' { | ||||
| 			if p.partHandler != nil { | ||||
| 				p.field = append(p.field, bytes.TrimSpace(b[1:])) | ||||
| 				p.partHandler(p) | ||||
| 				p.field = p.field[:0] | ||||
| 			} | ||||
| 			p.comment = nil | ||||
| 			continue | ||||
| 		} | ||||
| 		for { | ||||
| 			i := bytes.IndexByte(b, ';') | ||||
| 			if i == -1 { | ||||
| 				p.field = append(p.field, bytes.TrimSpace(b)) | ||||
| 				break | ||||
| 			} | ||||
| 			p.field = append(p.field, bytes.TrimSpace(b[:i])) | ||||
| 			b = b[i+1:] | ||||
| 		} | ||||
| 		if !p.keepRanges { | ||||
| 			p.rangeStart, p.rangeEnd = p.getRange(0) | ||||
| 		} | ||||
| 		return true | ||||
| 	} | ||||
| 	p.setError(p.scanner.Err()) | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| func parseRune(b []byte) (rune, error) { | ||||
| 	if len(b) > 2 && b[0] == 'U' && b[1] == '+' { | ||||
| 		b = b[2:] | ||||
| 	} | ||||
| 	x, err := strconv.ParseUint(string(b), 16, 32) | ||||
| 	return rune(x), err | ||||
| } | ||||
| 
 | ||||
| func (p *Parser) parseRune(b []byte) rune { | ||||
| 	x, err := parseRune(b) | ||||
| 	p.setError(err) | ||||
| 	return x | ||||
| } | ||||
| 
 | ||||
| // Rune parses and returns field i as a rune.
 | ||||
| func (p *Parser) Rune(i int) rune { | ||||
| 	if i > 0 || p.keepRanges { | ||||
| 		return p.parseRune(p.getField(i)) | ||||
| 	} | ||||
| 	return p.rangeStart | ||||
| } | ||||
| 
 | ||||
| // Runes interprets and returns field i as a sequence of runes.
 | ||||
| func (p *Parser) Runes(i int) (runes []rune) { | ||||
| 	add := func(b []byte) { | ||||
| 		if b = bytes.TrimSpace(b); len(b) > 0 { | ||||
| 			runes = append(runes, p.parseRune(b)) | ||||
| 		} | ||||
| 	} | ||||
| 	for b := p.getField(i); ; { | ||||
| 		i := bytes.IndexByte(b, ' ') | ||||
| 		if i == -1 { | ||||
| 			add(b) | ||||
| 			break | ||||
| 		} | ||||
| 		add(b[:i]) | ||||
| 		b = b[i+1:] | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") | ||||
| 
 | ||||
| 	// reRange matches one line of a legacy rune range.
 | ||||
| 	reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") | ||||
| ) | ||||
| 
 | ||||
| // Range parses and returns field i as a rune range. A range is inclusive at
 | ||||
| // both ends. If the field only has one rune, first and last will be identical.
 | ||||
| // It supports the legacy format for ranges used in UnicodeData.txt.
 | ||||
| func (p *Parser) Range(i int) (first, last rune) { | ||||
| 	if !p.keepRanges { | ||||
| 		return p.rangeStart, p.rangeStart | ||||
| 	} | ||||
| 	return p.getRange(i) | ||||
| } | ||||
| 
 | ||||
| func (p *Parser) getRange(i int) (first, last rune) { | ||||
| 	b := p.getField(i) | ||||
| 	if k := bytes.Index(b, []byte("..")); k != -1 { | ||||
| 		return p.parseRune(b[:k]), p.parseRune(b[k+2:]) | ||||
| 	} | ||||
| 	// The first field may not be a rune, in which case we may ignore any error
 | ||||
| 	// and set the range as 0..0.
 | ||||
| 	x, err := parseRune(b) | ||||
| 	if err != nil { | ||||
| 		// Disable range parsing henceforth. This ensures that an error will be
 | ||||
| 		// returned if the user subsequently tries to parse this field as
 | ||||
| 		// a Rune.
 | ||||
| 		p.keepRanges = true | ||||
| 	} | ||||
| 	// Special case for UnicodeData that was retained for backwards compatibility.
 | ||||
| 	if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) { | ||||
| 		if p.parsedRange { | ||||
| 			return p.rangeStart, p.rangeEnd | ||||
| 		} | ||||
| 		mf := reRange.FindStringSubmatch(p.scanner.Text()) | ||||
| 		if mf == nil || !p.scanner.Scan() { | ||||
| 			p.setError(errIncorrectLegacyRange) | ||||
| 			return x, x | ||||
| 		} | ||||
| 		// Using Bytes would be more efficient here, but Text is a lot easier
 | ||||
| 		// and this is not a frequent case.
 | ||||
| 		ml := reRange.FindStringSubmatch(p.scanner.Text()) | ||||
| 		if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] { | ||||
| 			p.setError(errIncorrectLegacyRange) | ||||
| 			return x, x | ||||
| 		} | ||||
| 		p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])]) | ||||
| 		p.parsedRange = true | ||||
| 		return p.rangeStart, p.rangeEnd | ||||
| 	} | ||||
| 	return x, x | ||||
| } | ||||
| 
 | ||||
| // bools recognizes all valid UCD boolean values.
 | ||||
| var bools = map[string]bool{ | ||||
| 	"":      false, | ||||
| 	"N":     false, | ||||
| 	"No":    false, | ||||
| 	"F":     false, | ||||
| 	"False": false, | ||||
| 	"Y":     true, | ||||
| 	"Yes":   true, | ||||
| 	"T":     true, | ||||
| 	"True":  true, | ||||
| } | ||||
| 
 | ||||
| // Bool parses and returns field i as a boolean value.
 | ||||
| func (p *Parser) Bool(i int) bool { | ||||
| 	b := p.getField(i) | ||||
| 	for s, v := range bools { | ||||
| 		if bstrEq(b, s) { | ||||
| 			return v | ||||
| 		} | ||||
| 	} | ||||
| 	p.setError(strconv.ErrSyntax) | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| // Int parses and returns field i as an integer value.
 | ||||
| func (p *Parser) Int(i int) int { | ||||
| 	x, err := strconv.ParseInt(string(p.getField(i)), 10, 64) | ||||
| 	p.setError(err) | ||||
| 	return int(x) | ||||
| } | ||||
| 
 | ||||
| // Uint parses and returns field i as an unsigned integer value.
 | ||||
| func (p *Parser) Uint(i int) uint { | ||||
| 	x, err := strconv.ParseUint(string(p.getField(i)), 10, 64) | ||||
| 	p.setError(err) | ||||
| 	return uint(x) | ||||
| } | ||||
| 
 | ||||
| // Float parses and returns field i as a decimal value.
 | ||||
| func (p *Parser) Float(i int) float64 { | ||||
| 	x, err := strconv.ParseFloat(string(p.getField(i)), 64) | ||||
| 	p.setError(err) | ||||
| 	return x | ||||
| } | ||||
| 
 | ||||
| // String parses and returns field i as a string value.
 | ||||
| func (p *Parser) String(i int) string { | ||||
| 	return string(p.getField(i)) | ||||
| } | ||||
| 
 | ||||
| // Strings parses and returns field i as a space-separated list of strings.
 | ||||
| func (p *Parser) Strings(i int) []string { | ||||
| 	ss := strings.Split(string(p.getField(i)), " ") | ||||
| 	for i, s := range ss { | ||||
| 		ss[i] = strings.TrimSpace(s) | ||||
| 	} | ||||
| 	return ss | ||||
| } | ||||
| 
 | ||||
| // Comment returns the comments for the current line.
 | ||||
| func (p *Parser) Comment() string { | ||||
| 	return string(p.comment) | ||||
| } | ||||
| 
 | ||||
| var errUndefinedEnum = errors.New("ucd: undefined enum value") | ||||
| 
 | ||||
| // Enum interprets and returns field i as a value that must be one of the values
 | ||||
| // in enum.
 | ||||
| func (p *Parser) Enum(i int, enum ...string) string { | ||||
| 	b := p.getField(i) | ||||
| 	for _, s := range enum { | ||||
| 		if bstrEq(b, s) { | ||||
| 			return s | ||||
| 		} | ||||
| 	} | ||||
| 	p.setError(errUndefinedEnum) | ||||
| 	return "" | ||||
| } | ||||
| 
 | ||||
| func bstrEq(b []byte, s string) bool { | ||||
| 	if len(b) != len(s) { | ||||
| 		return false | ||||
| 	} | ||||
| 	for i, c := range b { | ||||
| 		if c != s[i] { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | @ -1,100 +0,0 @@ | |||
| // Copyright 2013 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| package cldr | ||||
| 
 | ||||
| import ( | ||||
| 	"encoding/xml" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| ) | ||||
| 
 | ||||
| // Elem is implemented by every XML element.
 | ||||
| type Elem interface { | ||||
| 	setEnclosing(Elem) | ||||
| 	setName(string) | ||||
| 	enclosing() Elem | ||||
| 
 | ||||
| 	GetCommon() *Common | ||||
| } | ||||
| 
 | ||||
| type hidden struct { | ||||
| 	CharData string `xml:",chardata"` | ||||
| 	Alias    *struct { | ||||
| 		Common | ||||
| 		Source string `xml:"source,attr"` | ||||
| 		Path   string `xml:"path,attr"` | ||||
| 	} `xml:"alias"` | ||||
| 	Def *struct { | ||||
| 		Common | ||||
| 		Choice string `xml:"choice,attr,omitempty"` | ||||
| 		Type   string `xml:"type,attr,omitempty"` | ||||
| 	} `xml:"default"` | ||||
| } | ||||
| 
 | ||||
| // Common holds several of the most common attributes and sub elements
 | ||||
| // of an XML element.
 | ||||
| type Common struct { | ||||
| 	XMLName         xml.Name | ||||
| 	name            string | ||||
| 	enclElem        Elem | ||||
| 	Type            string `xml:"type,attr,omitempty"` | ||||
| 	Reference       string `xml:"reference,attr,omitempty"` | ||||
| 	Alt             string `xml:"alt,attr,omitempty"` | ||||
| 	ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` | ||||
| 	Draft           string `xml:"draft,attr,omitempty"` | ||||
| 	hidden | ||||
| } | ||||
| 
 | ||||
| // Default returns the default type to select from the enclosed list
 | ||||
| // or "" if no default value is specified.
 | ||||
| func (e *Common) Default() string { | ||||
| 	if e.Def == nil { | ||||
| 		return "" | ||||
| 	} | ||||
| 	if e.Def.Choice != "" { | ||||
| 		return e.Def.Choice | ||||
| 	} else if e.Def.Type != "" { | ||||
| 		// Type is still used by the default element in collation.
 | ||||
| 		return e.Def.Type | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
| 
 | ||||
| // GetCommon returns e. It is provided such that Common implements Elem.
 | ||||
| func (e *Common) GetCommon() *Common { | ||||
| 	return e | ||||
| } | ||||
| 
 | ||||
| // Data returns the character data accumulated for this element.
 | ||||
| func (e *Common) Data() string { | ||||
| 	e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) | ||||
| 	return e.CharData | ||||
| } | ||||
| 
 | ||||
| func (e *Common) setName(s string) { | ||||
| 	e.name = s | ||||
| } | ||||
| 
 | ||||
| func (e *Common) enclosing() Elem { | ||||
| 	return e.enclElem | ||||
| } | ||||
| 
 | ||||
| func (e *Common) setEnclosing(en Elem) { | ||||
| 	e.enclElem = en | ||||
| } | ||||
| 
 | ||||
| // charRe matches the escape sequences that can be unescaped without further escaping the string.
 | ||||
| var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) | ||||
| 
 | ||||
| // replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
 | ||||
| // It assumes the input string is correctly formatted.
 | ||||
| func replaceUnicode(s string) string { | ||||
| 	if s[1] == '#' { | ||||
| 		r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) | ||||
| 		return string(r) | ||||
| 	} | ||||
| 	r, _, _, _ := strconv.UnquoteChar(s, 0) | ||||
| 	return string(r) | ||||
| } | ||||
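| 
 | ||||
| // For example (illustrative): replaceUnicode("&#x41;") and
 | ||||
| // replaceUnicode(`\u0041`) both return "A".
 | ||||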
|  | @ -1,130 +0,0 @@ | |||
| // Copyright 2013 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| //go:generate go run makexml.go -output xml.go
 | ||||
| 
 | ||||
| // Package cldr provides a parser for LDML and related XML formats.
 | ||||
| // This package is intended to be used by the table generation tools
 | ||||
| // for the various internationalization-related packages.
 | ||||
| // As the XML types are generated from the CLDR DTD, and as the CLDR standard
 | ||||
| // is periodically amended, this package may change considerably over time.
 | ||||
| // This mostly means that data may appear and disappear between versions.
 | ||||
| // That is, old code should keep compiling for newer versions, but data
 | ||||
| // may have moved or changed.
 | ||||
| // CLDR version 22 is the first version supported by this package.
 | ||||
| // Older versions may not work.
 | ||||
| package cldr // import "golang.org/x/text/unicode/cldr"
 | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
| ) | ||||
| 
 | ||||
| // CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
 | ||||
| type CLDR struct { | ||||
| 	parent   map[string][]string | ||||
| 	locale   map[string]*LDML | ||||
| 	resolved map[string]*LDML | ||||
| 	bcp47    *LDMLBCP47 | ||||
| 	supp     *SupplementalData | ||||
| } | ||||
| 
 | ||||
| func makeCLDR() *CLDR { | ||||
| 	return &CLDR{ | ||||
| 		parent:   make(map[string][]string), | ||||
| 		locale:   make(map[string]*LDML), | ||||
| 		resolved: make(map[string]*LDML), | ||||
| 		bcp47:    &LDMLBCP47{}, | ||||
| 		supp:     &SupplementalData{}, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
 | ||||
| func (cldr *CLDR) BCP47() *LDMLBCP47 { | ||||
| 	return cldr.bcp47 | ||||
| } | ||||
| 
 | ||||
| // Draft indicates the draft level of an element.
 | ||||
| type Draft int | ||||
| 
 | ||||
| const ( | ||||
| 	Approved Draft = iota | ||||
| 	Contributed | ||||
| 	Provisional | ||||
| 	Unconfirmed | ||||
| ) | ||||
| 
 | ||||
| var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} | ||||
| 
 | ||||
| // ParseDraft returns the Draft value corresponding to the given string. The
 | ||||
| // empty string corresponds to Approved.
 | ||||
| func ParseDraft(level string) (Draft, error) { | ||||
| 	if level == "" { | ||||
| 		return Approved, nil | ||||
| 	} | ||||
| 	for i, s := range drafts { | ||||
| 		if level == s { | ||||
| 			return Unconfirmed - Draft(i), nil | ||||
| 		} | ||||
| 	} | ||||
| 	return Approved, fmt.Errorf("cldr: unknown draft level %q", level) | ||||
| } | ||||
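| 
 | ||||
| // For example (illustrative): ParseDraft("contributed") returns Contributed,
 | ||||
| // and ParseDraft("") returns Approved.
 | ||||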
| 
 | ||||
| func (d Draft) String() string { | ||||
| 	return drafts[len(drafts)-1-int(d)] | ||||
| } | ||||
| 
 | ||||
| // SetDraftLevel sets which draft levels to include in the evaluated LDML.
 | ||||
| // Any draft element for which the draft level is higher than lev will be excluded.
 | ||||
| // If multiple draft levels are available for a single element, the one with the
 | ||||
| // lowest draft level will be selected, unless preferDraft is true, in which case
 | ||||
| // the highest draft will be chosen.
 | ||||
| // It is assumed that the underlying LDML is canonicalized.
 | ||||
| func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { | ||||
| 	// TODO: implement
 | ||||
| 	cldr.resolved = make(map[string]*LDML) | ||||
| } | ||||
| 
 | ||||
| // RawLDML returns the LDML XML for loc in unresolved form.
 | ||||
| // loc must be one of the strings returned by Locales.
 | ||||
| func (cldr *CLDR) RawLDML(loc string) *LDML { | ||||
| 	return cldr.locale[loc] | ||||
| } | ||||
| 
 | ||||
| // LDML returns the fully resolved LDML XML for loc, which must be one of
 | ||||
| // the strings returned by Locales.
 | ||||
| func (cldr *CLDR) LDML(loc string) (*LDML, error) { | ||||
| 	return cldr.resolve(loc) | ||||
| } | ||||
| 
 | ||||
| // Supplemental returns the parsed supplemental data. If no such data was parsed,
 | ||||
| // nil is returned.
 | ||||
| func (cldr *CLDR) Supplemental() *SupplementalData { | ||||
| 	return cldr.supp | ||||
| } | ||||
| 
 | ||||
| // Locales returns the locales for which there exist files.
 | ||||
| // Valid sublocales for which there is no file are not included.
 | ||||
| // The root locale is always sorted first.
 | ||||
| func (cldr *CLDR) Locales() []string { | ||||
| 	loc := []string{"root"} | ||||
| 	hasRoot := false | ||||
| 	for l := range cldr.locale { | ||||
| 		if l == "root" { | ||||
| 			hasRoot = true | ||||
| 			continue | ||||
| 		} | ||||
| 		loc = append(loc, l) | ||||
| 	} | ||||
| 	sort.Strings(loc[1:]) | ||||
| 	if !hasRoot { | ||||
| 		return loc[1:] | ||||
| 	} | ||||
| 	return loc | ||||
| } | ||||
| 
 | ||||
| // Get fills in the fields of x based on the XPath path.
 | ||||
| func Get(e Elem, path string) (res Elem, err error) { | ||||
| 	return walkXPath(e, path) | ||||
| } | ||||
|  | @ -1,359 +0,0 @@ | |||
| // Copyright 2013 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| package cldr | ||||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"encoding/xml" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
| 
 | ||||
| // RuleProcessor can be passed to Collator's Process method, which
 | ||||
| // parses the rules and calls the respective method for each rule found.
 | ||||
| type RuleProcessor interface { | ||||
| 	Reset(anchor string, before int) error | ||||
| 	Insert(level int, str, context, extend string) error | ||||
| 	Index(id string) | ||||
| } | ||||
| 
 | ||||
| const ( | ||||
| 	// cldrIndex is a Unicode-reserved sentinel value used to mark the start
 | ||||
| 	// of a grouping within an index.
 | ||||
| 	// We ignore any rule that starts with this rune.
 | ||||
| 	// See http://unicode.org/reports/tr35/#Collation_Elements for details.
 | ||||
| 	cldrIndex = "\uFDD0" | ||||
| 
 | ||||
| 	// specialAnchor is the format in which to represent logical reset positions,
 | ||||
| 	// such as "first tertiary ignorable".
 | ||||
| 	specialAnchor = "<%s/>" | ||||
| ) | ||||
| 
 | ||||
| // Process parses the rules for the tailorings of this collation
 | ||||
| // and calls the respective methods of p for each rule found.
 | ||||
| func (c Collation) Process(p RuleProcessor) (err error) { | ||||
| 	if len(c.Cr) > 0 { | ||||
| 		if len(c.Cr) > 1 { | ||||
| 			return fmt.Errorf("multiple cr elements, want 0 or 1") | ||||
| 		} | ||||
| 		return processRules(p, c.Cr[0].Data()) | ||||
| 	} | ||||
| 	if c.Rules.Any != nil { | ||||
| 		return c.processXML(p) | ||||
| 	} | ||||
| 	return errors.New("no tailoring data") | ||||
| } | ||||
| 
 | ||||
| // processRules parses rules in the Collation Rule Syntax defined in
 | ||||
| // http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
 | ||||
| func processRules(p RuleProcessor, s string) (err error) { | ||||
| 	chk := func(s string, e error) string { | ||||
| 		if err == nil { | ||||
| 			err = e | ||||
| 		} | ||||
| 		return s | ||||
| 	} | ||||
| 	i := 0 // Save the line number for use after the loop.
 | ||||
| 	scanner := bufio.NewScanner(strings.NewReader(s)) | ||||
| 	for ; scanner.Scan() && err == nil; i++ { | ||||
| 		for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) { | ||||
| 			level := 5 | ||||
| 			var ch byte | ||||
| 			switch ch, s = s[0], s[1:]; ch { | ||||
| 			case '&': // followed by <anchor> or '[' <key> ']'
 | ||||
| 				if s = skipSpace(s); consume(&s, '[') { | ||||
| 					s = chk(parseSpecialAnchor(p, s)) | ||||
| 				} else { | ||||
| 					s = chk(parseAnchor(p, 0, s)) | ||||
| 				} | ||||
| 			case '<': // sort relation '<'{1,4}, optionally followed by '*'.
 | ||||
| 				for level = 1; consume(&s, '<'); level++ { | ||||
| 				} | ||||
| 				if level > 4 { | ||||
| 					err = fmt.Errorf("level %d > 4", level) | ||||
| 				} | ||||
| 				fallthrough | ||||
| 			case '=': // identity relation, optionally followed by *.
 | ||||
| 				if consume(&s, '*') { | ||||
| 					s = chk(parseSequence(p, level, s)) | ||||
| 				} else { | ||||
| 					s = chk(parseOrder(p, level, s)) | ||||
| 				} | ||||
| 			default: | ||||
| 				chk("", fmt.Errorf("illegal operator %q", ch)) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	if chk("", scanner.Err()); err != nil { | ||||
| 		return fmt.Errorf("%d: %v", i, err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
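| 
 | ||||
| // For example (illustrative): the rule string "&a < b << c" results in the
 | ||||
| // calls p.Reset("a", 0), p.Insert(1, "b", "", "") and p.Insert(2, "c", "", "").
 | ||||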
| 
 | ||||
| // parseSpecialAnchor parses the anchor syntax which is either of the form
 | ||||
| //    ['before' <level>] <anchor>
 | ||||
| // or
 | ||||
| //    [<label>]
 | ||||
| // The starting '[' should already be consumed.
 | ||||
| func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) { | ||||
| 	i := strings.IndexByte(s, ']') | ||||
| 	if i == -1 { | ||||
| 		return "", errors.New("unmatched bracket") | ||||
| 	} | ||||
| 	a := strings.TrimSpace(s[:i]) | ||||
| 	s = s[i+1:] | ||||
| 	if strings.HasPrefix(a, "before ") { | ||||
| 		l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3) | ||||
| 		if err != nil { | ||||
| 			return s, err | ||||
| 		} | ||||
| 		return parseAnchor(p, int(l), s) | ||||
| 	} | ||||
| 	return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0) | ||||
| } | ||||
| 
 | ||||
| func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) { | ||||
| 	anchor, s, err := scanString(s) | ||||
| 	if err != nil { | ||||
| 		return s, err | ||||
| 	} | ||||
| 	return s, p.Reset(anchor, level) | ||||
| } | ||||
| 
 | ||||
| func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) { | ||||
| 	var value, context, extend string | ||||
| 	if value, s, err = scanString(s); err != nil { | ||||
| 		return s, err | ||||
| 	} | ||||
| 	if strings.HasPrefix(value, cldrIndex) { | ||||
| 		p.Index(value[len(cldrIndex):]) | ||||
| 		return | ||||
| 	} | ||||
| 	if consume(&s, '|') { | ||||
| 		if context, s, err = scanString(s); err != nil { | ||||
| 			return s, errors.New("missing string after context") | ||||
| 		} | ||||
| 	} | ||||
| 	if consume(&s, '/') { | ||||
| 		if extend, s, err = scanString(s); err != nil { | ||||
| 			return s, errors.New("missing string after extension") | ||||
| 		} | ||||
| 	} | ||||
| 	return s, p.Insert(level, value, context, extend) | ||||
| } | ||||
| 
 | ||||
| // scanString scans a single input string.
 | ||||
| func scanString(s string) (str, tail string, err error) { | ||||
| 	if s = skipSpace(s); s == "" { | ||||
| 		return s, s, errors.New("missing string") | ||||
| 	} | ||||
| 	buf := [16]byte{} // small but enough to hold most cases.
 | ||||
| 	value := buf[:0] | ||||
| 	for s != "" { | ||||
| 		if consume(&s, '\'') { | ||||
| 			i := strings.IndexByte(s, '\'') | ||||
| 			if i == -1 { | ||||
| 				return "", "", errors.New(`unmatched single quote`) | ||||
| 			} | ||||
| 			if i == 0 { | ||||
| 				value = append(value, '\'') | ||||
| 			} else { | ||||
| 				value = append(value, s[:i]...) | ||||
| 			} | ||||
| 			s = s[i+1:] | ||||
| 			continue | ||||
| 		} | ||||
| 		r, sz := utf8.DecodeRuneInString(s) | ||||
| 		if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) { | ||||
| 			break | ||||
| 		} | ||||
| 		value = append(value, s[:sz]...) | ||||
| 		s = s[sz:] | ||||
| 	} | ||||
| 	return string(value), skipSpace(s), nil | ||||
| } | ||||
| 
 | ||||
| func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) { | ||||
| 	if s = skipSpace(s); s == "" { | ||||
| 		return s, errors.New("empty sequence") | ||||
| 	} | ||||
| 	last := rune(0) | ||||
| 	for s != "" { | ||||
| 		r, sz := utf8.DecodeRuneInString(s) | ||||
| 		s = s[sz:] | ||||
| 
 | ||||
| 		if r == '-' { | ||||
| 			// We have a range. The first element was already written.
 | ||||
| 			if last == 0 { | ||||
| 				return s, errors.New("range without starter value") | ||||
| 			} | ||||
| 			r, sz = utf8.DecodeRuneInString(s) | ||||
| 			s = s[sz:] | ||||
| 			if r == utf8.RuneError || r < last { | ||||
| 				return s, fmt.Errorf("invalid range %q-%q", last, r) | ||||
| 			} | ||||
| 			for i := last + 1; i <= r; i++ { | ||||
| 				if err := p.Insert(level, string(i), "", ""); err != nil { | ||||
| 					return s, err | ||||
| 				} | ||||
| 			} | ||||
| 			last = 0 | ||||
| 			continue | ||||
| 		} | ||||
| 
 | ||||
| 		if unicode.IsSpace(r) || unicode.IsPunct(r) { | ||||
| 			break | ||||
| 		} | ||||
| 
 | ||||
| 		// normal case
 | ||||
| 		if err := p.Insert(level, string(r), "", ""); err != nil { | ||||
| 			return s, err | ||||
| 		} | ||||
| 		last = r | ||||
| 	} | ||||
| 	return s, nil | ||||
| } | ||||
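| 
 | ||||
| // For example (illustrative): for the sequence "a-c", parseSequence calls
 | ||||
| // p.Insert once each for "a", "b" and "c" at the given level.
 | ||||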
| 
 | ||||
| func skipSpace(s string) string { | ||||
| 	return strings.TrimLeftFunc(s, unicode.IsSpace) | ||||
| } | ||||
| 
 | ||||
| // consume returns whether the next byte is ch. If so, it gobbles it by
 | ||||
| // updating s.
 | ||||
| func consume(s *string, ch byte) (ok bool) { | ||||
| 	if *s == "" || (*s)[0] != ch { | ||||
| 		return false | ||||
| 	} | ||||
| 	*s = (*s)[1:] | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
| // The following code parses Collation rules of CLDR version 24 and before.
 | ||||
| 
 | ||||
| var lmap = map[byte]int{ | ||||
| 	'p': 1, | ||||
| 	's': 2, | ||||
| 	't': 3, | ||||
| 	'i': 5, | ||||
| } | ||||
| 
 | ||||
| type rulesElem struct { | ||||
| 	Rules struct { | ||||
| 		Common | ||||
| 		Any []*struct { | ||||
| 			XMLName xml.Name | ||||
| 			rule | ||||
| 		} `xml:",any"` | ||||
| 	} `xml:"rules"` | ||||
| } | ||||
| 
 | ||||
| type rule struct { | ||||
| 	Value  string `xml:",chardata"` | ||||
| 	Before string `xml:"before,attr"` | ||||
| 	Any    []*struct { | ||||
| 		XMLName xml.Name | ||||
| 		rule | ||||
| 	} `xml:",any"` | ||||
| } | ||||
| 
 | ||||
| var emptyValueError = errors.New("cldr: empty rule value") | ||||
| 
 | ||||
| func (r *rule) value() (string, error) { | ||||
| 	// Convert hexadecimal Unicode codepoint notation to a string.
 | ||||
| 	s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode) | ||||
| 	r.Value = s | ||||
| 	if s == "" { | ||||
| 		if len(r.Any) != 1 { | ||||
| 			return "", emptyValueError | ||||
| 		} | ||||
| 		r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local) | ||||
| 		r.Any = nil | ||||
| 	} else if len(r.Any) != 0 { | ||||
| 		return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any) | ||||
| 	} | ||||
| 	return r.Value, nil | ||||
| } | ||||
| 
 | ||||
| func (r rule) process(p RuleProcessor, name, context, extend string) error { | ||||
| 	v, err := r.value() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	switch name { | ||||
| 	case "p", "s", "t", "i": | ||||
| 		if strings.HasPrefix(v, cldrIndex) { | ||||
| 			p.Index(v[len(cldrIndex):]) | ||||
| 			return nil | ||||
| 		} | ||||
| 		if err := p.Insert(lmap[name[0]], v, context, extend); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	case "pc", "sc", "tc", "ic": | ||||
| 		level := lmap[name[0]] | ||||
| 		for _, s := range v { | ||||
| 			if err := p.Insert(level, string(s), context, extend); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	default: | ||||
| 		return fmt.Errorf("cldr: unsupported tag: %q", name) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // processXML parses the format of CLDR versions 24 and older.
 | ||||
| func (c Collation) processXML(p RuleProcessor) (err error) { | ||||
| 	// Collation is generated and defined in xml.go.
 | ||||
| 	var v string | ||||
| 	for _, r := range c.Rules.Any { | ||||
| 		switch r.XMLName.Local { | ||||
| 		case "reset": | ||||
| 			level := 0 | ||||
| 			switch r.Before { | ||||
| 			case "primary", "1": | ||||
| 				level = 1 | ||||
| 			case "secondary", "2": | ||||
| 				level = 2 | ||||
| 			case "tertiary", "3": | ||||
| 				level = 3 | ||||
| 			case "": | ||||
| 			default: | ||||
| 				return fmt.Errorf("cldr: unknown level %q", r.Before) | ||||
| 			} | ||||
| 			v, err = r.value() | ||||
| 			if err == nil { | ||||
| 				err = p.Reset(v, level) | ||||
| 			} | ||||
| 		case "x": | ||||
| 			var context, extend string | ||||
| 			for _, r1 := range r.Any { | ||||
| 				v, err = r1.value() | ||||
| 				switch r1.XMLName.Local { | ||||
| 				case "context": | ||||
| 					context = v | ||||
| 				case "extend": | ||||
| 					extend = v | ||||
| 				} | ||||
| 			} | ||||
| 			for _, r1 := range r.Any { | ||||
| 				if t := r1.XMLName.Local; t == "context" || t == "extend" { | ||||
| 					continue | ||||
| 				} | ||||
| 				err = r1.rule.process(p, r1.XMLName.Local, context, extend) | ||||
| 			} | ||||
| 		default: | ||||
| 			err = r.rule.process(p, r.XMLName.Local, "", "") | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | @ -1,171 +0,0 @@ | |||
| // Copyright 2013 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| package cldr | ||||
| 
 | ||||
| import ( | ||||
| 	"archive/zip" | ||||
| 	"bytes" | ||||
| 	"encoding/xml" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"regexp" | ||||
| ) | ||||
| 
 | ||||
| // A Decoder loads an archive of CLDR data.
 | ||||
| type Decoder struct { | ||||
| 	dirFilter     []string | ||||
| 	sectionFilter []string | ||||
| 	loader        Loader | ||||
| 	cldr          *CLDR | ||||
| 	curLocale     string | ||||
| } | ||||
| 
 | ||||
| // SetSectionFilter takes a list of top-level LDML element names to which
 | ||||
| // evaluation of LDML should be limited. It automatically calls SetDirFilter.
 | ||||
| func (d *Decoder) SetSectionFilter(filter ...string) { | ||||
| 	d.sectionFilter = filter | ||||
| 	// TODO: automatically set dir filter
 | ||||
| } | ||||
| 
 | ||||
| // SetDirFilter limits the loading of LDML XML files to the specified directories.
 | ||||
| // Note that sections may be split across directories differently for different CLDR versions.
 | ||||
| // For more robust code, use SetSectionFilter.
 | ||||
| func (d *Decoder) SetDirFilter(dir ...string) { | ||||
| 	d.dirFilter = dir | ||||
| } | ||||
| 
 | ||||
| // A Loader provides access to the files of a CLDR archive.
 | ||||
| type Loader interface { | ||||
| 	Len() int | ||||
| 	Path(i int) string | ||||
| 	Reader(i int) (io.ReadCloser, error) | ||||
| } | ||||
| 
 | ||||
| var fileRe = regexp.MustCompile(".*/(.*)/(.*)\\.xml") | ||||
| 
 | ||||
| // Decode loads and decodes the files represented by l.
 | ||||
| func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) { | ||||
| 	d.cldr = makeCLDR() | ||||
| 	for i := 0; i < l.Len(); i++ { | ||||
| 		fname := l.Path(i) | ||||
| 		if m := fileRe.FindStringSubmatch(fname); m != nil { | ||||
| 			if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) { | ||||
| 				continue | ||||
| 			} | ||||
| 			var r io.Reader | ||||
| 			if r, err = l.Reader(i); err == nil { | ||||
| 				err = d.decode(m[1], m[2], r) | ||||
| 			} | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	d.cldr.finalize(d.sectionFilter) | ||||
| 	return d.cldr, nil | ||||
| } | ||||
| 
 | ||||
| func (d *Decoder) decode(dir, id string, r io.Reader) error { | ||||
| 	var v interface{} | ||||
| 	var l *LDML | ||||
| 	cldr := d.cldr | ||||
| 	switch { | ||||
| 	case dir == "supplemental": | ||||
| 		v = cldr.supp | ||||
| 	case dir == "transforms": | ||||
| 		return nil | ||||
| 	case dir == "bcp47": | ||||
| 		v = cldr.bcp47 | ||||
| 	case dir == "validity": | ||||
| 		return nil | ||||
| 	default: | ||||
| 		ok := false | ||||
| 		if v, ok = cldr.locale[id]; !ok { | ||||
| 			l = &LDML{} | ||||
| 			v, cldr.locale[id] = l, l | ||||
| 		} | ||||
| 	} | ||||
| 	x := xml.NewDecoder(r) | ||||
| 	if err := x.Decode(v); err != nil { | ||||
| 		log.Printf("%s/%s: %v", dir, id, err) | ||||
| 		return err | ||||
| 	} | ||||
| 	if l != nil { | ||||
| 		if l.Identity == nil { | ||||
| 			return fmt.Errorf("%s/%s: missing identity element", dir, id) | ||||
| 		} | ||||
| 		// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
 | ||||
| 		// is resolved.
 | ||||
| 		// path := strings.Split(id, "_")
 | ||||
| 		// if lang := l.Identity.Language.Type; lang != path[0] {
 | ||||
| 		// 	return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
 | ||||
| 		// }
 | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| type pathLoader []string | ||||
| 
 | ||||
| func makePathLoader(path string) (pl pathLoader, err error) { | ||||
| 	err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error { | ||||
| 		pl = append(pl, path) | ||||
| 		return err | ||||
| 	}) | ||||
| 	return pl, err | ||||
| } | ||||
| 
 | ||||
| func (pl pathLoader) Len() int { | ||||
| 	return len(pl) | ||||
| } | ||||
| 
 | ||||
| func (pl pathLoader) Path(i int) string { | ||||
| 	return pl[i] | ||||
| } | ||||
| 
 | ||||
| func (pl pathLoader) Reader(i int) (io.ReadCloser, error) { | ||||
| 	return os.Open(pl[i]) | ||||
| } | ||||
| 
 | ||||
| // DecodePath loads CLDR data from the given path.
 | ||||
| func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) { | ||||
| 	loader, err := makePathLoader(path) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return d.Decode(loader) | ||||
| } | ||||
| 
 | ||||
| type zipLoader struct { | ||||
| 	r *zip.Reader | ||||
| } | ||||
| 
 | ||||
| func (zl zipLoader) Len() int { | ||||
| 	return len(zl.r.File) | ||||
| } | ||||
| 
 | ||||
| func (zl zipLoader) Path(i int) string { | ||||
| 	return zl.r.File[i].Name | ||||
| } | ||||
| 
 | ||||
| func (zl zipLoader) Reader(i int) (io.ReadCloser, error) { | ||||
| 	return zl.r.File[i].Open() | ||||
| } | ||||
| 
 | ||||
| // DecodeZip loads CLDR data from the zip archive for which r is the source.
 | ||||
| func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) { | ||||
| 	buffer, err := ioutil.ReadAll(r) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer))) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return d.Decode(zipLoader{archive}) | ||||
| } | ||||
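| 
 | ||||
| // A usage sketch (illustrative, not part of the original source): decode a
 | ||||
| // local CLDR archive and resolve the English locale. The archive path is
 | ||||
| // hypothetical.
 | ||||
| //
 | ||||
| //	var d Decoder
 | ||||
| //	d.SetDirFilter("main", "supplemental")
 | ||||
| //	root, err := d.DecodePath("data/common")
 | ||||
| //	if err != nil {
 | ||||
| //		log.Fatal(err)
 | ||||
| //	}
 | ||||
| //	en, err := root.LDML("en")
 | ||||
| //	if err != nil {
 | ||||
| //		log.Fatal(err)
 | ||||
| //	}
 | ||||
| //	_ = en
 | ||||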
|  | @ -1,400 +0,0 @@ | |||
| // Copyright 2013 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| // +build ignore
 | ||||
| 
 | ||||
| // This tool generates types for the various XML formats of CLDR.
 | ||||
| package main | ||||
| 
 | ||||
| import ( | ||||
| 	"archive/zip" | ||||
| 	"bytes" | ||||
| 	"encoding/xml" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"golang.org/x/text/internal/gen" | ||||
| ) | ||||
| 
 | ||||
| var outputFile = flag.String("output", "xml.go", "output file name") | ||||
| 
 | ||||
| func main() { | ||||
| 	flag.Parse() | ||||
| 
 | ||||
| 	r := gen.OpenCLDRCoreZip() | ||||
| 	buffer, err := ioutil.ReadAll(r) | ||||
| 	if err != nil { | ||||
| 		log.Fatal("Could not read zip file") | ||||
| 	} | ||||
| 	r.Close() | ||||
| 	z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer))) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("Could not read zip archive: %v", err) | ||||
| 	} | ||||
| 
 | ||||
| 	var buf bytes.Buffer | ||||
| 
 | ||||
| 	version := gen.CLDRVersion() | ||||
| 
 | ||||
| 	for _, dtd := range files { | ||||
| 		for _, f := range z.File { | ||||
| 			if strings.HasSuffix(f.Name, dtd.file+".dtd") { | ||||
| 				r, err := f.Open() | ||||
| 				failOnError(err) | ||||
| 
 | ||||
| 				b := makeBuilder(&buf, dtd) | ||||
| 				b.parseDTD(r) | ||||
| 				b.resolve(b.index[dtd.top[0]]) | ||||
| 				b.write() | ||||
| 				if b.version != "" && version != b.version { | ||||
| 					println(f.Name) | ||||
| 					log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version) | ||||
| 				} | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.") | ||||
| 	fmt.Fprintf(&buf, "const Version = %q\n", version) | ||||
| 
 | ||||
| 	gen.WriteGoFile(*outputFile, "cldr", buf.Bytes()) | ||||
| } | ||||
| 
 | ||||
| func failOnError(err error) { | ||||
| 	if err != nil { | ||||
| 		log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error()) | ||||
| 		os.Exit(1) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // configuration data per DTD type
 | ||||
| type dtd struct { | ||||
| 	file string   // base file name
 | ||||
| 	root string   // Go name of the root XML element
 | ||||
| 	top  []string // create a different type for this section
 | ||||
| 
 | ||||
| 	skipElem    []string // hard-coded or deprecated elements
 | ||||
| 	skipAttr    []string // attributes to exclude
 | ||||
| 	predefined  []string // elements with hard-coded types of the form <name>Elem
 | ||||
| 	forceRepeat []string // elements to make slices despite DTD
 | ||||
| } | ||||
| 
 | ||||
| var files = []dtd{ | ||||
| 	{ | ||||
| 		file: "ldmlBCP47", | ||||
| 		root: "LDMLBCP47", | ||||
| 		top:  []string{"ldmlBCP47"}, | ||||
| 		skipElem: []string{ | ||||
| 			"cldrVersion", // deprecated, not used
 | ||||
| 		}, | ||||
| 	}, | ||||
| 	{ | ||||
| 		file: "ldmlSupplemental", | ||||
| 		root: "SupplementalData", | ||||
| 		top:  []string{"supplementalData"}, | ||||
| 		skipElem: []string{ | ||||
| 			"cldrVersion", // deprecated, not used
 | ||||
| 		}, | ||||
| 		forceRepeat: []string{ | ||||
| 			"plurals", // data defined in plurals.xml and ordinals.xml
 | ||||
| 		}, | ||||
| 	}, | ||||
| 	{ | ||||
| 		file: "ldml", | ||||
| 		root: "LDML", | ||||
| 		top: []string{ | ||||
| 			"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers", | ||||
| 		}, | ||||
| 		skipElem: []string{ | ||||
| 			"cp",       // not used anywhere
 | ||||
| 			"special",  // not used anywhere
 | ||||
| 			"fallback", // deprecated, not used
 | ||||
| 			"alias",    // in Common
 | ||||
| 			"default",  // in Common
 | ||||
| 		}, | ||||
| 		skipAttr: []string{ | ||||
| 			"hiraganaQuarternary", // typo in DTD, correct version included as well
 | ||||
| 		}, | ||||
| 		predefined: []string{"rules"}, | ||||
| 	}, | ||||
| } | ||||
| 
 | ||||
| var comments = map[string]string{ | ||||
| 	"ldmlBCP47": ` | ||||
| // LDMLBCP47 holds information on allowable values for various variables in LDML.
 | ||||
| `, | ||||
| 	"supplementalData": ` | ||||
| // SupplementalData holds information relevant for internationalization
 | ||||
| // and proper use of CLDR, but that is not contained in the locale hierarchy.
 | ||||
| `, | ||||
| 	"ldml": ` | ||||
| // LDML is the top-level type for locale-specific data.
 | ||||
| `, | ||||
| 	"collation": ` | ||||
| // Collation contains rules that specify a certain sort-order,
 | ||||
| // as a tailoring of the root order. 
 | ||||
| // The parsed rules are obtained by passing a RuleProcessor to Collation's
 | ||||
| // Process method.
 | ||||
| `, | ||||
| 	"calendar": ` | ||||
| // Calendar specifies the fields used for formatting and parsing dates and times.
 | ||||
| // The month and quarter names are identified numerically, starting at 1.
 | ||||
| // The day (of the week) names are identified with short strings, since there is
 | ||||
| // no universally-accepted numeric designation.
 | ||||
| `, | ||||
| 	"dates": ` | ||||
| // Dates contains information regarding the format and parsing of dates and times.
 | ||||
| `, | ||||
| 	"localeDisplayNames": ` | ||||
| // LocaleDisplayNames specifies localized display names for scripts, languages,
 | ||||
| // countries, currencies, and variants.
 | ||||
| `, | ||||
| 	"numbers": ` | ||||
| // Numbers supplies information for formatting and parsing numbers and currencies.
 | ||||
| `, | ||||
| } | ||||
| 
 | ||||
| type element struct { | ||||
| 	name      string // XML element name
 | ||||
| 	category  string // elements contained by this element
 | ||||
| 	signature string // category + attrKey*
 | ||||
| 
 | ||||
| 	attr []*attribute // attributes supported by this element.
 | ||||
| 	sub  []struct {   // parsed and evaluated sub elements of this element.
 | ||||
| 		e      *element | ||||
| 		repeat bool // true if the element needs to be a slice
 | ||||
| 	} | ||||
| 
 | ||||
| 	resolved bool // prevent multiple resolutions of this element.
 | ||||
| } | ||||
| 
 | ||||
| type attribute struct { | ||||
| 	name string | ||||
| 	key  string | ||||
| 	list []string | ||||
| 
 | ||||
| 	tag string // Go tag
 | ||||
| } | ||||
| 
 | ||||
| var ( | ||||
| 	reHead  = regexp.MustCompile(` *(\w+) +([\w\-]+)`) | ||||
| 	reAttr  = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`) | ||||
| 	reElem  = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`) | ||||
| 	reToken = regexp.MustCompile(`\w\-`) | ||||
| ) | ||||
| 
 | ||||
| // builder is used to read in the DTD files from CLDR and generate Go code
 | ||||
| // to be used with the encoding/xml package.
 | ||||
| type builder struct { | ||||
| 	w       io.Writer | ||||
| 	index   map[string]*element | ||||
| 	elem    []*element | ||||
| 	info    dtd | ||||
| 	version string | ||||
| } | ||||
| 
 | ||||
| func makeBuilder(w io.Writer, d dtd) builder { | ||||
| 	return builder{ | ||||
| 		w:     w, | ||||
| 		index: make(map[string]*element), | ||||
| 		elem:  []*element{}, | ||||
| 		info:  d, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // parseDTD parses a DTD file.
 | ||||
| func (b *builder) parseDTD(r io.Reader) { | ||||
| 	for d := xml.NewDecoder(r); ; { | ||||
| 		t, err := d.Token() | ||||
| 		if t == nil { | ||||
| 			break | ||||
| 		} | ||||
| 		failOnError(err) | ||||
| 		dir, ok := t.(xml.Directive) | ||||
| 		if !ok { | ||||
| 			continue | ||||
| 		} | ||||
| 		m := reHead.FindSubmatch(dir) | ||||
| 		dir = dir[len(m[0]):] | ||||
| 		ename := string(m[2]) | ||||
| 		el, elementFound := b.index[ename] | ||||
| 		switch string(m[1]) { | ||||
| 		case "ELEMENT": | ||||
| 			if elementFound { | ||||
| 				log.Fatal("parseDTD: duplicate entry for element %q", ename) | ||||
| 			} | ||||
| 			m := reElem.FindSubmatch(dir) | ||||
| 			if m == nil { | ||||
| 				log.Fatalf("parseDTD: invalid element %q", string(dir)) | ||||
| 			} | ||||
| 			if len(m[0]) != len(dir) { | ||||
| 				log.Fatal("parseDTD: invalid element %q", string(dir), len(dir), len(m[0]), string(m[0])) | ||||
| 			} | ||||
| 			s := string(m[1]) | ||||
| 			el = &element{ | ||||
| 				name:     ename, | ||||
| 				category: s, | ||||
| 			} | ||||
| 			b.index[ename] = el | ||||
| 		case "ATTLIST": | ||||
| 			if !elementFound { | ||||
| 				log.Fatalf("parseDTD: unknown element %q", ename) | ||||
| 			} | ||||
| 			s := string(dir) | ||||
| 			m := reAttr.FindStringSubmatch(s) | ||||
| 			if m == nil { | ||||
| 				log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir))) | ||||
| 			} | ||||
| 			if m[4] == "FIXED" { | ||||
| 				b.version = m[5] | ||||
| 			} else { | ||||
| 				switch m[1] { | ||||
| 				case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ : | ||||
| 				case "type", "choice": | ||||
| 				default: | ||||
| 					el.attr = append(el.attr, &attribute{ | ||||
| 						name: m[1], | ||||
| 						key:  s, | ||||
| 						list: reToken.FindAllString(m[3], -1), | ||||
| 					}) | ||||
| 					el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2]) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`) | ||||
| 
 | ||||
| // resolve takes a parsed element and converts it into structured data
 | ||||
| // that can be used to generate the XML code.
 | ||||
| func (b *builder) resolve(e *element) { | ||||
| 	if e.resolved { | ||||
| 		return | ||||
| 	} | ||||
| 	b.elem = append(b.elem, e) | ||||
| 	e.resolved = true | ||||
| 	s := e.category | ||||
| 	found := make(map[string]bool) | ||||
| 	sequenceStart := []int{} | ||||
| 	for len(s) > 0 { | ||||
| 		m := reCat.FindStringSubmatch(s) | ||||
| 		if m == nil { | ||||
| 			log.Fatalf("%s: invalid category string %q", e.name, s) | ||||
| 		} | ||||
| 		repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1]) | ||||
| 		switch m[1] { | ||||
| 		case "": | ||||
| 		case "(": | ||||
| 			sequenceStart = append(sequenceStart, len(e.sub)) | ||||
| 		case ")": | ||||
| 			if len(sequenceStart) == 0 { | ||||
| 				log.Fatalf("%s: unmatched closing parenthesis", e.name) | ||||
| 			} | ||||
| 			for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ { | ||||
| 				e.sub[i].repeat = e.sub[i].repeat || repeat | ||||
| 			} | ||||
| 			sequenceStart = sequenceStart[:len(sequenceStart)-1] | ||||
| 		default: | ||||
| 			if in(b.info.skipElem, m[1]) { | ||||
| 			} else if sub, ok := b.index[m[1]]; ok { | ||||
| 				if !found[sub.name] { | ||||
| 					e.sub = append(e.sub, struct { | ||||
| 						e      *element | ||||
| 						repeat bool | ||||
| 					}{sub, repeat}) | ||||
| 					found[sub.name] = true | ||||
| 					b.resolve(sub) | ||||
| 				} | ||||
| 			} else if m[1] == "#PCDATA" || m[1] == "ANY" { | ||||
| 			} else if m[1] != "EMPTY" { | ||||
| 				log.Fatalf("resolve:%s: element %q not found", e.name, m[1]) | ||||
| 			} | ||||
| 		} | ||||
| 		s = s[len(m[0]):] | ||||
| 	} | ||||
| } | ||||
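To make the tokenization concrete, here is a small, self-contained sketch of how reCat walks a content-model string; the input is an assumed example, not a real LDML category:

	package main

	import (
		"fmt"
		"regexp"
	)

	var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)

	func main() {
		s := "(identity, (alias | special)*)"
		for len(s) > 0 {
			m := reCat.FindStringSubmatch(s)
			if m == nil || len(m[0]) == 0 {
				break // guard against a non-advancing match
			}
			fmt.Printf("token=%q repeat=%q\n", m[1], m[2])
			s = s[len(m[0]):]
		}
	}

In resolve, a trailing "*" or "+" sets repeat for the element just seen, and a closing ")" back-propagates repeat to every sub-element appended since the matching "(".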
| 
 | ||||
| // in reports whether s is contained in set.
 | ||||
| func in(set []string, s string) bool { | ||||
| 	for _, v := range set { | ||||
| 		if v == s { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| var repl = strings.NewReplacer("-", " ", "_", " ") | ||||
| 
 | ||||
| // title puts the first character and each character following '_' or '-' in
 | ||||
| // title case and removes all occurrences of '_' and '-'.
 | ||||
| func title(s string) string { | ||||
| 	return strings.Replace(strings.Title(repl.Replace(s)), " ", "", -1) | ||||
| } | ||||
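A few hedged examples of the transformation (inputs assumed):

	title("languages")          // "Languages"
	title("measurement_system") // "MeasurementSystem"
	title("territory-info")     // "TerritoryInfo"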
| 
 | ||||
| // writeElem generates Go code for a single element, recursively.
 | ||||
| func (b *builder) writeElem(tab int, e *element) { | ||||
| 	p := func(f string, x ...interface{}) { | ||||
| 		f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1) | ||||
| 		fmt.Fprintf(b.w, f, x...) | ||||
| 	} | ||||
| 	if len(e.sub) == 0 && len(e.attr) == 0 { | ||||
| 		p("Common") | ||||
| 		return | ||||
| 	} | ||||
| 	p("struct {") | ||||
| 	tab++ | ||||
| 	p("\nCommon") | ||||
| 	for _, attr := range e.attr { | ||||
| 		if !in(b.info.skipAttr, attr.name) { | ||||
| 			p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name) | ||||
| 		} | ||||
| 	} | ||||
| 	for _, sub := range e.sub { | ||||
| 		if in(b.info.predefined, sub.e.name) { | ||||
| 			p("\n%sElem", sub.e.name) | ||||
| 			continue | ||||
| 		} | ||||
| 		if in(b.info.skipElem, sub.e.name) { | ||||
| 			continue | ||||
| 		} | ||||
| 		p("\n%s ", title(sub.e.name)) | ||||
| 		if sub.repeat { | ||||
| 			p("[]") | ||||
| 		} | ||||
| 		p("*") | ||||
| 		if in(b.info.top, sub.e.name) { | ||||
| 			p(title(sub.e.name)) | ||||
| 		} else { | ||||
| 			b.writeElem(tab, sub.e) | ||||
| 		} | ||||
| 		p(" `xml:\"%s\"`", sub.e.name) | ||||
| 	} | ||||
| 	tab-- | ||||
| 	p("\n}") | ||||
| } | ||||
| 
 | ||||
| // write generates the Go XML code.
 | ||||
| func (b *builder) write() { | ||||
| 	for i, name := range b.info.top { | ||||
| 		e := b.index[name] | ||||
| 		if e != nil { | ||||
| 			fmt.Fprint(b.w, comments[name]) | ||||
| 			name := title(e.name) | ||||
| 			if i == 0 { | ||||
| 				name = b.info.root | ||||
| 			} | ||||
| 			fmt.Fprintf(b.w, "type %s ", name) | ||||
| 			b.writeElem(0, e) | ||||
| 			fmt.Fprint(b.w, "\n") | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
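The emitted code has roughly the following shape; this is an illustrative sketch only (the field names, tags, and the Common header below are assumptions, not the actual generated output):

	type Identity struct {
		Common
		Version *struct {
			Common
			Number string `xml:"number,attr"`
		} `xml:"version"`
		Language *Common `xml:"language"`
	}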
|  | @ -1,602 +0,0 @@ | |||
| // Copyright 2013 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| package cldr | ||||
| 
 | ||||
| // This file implements the various inheritance constructs defined by LDML.
 | ||||
| // See http://www.unicode.org/reports/tr35/#Inheritance_and_Validity
 | ||||
| // for more details.
 | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"log" | ||||
| 	"reflect" | ||||
| 	"regexp" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // fieldIter iterates over fields in a struct. It includes
 | ||||
| // fields of embedded structs.
 | ||||
| type fieldIter struct { | ||||
| 	v        reflect.Value | ||||
| 	index, n []int | ||||
| } | ||||
| 
 | ||||
| func iter(v reflect.Value) fieldIter { | ||||
| 	if v.Kind() != reflect.Struct { | ||||
| 		log.Panicf("value %v must be a struct", v) | ||||
| 	} | ||||
| 	i := fieldIter{ | ||||
| 		v:     v, | ||||
| 		index: []int{0}, | ||||
| 		n:     []int{v.NumField()}, | ||||
| 	} | ||||
| 	i.descent() | ||||
| 	return i | ||||
| } | ||||
| 
 | ||||
| func (i *fieldIter) descent() { | ||||
| 	for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() { | ||||
| 		i.index = append(i.index, 0) | ||||
| 		i.n = append(i.n, f.Type.NumField()) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (i *fieldIter) done() bool { | ||||
| 	return len(i.index) == 1 && i.index[0] >= i.n[0] | ||||
| } | ||||
| 
 | ||||
| func skip(f reflect.StructField) bool { | ||||
| 	return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z') | ||||
| } | ||||
| 
 | ||||
| func (i *fieldIter) next() { | ||||
| 	for { | ||||
| 		k := len(i.index) - 1 | ||||
| 		i.index[k]++ | ||||
| 		if i.index[k] < i.n[k] { | ||||
| 			if !skip(i.field()) { | ||||
| 				break | ||||
| 			} | ||||
| 		} else { | ||||
| 			if k == 0 { | ||||
| 				return | ||||
| 			} | ||||
| 			i.index = i.index[:k] | ||||
| 			i.n = i.n[:k] | ||||
| 		} | ||||
| 	} | ||||
| 	i.descent() | ||||
| } | ||||
| 
 | ||||
| func (i *fieldIter) value() reflect.Value { | ||||
| 	return i.v.FieldByIndex(i.index) | ||||
| } | ||||
| 
 | ||||
| func (i *fieldIter) field() reflect.StructField { | ||||
| 	return i.v.Type().FieldByIndex(i.index) | ||||
| } | ||||
| 
 | ||||
| type visitor func(v reflect.Value) error | ||||
| 
 | ||||
| var stopDescent = fmt.Errorf("do not recurse") | ||||
| 
 | ||||
| func (f visitor) visit(x interface{}) error { | ||||
| 	return f.visitRec(reflect.ValueOf(x)) | ||||
| } | ||||
| 
 | ||||
| // visit recursively calls f on all nodes in v.
 | ||||
| func (f visitor) visitRec(v reflect.Value) error { | ||||
| 	if v.Kind() == reflect.Ptr { | ||||
| 		if v.IsNil() { | ||||
| 			return nil | ||||
| 		} | ||||
| 		return f.visitRec(v.Elem()) | ||||
| 	} | ||||
| 	if err := f(v); err != nil { | ||||
| 		if err == stopDescent { | ||||
| 			return nil | ||||
| 		} | ||||
| 		return err | ||||
| 	} | ||||
| 	switch v.Kind() { | ||||
| 	case reflect.Struct: | ||||
| 		for i := iter(v); !i.done(); i.next() { | ||||
| 			if err := f.visitRec(i.value()); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	case reflect.Slice: | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			if err := f.visitRec(v.Index(i)); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // getPath is used for error reporting purposes only.
 | ||||
| func getPath(e Elem) string { | ||||
| 	if e == nil { | ||||
| 		return "<nil>" | ||||
| 	} | ||||
| 	if e.enclosing() == nil { | ||||
| 		return e.GetCommon().name | ||||
| 	} | ||||
| 	if e.GetCommon().Type == "" { | ||||
| 		return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name) | ||||
| 	} | ||||
| 	return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type) | ||||
| } | ||||
| 
 | ||||
| // xmlName returns the xml name of the element or attribute.
 | ||||
| func xmlName(f reflect.StructField) (name string, attr bool) { | ||||
| 	tags := strings.Split(f.Tag.Get("xml"), ",") | ||||
| 	for _, s := range tags { | ||||
| 		attr = attr || s == "attr" | ||||
| 	} | ||||
| 	return tags[0], attr | ||||
| } | ||||
| 
 | ||||
| func findField(v reflect.Value, key string) (reflect.Value, error) { | ||||
| 	v = reflect.Indirect(v) | ||||
| 	for i := iter(v); !i.done(); i.next() { | ||||
| 		if n, _ := xmlName(i.field()); n == key { | ||||
| 			return i.value(), nil | ||||
| 		} | ||||
| 	} | ||||
| 	return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface()) | ||||
| } | ||||
| 
 | ||||
| var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`) | ||||
| 
 | ||||
| func walkXPath(e Elem, path string) (res Elem, err error) { | ||||
| 	for _, c := range strings.Split(path, "/") { | ||||
| 		if c == ".." { | ||||
| 			if e = e.enclosing(); e == nil { | ||||
| 				return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path) | ||||
| 			} | ||||
| 			continue | ||||
| 		} else if c == "" { | ||||
| 			continue | ||||
| 		} | ||||
| 		m := xpathPart.FindStringSubmatch(c) | ||||
| 		if len(m) == 0 || len(m[0]) != len(c) { | ||||
| 			return nil, fmt.Errorf("cldr: syntax error in path component %q", c) | ||||
| 		} | ||||
| 		v, err := findField(reflect.ValueOf(e), m[1]) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		switch v.Kind() { | ||||
| 		case reflect.Slice: | ||||
| 			i := 0 | ||||
| 			if m[2] != "" || v.Len() > 1 { | ||||
| 				if m[2] == "" { | ||||
| 					m[2] = "type" | ||||
| 					if m[3] = e.GetCommon().Default(); m[3] == "" { | ||||
| 						return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1]) | ||||
| 					} | ||||
| 				} | ||||
| 				for ; i < v.Len(); i++ { | ||||
| 					vi := v.Index(i) | ||||
| 					key, err := findField(vi.Elem(), m[2]) | ||||
| 					if err != nil { | ||||
| 						return nil, err | ||||
| 					} | ||||
| 					key = reflect.Indirect(key) | ||||
| 					if key.Kind() == reflect.String && key.String() == m[3] { | ||||
| 						break | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 			if i == v.Len() || v.Index(i).IsNil() { | ||||
| 				return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3]) | ||||
| 			} | ||||
| 			e = v.Index(i).Interface().(Elem) | ||||
| 		case reflect.Ptr: | ||||
| 			if v.IsNil() { | ||||
| 				return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name) | ||||
| 			} | ||||
| 			var ok bool | ||||
| 			if e, ok = v.Interface().(Elem); !ok { | ||||
| 				return nil, fmt.Errorf("cldr: %q is not an XML element", m[1]) | ||||
| 			} else if m[2] != "" || m[3] != "" { | ||||
| 				return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1]) | ||||
| 			} | ||||
| 		default: | ||||
| 			return nil, fmt.Errorf("cldr: %q is not an XML element", m[1]) | ||||
| 		} | ||||
| 	} | ||||
| 	return e, nil | ||||
| } | ||||
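A hedged example of the path syntax walkXPath accepts, relative to some element e (the element and attribute names here are assumed):

	months, err := walkXPath(e, "../calendar[@type='gregorian']/months")

Each component names a child field by its xml tag; ".." moves to the enclosing element, and the optional [@attr='value'] predicate selects one entry from a slice-valued field (defaulting to a match on "type" when the predicate is omitted and the slice has more than one entry).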
| 
 | ||||
| const absPrefix = "//ldml/" | ||||
| 
 | ||||
| func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) { | ||||
| 	if src != "locale" { | ||||
| 		if !strings.HasPrefix(path, absPrefix) { | ||||
| 			return nil, fmt.Errorf("cldr: expected absolute path, found %q", path) | ||||
| 		} | ||||
| 		path = path[len(absPrefix):] | ||||
| 		if e, err = cldr.resolve(src); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 	return walkXPath(e, path) | ||||
| } | ||||
| 
 | ||||
| func (cldr *CLDR) resolveAndMergeAlias(e Elem) error { | ||||
| 	alias := e.GetCommon().Alias | ||||
| 	if alias == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	a, err := cldr.resolveAlias(e, alias.Source, alias.Path) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err) | ||||
| 	} | ||||
| 	// Ensure alias node was already evaluated. TODO: avoid double evaluation.
 | ||||
| 	err = cldr.resolveAndMergeAlias(a) | ||||
| 	v := reflect.ValueOf(e).Elem() | ||||
| 	for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() { | ||||
| 		if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() { | ||||
| 			if _, attr := xmlName(i.field()); !attr { | ||||
| 				v.FieldByIndex(i.index).Set(vv) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| func (cldr *CLDR) aliasResolver() visitor { | ||||
| 	return func(v reflect.Value) (err error) { | ||||
| 		if e, ok := v.Addr().Interface().(Elem); ok { | ||||
| 			err = cldr.resolveAndMergeAlias(e) | ||||
| 			if err == nil && blocking[e.GetCommon().name] { | ||||
| 				return stopDescent | ||||
| 			} | ||||
| 		} | ||||
| 		return err | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // elements within blocking elements do not inherit.
 | ||||
| // Taken from CLDR's supplementalMetaData.xml.
 | ||||
| var blocking = map[string]bool{ | ||||
| 	"identity":         true, | ||||
| 	"supplementalData": true, | ||||
| 	"cldrTest":         true, | ||||
| 	"collation":        true, | ||||
| 	"transform":        true, | ||||
| } | ||||
| 
 | ||||
| // Distinguishing attributes affect inheritance; two elements with different
 | ||||
| // distinguishing attributes are treated as different for purposes of inheritance,
 | ||||
| // except when such attributes occur in the indicated elements.
 | ||||
| // Taken from CLDR's supplementalMetaData.xml.
 | ||||
| var distinguishing = map[string][]string{ | ||||
| 	"key":        nil, | ||||
| 	"request_id": nil, | ||||
| 	"id":         nil, | ||||
| 	"registry":   nil, | ||||
| 	"alt":        nil, | ||||
| 	"iso4217":    nil, | ||||
| 	"iso3166":    nil, | ||||
| 	"mzone":      nil, | ||||
| 	"from":       nil, | ||||
| 	"to":         nil, | ||||
| 	"type": { | ||||
| 		"abbreviationFallback", | ||||
| 		"default", | ||||
| 		"mapping", | ||||
| 		"measurementSystem", | ||||
| 		"preferenceOrdering", | ||||
| 	}, | ||||
| 	"numberSystem": nil, | ||||
| } | ||||
| 
 | ||||
| func in(set []string, s string) bool { | ||||
| 	for _, v := range set { | ||||
| 		if v == s { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| // attrKey computes a key based on the distinguishing attributes of
 | ||||
| // an element and its values.
 | ||||
| func attrKey(v reflect.Value, exclude ...string) string { | ||||
| 	parts := []string{} | ||||
| 	ename := v.Interface().(Elem).GetCommon().name | ||||
| 	v = v.Elem() | ||||
| 	for i := iter(v); !i.done(); i.next() { | ||||
| 		if name, attr := xmlName(i.field()); attr { | ||||
| 			if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) { | ||||
| 				v := i.value() | ||||
| 				if v.Kind() == reflect.Ptr { | ||||
| 					v = v.Elem() | ||||
| 				} | ||||
| 				if v.IsValid() { | ||||
| 					parts = append(parts, fmt.Sprintf("%s=%s", name, v.String())) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	sort.Strings(parts) | ||||
| 	return strings.Join(parts, ";") | ||||
| } | ||||
| 
 | ||||
| // Key returns a key for e derived from all distinguishing attributes
 | ||||
| // except those specified by exclude.
 | ||||
| func Key(e Elem, exclude ...string) string { | ||||
| 	return attrKey(reflect.ValueOf(e), exclude...) | ||||
| } | ||||
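A hedged example: for an element parsed from <month type="1" alt="variant"> (an assumed input), the distinguishing attributes are alt and type, so:

	key := Key(e)       // "alt=variant;type=1" (parts are sorted)
	key = Key(e, "alt") // "type=1"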
| 
 | ||||
| // linkEnclosing sets the enclosing element as well as the name
 | ||||
| // for all sub-elements of child, recursively.
 | ||||
| func linkEnclosing(parent, child Elem) { | ||||
| 	child.setEnclosing(parent) | ||||
| 	v := reflect.ValueOf(child).Elem() | ||||
| 	for i := iter(v); !i.done(); i.next() { | ||||
| 		vf := i.value() | ||||
| 		if vf.Kind() == reflect.Slice { | ||||
| 			for j := 0; j < vf.Len(); j++ { | ||||
| 				linkEnclosing(child, vf.Index(j).Interface().(Elem)) | ||||
| 			} | ||||
| 		} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct { | ||||
| 			linkEnclosing(child, vf.Interface().(Elem)) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func setNames(e Elem, name string) { | ||||
| 	e.setName(name) | ||||
| 	v := reflect.ValueOf(e).Elem() | ||||
| 	for i := iter(v); !i.done(); i.next() { | ||||
| 		vf := i.value() | ||||
| 		name, _ = xmlName(i.field()) | ||||
| 		if vf.Kind() == reflect.Slice { | ||||
| 			for j := 0; j < vf.Len(); j++ { | ||||
| 				setNames(vf.Index(j).Interface().(Elem), name) | ||||
| 			} | ||||
| 		} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct { | ||||
| 			setNames(vf.Interface().(Elem), name) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // deepCopy copies elements of v recursively.  All elements of v that may
 | ||||
| // be modified by inheritance are explicitly copied.
 | ||||
| func deepCopy(v reflect.Value) reflect.Value { | ||||
| 	switch v.Kind() { | ||||
| 	case reflect.Ptr: | ||||
| 		if v.IsNil() || v.Elem().Kind() != reflect.Struct { | ||||
| 			return v | ||||
| 		} | ||||
| 		nv := reflect.New(v.Elem().Type()) | ||||
| 		nv.Elem().Set(v.Elem()) | ||||
| 		deepCopyRec(nv.Elem(), v.Elem()) | ||||
| 		return nv | ||||
| 	case reflect.Slice: | ||||
| 		nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len()) | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			deepCopyRec(nv.Index(i), v.Index(i)) | ||||
| 		} | ||||
| 		return nv | ||||
| 	} | ||||
| 	panic("deepCopy: must be called with pointer or slice") | ||||
| } | ||||
| 
 | ||||
| // deepCopyRec is only called by deepCopy.
 | ||||
| func deepCopyRec(nv, v reflect.Value) { | ||||
| 	if v.Kind() == reflect.Struct { | ||||
| 		t := v.Type() | ||||
| 		for i := 0; i < v.NumField(); i++ { | ||||
| 			if name, attr := xmlName(t.Field(i)); name != "" && !attr { | ||||
| 				deepCopyRec(nv.Field(i), v.Field(i)) | ||||
| 			} | ||||
| 		} | ||||
| 	} else { | ||||
| 		nv.Set(deepCopy(v)) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // newNode is used to insert a missing node during inheritance.
 | ||||
| func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value { | ||||
| 	n := reflect.New(v.Type()) | ||||
| 	for i := iter(v); !i.done(); i.next() { | ||||
| 		if name, attr := xmlName(i.field()); name == "" || attr { | ||||
| 			n.Elem().FieldByIndex(i.index).Set(i.value()) | ||||
| 		} | ||||
| 	} | ||||
| 	n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem)) | ||||
| 	return n | ||||
| } | ||||
| 
 | ||||
| // v and parent must be pointers to structs.
 | ||||
| func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) { | ||||
| 	t := v.Type() | ||||
| 	nv := reflect.New(t) | ||||
| 	nv.Elem().Set(v) | ||||
| 	for i := iter(v); !i.done(); i.next() { | ||||
| 		vf := i.value() | ||||
| 		f := i.field() | ||||
| 		name, attr := xmlName(f) | ||||
| 		if name == "" || attr { | ||||
| 			continue | ||||
| 		} | ||||
| 		pf := parent.FieldByIndex(i.index) | ||||
| 		if blocking[name] { | ||||
| 			if vf.IsNil() { | ||||
| 				vf = pf | ||||
| 			} | ||||
| 			nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf)) | ||||
| 			continue | ||||
| 		} | ||||
| 		switch f.Type.Kind() { | ||||
| 		case reflect.Ptr: | ||||
| 			if f.Type.Elem().Kind() == reflect.Struct { | ||||
| 				if !vf.IsNil() { | ||||
| 					if vf, err = cldr.inheritStructPtr(vf, pf); err != nil { | ||||
| 						return reflect.Value{}, err | ||||
| 					} | ||||
| 					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem)) | ||||
| 					nv.Elem().FieldByIndex(i.index).Set(vf) | ||||
| 				} else if !pf.IsNil() { | ||||
| 					n := cldr.newNode(pf.Elem(), v) | ||||
| 					if vf, err = cldr.inheritStructPtr(n, pf); err != nil { | ||||
| 						return reflect.Value{}, err | ||||
| 					} | ||||
| 					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem)) | ||||
| 					nv.Elem().FieldByIndex(i.index).Set(vf) | ||||
| 				} | ||||
| 			} | ||||
| 		case reflect.Slice: | ||||
| 			vf, err := cldr.inheritSlice(nv.Elem(), vf, pf) | ||||
| 			if err != nil { | ||||
| 				return reflect.Zero(t), err | ||||
| 			} | ||||
| 			nv.Elem().FieldByIndex(i.index).Set(vf) | ||||
| 		} | ||||
| 	} | ||||
| 	return nv, nil | ||||
| } | ||||
| 
 | ||||
| func root(e Elem) *LDML { | ||||
| 	for ; e.enclosing() != nil; e = e.enclosing() { | ||||
| 	} | ||||
| 	return e.(*LDML) | ||||
| } | ||||
| 
 | ||||
| // inheritStructPtr first merges possible aliases in with v and then inherits
 | ||||
| // any underspecified elements from parent.
 | ||||
| func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) { | ||||
| 	if !v.IsNil() { | ||||
| 		e := v.Interface().(Elem).GetCommon() | ||||
| 		alias := e.Alias | ||||
| 		if alias == nil && !parent.IsNil() { | ||||
| 			alias = parent.Interface().(Elem).GetCommon().Alias | ||||
| 		} | ||||
| 		if alias != nil { | ||||
| 			a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path) | ||||
| 			if a != nil { | ||||
| 				if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil { | ||||
| 					return reflect.Value{}, err | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		if !parent.IsNil() { | ||||
| 			return cldr.inheritFields(v.Elem(), parent.Elem()) | ||||
| 		} | ||||
| 	} else if parent.IsNil() { | ||||
| 		panic("should not reach here") | ||||
| 	} | ||||
| 	return v, nil | ||||
| } | ||||
| 
 | ||||
| // Must be slice of struct pointers.
 | ||||
| func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) { | ||||
| 	t := v.Type() | ||||
| 	index := make(map[string]reflect.Value) | ||||
| 	if !v.IsNil() { | ||||
| 		for i := 0; i < v.Len(); i++ { | ||||
| 			vi := v.Index(i) | ||||
| 			key := attrKey(vi) | ||||
| 			index[key] = vi | ||||
| 		} | ||||
| 	} | ||||
| 	if !parent.IsNil() { | ||||
| 		for i := 0; i < parent.Len(); i++ { | ||||
| 			vi := parent.Index(i) | ||||
| 			key := attrKey(vi) | ||||
| 			if w, ok := index[key]; ok { | ||||
| 				index[key], err = cldr.inheritStructPtr(w, vi) | ||||
| 			} else { | ||||
| 				n := cldr.newNode(vi.Elem(), enc) | ||||
| 				index[key], err = cldr.inheritStructPtr(n, vi) | ||||
| 			} | ||||
| 			index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem)) | ||||
| 			if err != nil { | ||||
| 				return v, err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	keys := make([]string, 0, len(index)) | ||||
| 	for k := range index { | ||||
| 		keys = append(keys, k) | ||||
| 	} | ||||
| 	sort.Strings(keys) | ||||
| 	sl := reflect.MakeSlice(t, len(index), len(index)) | ||||
| 	for i, k := range keys { | ||||
| 		sl.Index(i).Set(index[k]) | ||||
| 	} | ||||
| 	return sl, nil | ||||
| } | ||||
| 
 | ||||
| func parentLocale(loc string) string { | ||||
| 	parts := strings.Split(loc, "_") | ||||
| 	if len(parts) == 1 { | ||||
| 		return "root" | ||||
| 	} | ||||
| 	parts = parts[:len(parts)-1] | ||||
| 	key := strings.Join(parts, "_") | ||||
| 	return key | ||||
| } | ||||
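Hedged examples of the truncation chain (inputs assumed):

	parentLocale("sr_Latn_RS") // "sr_Latn"
	parentLocale("sr_Latn")    // "sr"
	parentLocale("sr")         // "root"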
| 
 | ||||
| func (cldr *CLDR) resolve(loc string) (res *LDML, err error) { | ||||
| 	if r := cldr.resolved[loc]; r != nil { | ||||
| 		return r, nil | ||||
| 	} | ||||
| 	x := cldr.RawLDML(loc) | ||||
| 	if x == nil { | ||||
| 		return nil, fmt.Errorf("cldr: unknown locale %q", loc) | ||||
| 	} | ||||
| 	var v reflect.Value | ||||
| 	if loc == "root" { | ||||
| 		x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML) | ||||
| 		linkEnclosing(nil, x) | ||||
| 		err = cldr.aliasResolver().visit(x) | ||||
| 	} else { | ||||
| 		key := parentLocale(loc) | ||||
| 		var parent *LDML | ||||
| 		for ; cldr.locale[key] == nil; key = parentLocale(key) { | ||||
| 		} | ||||
| 		if parent, err = cldr.resolve(key); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem()) | ||||
| 		x = v.Interface().(*LDML) | ||||
| 		linkEnclosing(nil, x) | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	cldr.resolved[loc] = x | ||||
| 	return x, err | ||||
| } | ||||
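A hedged usage sketch: resolution is normally reached through the package's exported locale accessor rather than by calling resolve directly (decoder setup elided; treat the accessor name as an assumption):

	var data *cldr.CLDR // obtained from a cldr.Decoder
	de, err := data.LDML("de_CH") // resolves de_CH, then de, then root
	if err != nil {
		log.Fatal(err)
	}
	_ = de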
| 
 | ||||
| // finalize finalizes the initialization of the raw LDML structs.  It also
 | ||||
| // removes unwanted fields, as specified by filter, so that they will not
 | ||||
| // be unnecessarily evaluated.
 | ||||
| func (cldr *CLDR) finalize(filter []string) { | ||||
| 	for _, x := range cldr.locale { | ||||
| 		if filter != nil { | ||||
| 			v := reflect.ValueOf(x).Elem() | ||||
| 			t := v.Type() | ||||
| 			for i := 0; i < v.NumField(); i++ { | ||||
| 				f := t.Field(i) | ||||
| 				name, _ := xmlName(f) | ||||
| 				if name != "" && name != "identity" && !in(filter, name) { | ||||
| 					v.Field(i).Set(reflect.Zero(f.Type)) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		linkEnclosing(nil, x) // for resolving aliases and paths
 | ||||
| 		setNames(x, "ldml") | ||||
| 	} | ||||
| } | ||||
|  | @ -1,144 +0,0 @@ | |||
| // Copyright 2013 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| package cldr | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"sort" | ||||
| ) | ||||
| 
 | ||||
| // Slice provides utilities for modifying slices of elements.
 | ||||
| // It can be wrapped around any slice whose element type implements the
 | ||||
| // Elem interface.
 | ||||
| type Slice struct { | ||||
| 	ptr reflect.Value | ||||
| 	typ reflect.Type | ||||
| } | ||||
| 
 | ||||
| // Value returns the reflect.Value of the underlying slice.
 | ||||
| func (s *Slice) Value() reflect.Value { | ||||
| 	return s.ptr.Elem() | ||||
| } | ||||
| 
 | ||||
| // MakeSlice wraps a pointer to a slice of Elems.
 | ||||
| // It replaces the array pointed to by the slice so that subsequent modifications
 | ||||
| // do not alter the data in a CLDR type.
 | ||||
| // It panics if an incorrect type is passed.
 | ||||
| func MakeSlice(slicePtr interface{}) Slice { | ||||
| 	ptr := reflect.ValueOf(slicePtr) | ||||
| 	if ptr.Kind() != reflect.Ptr { | ||||
| 		panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type())) | ||||
| 	} | ||||
| 	sl := ptr.Elem() | ||||
| 	if sl.Kind() != reflect.Slice { | ||||
| 		panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type())) | ||||
| 	} | ||||
| 	intf := reflect.TypeOf((*Elem)(nil)).Elem() | ||||
| 	if !sl.Type().Elem().Implements(intf) { | ||||
| 		panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem())) | ||||
| 	} | ||||
| 	nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len()) | ||||
| 	reflect.Copy(nsl, sl) | ||||
| 	sl.Set(nsl) | ||||
| 	return Slice{ | ||||
| 		ptr: ptr, | ||||
| 		typ: sl.Type().Elem().Elem(), | ||||
| 	} | ||||
| } | ||||
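A hedged usage sketch, combining MakeSlice with Filter (the ldml value and its field path are assumed, purely for illustration):

	months := ldml.Dates.Calendars.Calendar[0].Months.MonthContext
	s := cldr.MakeSlice(&months)
	s.Filter(func(e cldr.Elem) bool {
		return e.GetCommon().Type == "format" // keep only format contexts
	})

Because MakeSlice copies the backing array first, the Filter call mutates months without touching the data held by the CLDR value.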
| 
 | ||||
| func (s Slice) indexForAttr(a string) []int { | ||||
| 	for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() { | ||||
| 		if n, _ := xmlName(i.field()); n == a { | ||||
| 			return i.index | ||||
| 		} | ||||
| 	} | ||||
| 	panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ)) | ||||
| } | ||||
| 
 | ||||
| // Filter filters s to only include elements for which fn returns true.
 | ||||
| func (s Slice) Filter(fn func(e Elem) bool) { | ||||
| 	k := 0 | ||||
| 	sl := s.Value() | ||||
| 	for i := 0; i < sl.Len(); i++ { | ||||
| 		vi := sl.Index(i) | ||||
| 		if fn(vi.Interface().(Elem)) { | ||||
| 			sl.Index(k).Set(vi) | ||||
| 			k++ | ||||
| 		} | ||||
| 	} | ||||
| 	sl.Set(sl.Slice(0, k)) | ||||
| } | ||||
| 
 | ||||
| // Group finds elements in s for which fn returns the same value and groups
 | ||||
| // them in a new Slice.
 | ||||
| func (s Slice) Group(fn func(e Elem) string) []Slice { | ||||
| 	m := make(map[string][]reflect.Value) | ||||
| 	sl := s.Value() | ||||
| 	for i := 0; i < sl.Len(); i++ { | ||||
| 		vi := sl.Index(i) | ||||
| 		key := fn(vi.Interface().(Elem)) | ||||
| 		m[key] = append(m[key], vi) | ||||
| 	} | ||||
| 	keys := []string{} | ||||
| 	for k := range m { | ||||
| 		keys = append(keys, k) | ||||
| 	} | ||||
| 	sort.Strings(keys) | ||||
| 	res := []Slice{} | ||||
| 	for _, k := range keys { | ||||
| 		nsl := reflect.New(sl.Type()) | ||||
| 		nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...)) | ||||
| 		res = append(res, MakeSlice(nsl.Interface())) | ||||
| 	} | ||||
| 	return res | ||||
| } | ||||
| 
 | ||||
| // SelectAnyOf filters s to contain only elements for which attr matches
 | ||||
| // any of the values.
 | ||||
| func (s Slice) SelectAnyOf(attr string, values ...string) { | ||||
| 	index := s.indexForAttr(attr) | ||||
| 	s.Filter(func(e Elem) bool { | ||||
| 		vf := reflect.ValueOf(e).Elem().FieldByIndex(index) | ||||
| 		return in(values, vf.String()) | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
| // SelectOnePerGroup filters s to include at most one element e per group of
 | ||||
| // elements matching Key(attr), where e has an attribute a that matches any
 | ||||
| // of the values in v.
 | ||||
| // If more than one element in a group matches a value in v, preference
 | ||||
| // is given to the element that matches the first value in v.
 | ||||
| func (s Slice) SelectOnePerGroup(a string, v []string) { | ||||
| 	index := s.indexForAttr(a) | ||||
| 	grouped := s.Group(func(e Elem) string { return Key(e, a) }) | ||||
| 	sl := s.Value() | ||||
| 	sl.Set(sl.Slice(0, 0)) | ||||
| 	for _, g := range grouped { | ||||
| 		e := reflect.Value{} | ||||
| 		found := len(v) | ||||
| 		gsl := g.Value() | ||||
| 		for i := 0; i < gsl.Len(); i++ { | ||||
| 			vi := gsl.Index(i).Elem().FieldByIndex(index) | ||||
| 			j := 0 | ||||
| 			for ; j < len(v) && v[j] != vi.String(); j++ { | ||||
| 			} | ||||
| 			if j < found { | ||||
| 				found = j | ||||
| 				e = gsl.Index(i) | ||||
| 			} | ||||
| 		} | ||||
| 		if found < len(v) { | ||||
| 			sl.Set(reflect.Append(sl, e)) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // SelectDraft drops all elements from the list with a draft level smaller than d
 | ||||
| // and selects the highest draft level of the remaining.
 | ||||
| // This method assumes that the input CLDR is canonicalized.
 | ||||
| func (s Slice) SelectDraft(d Draft) { | ||||
| 	s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):]) | ||||
| } | ||||
										
											
File diff suppressed because it is too large
|  | @ -1,113 +0,0 @@ | |||
| // Copyright 2015 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| // +build ignore
 | ||||
| 
 | ||||
| package main | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"reflect" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| 
 | ||||
| 	"golang.org/x/text/internal/gen" | ||||
| 	"golang.org/x/text/internal/ucd" | ||||
| 	"golang.org/x/text/unicode/rangetable" | ||||
| ) | ||||
| 
 | ||||
| var versionList = flag.String("versions", "", | ||||
| 	"list of versions for which to generate RangeTables") | ||||
| 
 | ||||
| const bootstrapMessage = `No versions specified. | ||||
| To bootstrap the code generation, run: | ||||
| 	go run gen.go --versions=4.1.0,5.0.0,6.0.0,6.1.0,6.2.0,6.3.0,7.0.0 | ||||
| 
 | ||||
| and ensure that the latest versions are included by checking: | ||||
| 	http://www.unicode.org/Public/`
 | ||||
| 
 | ||||
| func getVersions() []string { | ||||
| 	if *versionList == "" { | ||||
| 		log.Fatal(bootstrapMessage) | ||||
| 	} | ||||
| 
 | ||||
| 	versions := strings.Split(*versionList, ",") | ||||
| 	sort.Strings(versions) | ||||
| 
 | ||||
| 	// Ensure that at least the current version is included.
 | ||||
| 	for _, v := range versions { | ||||
| 		if v == gen.UnicodeVersion() { | ||||
| 			return versions | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	versions = append(versions, gen.UnicodeVersion()) | ||||
| 	sort.Strings(versions) | ||||
| 	return versions | ||||
| } | ||||
| 
 | ||||
| func main() { | ||||
| 	gen.Init() | ||||
| 
 | ||||
| 	versions := getVersions() | ||||
| 
 | ||||
| 	w := &bytes.Buffer{} | ||||
| 
 | ||||
| 	fmt.Fprintf(w, "//go:generate go run gen.go --versions=%s\n\n", strings.Join(versions, ",")) | ||||
| 	fmt.Fprintf(w, "import \"unicode\"\n\n") | ||||
| 
 | ||||
| 	vstr := func(s string) string { return strings.Replace(s, ".", "_", -1) } | ||||
| 
 | ||||
| 	fmt.Fprintf(w, "var assigned = map[string]*unicode.RangeTable{\n") | ||||
| 	for _, v := range versions { | ||||
| 		fmt.Fprintf(w, "\t%q: assigned%s,\n", v, vstr(v)) | ||||
| 	} | ||||
| 	fmt.Fprintf(w, "}\n\n") | ||||
| 
 | ||||
| 	var size int | ||||
| 	for _, v := range versions { | ||||
| 		assigned := []rune{} | ||||
| 
 | ||||
| 		r := gen.Open("http://www.unicode.org/Public/", "", v+"/ucd/UnicodeData.txt") | ||||
| 		ucd.Parse(r, func(p *ucd.Parser) { | ||||
| 			assigned = append(assigned, p.Rune(0)) | ||||
| 		}) | ||||
| 
 | ||||
| 		rt := rangetable.New(assigned...) | ||||
| 		sz := int(reflect.TypeOf(unicode.RangeTable{}).Size()) | ||||
| 		sz += int(reflect.TypeOf(unicode.Range16{}).Size()) * len(rt.R16) | ||||
| 		sz += int(reflect.TypeOf(unicode.Range32{}).Size()) * len(rt.R32) | ||||
| 
 | ||||
| 		fmt.Fprintf(w, "// size %d bytes (%d KiB)\n", sz, sz/1024) | ||||
| 		fmt.Fprintf(w, "var assigned%s = ", vstr(v)) | ||||
| 		print(w, rt) | ||||
| 
 | ||||
| 		size += sz | ||||
| 	} | ||||
| 
 | ||||
| 	fmt.Fprintf(w, "// Total size %d bytes (%d KiB)\n", size, size/1024) | ||||
| 
 | ||||
| 	gen.WriteGoFile("tables.go", "rangetable", w.Bytes()) | ||||
| } | ||||
| 
 | ||||
| func print(w io.Writer, rt *unicode.RangeTable) { | ||||
| 	fmt.Fprintln(w, "&unicode.RangeTable{") | ||||
| 	fmt.Fprintln(w, "\tR16: []unicode.Range16{") | ||||
| 	for _, r := range rt.R16 { | ||||
| 		fmt.Fprintf(w, "\t\t{%#04x, %#04x, %d},\n", r.Lo, r.Hi, r.Stride) | ||||
| 	} | ||||
| 	fmt.Fprintln(w, "\t},") | ||||
| 	fmt.Fprintln(w, "\tR32: []unicode.Range32{") | ||||
| 	for _, r := range rt.R32 { | ||||
| 		fmt.Fprintf(w, "\t\t{%#08x, %#08x, %d},\n", r.Lo, r.Hi, r.Stride) | ||||
| 	} | ||||
| 	fmt.Fprintln(w, "\t},") | ||||
| 	fmt.Fprintf(w, "\tLatinOffset: %d,\n", rt.LatinOffset) | ||||
| 	fmt.Fprintf(w, "}\n\n") | ||||
| } | ||||
|  | @ -1,260 +0,0 @@ | |||
| // Copyright 2015 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| package rangetable | ||||
| 
 | ||||
| import ( | ||||
| 	"unicode" | ||||
| ) | ||||
| 
 | ||||
| // atEnd is used to mark a completed iteration.
 | ||||
| const atEnd = unicode.MaxRune + 1 | ||||
| 
 | ||||
| // Merge returns a new RangeTable that is the union of the given tables.
 | ||||
| // It can also be used to compact user-created RangeTables. The entries in
 | ||||
| // R16 and R32 for any given RangeTable should be sorted and non-overlapping.
 | ||||
| //
 | ||||
| // A lookup in the resulting table can be several times faster than using In
 | ||||
| // directly on the ranges. Merge is an expensive operation, however, and only
 | ||||
| // makes sense if one intends to use the result for more than a couple of
 | ||||
| // hundred lookups.
 | ||||
| func Merge(ranges ...*unicode.RangeTable) *unicode.RangeTable { | ||||
| 	rt := &unicode.RangeTable{} | ||||
| 	if len(ranges) == 0 { | ||||
| 		return rt | ||||
| 	} | ||||
| 
 | ||||
| 	iter := tablesIter(make([]tableIndex, len(ranges))) | ||||
| 
 | ||||
| 	for i, t := range ranges { | ||||
| 		iter[i] = tableIndex{t, 0, atEnd} | ||||
| 		if len(t.R16) > 0 { | ||||
| 			iter[i].next = rune(t.R16[0].Lo) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if r0 := iter.next16(); r0.Stride != 0 { | ||||
| 		for { | ||||
| 			r1 := iter.next16() | ||||
| 			if r1.Stride == 0 { | ||||
| 				rt.R16 = append(rt.R16, r0) | ||||
| 				break | ||||
| 			} | ||||
| 			stride := r1.Lo - r0.Hi | ||||
| 			if (r1.Lo == r1.Hi || stride == r1.Stride) && (r0.Lo == r0.Hi || stride == r0.Stride) { | ||||
| 				// Fully merge the next range into the previous one.
 | ||||
| 				r0.Hi, r0.Stride = r1.Hi, stride | ||||
| 				continue | ||||
| 			} else if stride == r0.Stride { | ||||
| 				// Move the first element of r1 to r0. This may eliminate an
 | ||||
| 				// entry.
 | ||||
| 				r0.Hi = r1.Lo | ||||
| 				r0.Stride = stride | ||||
| 				r1.Lo = r1.Lo + r1.Stride | ||||
| 				if r1.Lo > r1.Hi { | ||||
| 					continue | ||||
| 				} | ||||
| 			} | ||||
| 			rt.R16 = append(rt.R16, r0) | ||||
| 			r0 = r1 | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	for i, t := range ranges { | ||||
| 		iter[i] = tableIndex{t, 0, atEnd} | ||||
| 		if len(t.R32) > 0 { | ||||
| 			iter[i].next = rune(t.R32[0].Lo) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if r0 := iter.next32(); r0.Stride != 0 { | ||||
| 		for { | ||||
| 			r1 := iter.next32() | ||||
| 			if r1.Stride == 0 { | ||||
| 				rt.R32 = append(rt.R32, r0) | ||||
| 				break | ||||
| 			} | ||||
| 			stride := r1.Lo - r0.Hi | ||||
| 			if (r1.Lo == r1.Hi || stride == r1.Stride) && (r0.Lo == r0.Hi || stride == r0.Stride) { | ||||
| 				// Fully merge the next range into the previous one.
 | ||||
| 				r0.Hi, r0.Stride = r1.Hi, stride | ||||
| 				continue | ||||
| 			} else if stride == r0.Stride { | ||||
| 				// Move the first element of r1 to r0. This may eliminate an
 | ||||
| 				// entry.
 | ||||
| 				r0.Hi = r1.Lo | ||||
| 				r1.Lo = r1.Lo + r1.Stride | ||||
| 				if r1.Lo > r1.Hi { | ||||
| 					continue | ||||
| 				} | ||||
| 			} | ||||
| 			rt.R32 = append(rt.R32, r0) | ||||
| 			r0 = r1 | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	for i := 0; i < len(rt.R16) && rt.R16[i].Hi <= unicode.MaxLatin1; i++ { | ||||
| 		rt.LatinOffset = i + 1 | ||||
| 	} | ||||
| 
 | ||||
| 	return rt | ||||
| } | ||||
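A hedged, runnable usage example of Merge:

	package main

	import (
		"fmt"
		"unicode"

		"golang.org/x/text/unicode/rangetable"
	)

	func main() {
		// One compact table instead of three lookups per rune.
		table := rangetable.Merge(unicode.Latin, unicode.Greek, unicode.Cyrillic)
		fmt.Println(unicode.In('π', table)) // true
	}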
| 
 | ||||
| type tableIndex struct { | ||||
| 	t    *unicode.RangeTable | ||||
| 	p    uint32 | ||||
| 	next rune | ||||
| } | ||||
| 
 | ||||
| type tablesIter []tableIndex | ||||
| 
 | ||||
| // sortIter does an insertion sort using the next field of tableIndex. Insertion
 | ||||
| // sort performs well here because the slice stays nearly sorted between calls.
 | ||||
| func sortIter(t []tableIndex) { | ||||
| 	for i := range t { | ||||
| 		for j := i; j > 0 && t[j-1].next > t[j].next; j-- { | ||||
| 			t[j], t[j-1] = t[j-1], t[j] | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // next16 finds the next range to be added to the table. If ranges overlap
 | ||||
| // between multiple tables, it clips the result to a non-overlapping range if the
 | ||||
| // elements are not fully subsumed. It returns a zero range if there are no more
 | ||||
| // ranges.
 | ||||
| func (ti tablesIter) next16() unicode.Range16 { | ||||
| 	sortIter(ti) | ||||
| 
 | ||||
| 	t0 := ti[0] | ||||
| 	if t0.next == atEnd { | ||||
| 		return unicode.Range16{} | ||||
| 	} | ||||
| 	r0 := t0.t.R16[t0.p] | ||||
| 	r0.Lo = uint16(t0.next) | ||||
| 
 | ||||
| 	// We restrict the Hi of the current range if it overlaps with another range.
 | ||||
| 	for i := range ti { | ||||
| 		tn := ti[i] | ||||
| 		// Since our tableIndices are sorted by next, we can break if there
 | ||||
| 		// is no overlap. The first value of a next range can always be merged
 | ||||
| 		// into the current one, so we can break in case of equality as well.
 | ||||
| 		if rune(r0.Hi) <= tn.next { | ||||
| 			break | ||||
| 		} | ||||
| 		rn := tn.t.R16[tn.p] | ||||
| 		rn.Lo = uint16(tn.next) | ||||
| 
 | ||||
| 		// Limit r0.Hi based on next ranges in list, but allow it to overlap
 | ||||
| 		// with ranges as long as it subsumes them.
 | ||||
| 		m := (rn.Lo - r0.Lo) % r0.Stride | ||||
| 		if m == 0 && (rn.Stride == r0.Stride || rn.Lo == rn.Hi) { | ||||
| 			// Overlap, take the min of the two Hi values: for simplicity's sake
 | ||||
| 			// we only process one range at a time.
 | ||||
| 			if r0.Hi > rn.Hi { | ||||
| 				r0.Hi = rn.Hi | ||||
| 			} | ||||
| 		} else { | ||||
| 			// Not a compatible stride. Set to the last possible value before
 | ||||
| 			// rn.Lo, but ensure there is at least one value.
 | ||||
| 			if x := rn.Lo - m; r0.Lo <= x { | ||||
| 				r0.Hi = x | ||||
| 			} | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Update the next values for each table.
 | ||||
| 	for i := range ti { | ||||
| 		tn := &ti[i] | ||||
| 		if rune(r0.Hi) < tn.next { | ||||
| 			break | ||||
| 		} | ||||
| 		rn := tn.t.R16[tn.p] | ||||
| 		stride := rune(rn.Stride) | ||||
| 		tn.next += stride * (1 + ((rune(r0.Hi) - tn.next) / stride)) | ||||
| 		if rune(rn.Hi) < tn.next { | ||||
| 			if tn.p++; int(tn.p) == len(tn.t.R16) { | ||||
| 				tn.next = atEnd | ||||
| 			} else { | ||||
| 				tn.next = rune(tn.t.R16[tn.p].Lo) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if r0.Lo == r0.Hi { | ||||
| 		r0.Stride = 1 | ||||
| 	} | ||||
| 
 | ||||
| 	return r0 | ||||
| } | ||||
| 
 | ||||
| // next32 finds the next range to be added to the table. If ranges overlap
 | ||||
| // between multiple tables, it clips the result to a non-overlapping range if the
 | ||||
| // elements are not fully subsumed. It returns a zero range if there are no more
 | ||||
| // ranges.
 | ||||
| func (ti tablesIter) next32() unicode.Range32 { | ||||
| 	sortIter(ti) | ||||
| 
 | ||||
| 	t0 := ti[0] | ||||
| 	if t0.next == atEnd { | ||||
| 		return unicode.Range32{} | ||||
| 	} | ||||
| 	r0 := t0.t.R32[t0.p] | ||||
| 	r0.Lo = uint32(t0.next) | ||||
| 
 | ||||
| 	// We restrict the Hi of the current range if it overlaps with another range.
 | ||||
| 	for i := range ti { | ||||
| 		tn := ti[i] | ||||
| 		// Since our tableIndices are sorted by next, we can break if there
 | ||||
| 		// is no overlap. The first value of a next range can always be merged
 | ||||
| 		// into the current one, so we can break in case of equality as well.
 | ||||
| 		if rune(r0.Hi) <= tn.next { | ||||
| 			break | ||||
| 		} | ||||
| 		rn := tn.t.R32[tn.p] | ||||
| 		rn.Lo = uint32(tn.next) | ||||
| 
 | ||||
| 		// Limit r0.Hi based on next ranges in list, but allow it to overlap
 | ||||
| 		// with ranges as long as it subsumes them.
 | ||||
| 		m := (rn.Lo - r0.Lo) % r0.Stride | ||||
| 		if m == 0 && (rn.Stride == r0.Stride || rn.Lo == rn.Hi) { | ||||
| 			// Overlap, take the min of the two Hi values: for simplicity's sake
 | ||||
| 			// we only process one range at a time.
 | ||||
| 			if r0.Hi > rn.Hi { | ||||
| 				r0.Hi = rn.Hi | ||||
| 			} | ||||
| 		} else { | ||||
| 			// Not a compatible stride. Set to the last possible value before
 | ||||
| 			// rn.Lo, but ensure there is at least one value.
 | ||||
| 			if x := rn.Lo - m; r0.Lo <= x { | ||||
| 				r0.Hi = x | ||||
| 			} | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Update the next values for each table.
 | ||||
| 	for i := range ti { | ||||
| 		tn := &ti[i] | ||||
| 		if rune(r0.Hi) < tn.next { | ||||
| 			break | ||||
| 		} | ||||
| 		rn := tn.t.R32[tn.p] | ||||
| 		stride := rune(rn.Stride) | ||||
| 		tn.next += stride * (1 + ((rune(r0.Hi) - tn.next) / stride)) | ||||
| 		if rune(rn.Hi) < tn.next { | ||||
| 			if tn.p++; int(tn.p) == len(tn.t.R32) { | ||||
| 				tn.next = atEnd | ||||
| 			} else { | ||||
| 				tn.next = rune(tn.t.R32[tn.p].Lo) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if r0.Lo == r0.Hi { | ||||
| 		r0.Stride = 1 | ||||
| 	} | ||||
| 
 | ||||
| 	return r0 | ||||
| } | ||||
|  | @ -1,70 +0,0 @@ | |||
| // Copyright 2015 The Go Authors. All rights reserved.
 | ||||
| // Use of this source code is governed by a BSD-style
 | ||||
| // license that can be found in the LICENSE file.
 | ||||
| 
 | ||||
| // Package rangetable provides utilities for creating and inspecting
 | ||||
| // unicode.RangeTables.
 | ||||
| package rangetable | ||||
| 
 | ||||
| import ( | ||||
| 	"sort" | ||||
| 	"unicode" | ||||
| ) | ||||
| 
 | ||||
| // New creates a RangeTable from the given runes, which may contain duplicates.
 | ||||
| func New(r ...rune) *unicode.RangeTable { | ||||
| 	if len(r) == 0 { | ||||
| 		return &unicode.RangeTable{} | ||||
| 	} | ||||
| 
 | ||||
| 	sort.Sort(byRune(r)) | ||||
| 
 | ||||
| 	// Remove duplicates.
 | ||||
| 	k := 1 | ||||
| 	for i := 1; i < len(r); i++ { | ||||
| 		if r[k-1] != r[i] { | ||||
| 			r[k] = r[i] | ||||
| 			k++ | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	var rt unicode.RangeTable | ||||
| 	for _, r := range r[:k] { | ||||
| 		if r <= 0xFFFF { | ||||
| 			rt.R16 = append(rt.R16, unicode.Range16{Lo: uint16(r), Hi: uint16(r), Stride: 1}) | ||||
| 		} else { | ||||
| 			rt.R32 = append(rt.R32, unicode.Range32{Lo: uint32(r), Hi: uint32(r), Stride: 1}) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Optimize RangeTable.
 | ||||
| 	return Merge(&rt) | ||||
| } | ||||
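A hedged example: duplicates are folded and adjacent runes collapse into a single stride-1 range.

	rt := rangetable.New('a', 'b', 'c', 'c', 'é')
	// rt matches exactly {a, b, c, é}; 'a' through 'c' end up in one Range16.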
| 
 | ||||
| type byRune []rune | ||||
| 
 | ||||
| func (r byRune) Len() int           { return len(r) } | ||||
| func (r byRune) Swap(i, j int)      { r[i], r[j] = r[j], r[i] } | ||||
| func (r byRune) Less(i, j int) bool { return r[i] < r[j] } | ||||
| 
 | ||||
| // Visit visits all runes in the given RangeTable in order, calling fn for each.
 | ||||
| func Visit(rt *unicode.RangeTable, fn func(rune)) { | ||||
| 	for _, r16 := range rt.R16 { | ||||
| 		for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) { | ||||
| 			fn(r) | ||||
| 		} | ||||
| 	} | ||||
| 	for _, r32 := range rt.R32 { | ||||
| 		for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) { | ||||
| 			fn(r) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
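A hedged example: counting the runes covered by a table with Visit.

	n := 0
	rangetable.Visit(unicode.Cherokee, func(r rune) { n++ })
	// n now holds the number of code points in unicode.Cherokee.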
| 
 | ||||
| // Assigned returns a RangeTable with all assigned code points for a given
 | ||||
| // Unicode version. This includes graphic, format, control, and private-use
 | ||||
| // characters. It returns nil if the data for the given version is not
 | ||||
| // available.
 | ||||
| func Assigned(version string) *unicode.RangeTable { | ||||
| 	return assigned[version] | ||||
| } | ||||
										
											
File diff suppressed because it is too large
							|  | @ -307,6 +307,24 @@ | |||
| 			"revision": "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4", | ||||
| 			"revisionTime": "2016-07-23T06:10:19Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "1EiU/fWJI6ldCTGorUvijilegRM=", | ||||
| 			"path": "github.com/minio/blazer/base", | ||||
| 			"revision": "8e81ddf2d8deed54c6ac3f7d264d78659e72fbb8", | ||||
| 			"revisionTime": "2017-10-06T21:06:28Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "ucCxupZ1gyxvFsBg5igP13dySLI=", | ||||
| 			"path": "github.com/minio/blazer/internal/b2types", | ||||
| 			"revision": "8e81ddf2d8deed54c6ac3f7d264d78659e72fbb8", | ||||
| 			"revisionTime": "2017-10-06T21:06:28Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "zgBbPwwuUH2sxz8smOzOA9TrD5g=", | ||||
| 			"path": "github.com/minio/blazer/internal/blog", | ||||
| 			"revision": "8e81ddf2d8deed54c6ac3f7d264d78659e72fbb8", | ||||
| 			"revisionTime": "2017-10-06T21:06:28Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "fUWokilZyc1QDKnIgCDJE8n1S9U=", | ||||
| 			"path": "github.com/minio/cli", | ||||
|  | @ -571,24 +589,6 @@ | |||
| 			"revision": "ea9bcade75cb975a0b9738936568ab388b845617", | ||||
| 			"revisionTime": "2017-03-08T18:50:27Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "ZQdHbB9VYCXwQ+9/CmZPhJv0+SM=", | ||||
| 			"path": "golang.org/x/text/internal/gen", | ||||
| 			"revision": "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4", | ||||
| 			"revisionTime": "2017-04-25T18:31:26Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "47nwiUyVBY2RKoEGXmCSvusY4Js=", | ||||
| 			"path": "golang.org/x/text/internal/triegen", | ||||
| 			"revision": "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4", | ||||
| 			"revisionTime": "2017-04-25T18:31:26Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "Yd5wMObzagIfCiKLpZbtBIrOUA4=", | ||||
| 			"path": "golang.org/x/text/internal/ucd", | ||||
| 			"revision": "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4", | ||||
| 			"revisionTime": "2017-04-25T18:31:26Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "faFDXp++cLjLBlvsr+izZ+go1WU=", | ||||
| 			"path": "golang.org/x/text/secure/bidirule", | ||||
|  | @ -607,24 +607,12 @@ | |||
| 			"revision": "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4", | ||||
| 			"revisionTime": "2017-04-25T18:31:26Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "ZbYsJjfj1rPbHN+0baD1rg09PXQ=", | ||||
| 			"path": "golang.org/x/text/unicode/cldr", | ||||
| 			"revision": "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4", | ||||
| 			"revisionTime": "2017-04-25T18:31:26Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "Anof4bt0AU+Sa3R8Rq0KBnlpbaQ=", | ||||
| 			"path": "golang.org/x/text/unicode/norm", | ||||
| 			"revision": "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4", | ||||
| 			"revisionTime": "2017-04-25T18:31:26Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "5R2IZ5umPfkD5QKt3pwrbIgmrDk=", | ||||
| 			"path": "golang.org/x/text/unicode/rangetable", | ||||
| 			"revision": "470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4", | ||||
| 			"revisionTime": "2017-04-25T18:31:26Z" | ||||
| 		}, | ||||
| 		{ | ||||
| 			"checksumSHA1": "M1wpFKbvAwtmAafEAYAvJ5mUlf0=", | ||||
| 			"path": "google.golang.org/api/cloudresourcemanager/v1", | ||||