[ARVADOS] updated: 936aac46fade3dc5b50698d45e58a271f8e84c77

git at public.curoverse.com
Mon Sep 14 18:54:56 EDT 2015


Summary of changes:
 services/datamanager/collection/collection.go      | 75 ++++++++++++----------
 services/datamanager/collection/collection_test.go | 24 +++----
 services/datamanager/collection/testing.go         | 14 ++--
 services/datamanager/keep/keep.go                  | 50 ++++++++++-----
 services/datamanager/keep/keep_test.go             |  6 +-
 services/datamanager/summary/summary.go            |  4 +-
 services/datamanager/summary/summary_test.go       |  2 +-
 7 files changed, 100 insertions(+), 75 deletions(-)

       via  936aac46fade3dc5b50698d45e58a271f8e84c77 (commit)
      from  037735ad2e0637d1f1892847f0f82e255a1f764e (commit)

The revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.


commit 936aac46fade3dc5b50698d45e58a271f8e84c77
Author: radhika <radhika at curoverse.com>
Date:   Mon Sep 14 18:51:45 2015 -0400

    6260: so many golint complaints!!
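
For readers skimming the diff, the golint rules being applied below are: exported
identifiers get a doc comment that starts with the identifier's name, initialisms
such as UUID and SSL are written in full caps, increments use ++ rather than += 1,
and an else after a return is dropped. A minimal, self-contained sketch of those
conventions (invented names, not code from this commit):

    package main

    import "fmt"

    // Server identifies a keep service endpoint (a hypothetical type, not
    // the one in services/datamanager/keep).
    type Server struct {
        UUID string // golint flags "Uuid"; initialisms stay upper-case
        Host string
        Port int
        SSL  bool
    }

    // URL returns the base URL for the server; the doc comment starts with
    // the exported identifier's own name.
    func (s Server) URL() string {
        if s.SSL {
            return fmt.Sprintf("https://%s:%d", s.Host, s.Port)
        }
        // golint also prefers dropping "else" after a return.
        return fmt.Sprintf("http://%s:%d", s.Host, s.Port)
    }

    func main() {
        count := 0
        count++ // rather than count += 1
        srv := Server{UUID: "zzzzz-bi6l4-0123456789abcde", Host: "keep0", Port: 25107}
        fmt.Println(srv.URL(), count)
    }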

diff --git a/services/datamanager/collection/collection.go b/services/datamanager/collection/collection.go
index 5519ad8..ca03627 100644
--- a/services/datamanager/collection/collection.go
+++ b/services/datamanager/collection/collection.go
@@ -24,38 +24,43 @@ var (
 	maxManifestSize   uint64
 )
 
+// Collection representation
 type Collection struct {
-	Uuid              string
-	OwnerUuid         string
+	UUID              string
+	OwnerUUID         string
 	ReplicationLevel  int
 	BlockDigestToSize map[blockdigest.BlockDigest]int
 	TotalSize         int
 }
 
+// ReadCollections holds information about collections from API server
 type ReadCollections struct {
 	ReadAllCollections        bool
-	UuidToCollection          map[string]Collection
+	UUIDToCollection          map[string]Collection
 	OwnerToCollectionSize     map[string]int
 	BlockToDesiredReplication map[blockdigest.DigestWithSize]int
-	CollectionUuidToIndex     map[string]int
-	CollectionIndexToUuid     []string
+	CollectionUUIDToIndex     map[string]int
+	CollectionIndexToUUID     []string
 	BlockToCollectionIndices  map[blockdigest.DigestWithSize][]int
 }
 
+// GetCollectionsParams params
 type GetCollectionsParams struct {
 	Client    arvadosclient.ArvadosClient
 	Logger    *logger.Logger
 	BatchSize int
 }
 
+// SdkCollectionInfo holds collection info from api
 type SdkCollectionInfo struct {
-	Uuid         string    `json:"uuid"`
-	OwnerUuid    string    `json:"owner_uuid"`
+	UUID         string    `json:"uuid"`
+	OwnerUUID    string    `json:"owner_uuid"`
 	Redundancy   int       `json:"redundancy"`
 	ModifiedAt   time.Time `json:"modified_at"`
 	ManifestText string    `json:"manifest_text"`
 }
 
+// SdkCollectionList lists collections from api
 type SdkCollectionList struct {
 	ItemsAvailable int                 `json:"items_available"`
 	Items          []SdkCollectionInfo `json:"items"`
@@ -68,7 +73,7 @@ func init() {
 		"File to write the heap profiles to. Leave blank to skip profiling.")
 }
 
-// Write the heap profile to a file for later review.
+// WriteHeapProfile writes the heap profile to a file for later review.
 // Since a file is expected to only contain a single heap profile this
 // function overwrites the previously written profile, so it is safe
 // to call multiple times in a single run.
@@ -77,27 +82,28 @@ func init() {
 func WriteHeapProfile() {
 	if heapProfileFilename != "" {
 
-		heap_profile, err := os.Create(heapProfileFilename)
+		heapProfile, err := os.Create(heapProfileFilename)
 		if err != nil {
 			log.Fatal(err)
 		}
 
-		defer heap_profile.Close()
+		defer heapProfile.Close()
 
-		err = pprof.WriteHeapProfile(heap_profile)
+		err = pprof.WriteHeapProfile(heapProfile)
 		if err != nil {
 			log.Fatal(err)
 		}
 	}
 }
 
+// GetCollectionsAndSummarize gets collections from api and summarizes
 func GetCollectionsAndSummarize(params GetCollectionsParams) (results ReadCollections) {
 	results = GetCollections(params)
 	results.Summarize(params.Logger)
 
 	log.Printf("Uuid to Size used: %v", results.OwnerToCollectionSize)
 	log.Printf("Read and processed %d collections",
-		len(results.UuidToCollection))
+		len(results.UUIDToCollection))
 
 	// TODO(misha): Add a "readonly" flag. If we're in readonly mode,
 	// lots of behaviors can become warnings (and obviously we can't
@@ -109,6 +115,7 @@ func GetCollectionsAndSummarize(params GetCollectionsParams) (results ReadCollec
 	return
 }
 
+// GetCollections gets collections from api
 func GetCollections(params GetCollectionsParams) (results ReadCollections) {
 	if &params.Client == nil {
 		log.Fatalf("params.Client passed to GetCollections() should " +
@@ -157,7 +164,7 @@ func GetCollections(params GetCollectionsParams) (results ReadCollections) {
 	// that we don't have to grow the map in most cases.
 	maxExpectedCollections := int(
 		float64(initialNumberOfCollectionsAvailable) * 1.01)
-	results.UuidToCollection = make(map[string]Collection, maxExpectedCollections)
+	results.UUIDToCollection = make(map[string]Collection, maxExpectedCollections)
 
 	if params.Logger != nil {
 		params.Logger.Update(func(p map[string]interface{}, e map[string]interface{}) {
@@ -191,11 +198,11 @@ func GetCollections(params GetCollectionsParams) (results ReadCollections) {
 			ProcessCollections(params.Logger,
 				collections.Items,
 				defaultReplicationLevel,
-				results.UuidToCollection).Format(time.RFC3339)
+				results.UUIDToCollection).Format(time.RFC3339)
 
 		// update counts
 		previousTotalCollections = totalCollections
-		totalCollections = len(results.UuidToCollection)
+		totalCollections = len(results.UUIDToCollection)
 
 		log.Printf("%d collections read, %d new in last batch, "+
 			"%s latest modified date, %.0f %d %d avg,max,total manifest size",
@@ -229,13 +236,14 @@ func StrCopy(s string) string {
 	return string([]byte(s))
 }
 
+// ProcessCollections processes collections read from the API server
 func ProcessCollections(arvLogger *logger.Logger,
 	receivedCollections []SdkCollectionInfo,
 	defaultReplicationLevel int,
-	uuidToCollection map[string]Collection) (latestModificationDate time.Time) {
+	UUIDToCollection map[string]Collection) (latestModificationDate time.Time) {
 	for _, sdkCollection := range receivedCollections {
-		collection := Collection{Uuid: StrCopy(sdkCollection.Uuid),
-			OwnerUuid:         StrCopy(sdkCollection.OwnerUuid),
+		collection := Collection{UUID: StrCopy(sdkCollection.UUID),
+			OwnerUUID:         StrCopy(sdkCollection.OwnerUUID),
 			ReplicationLevel:  sdkCollection.Redundancy,
 			BlockDigestToSize: make(map[blockdigest.BlockDigest]int)}
 
@@ -260,7 +268,7 @@ func ProcessCollections(arvLogger *logger.Logger,
 		manifest := manifest.Manifest{sdkCollection.ManifestText}
 		manifestSize := uint64(len(sdkCollection.ManifestText))
 
-		if _, alreadySeen := uuidToCollection[collection.Uuid]; !alreadySeen {
+		if _, alreadySeen := UUIDToCollection[collection.UUID]; !alreadySeen {
 			totalManifestSize += manifestSize
 		}
 		if manifestSize > maxManifestSize {
@@ -269,11 +277,11 @@ func ProcessCollections(arvLogger *logger.Logger,
 
 		blockChannel := manifest.BlockIterWithDuplicates()
 		for block := range blockChannel {
-			if stored_size, stored := collection.BlockDigestToSize[block.Digest]; stored && stored_size != block.Size {
+			if storedSize, stored := collection.BlockDigestToSize[block.Digest]; stored && storedSize != block.Size {
 				message := fmt.Sprintf(
 					"Collection %s contains multiple sizes (%d and %d) for block %s",
-					collection.Uuid,
-					stored_size,
+					collection.UUID,
+					storedSize,
 					block.Size,
 					block.Digest)
 				loggerutil.FatalWithMessage(arvLogger, message)
@@ -284,7 +292,7 @@ func ProcessCollections(arvLogger *logger.Logger,
 		for _, size := range collection.BlockDigestToSize {
 			collection.TotalSize += size
 		}
-		uuidToCollection[collection.Uuid] = collection
+		UUIDToCollection[collection.UUID] = collection
 
 		// Clear out all the manifest strings that we don't need anymore.
 		// These hopefully form the bulk of our memory usage.
@@ -295,22 +303,23 @@ func ProcessCollections(arvLogger *logger.Logger,
 	return
 }
 
+// Summarize the collections read
 func (readCollections *ReadCollections) Summarize(arvLogger *logger.Logger) {
 	readCollections.OwnerToCollectionSize = make(map[string]int)
 	readCollections.BlockToDesiredReplication = make(map[blockdigest.DigestWithSize]int)
-	numCollections := len(readCollections.UuidToCollection)
-	readCollections.CollectionUuidToIndex = make(map[string]int, numCollections)
-	readCollections.CollectionIndexToUuid = make([]string, 0, numCollections)
+	numCollections := len(readCollections.UUIDToCollection)
+	readCollections.CollectionUUIDToIndex = make(map[string]int, numCollections)
+	readCollections.CollectionIndexToUUID = make([]string, 0, numCollections)
 	readCollections.BlockToCollectionIndices = make(map[blockdigest.DigestWithSize][]int)
 
-	for _, coll := range readCollections.UuidToCollection {
-		collectionIndex := len(readCollections.CollectionIndexToUuid)
-		readCollections.CollectionIndexToUuid =
-			append(readCollections.CollectionIndexToUuid, coll.Uuid)
-		readCollections.CollectionUuidToIndex[coll.Uuid] = collectionIndex
+	for _, coll := range readCollections.UUIDToCollection {
+		collectionIndex := len(readCollections.CollectionIndexToUUID)
+		readCollections.CollectionIndexToUUID =
+			append(readCollections.CollectionIndexToUUID, coll.UUID)
+		readCollections.CollectionUUIDToIndex[coll.UUID] = collectionIndex
 
-		readCollections.OwnerToCollectionSize[coll.OwnerUuid] =
-			readCollections.OwnerToCollectionSize[coll.OwnerUuid] + coll.TotalSize
+		readCollections.OwnerToCollectionSize[coll.OwnerUUID] =
+			readCollections.OwnerToCollectionSize[coll.OwnerUUID] + coll.TotalSize
 
 		for block, size := range coll.BlockDigestToSize {
 			locator := blockdigest.DigestWithSize{Digest: block, Size: uint32(size)}
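
An aside on WriteHeapProfile above: os.Create truncates the target file, so each
call overwrites the previous snapshot and only the most recent heap profile
survives, which is why it is safe to call repeatedly in one run. A rough
standalone sketch of that pattern (hypothetical path, without the datamanager
flag plumbing):

    package main

    import (
        "log"
        "os"
        "runtime/pprof"
    )

    // writeHeapProfile overwrites path with the current heap profile; only
    // the last snapshot written during a run is kept.
    func writeHeapProfile(path string) {
        f, err := os.Create(path) // truncates any earlier profile
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        if err := pprof.WriteHeapProfile(f); err != nil {
            log.Fatal(err)
        }
    }

    func main() {
        writeHeapProfile("/tmp/datamanager.heap") // hypothetical path
    }
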
diff --git a/services/datamanager/collection/collection_test.go b/services/datamanager/collection/collection_test.go
index 1669bb7..07c82e1 100644
--- a/services/datamanager/collection/collection_test.go
+++ b/services/datamanager/collection/collection_test.go
@@ -16,7 +16,7 @@ type MySuite struct{}
 var _ = Suite(&MySuite{})
 
 // This captures the result we expect from
-// ReadCollections.Summarize().  Because CollectionUuidToIndex is
+// ReadCollections.Summarize().  Because CollectionUUIDToIndex is
 // indeterminate, we replace BlockToCollectionIndices with
 // BlockToCollectionUuids.
 type ExpectedSummary struct {
@@ -41,7 +41,7 @@ func CompareSummarizedReadCollections(c *C,
 		uuidSet := make(map[string]struct{})
 		summarizedBlockToCollectionUuids[digest] = uuidSet
 		for _, index := range indices {
-			uuidSet[summarized.CollectionIndexToUuid[index]] = struct{}{}
+			uuidSet[summarized.CollectionIndexToUUID[index]] = struct{}{}
 		}
 	}
 
@@ -67,15 +67,15 @@ func (s *MySuite) TestSummarizeSimple(checker *C) {
 
 	rc.Summarize(nil)
 
-	c := rc.UuidToCollection["col0"]
+	c := rc.UUIDToCollection["col0"]
 
 	blockDigest1 := blockdigest.MakeTestDigestWithSize(1)
 	blockDigest2 := blockdigest.MakeTestDigestWithSize(2)
 
 	expected := ExpectedSummary{
-		OwnerToCollectionSize:     map[string]int{c.OwnerUuid: c.TotalSize},
+		OwnerToCollectionSize:     map[string]int{c.OwnerUUID: c.TotalSize},
 		BlockToDesiredReplication: map[blockdigest.DigestWithSize]int{blockDigest1: 5, blockDigest2: 5},
-		BlockToCollectionUuids:    map[blockdigest.DigestWithSize][]string{blockDigest1: []string{c.Uuid}, blockDigest2: []string{c.Uuid}},
+		BlockToCollectionUuids:    map[blockdigest.DigestWithSize][]string{blockDigest1: []string{c.UUID}, blockDigest2: []string{c.UUID}},
 	}
 
 	CompareSummarizedReadCollections(checker, rc, expected)
@@ -95,8 +95,8 @@ func (s *MySuite) TestSummarizeOverlapping(checker *C) {
 
 	rc.Summarize(nil)
 
-	c0 := rc.UuidToCollection["col0"]
-	c1 := rc.UuidToCollection["col1"]
+	c0 := rc.UUIDToCollection["col0"]
+	c1 := rc.UUIDToCollection["col1"]
 
 	blockDigest1 := blockdigest.MakeTestDigestWithSize(1)
 	blockDigest2 := blockdigest.MakeTestDigestWithSize(2)
@@ -104,8 +104,8 @@ func (s *MySuite) TestSummarizeOverlapping(checker *C) {
 
 	expected := ExpectedSummary{
 		OwnerToCollectionSize: map[string]int{
-			c0.OwnerUuid: c0.TotalSize,
-			c1.OwnerUuid: c1.TotalSize,
+			c0.OwnerUUID: c0.TotalSize,
+			c1.OwnerUUID: c1.TotalSize,
 		},
 		BlockToDesiredReplication: map[blockdigest.DigestWithSize]int{
 			blockDigest1: 5,
@@ -113,9 +113,9 @@ func (s *MySuite) TestSummarizeOverlapping(checker *C) {
 			blockDigest3: 8,
 		},
 		BlockToCollectionUuids: map[blockdigest.DigestWithSize][]string{
-			blockDigest1: []string{c0.Uuid},
-			blockDigest2: []string{c0.Uuid, c1.Uuid},
-			blockDigest3: []string{c1.Uuid},
+			blockDigest1: []string{c0.UUID},
+			blockDigest2: []string{c0.UUID, c1.UUID},
+			blockDigest3: []string{c1.UUID},
 		},
 	}
 
diff --git a/services/datamanager/collection/testing.go b/services/datamanager/collection/testing.go
index ac6e9cb..2238433 100644
--- a/services/datamanager/collection/testing.go
+++ b/services/datamanager/collection/testing.go
@@ -17,22 +17,22 @@ type TestCollectionSpec struct {
 }
 
 // MakeTestReadCollections creates a ReadCollections object for testing
-// based on the give specs. Only the ReadAllCollections and UuidToCollection
+// based on the give specs. Only the ReadAllCollections and UUIDToCollection
 // fields are populated. To populate other fields call rc.Summarize().
 func MakeTestReadCollections(specs []TestCollectionSpec) (rc ReadCollections) {
 	rc = ReadCollections{
 		ReadAllCollections: true,
-		UuidToCollection:   map[string]Collection{},
+		UUIDToCollection:   map[string]Collection{},
 	}
 
 	for i, spec := range specs {
 		c := Collection{
-			Uuid:              fmt.Sprintf("col%d", i),
-			OwnerUuid:         fmt.Sprintf("owner%d", i),
+			UUID:              fmt.Sprintf("col%d", i),
+			OwnerUUID:         fmt.Sprintf("owner%d", i),
 			ReplicationLevel:  spec.ReplicationLevel,
 			BlockDigestToSize: map[blockdigest.BlockDigest]int{},
 		}
-		rc.UuidToCollection[c.Uuid] = c
+		rc.UUIDToCollection[c.UUID] = c
 		for _, j := range spec.Blocks {
 			c.BlockDigestToSize[blockdigest.MakeTestBlockDigest(j)] = j
 		}
@@ -52,10 +52,10 @@ func MakeTestReadCollections(specs []TestCollectionSpec) (rc ReadCollections) {
 // assigns an index to each collection.
 func (rc ReadCollections) CollectionIndicesForTesting() (indices []int) {
 	// TODO(misha): Assert that rc.Summarize() has been called.
-	numCollections := len(rc.CollectionIndexToUuid)
+	numCollections := len(rc.CollectionIndexToUUID)
 	indices = make([]int, numCollections)
 	for i := 0; i < numCollections; i++ {
-		indices[i] = rc.CollectionUuidToIndex[fmt.Sprintf("col%d", i)]
+		indices[i] = rc.CollectionUUIDToIndex[fmt.Sprintf("col%d", i)]
 	}
 	return
 }
diff --git a/services/datamanager/keep/keep.go b/services/datamanager/keep/keep.go
index 1a75bb1..884a69a 100644
--- a/services/datamanager/keep/keep.go
+++ b/services/datamanager/keep/keep.go
@@ -21,34 +21,38 @@ import (
 	"time"
 )
 
+// ServerAddress struct
 type ServerAddress struct {
 	SSL  bool   `json:service_ssl_flag`
 	Host string `json:"service_host"`
 	Port int    `json:"service_port"`
-	Uuid string `json:"uuid"`
+	UUID string `json:"uuid"`
 }
 
-// Info about a particular block returned by the server
+// BlockInfo is info about a particular block returned by the server
 type BlockInfo struct {
 	Digest blockdigest.DigestWithSize
 	Mtime  int64 // TODO(misha): Replace this with a timestamp.
 }
 
-// Info about a specified block given by a server
+// BlockServerInfo is info about a specified block given by a server
 type BlockServerInfo struct {
 	ServerIndex int
 	Mtime       int64 // TODO(misha): Replace this with a timestamp.
 }
 
+// ServerContents struct
 type ServerContents struct {
 	BlockDigestToInfo map[blockdigest.DigestWithSize]BlockInfo
 }
 
+// ServerResponse struct
 type ServerResponse struct {
 	Address  ServerAddress
 	Contents ServerContents
 }
 
+// ReadServers struct
 type ReadServers struct {
 	ReadAllServers           bool
 	KeepServerIndexToAddress []ServerAddress
@@ -58,30 +62,34 @@ type ReadServers struct {
 	BlockReplicationCounts   map[int]int
 }
 
+// GetKeepServersParams struct
 type GetKeepServersParams struct {
 	Client arvadosclient.ArvadosClient
 	Logger *logger.Logger
 	Limit  int
 }
 
-type KeepServiceList struct {
+// ServiceList consists of the addresses of all the available keep servers
+type ServiceList struct {
 	ItemsAvailable int             `json:"items_available"`
 	KeepServers    []ServerAddress `json:"items"`
 }
 
+// String returns the server's URL as a string
 // TODO(misha): Change this to include the UUID as well.
 func (s ServerAddress) String() string {
 	return s.URL()
 }
 
+// URL of the keep server
 func (s ServerAddress) URL() string {
 	if s.SSL {
 		return fmt.Sprintf("https://%s:%d", s.Host, s.Port)
-	} else {
-		return fmt.Sprintf("http://%s:%d", s.Host, s.Port)
 	}
+	return fmt.Sprintf("http://%s:%d", s.Host, s.Port)
 }
 
+// GetKeepServersAndSummarize gets keep servers from api
 func GetKeepServersAndSummarize(params GetKeepServersParams) (results ReadServers) {
 	results = GetKeepServers(params)
 	log.Printf("Returned %d keep disks", len(results.ServerToContents))
@@ -93,6 +101,7 @@ func GetKeepServersAndSummarize(params GetKeepServersParams) (results ReadServer
 	return
 }
 
+// GetKeepServers from api server
 func GetKeepServers(params GetKeepServersParams) (results ReadServers) {
 	sdkParams := arvadosclient.Dict{
 		"filters": [][]string{[]string{"service_type", "=", "disk"}},
@@ -101,7 +110,7 @@ func GetKeepServers(params GetKeepServersParams) (results ReadServers) {
 		sdkParams["limit"] = params.Limit
 	}
 
-	var sdkResponse KeepServiceList
+	var sdkResponse ServiceList
 	err := params.Client.List("keep_services", sdkParams, &sdkResponse)
 
 	if err != nil {
@@ -171,6 +180,7 @@ func GetKeepServers(params GetKeepServersParams) (results ReadServers) {
 	return
 }
 
+// GetServerContents of the keep server
 func GetServerContents(arvLogger *logger.Logger,
 	keepServer ServerAddress,
 	arv arvadosclient.ArvadosClient) (response ServerResponse) {
@@ -190,6 +200,7 @@ func GetServerContents(arvLogger *logger.Logger,
 	return ReadServerResponse(arvLogger, keepServer, resp)
 }
 
+// GetServerStatus gets the keep server status by invoking /status.json
 func GetServerStatus(arvLogger *logger.Logger,
 	keepServer ServerAddress,
 	arv arvadosclient.ArvadosClient) {
@@ -206,7 +217,7 @@ func GetServerStatus(arvLogger *logger.Logger,
 			serverInfo["host"] = keepServer.Host
 			serverInfo["port"] = keepServer.Port
 
-			keepInfo[keepServer.Uuid] = serverInfo
+			keepInfo[keepServer.UUID] = serverInfo
 		})
 	}
 
@@ -234,13 +245,14 @@ func GetServerStatus(arvLogger *logger.Logger,
 		now := time.Now()
 		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
 			keepInfo := logger.GetOrCreateMap(p, "keep_info")
-			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
+			serverInfo := keepInfo[keepServer.UUID].(map[string]interface{})
 			serverInfo["status_response_processed_at"] = now
 			serverInfo["status"] = keepStatus
 		})
 	}
 }
 
+// CreateIndexRequest to the keep server
 func CreateIndexRequest(arvLogger *logger.Logger,
 	keepServer ServerAddress,
 	arv arvadosclient.ArvadosClient) (req *http.Request) {
@@ -251,7 +263,7 @@ func CreateIndexRequest(arvLogger *logger.Logger,
 		now := time.Now()
 		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
 			keepInfo := logger.GetOrCreateMap(p, "keep_info")
-			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
+			serverInfo := keepInfo[keepServer.UUID].(map[string]interface{})
 			serverInfo["index_request_sent_at"] = now
 		})
 	}
@@ -266,6 +278,7 @@ func CreateIndexRequest(arvLogger *logger.Logger,
 	return
 }
 
+// ReadServerResponse reads the response from the keep server
 func ReadServerResponse(arvLogger *logger.Logger,
 	keepServer ServerAddress,
 	resp *http.Response) (response ServerResponse) {
@@ -281,7 +294,7 @@ func ReadServerResponse(arvLogger *logger.Logger,
 		now := time.Now()
 		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
 			keepInfo := logger.GetOrCreateMap(p, "keep_info")
-			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
+			serverInfo := keepInfo[keepServer.UUID].(map[string]interface{})
 			serverInfo["index_response_received_at"] = now
 		})
 	}
@@ -328,7 +341,7 @@ func ReadServerResponse(arvLogger *logger.Logger,
 
 		if storedBlock, ok := response.Contents.BlockDigestToInfo[blockInfo.Digest]; ok {
 			// This server returned multiple lines containing the same block digest.
-			numDuplicates += 1
+			numDuplicates++
 			// Keep the block that's newer.
 			if storedBlock.Mtime < blockInfo.Mtime {
 				response.Contents.BlockDigestToInfo[blockInfo.Digest] = blockInfo
@@ -349,7 +362,7 @@ func ReadServerResponse(arvLogger *logger.Logger,
 		now := time.Now()
 		arvLogger.Update(func(p map[string]interface{}, e map[string]interface{}) {
 			keepInfo := logger.GetOrCreateMap(p, "keep_info")
-			serverInfo := keepInfo[keepServer.Uuid].(map[string]interface{})
+			serverInfo := keepInfo[keepServer.UUID].(map[string]interface{})
 
 			serverInfo["processing_finished_at"] = now
 			serverInfo["lines_received"] = numLines
@@ -392,11 +405,12 @@ func parseBlockInfoFromIndexLine(indexLine string) (blockInfo BlockInfo, err err
 	return
 }
 
+// Summarize results from keep server
 func (readServers *ReadServers) Summarize(arvLogger *logger.Logger) {
 	readServers.BlockReplicationCounts = make(map[int]int)
 	for _, infos := range readServers.BlockToServers {
 		replication := len(infos)
-		readServers.BlockReplicationCounts[replication] += 1
+		readServers.BlockReplicationCounts[replication]++
 	}
 
 	if arvLogger != nil {
@@ -405,16 +419,18 @@ func (readServers *ReadServers) Summarize(arvLogger *logger.Logger) {
 			keepInfo["distinct_blocks_stored"] = len(readServers.BlockToServers)
 		})
 	}
-
 }
 
+// TrashRequest struct
 type TrashRequest struct {
 	Locator    string `json:"locator"`
 	BlockMtime int64  `json:"block_mtime"`
 }
 
+// TrashList is an array of TrashRequest objects
 type TrashList []TrashRequest
 
+// SendTrashLists to trash queue
 func SendTrashLists(kc *keepclient.KeepClient, spl map[string]TrashList) (errs []error) {
 	count := 0
 	barrier := make(chan error)
@@ -422,7 +438,7 @@ func SendTrashLists(kc *keepclient.KeepClient, spl map[string]TrashList) (errs [
 	client := kc.Client
 
 	for url, v := range spl {
-		count += 1
+		count++
 		log.Printf("Sending trash list to %v", url)
 
 		go (func(url string, v TrashList) {
@@ -464,7 +480,7 @@ func SendTrashLists(kc *keepclient.KeepClient, spl map[string]TrashList) (errs [
 
 	}
 
-	for i := 0; i < count; i += 1 {
+	for i := 0; i < count; i++ {
 		b := <-barrier
 		if b != nil {
 			errs = append(errs, b)
diff --git a/services/datamanager/keep/keep_test.go b/services/datamanager/keep/keep_test.go
index f39463e..2ccf17d 100644
--- a/services/datamanager/keep/keep_test.go
+++ b/services/datamanager/keep/keep_test.go
@@ -22,9 +22,9 @@ type TestHandler struct {
 	request TrashList
 }
 
-func (this *TestHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
+func (ts *TestHandler) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
 	r := json.NewDecoder(req.Body)
-	r.Decode(&this.request)
+	r.Decode(&ts.request)
 }
 
 func (s *KeepSuite) TestSendTrashLists(c *C) {
@@ -53,7 +53,7 @@ func (s *KeepSuite) TestSendTrashLists(c *C) {
 type TestHandlerError struct {
 }
 
-func (this *TestHandlerError) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
+func (tse *TestHandlerError) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
 	http.Error(writer, "I'm a teapot", 418)
 }
 
diff --git a/services/datamanager/summary/summary.go b/services/datamanager/summary/summary.go
index edd760b..ec30a12 100644
--- a/services/datamanager/summary/summary.go
+++ b/services/datamanager/summary/summary.go
@@ -27,7 +27,7 @@ func (bs BlockSet) Union(obs BlockSet) {
 
 // We use the collection index to save space. To convert to and from
 // the uuid, use collection.ReadCollections' fields
-// CollectionIndexToUuid and CollectionUuidToIndex.
+// CollectionIndexToUUID and CollectionUUIDToIndex.
 type CollectionIndexSet map[int]struct{}
 
 // Adds a single collection to the set. The collection is specified by
@@ -254,7 +254,7 @@ func (rlbsm ReplicationLevelBlockSetMap) SummarizeBuckets(
 	rs.OverReplicatedBlocks.ToCollectionIndexSet(readCollections,
 		&rs.OverReplicatedCollections)
 
-	for i := range readCollections.CollectionIndexToUuid {
+	for i := range readCollections.CollectionIndexToUUID {
 		if _, notInKeep := rs.CollectionsNotFullyInKeep[i]; notInKeep {
 		} else if _, underReplicated := rs.UnderReplicatedCollections[i]; underReplicated {
 		} else if _, overReplicated := rs.OverReplicatedCollections[i]; overReplicated {
diff --git a/services/datamanager/summary/summary_test.go b/services/datamanager/summary/summary_test.go
index ea76df4..cc4eb92 100644
--- a/services/datamanager/summary/summary_test.go
+++ b/services/datamanager/summary/summary_test.go
@@ -215,6 +215,6 @@ func TestMixedReplication(t *testing.T) {
 	returnedSummary := SummarizeReplication(rc, keepInfo)
 
 	if !reflect.DeepEqual(returnedSummary, expectedSummary) {
-		t.Fatalf("Expected returnedSummary to look like: \n%+v but instead it is: \n%+v. Index to UUID is %v. BlockToCollectionIndices is %v.", expectedSummary, returnedSummary, rc.CollectionIndexToUuid, rc.BlockToCollectionIndices)
+		t.Fatalf("Expected returnedSummary to look like: \n%+v but instead it is: \n%+v. Index to UUID is %v. BlockToCollectionIndices is %v.", expectedSummary, returnedSummary, rc.CollectionIndexToUUID, rc.BlockToCollectionIndices)
 	}
 }
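
The SendTrashLists change in keep.go keeps its existing fan-out shape: one
goroutine per target server posts a trash list and reports on a shared error
channel, and the caller drains that channel exactly once per request started.
A rough, self-contained sketch of the same pattern (hypothetical endpoints and
payloads, not the datamanager code itself):

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
    )

    // postAll sends one POST per target concurrently and collects any
    // errors on a barrier channel, draining it once per request started.
    func postAll(client *http.Client, targets map[string][]byte) (errs []error) {
        count := 0
        barrier := make(chan error)

        for url, body := range targets {
            count++
            go func(url string, body []byte) {
                resp, err := client.Post(url+"/trash", "application/json", bytes.NewReader(body))
                if err != nil {
                    barrier <- err
                    return
                }
                defer resp.Body.Close()
                if resp.StatusCode != http.StatusOK {
                    barrier <- fmt.Errorf("%s: unexpected status %s", url, resp.Status)
                    return
                }
                barrier <- nil
            }(url, body)
        }

        for i := 0; i < count; i++ {
            if err := <-barrier; err != nil {
                errs = append(errs, err)
            }
        }
        return
    }

    func main() {
        errs := postAll(http.DefaultClient, map[string][]byte{
            "http://keep0.example:25107": []byte(`[]`),
        })
        fmt.Println(len(errs), "errors")
    }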

-----------------------------------------------------------------------

