[ARVADOS] updated: 91c929939c115f1abac3c5a3d0497a09901559a6

git at public.curoverse.com
Mon Sep 14 20:21:16 EDT 2015


Summary of changes:
 services/datamanager/keep/keep.go                |  6 +--
 services/datamanager/summary/canonical_string.go |  3 ++
 services/datamanager/summary/file.go             | 41 ++++++++++----------
 services/datamanager/summary/pull_list.go        | 26 +++++++------
 services/datamanager/summary/summary.go          | 48 ++++++++++++++----------
 services/datamanager/summary/trash_list.go       | 10 +++--
 services/datamanager/summary/trash_list_test.go  |  2 +-
 7 files changed, 78 insertions(+), 58 deletions(-)

       via  91c929939c115f1abac3c5a3d0497a09901559a6 (commit)
      from  936aac46fade3dc5b50698d45e58a271f8e84c77 (commit)

The revisions listed above that are new to this repository have not
appeared in any other notification email, so we list those revisions
in full below.


commit 91c929939c115f1abac3c5a3d0497a09901559a6
Author: radhika <radhika at curoverse.com>
Date:   Mon Sep 14 20:16:32 2015 -0400

    6260: most golint suggestions are addressed.
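
    For context, most of the hunks below add doc comments because golint
    expects every exported identifier to carry a comment that begins with the
    identifier's own name. A minimal standalone sketch of that convention
    (hypothetical names, not code from this commit):

        // Command golintdoc illustrates the doc-comment convention golint enforces:
        // every exported identifier gets a comment that starts with its own name.
        package main

        import "fmt"

        // Widget is an exported type, so golint wants its comment to start with "Widget".
        type Widget struct {
            Name string
        }

        // Describe returns a short description of the Widget; again the comment
        // begins with the identifier name, which is what most of this commit adds.
        func (w Widget) Describe() string {
            return "widget: " + w.Name
        }

        func main() {
            fmt.Println(Widget{Name: "example"}.Describe())
        }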

diff --git a/services/datamanager/keep/keep.go b/services/datamanager/keep/keep.go
index 884a69a..5b855dc 100644
--- a/services/datamanager/keep/keep.go
+++ b/services/datamanager/keep/keep.go
@@ -75,7 +75,7 @@ type ServiceList struct {
 	KeepServers    []ServerAddress `json:"items"`
 }
 
-// String 
+// String
 // TODO(misha): Change this to include the UUID as well.
 func (s ServerAddress) String() string {
 	return s.URL()
@@ -274,7 +274,7 @@ func CreateIndexRequest(arvLogger *logger.Logger,
 			fmt.Sprintf("Error building http request for %s: %v", url, err))
 	}
 
-	req.Header.Add("Authorization", "OAuth2 " + arv.ApiToken)
+	req.Header.Add("Authorization", "OAuth2 "+arv.ApiToken)
 	return
 }
 
@@ -456,7 +456,7 @@ func SendTrashLists(kc *keepclient.KeepClient, spl map[string]TrashList) (errs [
 				return
 			}
 
-			req.Header.Add("Authorization", "OAuth2 " + kc.Arvados.ApiToken)
+			req.Header.Add("Authorization", "OAuth2 "+kc.Arvados.ApiToken)
 
 			// Make the request
 			var resp *http.Response
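
    The two header changes above are gofmt spacing fixes around the string
    concatenation that builds the Authorization header. A small self-contained
    sketch of the same request-building pattern, with a placeholder URL and
    token rather than real Arvados values:

        package main

        import (
            "fmt"
            "log"
            "net/http"
        )

        // newIndexRequest sketches the pattern above: build a GET request and
        // attach the API token as an "OAuth2" Authorization header.
        func newIndexRequest(url, apiToken string) (*http.Request, error) {
            req, err := http.NewRequest("GET", url, nil)
            if err != nil {
                return nil, fmt.Errorf("error building http request for %s: %v", url, err)
            }
            req.Header.Add("Authorization", "OAuth2 "+apiToken)
            return req, nil
        }

        func main() {
            // Placeholder endpoint and token, for illustration only.
            req, err := newIndexRequest("http://keep0.example:25107/index", "example-token")
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(req.Header.Get("Authorization"))
        }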
diff --git a/services/datamanager/summary/canonical_string.go b/services/datamanager/summary/canonical_string.go
index 94f0676..152314c 100644
--- a/services/datamanager/summary/canonical_string.go
+++ b/services/datamanager/summary/canonical_string.go
@@ -1,13 +1,16 @@
 /* Ensures that we only have one copy of each unique string. This is
 /* not designed for concurrent access. */
+
 package summary
 
 // This code should probably be moved somewhere more universal.
 
+// CanonicalString struct
 type CanonicalString struct {
 	m map[string]string
 }
 
+// Get a CanonicalString
 func (cs *CanonicalString) Get(s string) (r string) {
 	if cs.m == nil {
 		cs.m = make(map[string]string)
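
    CanonicalString, documented above, is a small string-interning helper: Get
    hands back one shared copy of each distinct string so repeated values are
    stored only once. A minimal sketch of the same idea, using lower-case
    stand-in names rather than the Arvados implementation:

        package main

        import "fmt"

        // canonicalString keeps one shared copy of each distinct string value.
        // Like the original, it is not designed for concurrent access.
        type canonicalString struct {
            m map[string]string
        }

        // get returns the canonical copy of s, storing it on first use.
        func (cs *canonicalString) get(s string) string {
            if cs.m == nil {
                cs.m = make(map[string]string)
            }
            if canonical, ok := cs.m[s]; ok {
                return canonical
            }
            cs.m[s] = s
            return s
        }

        func main() {
            var cs canonicalString
            a := cs.get("http://keep0.example:25107")
            b := cs.get("http://keep0.example:25107")
            // Both calls return the same stored string value.
            fmt.Println(a == b)
        }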
diff --git a/services/datamanager/summary/file.go b/services/datamanager/summary/file.go
index 8c37e99..18b3aec 100644
--- a/services/datamanager/summary/file.go
+++ b/services/datamanager/summary/file.go
@@ -26,6 +26,7 @@ var (
 	readDataFrom string
 )
 
+// DataFetcher to fetch data from keep servers
 type DataFetcher func(arvLogger *logger.Logger,
 	readCollections *collection.ReadCollections,
 	keepServerInfo *keep.ReadServers)
@@ -41,7 +42,7 @@ func init() {
 		"Avoid network i/o and read summary data from this file instead. Used for development only.")
 }
 
-// Writes data we've read to a file.
+// MaybeWriteData writes data we've read to a file.
 //
 // This is useful for development, so that we don't need to read all
 // our data from the network every time we tweak something.
@@ -53,33 +54,33 @@ func MaybeWriteData(arvLogger *logger.Logger,
 	keepServerInfo keep.ReadServers) bool {
 	if writeDataTo == "" {
 		return false
-	} else {
-		summaryFile, err := os.Create(writeDataTo)
-		if err != nil {
-			loggerutil.FatalWithMessage(arvLogger,
-				fmt.Sprintf("Failed to open %s: %v", writeDataTo, err))
-		}
-		defer summaryFile.Close()
+	}
+	summaryFile, err := os.Create(writeDataTo)
+	if err != nil {
+		loggerutil.FatalWithMessage(arvLogger,
+			fmt.Sprintf("Failed to open %s: %v", writeDataTo, err))
+	}
+	defer summaryFile.Close()
 
-		enc := gob.NewEncoder(summaryFile)
-		data := serializedData{
-			ReadCollections: readCollections,
-			KeepServerInfo:  keepServerInfo}
-		err = enc.Encode(data)
-		if err != nil {
-			loggerutil.FatalWithMessage(arvLogger,
-				fmt.Sprintf("Failed to write summary data: %v", err))
-		}
-		log.Printf("Wrote summary data to: %s", writeDataTo)
-		return true
+	enc := gob.NewEncoder(summaryFile)
+	data := serializedData{
+		ReadCollections: readCollections,
+		KeepServerInfo:  keepServerInfo}
+	err = enc.Encode(data)
+	if err != nil {
+		loggerutil.FatalWithMessage(arvLogger,
+			fmt.Sprintf("Failed to write summary data: %v", err))
 	}
+	log.Printf("Wrote summary data to: %s", writeDataTo)
+	return true
 }
 
+// ShouldReadData should not be used outside of development
 func ShouldReadData() bool {
 	return readDataFrom != ""
 }
 
-// Reads data that we've written to a file.
+// ReadData reads data that we've written to a file.
 //
 // This is useful for development, so that we don't need to read all
 // our data from the network every time we tweak something.
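
    The MaybeWriteData rewrite above applies golint's advice to drop an else
    branch that follows a return, leaving an early-return shape. A compact
    sketch of that shape, with a stand-in struct in place of serializedData and
    plain log calls in place of loggerutil:

        package main

        import (
            "encoding/gob"
            "fmt"
            "log"
            "os"
        )

        // snapshot is a stand-in for the serializedData struct in the real code.
        type snapshot struct {
            Collections int
            Blocks      int
        }

        // maybeWriteData mirrors the refactored shape: bail out first, then do
        // the work with no else branch.
        func maybeWriteData(path string, data snapshot) bool {
            if path == "" {
                return false
            }
            f, err := os.Create(path)
            if err != nil {
                log.Fatalf("Failed to open %s: %v", path, err)
            }
            defer f.Close()

            if err := gob.NewEncoder(f).Encode(data); err != nil {
                log.Fatalf("Failed to write summary data: %v", err)
            }
            fmt.Printf("Wrote summary data to: %s\n", path)
            return true
        }

        func main() {
            maybeWriteData("", snapshot{})                                      // no-op: no path given
            maybeWriteData("/tmp/summary.gob", snapshot{Collections: 1, Blocks: 42}) // writes the gob file
        }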
diff --git a/services/datamanager/summary/pull_list.go b/services/datamanager/summary/pull_list.go
index b326c95..cc01249 100644
--- a/services/datamanager/summary/pull_list.go
+++ b/services/datamanager/summary/pull_list.go
@@ -1,4 +1,5 @@
 // Code for generating pull lists as described in https://arvados.org/projects/arvados/wiki/Keep_Design_Doc#Pull-List
+
 package summary
 
 import (
@@ -14,19 +15,21 @@ import (
 	"strings"
 )
 
+// Locator is a block digest
 type Locator blockdigest.DigestWithSize
 
+// MarshalJSON encoding
 func (l Locator) MarshalJSON() ([]byte, error) {
 	return []byte("\"" + blockdigest.DigestWithSize(l).String() + "\""), nil
 }
 
-// One entry in the Pull List
+// PullRequest represents one entry in the Pull List
 type PullRequest struct {
 	Locator Locator  `json:"locator"`
 	Servers []string `json:"servers"`
 }
 
-// The Pull List for a particular server
+// PullList for a particular server
 type PullList []PullRequest
 
 // PullListByLocator implements sort.Interface for PullList based on
@@ -49,6 +52,7 @@ func (a PullListByLocator) Less(i, j int) bool {
 	return false
 }
 
+// PullServers struct
 // For a given under-replicated block, this structure represents which
 // servers should pull the specified block and which servers they can
 // pull it from.
@@ -57,8 +61,8 @@ type PullServers struct {
 	From []string // Servers that already contain the specified block
 }
 
-// Creates a map from block locator to PullServers with one entry for
-// each under-replicated block.
+// ComputePullServers creates a map from block locator to PullServers
+// with one entry for each under-replicated block.
 //
 // This method ignores zero-replica blocks since there are no servers
 // to pull them from, so callers should feel free to omit them, but
@@ -78,7 +82,7 @@ func ComputePullServers(kc *keepclient.KeepClient,
 		writableServers[cs.Get(url)] = struct{}{}
 	}
 
-	for block, _ := range underReplicated {
+	for block := range underReplicated {
 		serversStoringBlock := keepServerInfo.BlockToServers[block]
 		numCopies := len(serversStoringBlock)
 		numCopiesMissing := blockToDesiredReplication[block] - numCopies
@@ -109,9 +113,9 @@ func ComputePullServers(kc *keepclient.KeepClient,
 	return m
 }
 
-// Creates a pull list in which the To and From fields preserve the
-// ordering of sorted servers and the contents are all canonical
-// strings.
+// CreatePullServers creates a pull list in which the To and From
+// fields preserve the ordering of sorted servers and the contents
+// are all canonical strings.
 func CreatePullServers(cs CanonicalString,
 	serverHasBlock map[string]struct{},
 	writableServers map[string]struct{},
@@ -142,12 +146,12 @@ func CreatePullServers(cs CanonicalString,
 	return
 }
 
-// Strips the protocol prefix from a url.
+// RemoveProtocolPrefix strips the protocol prefix from a url.
 func RemoveProtocolPrefix(url string) string {
 	return url[(strings.LastIndex(url, "/") + 1):]
 }
 
-// Produces a PullList for each keep server.
+// BuildPullLists produces a PullList for each keep server.
 func BuildPullLists(lps map[Locator]PullServers) (spl map[string]PullList) {
 	spl = map[string]PullList{}
 	// We don't worry about canonicalizing our strings here, because we
@@ -166,7 +170,7 @@ func BuildPullLists(lps map[Locator]PullServers) (spl map[string]PullList) {
 	return
 }
 
-// Writes each pull list to a file.
+// WritePullLists writes each pull list to a file.
 // The filename is based on the hostname.
 //
 // This is just a hack for prototyping, it is not expected to be used
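
    Two small idioms appear in this file: the range clause drops the unused
    ", _", and PullListByLocator wraps PullList to satisfy sort.Interface. A
    short sketch of the sort-wrapper idiom over simplified stand-in types, not
    the Arvados definitions:

        package main

        import (
            "fmt"
            "sort"
        )

        // pullRequest is a simplified stand-in for the PullRequest entries above.
        type pullRequest struct {
            Locator string
            Servers []string
        }

        // pullList mirrors the PullList type: a slice of requests for one server.
        type pullList []pullRequest

        // byLocator implements sort.Interface for pullList, ordered by Locator,
        // in the same style as PullListByLocator in the real file.
        type byLocator pullList

        func (a byLocator) Len() int           { return len(a) }
        func (a byLocator) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
        func (a byLocator) Less(i, j int) bool { return a[i].Locator < a[j].Locator }

        func main() {
            pl := pullList{
                {Locator: "bbb+3", Servers: []string{"keep1"}},
                {Locator: "aaa+3", Servers: []string{"keep0"}},
            }
            sort.Sort(byLocator(pl))
            for _, pr := range pl {
                fmt.Println(pr.Locator, pr.Servers)
            }
        }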
diff --git a/services/datamanager/summary/summary.go b/services/datamanager/summary/summary.go
index ec30a12..9fb0316 100644
--- a/services/datamanager/summary/summary.go
+++ b/services/datamanager/summary/summary.go
@@ -1,4 +1,5 @@
 // Summarizes Collection Data and Keep Server Contents.
+
 package summary
 
 // TODO(misha): Check size of blocks as well as their digest.
@@ -11,31 +12,33 @@ import (
 	"sort"
 )
 
+// BlockSet is a map of blocks
 type BlockSet map[blockdigest.DigestWithSize]struct{}
 
-// Adds a single block to the set.
+// Insert adds a single block to the set.
 func (bs BlockSet) Insert(digest blockdigest.DigestWithSize) {
 	bs[digest] = struct{}{}
 }
 
-// Adds a set of blocks to the set.
+// Union adds a set of blocks to the set.
 func (bs BlockSet) Union(obs BlockSet) {
 	for k, v := range obs {
 		bs[k] = v
 	}
 }
 
-// We use the collection index to save space. To convert to and from
+// CollectionIndexSet is used to save space. To convert to and from
 // the uuid, use collection.ReadCollections' fields
 // CollectionIndexToUUID and CollectionUUIDToIndex.
 type CollectionIndexSet map[int]struct{}
 
-// Adds a single collection to the set. The collection is specified by
+// Insert adds a single collection to the set. The collection is specified by
 // its index.
 func (cis CollectionIndexSet) Insert(collectionIndex int) {
 	cis[collectionIndex] = struct{}{}
 }
 
+// ToCollectionIndexSet gets block to collection indices
 func (bs BlockSet) ToCollectionIndexSet(
 	readCollections collection.ReadCollections,
 	collectionIndexSet *CollectionIndexSet) {
@@ -46,6 +49,7 @@ func (bs BlockSet) ToCollectionIndexSet(
 	}
 }
 
+// ReplicationLevels struct
 // Keeps track of the requested and actual replication levels.
 // Currently this is only used for blocks but could easily be used for
 // collections as well.
@@ -59,18 +63,20 @@ type ReplicationLevels struct {
 	Actual int
 }
 
-// Maps from replication levels to their blocks.
+// ReplicationLevelBlockSetMap maps from replication levels to their blocks.
 type ReplicationLevelBlockSetMap map[ReplicationLevels]BlockSet
 
-// An individual entry from ReplicationLevelBlockSetMap which only reports the number of blocks, not which blocks.
+// ReplicationLevelBlockCount is an individual entry from ReplicationLevelBlockSetMap
+// which only reports the number of blocks, not which blocks.
 type ReplicationLevelBlockCount struct {
 	Levels ReplicationLevels
 	Count  int
 }
 
-// An ordered list of ReplicationLevelBlockCount useful for reporting.
+// ReplicationLevelBlockSetSlice is an ordered list of ReplicationLevelBlockCount useful for reporting.
 type ReplicationLevelBlockSetSlice []ReplicationLevelBlockCount
 
+// ReplicationSummary struct
 type ReplicationSummary struct {
 	CollectionBlocksNotInKeep  BlockSet
 	UnderReplicatedBlocks      BlockSet
@@ -84,7 +90,7 @@ type ReplicationSummary struct {
 	CorrectlyReplicatedCollections CollectionIndexSet
 }
 
-// This struct counts the elements in each set in ReplicationSummary.
+// ReplicationSummaryCounts struct counts the elements in each set in ReplicationSummary.
 type ReplicationSummaryCounts struct {
 	CollectionBlocksNotInKeep      int
 	UnderReplicatedBlocks          int
@@ -97,8 +103,8 @@ type ReplicationSummaryCounts struct {
 	CorrectlyReplicatedCollections int
 }
 
-// Gets the BlockSet for a given set of ReplicationLevels, creating it
-// if it doesn't already exist.
+// GetOrCreate gets the BlockSet for a given set of ReplicationLevels,
+// creating it if it doesn't already exist.
 func (rlbs ReplicationLevelBlockSetMap) GetOrCreate(
 	repLevels ReplicationLevels) (bs BlockSet) {
 	bs, exists := rlbs[repLevels]
@@ -109,21 +115,21 @@ func (rlbs ReplicationLevelBlockSetMap) GetOrCreate(
 	return
 }
 
-// Adds a block to the set for a given replication level.
+// Insert adds a block to the set for a given replication level.
 func (rlbs ReplicationLevelBlockSetMap) Insert(
 	repLevels ReplicationLevels,
 	block blockdigest.DigestWithSize) {
 	rlbs.GetOrCreate(repLevels).Insert(block)
 }
 
-// Adds a set of blocks to the set for a given replication level.
+// Union adds a set of blocks to the set for a given replication level.
 func (rlbs ReplicationLevelBlockSetMap) Union(
 	repLevels ReplicationLevels,
 	bs BlockSet) {
 	rlbs.GetOrCreate(repLevels).Union(bs)
 }
 
-// Outputs a sorted list of ReplicationLevelBlockCounts.
+// Counts outputs a sorted list of ReplicationLevelBlockCounts.
 func (rlbs ReplicationLevelBlockSetMap) Counts() (
 	sorted ReplicationLevelBlockSetSlice) {
 	sorted = make(ReplicationLevelBlockSetSlice, len(rlbs))
@@ -153,6 +159,7 @@ func (rlbss ReplicationLevelBlockSetSlice) Swap(i, j int) {
 	rlbss[i], rlbss[j] = rlbss[j], rlbss[i]
 }
 
+// ComputeCounts returns ReplicationSummaryCounts
 func (rs ReplicationSummary) ComputeCounts() (rsc ReplicationSummaryCounts) {
 	// TODO(misha): Consider rewriting this method to iterate through
 	// the fields using reflection, instead of explicitly listing the
@@ -169,6 +176,7 @@ func (rs ReplicationSummary) ComputeCounts() (rsc ReplicationSummaryCounts) {
 	return rsc
 }
 
+// PrettyPrint ReplicationSummaryCounts
 func (rsc ReplicationSummaryCounts) PrettyPrint() string {
 	return fmt.Sprintf("Replication Block Counts:"+
 		"\n Missing From Keep: %d, "+
@@ -192,12 +200,13 @@ func (rsc ReplicationSummaryCounts) PrettyPrint() string {
 		rsc.CorrectlyReplicatedCollections)
 }
 
+// BucketReplication returns ReplicationLevelBlockSetMap
 func BucketReplication(readCollections collection.ReadCollections,
-	keepServerInfo keep.ReadServers) (rlbsm ReplicationLevelBlockSetMap) {
-	rlbsm = make(ReplicationLevelBlockSetMap)
+	keepServerInfo keep.ReadServers) (rlbs ReplicationLevelBlockSetMap) {
+	rlbs = make(ReplicationLevelBlockSetMap)
 
 	for block, requestedReplication := range readCollections.BlockToDesiredReplication {
-		rlbsm.Insert(
+		rlbs.Insert(
 			ReplicationLevels{
 				Requested: requestedReplication,
 				Actual:    len(keepServerInfo.BlockToServers[block])},
@@ -206,7 +215,7 @@ func BucketReplication(readCollections collection.ReadCollections,
 
 	for block, servers := range keepServerInfo.BlockToServers {
 		if 0 == readCollections.BlockToDesiredReplication[block] {
-			rlbsm.Insert(
+			rlbs.Insert(
 				ReplicationLevels{Requested: 0, Actual: len(servers)},
 				block)
 		}
@@ -214,7 +223,8 @@ func BucketReplication(readCollections collection.ReadCollections,
 	return
 }
 
-func (rlbsm ReplicationLevelBlockSetMap) SummarizeBuckets(
+// SummarizeBuckets reads collections and summarizes
+func (rlbs ReplicationLevelBlockSetMap) SummarizeBuckets(
 	readCollections collection.ReadCollections) (
 	rs ReplicationSummary) {
 	rs.CollectionBlocksNotInKeep = make(BlockSet)
@@ -228,7 +238,7 @@ func (rlbsm ReplicationLevelBlockSetMap) SummarizeBuckets(
 	rs.OverReplicatedCollections = make(CollectionIndexSet)
 	rs.CorrectlyReplicatedCollections = make(CollectionIndexSet)
 
-	for levels, bs := range rlbsm {
+	for levels, bs := range rlbs {
 		if levels.Actual == 0 {
 			rs.CollectionBlocksNotInKeep.Union(bs)
 		} else if levels.Requested == 0 {
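
    BlockSet and CollectionIndexSet above follow the usual Go set idiom of a
    map with empty-struct values, with Insert and Union methods on top. A
    generic sketch of that idiom, using a string-keyed set rather than the
    Arvados block types:

        package main

        import "fmt"

        // stringSet is a set built on map[string]struct{}, the same idiom as
        // BlockSet and CollectionIndexSet in the file above.
        type stringSet map[string]struct{}

        // insert adds a single element to the set.
        func (s stringSet) insert(v string) {
            s[v] = struct{}{}
        }

        // union adds every element of other to the set.
        func (s stringSet) union(other stringSet) {
            for k := range other {
                s[k] = struct{}{}
            }
        }

        func main() {
            a := stringSet{}
            a.insert("block-1")
            b := stringSet{"block-2": {}, "block-3": {}}
            a.union(b)
            fmt.Println(len(a)) // 3
        }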
diff --git a/services/datamanager/summary/trash_list.go b/services/datamanager/summary/trash_list.go
index 0bedc9c..b6ceace 100644
--- a/services/datamanager/summary/trash_list.go
+++ b/services/datamanager/summary/trash_list.go
@@ -1,4 +1,5 @@
 // Code for generating trash lists
+
 package summary
 
 import (
@@ -9,6 +10,7 @@ import (
 	"time"
 )
 
+// BuildTrashLists builds list of blocks to be sent to trash queue
 func BuildTrashLists(kc *keepclient.KeepClient,
 	keepServerInfo *keep.ReadServers,
 	keepBlocksNotInCollections BlockSet) (m map[string]keep.TrashList, err error) {
@@ -40,19 +42,19 @@ func buildTrashListsInternal(writableServers map[string]struct{},
 	m = make(map[string]keep.TrashList)
 
 	for block := range keepBlocksNotInCollections {
-		for _, block_on_server := range keepServerInfo.BlockToServers[block] {
-			if block_on_server.Mtime >= expiry {
+		for _, blockOnServer := range keepServerInfo.BlockToServers[block] {
+			if blockOnServer.Mtime >= expiry {
 				continue
 			}
 
 			// block is older than expire cutoff
-			srv := keepServerInfo.KeepServerIndexToAddress[block_on_server.ServerIndex].String()
+			srv := keepServerInfo.KeepServerIndexToAddress[blockOnServer.ServerIndex].String()
 
 			if _, writable := writableServers[srv]; !writable {
 				continue
 			}
 
-			m[srv] = append(m[srv], keep.TrashRequest{Locator: block.Digest.String(), BlockMtime: block_on_server.Mtime})
+			m[srv] = append(m[srv], keep.TrashRequest{Locator: block.Digest.String(), BlockMtime: blockOnServer.Mtime})
 		}
 	}
 	return
diff --git a/services/datamanager/summary/trash_list_test.go b/services/datamanager/summary/trash_list_test.go
index 7620631..555211f 100644
--- a/services/datamanager/summary/trash_list_test.go
+++ b/services/datamanager/summary/trash_list_test.go
@@ -34,7 +34,7 @@ func (s *TrashSuite) TestBuildTrashLists(c *C) {
 				keep.BlockServerInfo{1, 101}}}}
 
 	// only block0 is in delete set
-	var bs BlockSet = make(BlockSet)
+	var bs = make(BlockSet)
 	bs[block0] = struct{}{}
 
 	// Test trash list where only sv0 is on writable list.

-----------------------------------------------------------------------


hooks/post-receive
-- 