-
Notifications
You must be signed in to change notification settings - Fork 10
/
Copy pathpackageexample_test.go
168 lines (163 loc) · 8.14 KB
/
packageexample_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
package ring_test
import (
"fmt"
"github.com/gholt/ring"
)
// Example_overview demonstrates the basic workflow with the ring package:
// create a Builder with a replica count, add nodes, Rebalance, and then read
// the partition/replica assignments from the resulting Ring.
func Example_overview() {
	// We'll create a new builder for a ring with three replicas:
	builder := ring.NewBuilder(3)
	// And we'll add four nodes we'll label ABCD:
	for n := 'A'; n <= 'D'; n++ {
		builder.AddNode(string(n), 1, nil)
	}
	// Generate the ring:
	builder.Rebalance()
	rring := builder.Ring()
	// Print out the ring assignments: partitions horizontally, replicas vertically:
	// ` P0 P1 P2 P3
	// R0 A B C D
	// R1 B A D C
	// R2 D C A B
	fmt.Print("` ")
	for partition := 0; partition < rring.PartitionCount(); partition++ {
		// Printf replaces the redundant Print(Sprintf(...)) pattern (staticcheck S1038).
		fmt.Printf(" P%d", partition)
	}
	fmt.Println()
	for replica := 0; replica < rring.ReplicaCount(); replica++ {
		fmt.Printf("R%d", replica)
		for partition := 0; partition < rring.PartitionCount(); partition++ {
			// KeyNodes(partition) returns the nodes assigned to that partition,
			// one per replica; Info() is the label given to AddNode above.
			fmt.Print(" " + rring.KeyNodes(partition)[replica].Info())
		}
		fmt.Println()
	}
	// Output:
	// ` P0 P1 P2 P3
	// R0 A B C D
	// R1 B A D C
	// R2 D C A B
}
// Example_groupTiers walks through group tiers: nodes (disks) are grouped
// under servers, and servers can in turn be grouped under regions, so the
// Builder spreads a partition's replicas as far apart as possible across the
// highest tier first while keeping the Ring balanced.
func Example_groupTiers() {
	fmt.Println("Group tiers can be confusing, so let's work with a detailed example.")
	fmt.Println("We are going to have two servers, each with two disk drives.")
	fmt.Println("The disk drives are going to be represented by the nodes themselves.")
	fmt.Println("The servers will be represented by groups.")
	fmt.Println("By defining groups, we can tell the Builder to build rings with replicas assigned as far apart as possible, while keeping the whole Ring in balance.")
	fmt.Println("So let's define and build our ring; we'll use two replicas to start with...")
	builder := ring.NewBuilder(2)
	builder.SetMaxPartitionCount(2) // Just to keep the output simpler
	for _, server := range []string{"ServerA", "ServerB"} {
		group := builder.AddGroup(server, nil)
		for _, disk := range []string{"1stDisk", "2ndDisk"} {
			builder.AddNode(disk, 1, group)
		}
	}
	builder.Rebalance()
	rring := builder.Ring()
	// printRing renders the assignments as a partition-by-replica grid,
	// prefixing each node with its server group.
	printRing := func(rring *ring.Ring) {
		fmt.Println("Here are the ring assignments: partitions horizontally, replicas vertically:")
		fmt.Print("` ")
		for partition := 0; partition < rring.PartitionCount(); partition++ {
			// Printf replaces the redundant Print(Sprintf(...)) pattern (staticcheck S1038).
			fmt.Printf(" -------P%d------", partition)
		}
		fmt.Println()
		for replica := 0; replica < rring.ReplicaCount(); replica++ {
			fmt.Printf("R%d", replica)
			for partition := 0; partition < rring.PartitionCount(); partition++ {
				node := rring.KeyNodes(partition)[replica]
				fmt.Print(" " + node.Group().Info() + ":" + node.Info())
			}
			fmt.Println()
		}
	}
	printRing(rring)
	// ` -------P0------ -------P1------
	// R0 ServerA:1stDisk ServerB:2ndDisk
	// R1 ServerB:1stDisk ServerA:2ndDisk
	fmt.Println("Note that it assigned each replica of a partition to a distinct server.")
	fmt.Println("The node info (disk names) happened to be the same but it doesn't matter since they are on different servers.")
	fmt.Println()
	fmt.Println("Let's up the replica count to 3, where we know it will have to assign multiple replicas to a single server...")
	builder = ring.NewBuilder(3)
	builder.SetMaxPartitionCount(4) // Just to keep the output simpler
	for _, server := range []string{"ServerA", "ServerB"} {
		group := builder.AddGroup(server, nil)
		for _, disk := range []string{"1stDisk", "2ndDisk"} {
			builder.AddNode(disk, 1, group)
		}
	}
	builder.Rebalance()
	rring = builder.Ring()
	printRing(rring)
	// ` -------P0------ -------P1------ -------P2------ -------P3------
	// R0 ServerA:1stDisk ServerB:2ndDisk ServerA:2ndDisk ServerB:1stDisk
	// R1 ServerB:1stDisk ServerA:1stDisk ServerB:2ndDisk ServerA:2ndDisk
	// R2 ServerA:2ndDisk ServerB:1stDisk ServerA:1stDisk ServerB:2ndDisk
	fmt.Println("So now it ended up using servers twice within the same partition, but note that it made sure to pick distinct drives for each replica at least.")
	fmt.Println()
	fmt.Println("Let's get more complicated and define another tier of groups; we'll call it the region tier.")
	fmt.Println("To do this, we simply create the new region groups and set them as the parents of the server groups.")
	fmt.Println("We'll assign our first two servers to the East region, and add two more servers in the Cent region, and even two more servers in the West region.")
	builder = ring.NewBuilder(3)
	builder.SetMaxPartitionCount(4) // Just to keep the output simpler
	for _, region := range []string{"East", "Cent", "West"} {
		regionGroup := builder.AddGroup(region, nil)
		for _, server := range []string{"ServerA", "ServerB"} {
			// A group's parent establishes the higher tier (region above server).
			serverGroup := builder.AddGroup(server, regionGroup)
			for _, disk := range []string{"1stDisk", "2ndDisk"} {
				builder.AddNode(disk, 1, serverGroup)
			}
		}
	}
	builder.Rebalance()
	rring = builder.Ring()
	fmt.Println("Here are the ring assignments: partitions horizontally, replicas vertically:")
	// ` ---------P0--------- ---------P1--------- ---------P2--------- ---------P3---------
	// R0 East:ServerA:1stDisk Cent:ServerB:2ndDisk West:ServerB:2ndDisk East:ServerB:1stDisk
	// R1 Cent:ServerA:1stDisk East:ServerA:2ndDisk Cent:ServerA:2ndDisk West:ServerB:1stDisk
	// R2 West:ServerA:1stDisk West:ServerA:2ndDisk East:ServerB:2ndDisk Cent:ServerB:1stDisk
	fmt.Print("` ")
	for partition := 0; partition < rring.PartitionCount(); partition++ {
		fmt.Printf(" ---------P%d---------", partition)
	}
	fmt.Println()
	for replica := 0; replica < rring.ReplicaCount(); replica++ {
		fmt.Printf("R%d", replica)
		for partition := 0; partition < rring.PartitionCount(); partition++ {
			node := rring.KeyNodes(partition)[replica]
			// Walk up the group tiers: region (parent of server group), server, disk.
			fmt.Print(" " + node.Group().Parent().Info() + ":" + node.Group().Info() + ":" + node.Info())
		}
		fmt.Println()
	}
	fmt.Println("So now you can see it assigned replicas in distinct regions before worrying about the lower tiers.")
	// Output:
	// Group tiers can be confusing, so let's work with a detailed example.
	// We are going to have two servers, each with two disk drives.
	// The disk drives are going to be represented by the nodes themselves.
	// The servers will be represented by groups.
	// By defining groups, we can tell the Builder to build rings with replicas assigned as far apart as possible, while keeping the whole Ring in balance.
	// So let's define and build our ring; we'll use two replicas to start with...
	// Here are the ring assignments: partitions horizontally, replicas vertically:
	// ` -------P0------ -------P1------
	// R0 ServerA:1stDisk ServerB:2ndDisk
	// R1 ServerB:1stDisk ServerA:2ndDisk
	// Note that it assigned each replica of a partition to a distinct server.
	// The node info (disk names) happened to be the same but it doesn't matter since they are on different servers.
	//
	// Let's up the replica count to 3, where we know it will have to assign multiple replicas to a single server...
	// Here are the ring assignments: partitions horizontally, replicas vertically:
	// ` -------P0------ -------P1------ -------P2------ -------P3------
	// R0 ServerA:1stDisk ServerB:2ndDisk ServerA:2ndDisk ServerB:1stDisk
	// R1 ServerB:1stDisk ServerA:1stDisk ServerB:2ndDisk ServerA:2ndDisk
	// R2 ServerA:2ndDisk ServerB:1stDisk ServerA:1stDisk ServerB:2ndDisk
	// So now it ended up using servers twice within the same partition, but note that it made sure to pick distinct drives for each replica at least.
	//
	// Let's get more complicated and define another tier of groups; we'll call it the region tier.
	// To do this, we simply create the new region groups and set them as the parents of the server groups.
	// We'll assign our first two servers to the East region, and add two more servers in the Cent region, and even two more servers in the West region.
	// Here are the ring assignments: partitions horizontally, replicas vertically:
	// ` ---------P0--------- ---------P1--------- ---------P2--------- ---------P3---------
	// R0 East:ServerA:1stDisk Cent:ServerB:2ndDisk West:ServerB:2ndDisk East:ServerB:1stDisk
	// R1 Cent:ServerA:1stDisk East:ServerA:2ndDisk Cent:ServerA:2ndDisk West:ServerB:1stDisk
	// R2 West:ServerA:1stDisk West:ServerA:2ndDisk East:ServerB:2ndDisk Cent:ServerB:1stDisk
	// So now you can see it assigned replicas in distinct regions before worrying about the lower tiers.
}