HashMap and ConcurrentHashMap

A comparison of the HashMap and ConcurrentHashMap source code.


1. Basic structure of HashMap and ConcurrentHashMap

Both HashMap and ConcurrentHashMap share the same basic structure: an array of bins, each bin holding a linked list (or a red-black tree).
HashMap's Node and TreeNode:

transient Node<K,V>[] table;

static class Node<K,V> implements Map.Entry<K,V> {
    final int hash;
    final K key;
    V value;
    Node<K,V> next;
}


static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
    TreeNode<K,V> parent;  // red-black tree links
    TreeNode<K,V> left;
    TreeNode<K,V> right;
    TreeNode<K,V> prev;    // needed to unlink next upon deletion
    boolean red;
    TreeNode(int hash, K key, V val, Node<K,V> next) {
        super(hash, key, val, next);
    }
}

In ConcurrentHashMap, TreeNode extends Node, and Node has three further subclasses:

static final int MOVED     = -1; // hash for forwarding nodes
static final int TREEBIN   = -2; // hash for roots of trees
static final int RESERVED  = -3; // hash for transient reservations
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash


static class Node<K,V> implements Map.Entry<K,V> {
    final int hash;
    final K key;
    volatile V val;
    volatile Node<K,V> next;
}

/* ---------------- Special Nodes -------------- */
// A node inserted at head of bins during transfer operations.
static final class ForwardingNode<K,V> extends Node<K,V> {
    final Node<K,V>[] nextTable;
    ForwardingNode(Node<K,V>[] tab) {
        super(MOVED, null, null, null);
        this.nextTable = tab;
    }
}

// A place-holder node used in computeIfAbsent and compute
static final class ReservationNode<K,V> extends Node<K,V> {
    ReservationNode() {
        super(RESERVED, null, null, null);
    }
}


/* ---------------- TreeNodes -------------- */
// Red-black tree node
static final class TreeNode<K,V> extends Node<K,V> {
    TreeNode<K,V> parent;  // red-black tree links
    TreeNode<K,V> left;
    TreeNode<K,V> right;
    TreeNode<K,V> prev;    // needed to unlink next upon deletion
    boolean red;
}


/* ---------------- TreeBins -------------- */
/**
 * TreeNodes used at the heads of bins. TreeBins do not hold user
 * keys or values, but instead point to list of TreeNodes and
 * their root. They also maintain a parasitic read-write lock
 * forcing writers (who hold bin lock) to wait for readers (who do
 * not) to complete before tree restructuring operations.
 */
static final class TreeBin<K,V> extends Node<K,V> {
    TreeNode<K,V> root;
    volatile TreeNode<K,V> first;
    volatile Thread waiter;
    volatile int lockState;
    // values for lockState
    static final int WRITER = 1; // set while holding write lock
    static final int WAITER = 2; // set when waiting for write lock
    static final int READER = 4; // increment value for setting read lock
}


2. A few HashMap helper functions

2.1 hash()

HashMap's hash() and the bucket-index mapping:

static final int hash(Object key) {
    int h;
    return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
}

// (tab.length - 1) & hash(key) gives the index in the array
tab[(tab.length - 1) & hash(key)]

ConcurrentHashMap's hash function, spread(), and the same index mapping:

static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

static final int spread(int h) {
    return (h ^ (h >>> 16)) & HASH_BITS;
}

// (tab.length - 1) & spread(key.hashCode()) gives the index in the array
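
A small self-contained demo (the class and its main() are mine, not JDK code) shows what the two functions buy you: hash() folds the high 16 bits into the low 16 so keys that differ only in their high bits still spread over a small table, and spread() additionally masks with HASH_BITS so a normal node's hash is never negative (negative hashes are reserved for MOVED/TREEBIN/RESERVED nodes).

// HashDemo is illustrative only; it just replays the two formulas shown above.
public class HashDemo {
    static final int HASH_BITS = 0x7fffffff;

    static int hash(Object key) {                // HashMap.hash()
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    static int spread(int h) {                   // ConcurrentHashMap.spread()
        return (h ^ (h >>> 16)) & HASH_BITS;
    }

    public static void main(String[] args) {
        int n = 16;                              // table length
        int a = 0x00010000, b = 0x00020000;      // differ only in the high 16 bits
        System.out.println(((n - 1) & a) + " " + ((n - 1) & b));             // 0 0: raw hashCodes collide
        System.out.println(((n - 1) & hash(a)) + " " + ((n - 1) & hash(b))); // 1 2: spread hashes do not
        System.out.println(spread(-1) >= 0);     // true: spread() never yields a negative hash
    }
}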

2.2 tableSizeFor()

HashMap and ConcurrentHashMap share the same tableSizeFor():

// Guarantees tableSize = 2^n: returns a power-of-two size for the given target capacity
static final int tableSizeFor(int cap) {
    int n = cap - 1;
    n |= n >>> 1;
    n |= n >>> 2;
    n |= n >>> 4;
    n |= n >>> 8;
    n |= n >>> 16;
    return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
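
For example, tableSizeFor(17) returns 32: the OR-shift steps smear the highest set bit downwards until every lower bit is 1, and the final n + 1 lands on the next power of two.

// tableSizeFor(17):
// n = 17 - 1 = 16        (binary 10000)
// n |= n >>> 1;  // 24   (11000)
// n |= n >>> 2;  // 30   (11110)
// n |= n >>> 4;  // 31   (11111)
// n |= n >>> 8;  // 31
// n |= n >>> 16; // 31
// return n + 1;  // 32, the smallest power of two >= 17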


3. Resizing: resize() & transfer()

HashMap's resize() and ConcurrentHashMap's transfer() are triggered in two situations:

    1. During treeifyBin(), if table.length < MIN_TREEIFY_CAPACITY (64), the table is resized first instead of being treeified.
    2. At the end of put(), if the current size > threshold, the table is resized.

3.1 HashMap's resize():

/**
 * Initializes or doubles the table size.
 * Initialization: allocates in accord with the initial capacity target held in the threshold field.
 * Resizing: because of power-of-two expansion, each element in the new table either keeps its
 * index or moves by a power-of-two offset.
 */
final Node<K,V>[] resize() {
    Node<K,V>[] oldTab = table;
    int oldCap = (oldTab == null) ? 0 : oldTab.length;
    int oldThr = threshold;
    int newCap, newThr = 0;
    if (oldCap > 0) {
        if (oldCap >= MAXIMUM_CAPACITY) {
            threshold = Integer.MAX_VALUE;
            return oldTab;
        }
        else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY && oldCap >= DEFAULT_INITIAL_CAPACITY)
            newThr = oldThr << 1; // double threshold
    }
    else if (oldThr > 0) // initial capacity was placed in threshold
        newCap = oldThr;
    else {               // zero initial threshold signifies using defaults
        newCap = DEFAULT_INITIAL_CAPACITY;
        newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
    }
    if (newThr == 0) {
        float ft = (float)newCap * loadFactor;
        newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ? (int)ft : Integer.MAX_VALUE);
    }
    threshold = newThr;
    @SuppressWarnings({"rawtypes","unchecked"})
    Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
    table = newTab;
    // If the old array is non-null it already holds data, which must be copied into newTab
    if (oldTab != null) {
        for (int j = 0; j < oldCap; ++j) {
            Node<K,V> e;
            if ((e = oldTab[j]) != null) {
                oldTab[j] = null;
                // If this slot holds a single node, just recompute its index in newTab
                if (e.next == null)
                    newTab[e.hash & (newCap - 1)] = e;

                // If this slot holds a red-black tree...
                else if (e instanceof TreeNode)
                    ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);

                // Otherwise it is a linked list
                else { // preserve order
                    Node<K,V> loHead = null, loTail = null;
                    Node<K,V> hiHead = null, hiTail = null;
                    Node<K,V> next;
                    do {
                        next = e.next;

                        /* Consider:
                         * oldCap   = 00000100, 00001000, 00010000
                         * oldCap-1 = 00000011, 00000111, 00001111
                         *
                         * newCap   = 00001000, 00010000, 00100000
                         * newCap-1 = 00000111, 00001111, 00011111
                         *
                         * if (e.hash & oldCap) == 0 then e.hash & (newCap-1) = e.hash & (oldCap-1)
                         * if (e.hash & oldCap) != 0 then e.hash & (newCap-1) = e.hash & (oldCap-1) + oldCap
                         *
                         * So if e.hash & oldCap == 0, element e keeps its index in newTab;
                         * otherwise its new index is the old index plus oldCap, i.e. j + oldCap.
                         */
                        if ((e.hash & oldCap) == 0) {
                            if (loTail == null)
                                loHead = e;
                            else
                                loTail.next = e;
                            loTail = e;
                        }
                        else {
                            if (hiTail == null)
                                hiHead = e;
                            else
                                hiTail.next = e;
                            hiTail = e;
                        }
                    } while ((e = next) != null);

                    if (loTail != null) {
                        loTail.next = null;
                        newTab[j] = loHead;
                    }
                    if (hiTail != null) {
                        hiTail.next = null;
                        newTab[j + oldCap] = hiHead;
                    }
                }
            }
        }
    }
    return newTab;
}
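
A quick way to convince yourself of the index rule in the comment above: take a few hashes that all land in bucket 5 of a 16-slot table and see where they go in a 32-slot table. (ResizeDemo is an illustrative class of mine, not part of HashMap.)

// ResizeDemo replays the (e.hash & oldCap) split used by resize().
public class ResizeDemo {
    public static void main(String[] args) {
        int oldCap = 16, newCap = 32;
        int[] hashes = { 5, 21, 37, 53 };        // all map to bucket 5 when the capacity is 16
        for (int h : hashes) {
            int oldIndex = h & (oldCap - 1);
            int newIndex = h & (newCap - 1);
            // (h & oldCap) == 0 -> index unchanged; otherwise the index moves by oldCap
            System.out.println("hash=" + h
                    + " old=" + oldIndex
                    + " new=" + newIndex
                    + " (h & oldCap)=" + (h & oldCap));
        }
    }
}

Hashes with (h & oldCap) == 0 stay at index 5; the others move to 5 + 16 = 21, which is exactly the loHead/hiHead split performed by resize().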

3.2 ConcurrentHashMap's transfer():

private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
    int n = tab.length, stride;
    if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
        stride = MIN_TRANSFER_STRIDE; // subdivide range
    if (nextTab == null) {            // initiating
        try {
            @SuppressWarnings("unchecked")
            Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
            nextTab = nt;
        } catch (Throwable ex) {      // try to cope with OOME
            sizeCtl = Integer.MAX_VALUE;
            return;
        }
        nextTable = nextTab;
        transferIndex = n;
    }
    int nextn = nextTab.length;
    ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
    boolean advance = true;
    boolean finishing = false; // to ensure sweep before committing nextTab
    for (int i = 0, bound = 0;;) {
        Node<K,V> f; int fh;
        while (advance) {
            int nextIndex, nextBound;
            if (--i >= bound || finishing)
                advance = false;
            else if ((nextIndex = transferIndex) <= 0) {
                i = -1;
                advance = false;
            }
            else if (U.compareAndSwapInt
                     (this, TRANSFERINDEX, nextIndex,
                      nextBound = (nextIndex > stride ?
                                   nextIndex - stride : 0))) {
                bound = nextBound;
                i = nextIndex - 1;
                advance = false;
            }
        }
        if (i < 0 || i >= n || i + n >= nextn) {
            int sc;
            if (finishing) {
                nextTable = null;
                table = nextTab;
                sizeCtl = (n << 1) - (n >>> 1);
                return;
            }
            if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
                if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
                    return;
                finishing = advance = true;
                i = n; // recheck before commit
            }
        }
        else if ((f = tabAt(tab, i)) == null)
            advance = casTabAt(tab, i, null, fwd);
        else if ((fh = f.hash) == MOVED)
            advance = true; // already processed
        else {
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    Node<K,V> ln, hn;
                    if (fh >= 0) {
                        int runBit = fh & n;
                        Node<K,V> lastRun = f;
                        for (Node<K,V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                lastRun = p;
                            }
                        }
                        if (runBit == 0) {
                            ln = lastRun;
                            hn = null;
                        }
                        else {
                            hn = lastRun;
                            ln = null;
                        }
                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash; K pk = p.key; V pv = p.val;
                            if ((ph & n) == 0)
                                ln = new Node<K,V>(ph, pk, pv, ln);
                            else
                                hn = new Node<K,V>(ph, pk, pv, hn);
                        }
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                    else if (f instanceof TreeBin) {
                        TreeBin<K,V> t = (TreeBin<K,V>)f;
                        TreeNode<K,V> lo = null, loTail = null;
                        TreeNode<K,V> hi = null, hiTail = null;
                        int lc = 0, hc = 0;
                        for (Node<K,V> e = t.first; e != null; e = e.next) {
                            int h = e.hash;
                            TreeNode<K,V> p = new TreeNode<K,V>
                                (h, e.key, e.val, null, null);
                            if ((h & n) == 0) {
                                if ((p.prev = loTail) == null)
                                    lo = p;
                                else
                                    loTail.next = p;
                                loTail = p;
                                ++lc;
                            }
                            else {
                                if ((p.prev = hiTail) == null)
                                    hi = p;
                                else
                                    hiTail.next = p;
                                hiTail = p;
                                ++hc;
                            }
                        }
                        ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
                            (hc != 0) ? new TreeBin<K,V>(lo) : t;
                        hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
                            (lc != 0) ? new TreeBin<K,V>(hi) : t;
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                }
            }
        }
    }
}
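
transfer() is never called by user code directly; it runs from addCount() and helpTransfer() while threads are writing, and any thread that hits a ForwardingNode (hash == MOVED) while putting joins the resize instead of blocking. A simple way to see the cooperative resize at work (illustrative demo, the class name is mine) is to start with a tiny capacity and hammer the map from several threads; the table is resized repeatedly and no entries are lost:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

// Illustrative only: many concurrent puts force several resizes (transfer runs internally),
// yet every mapping is still present afterwards.
public class TransferDemo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentHashMap<Integer, Integer> map = new ConcurrentHashMap<>(2);
        int threads = 8, perThread = 10_000;
        CountDownLatch done = new CountDownLatch(threads);
        for (int t = 0; t < threads; t++) {
            final int base = t * perThread;
            new Thread(() -> {
                for (int i = 0; i < perThread; i++)
                    map.put(base + i, base + i);
                done.countDown();
            }).start();
        }
        done.await();
        System.out.println(map.size());          // 80000: nothing lost during resizing
    }
}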

4. get(Object key)

This section compares the get() methods of HashMap and ConcurrentHashMap:

// HashMap's get() delegates to getNode()
final Node<K,V> getNode(int hash, Object key) {
    Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
    if ((tab = table) != null && (n = tab.length) > 0 && (first = tab[(n - 1) & hash]) != null) {
        // Always check the first node in the bin; if it matches, there is no need to
        // distinguish between a linked list and a red-black tree
        if (first.hash == hash && ((k = first.key) == key || (key != null && key.equals(k))))
            return first;
        // The first node is not the one we are looking for
        if ((e = first.next) != null) {
            // If the first node is a red-black tree TreeNode
            if (first instanceof TreeNode)
                return ((TreeNode<K,V>)first).getTreeNode(hash, key);
            // Otherwise it must be a linked list
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k))))
                    return e;
            } while ((e = e.next) != null);
        }
    }
    return null;
}

ConcurrentHashMap's get() is broadly similar to HashMap's, and it takes no lock at all; this is one place where ConcurrentHashMap's weak consistency shows.

public V get(Object key) {
    Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
    int h = spread(key.hashCode());
    if ((tab = table) != null && (n = tab.length) > 0 && (e = tabAt(tab, (n - 1) & h)) != null) {
        // Check the first node of the bin
        if ((eh = e.hash) == h) {
            if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                return e.val;
        }
        // A negative hash marks a special node (TreeBin, ForwardingNode, ...); delegate to its find()
        else if (eh < 0)
            return (p = e.find(h, key)) != null ? p.val : null;
        // Otherwise it must be a linked list
        while ((e = e.next) != null) {
            if (e.hash == h && ((ek = e.key) == key || (ek != null && key.equals(ek))))
                return e.val;
        }
    }
    return null;
}


5. size()

HashMap's size() is trivial:

public int size() {
    return size;
}

public boolean isEmpty() {
    return size == 0;
}

ConcurrentHashMap's size():

/** Table of counter cells. When non-null, size is a power of 2. **/
// CounterCells are striped counters (the same idea as LongAdder): threads that contend on
// baseCount fall back to updating their own cell, so counting never becomes a bottleneck
private transient volatile CounterCell[] counterCells;

@sun.misc.Contended static final class CounterCell {
    volatile long value;
    CounterCell(long x) { value = x; }
}

// The total count is baseCount plus the sum of all CounterCell values
final long sumCount() {
    CounterCell[] as = counterCells;
    CounterCell a;
    long sum = baseCount;
    if (as != null) {
        for (int i = 0; i < as.length; ++i) {
            if ((a = as[i]) != null)
                sum += a.value;
        }
    }
    return sum;
}

public int size() {
    long n = sumCount();
    return ((n < 0L) ? 0 : (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int)n);
}

public boolean isEmpty() {
    return sumCount() <= 0L; // ignore transient negative values
}
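
The CounterCell machinery is essentially the same idea as java.util.concurrent.atomic.LongAdder: a base counter plus extra cells that contending threads fall back to, summed only when someone asks for the size. A stripped-down sketch of that idea (this is my simplification, not the JDK code, which sizes and places cells far more carefully):

import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongArray;

// Sketch of a striped counter: updates spread over several cells, sum() adds them up on demand.
public class StripedCounter {
    private final AtomicLong base = new AtomicLong();
    private final AtomicLongArray cells = new AtomicLongArray(8); // fixed number of stripes for simplicity

    void increment() {
        long b = base.get();
        // Fast path: CAS on the base counter; under contention fall back to a random cell.
        if (!base.compareAndSet(b, b + 1)) {
            int i = ThreadLocalRandom.current().nextInt(cells.length());
            cells.getAndIncrement(i);
        }
    }

    long sum() {                                 // same shape as ConcurrentHashMap.sumCount()
        long s = base.get();
        for (int i = 0; i < cells.length(); i++)
            s += cells.get(i);
        return s;
    }

    public static void main(String[] args) {
        StripedCounter c = new StripedCounter();
        for (int i = 0; i < 1_000; i++)
            c.increment();
        System.out.println(c.sum());             // 1000
    }
}

As in ConcurrentHashMap, sum() is only an estimate while writers are active, which is why size() and isEmpty() above are documented as tolerating transient inconsistencies.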


6. put()

HashMap's put() is relatively straightforward; the key point is to first determine whether the bin holds a plain Node or a TreeNode, and then handle the linked-list and red-black-tree cases separately:

final V putVal(int hash, K key, V value, boolean onlyIfAbsent, boolean evict) {
    Node<K,V>[] tab; Node<K,V> p; int n, i;
    // 1. If the array has not been initialized yet, initialize it first
    if ((tab = table) == null || (n = tab.length) == 0)
        n = (tab = resize()).length;
    // 2. If the target bin is still empty, simply create a new Node there
    if ((p = tab[i = (n - 1) & hash]) == null)
        tab[i] = newNode(hash, key, value, null);
    // 3. Otherwise handle the linked-list and red-black-tree cases separately
    else {
        Node<K,V> e; K k;
        // 3.1 If the head node already matches the key, remember it as e
        if (p.hash == hash && ((k = p.key) == key || (key != null && key.equals(k))))
            e = p;
        // 3.2 If the bin is a red-black tree
        else if (p instanceof TreeNode)
            e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
        // 3.3 Otherwise the bin is a linked list
        else {
            for (int binCount = 0; ; ++binCount) {
                // 3.4 Reached the end of the list without finding the key: append a new node, e stays null
                if ((e = p.next) == null) {
                    p.next = newNode(hash, key, value, null);
                    // Check whether the list should be converted into a red-black tree
                    if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                        treeifyBin(tab, hash);
                    break;
                }
                // 3.5 Found the key somewhere in the middle of the list
                if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k))))
                    break;
                p = e; // p = e = p.next
            }
        }
        // 3.6 If a mapping for the key already exists, return the old value
        if (e != null) { // existing mapping for key
            V oldValue = e.value;
            if (!onlyIfAbsent || oldValue == null)
                e.value = value;
            afterNodeAccess(e);
            return oldValue;
        }
    }
    ++modCount;
    // 3.7 Check whether the table needs to be resized
    if (++size > threshold)
        resize();
    afterNodeInsertion(evict);
    return null;
}

ConcurrentHashMap's put(), which delegates to putVal():

final V putVal(K key, V value, boolean onlyIfAbsent) {
    if (key == null || value == null) throw new NullPointerException();
    int hash = spread(key.hashCode());
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break;                   // no lock when adding to empty bin
        }
        // If a resize is in progress, call helpTransfer() to help move bins
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        else {
            V oldVal = null;
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    if (fh >= 0) {
                        binCount = 1;
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    else if (f instanceof TreeBin) {
                        Node<K,V> p;
                        binCount = 2;
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                }
            }
            if (binCount != 0) {
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    // Update the element count by binCount; addCount() is what may trigger a resize via transfer()
    addCount(1L, binCount);
    return null;
}
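
The same bin-level locking also backs the atomic convenience methods; computeIfAbsent() is where the ReservationNode from section 1 comes in, parking a placeholder in the empty bin while the mapping function runs. A short usage example (the output comments assume a fresh map):

import java.util.concurrent.ConcurrentHashMap;

public class PutDemo {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> map = new ConcurrentHashMap<>();
        map.put("a", 1);
        System.out.println(map.putIfAbsent("a", 2));                    // 1: existing mapping wins
        System.out.println(map.putIfAbsent("b", 2));                    // null: new mapping created
        // The mapping function runs atomically with respect to other writers of the same bin
        System.out.println(map.computeIfAbsent("c", k -> k.length()));  // 1
        System.out.println(map.size());                                 // 3
    }
}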


9. HashMap and serialization

HashMap implements the Serializable interface:

public class HashMap<K,V> extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable {

    private static final long serialVersionUID = 362498820763181265L;

    ...

    // Note the transient keyword here - why?
    transient Node<K,V>[] table;

    /**
     * Holds cached entrySet(). Note that AbstractMap fields are used
     * for keySet() and values().
     */
    transient Set<Map.Entry<K,V>> entrySet;

    /**
     * The number of key-value mappings contained in this map.
     */
    transient int size;

    // Counts the number of structural modifications
    transient int modCount;
}

Note that table is declared transient, which means the table's contents are never serialized directly. The reasons are:

  • HashMap is built on Object.hashCode(), and hashCode() is a native method: it depends on the underlying implementation, while Java itself is cross-platform.
  • Different JVMs on different machines may implement hashCode() differently.
  • For example, a key might have hashcode=1 on machine A but hashcode=2 on machine B. If a HashMap serialized on machine A were restored on machine B with its table intact, the entries would sit in the wrong buckets.
  • To avoid this, HashMap implements its own serialization methods:
private void writeObject(java.io.ObjectOutputStream s)
    throws IOException {
    // body omitted here
}

/**
 * Reconstitute the {@code HashMap} instance from a stream (i.e.,
 * deserialize it).
 */
private void readObject(java.io.ObjectInputStream s)
    throws IOException, ClassNotFoundException {
    // body omitted here
}
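
Even though table is transient, a HashMap round-trips through serialization just fine: readObject re-inserts every entry, so each key's bucket is recomputed with the hashCode() of the deserializing JVM. A quick check:

import java.io.*;
import java.util.HashMap;

// Round-trip demo: the table itself is not written out, but every entry is
// re-inserted on deserialization, so lookups still work on the new JVM.
public class SerializeDemo {
    public static void main(String[] args) throws Exception {
        HashMap<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(map);
        }
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            @SuppressWarnings("unchecked")
            HashMap<String, Integer> copy = (HashMap<String, Integer>) in.readObject();
            System.out.println(copy);            // {a=1, b=2}
        }
    }
}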

10. Comparing HashMap, Hashtable and ConcurrentHashMap

HashMap, Hashtable and ConcurrentHashMap are often discussed together, so some of their similarities and differences are summarized below:
|Aspect|HashMap|Hashtable|ConcurrentHashMap|
|:--|:--|:--|:--|
|1. Thread-safe?|No|Yes|Yes|
|2. Allows null keys?|Yes|No|No|
|3. Fail-fast iterator?|Yes|Yes (its Enumerations are not)|No, weakly consistent|
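
Row 3 can be verified with a quick experiment: structurally modifying a HashMap while iterating over it makes its fail-fast iterator throw ConcurrentModificationException (on a best-effort basis), whereas a ConcurrentHashMap iterator is weakly consistent and never throws it:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class FailFastDemo {
    public static void main(String[] args) {
        Map<Integer, String> hashMap = new HashMap<>();
        Map<Integer, String> chm = new ConcurrentHashMap<>();
        for (int i = 0; i < 4; i++) {
            hashMap.put(i, "v" + i);
            chm.put(i, "v" + i);
        }

        try {
            for (Integer k : hashMap.keySet())
                hashMap.put(100, "boom");          // structural modification during iteration
        } catch (java.util.ConcurrentModificationException e) {
            System.out.println("HashMap iterator is fail-fast");
        }

        for (Integer k : chm.keySet())
            chm.put(100, "ok");                    // allowed: weakly consistent iterator
        System.out.println("ConcurrentHashMap iterator never throws CME");
    }
}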

11. Testing HashMap's parameters: hash() and tableSizeFor()


import java.util.HashMap;

public class ThinkInHashMap
{
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    public static void main(String[] ar)
    {
        HashMap<Integer, String> map = new HashMap<Integer, String>(10);
        System.out.println(map.size());
        thinkInTableSize();
        thinkInHash();
    }

    private static void thinkInHash()
    {
        /**
         * n is the HashMap capacity<br>
         */
        int n = 16;

        /**
         * Test with Integer keys
         */
        Integer[] keys =
        { 1, 2, 3, 4, 5, 6, 7, 17, 18, 19, 20, 21 };
        for (Integer key : keys)
        {
            System.out.format("key=%2d\tkey.hashcode=%2d", key, key.hashCode());
            System.out.format("\tkey.hashcodeByHashMap=%2d", hash(key));
            System.out.println("\tLocationCountedByHashcode=" + ((n - 1) & hash(key)));
        }
        System.out.println("Note that although 1 and 17 have different hashcodes, they still map to the same bucket!!!\n\n");

        /**
         * Test with String keys
         */
        String[] ks =
        { "zhangjie", "zhang", "jie", "fan", "miao", "hello" };
        for (String s : ks)
        {
            System.out.format("key=%10s\tkey.hashcode=%d", s, s.hashCode());
            System.out.format("\tkey.hashcodeByHashMap=%s", hash(s));
            System.out.println("\tLocationCountedByHashcode=" + ((n - 1) & hash(s)));
        }
    }

    /**
     * HashMap's hash function for computing the spread hash of a key<br>
     *
     * @param key
     * @return
     */
    static final int hash(Object key)
    {
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    /***
     * Tests how HashMap adjusts the requested initial size so that the capacity is always a power of two<br>
     */
    private static void thinkInTableSize()
    {
        int[] clientSetSize =
        { 1, 2, 3, 5, 9, 17, 33, 65, 129 };
        for (int i : clientSetSize)
        {
            int actualSize = tableSizeFor(i);
            System.out.println("f(" + i + ")=" + actualSize + "\n");
        }
    }

    /****
     * HashMap's table-size initialization function: rounds the requested size up to a power of two<br>
     *
     * @param c
     * @return
     */
    private static final int tableSizeFor(int c)
    {
        int n = c - 1;
        n |= n >>> 1;
        System.out.print("n|=n>>1 : " + n + "\t");
        n |= n >>> 2;
        System.out.print("n|=n>>2 : " + n + "\t");
        n |= n >>> 4;
        System.out.print("n|=n>>4 : " + n + "\t");
        n |= n >>> 8;
        System.out.print("n|=n>>8 : " + n + "\t");
        n |= n >>> 16;
        System.out.print("n|=n>>16 : " + n + "\t");
        System.out.println("");
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }
}

12. Implementing a simple HashMap of our own

/***
 * A simple HashMap implemented from scratch<br>
 *
 * @author jay
 *
 * @param <K>
 * @param <V>
 */
public class MyHashMap<K, V>
{
    private Entry<K, V>[] table;
    private static final int DEFAULT_CAPACITY = 16;

    public MyHashMap()
    {
        this(DEFAULT_CAPACITY);
    }

    @SuppressWarnings("unchecked")
    public MyHashMap(int capa)
    {
        // Fixed capacity; unlike java.util.HashMap there is no resizing here
        table = (Entry<K, V>[]) new Entry[capa];
    }

    static class Entry<K, V>
    {
        K key;
        V value;
        Entry<K, V> next;

        public Entry(K k, V v, Entry<K, V> ne)
        {
            this.key = k;
            this.value = v;
            this.next = ne;
        }

        public Entry(K k, V v)
        {
            this(k, v, null);
        }

        public K getKey()
        {
            return key;
        }

        public void setKey(K key)
        {
            this.key = key;
        }

        public V getValue()
        {
            return value;
        }

        public void setValue(V value)
        {
            this.value = value;
        }

        public String toString()
        {
            if (next == null)
                return key + "=" + value;
            else
                return key + "=" + value + " -> " + next.toString();
        }
    }

    // The same spreading function as java.util.HashMap
    static final int hash(Object key)
    {
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    public V put(K key, V value)
    {
        int index = (table.length - 1) & hash(key);

        // If the key already exists in this bucket's chain, replace the value and return the old one
        for (Entry<K, V> e = table[index]; e != null; e = e.next)
        {
            if (e.getKey().equals(key))
            {
                V oldValue = e.getValue();
                e.setValue(value);
                return oldValue;
            }
        }

        // Otherwise insert the new entry at the head of the chain
        table[index] = new Entry<K, V>(key, value, table[index]);
        return null;
    }

    public V get(Object key)
    {
        // Must use the same index computation as put(), otherwise lookups can miss
        int index = (table.length - 1) & hash(key);

        for (Entry<K, V> e = table[index]; e != null; e = e.next)
        {
            if (key.equals(e.getKey()))
                return e.getValue();
        }

        return null;
    }

    public String toString()
    {
        if (table == null || table.length == 0)
            return null;

        StringBuilder builder = new StringBuilder("{\n");
        for (int i = 0; i < table.length; i++)
        {
            Entry<K, V> e = table[i];
            if (e != null)
            {
                builder.append(e.toString() + "\n");
            }
        }

        builder.append("}");
        return builder.toString();
    }

    public static void main(String[] ar)
    {
        MyHashMap<Integer, String> map = new MyHashMap<Integer, String>();
        for (int i = 1; i < 96; i++)
            map.put(i, "zhangjie_" + i);
        System.out.println(map.toString());
    }
}