Let's first look at an abstract class:
/**
 * @author draymonder
 */
public abstract class AbstractClassTest {
    private int Test1;
    public int Test2;

    public void test1() {
        return;
    }

    protected void test2() {
        return;
    }

    private void test3() {
        return;
    }

    void test4() {
        return;
    }

    public abstract void test5();

    protected abstract void test6();

    public static void test7() {
        return;
    }
}
Now let's look at an interface:
/**
 * @author draymonder
 */
public interface IntefaceTest {
    public int Test1 = 0;

    void test1();

    default void test2() {
        return;
    }

    public static void test3() {
        return;
    }
}
From this we can see that, as of JDK 8, an interface can use default to provide method implementations.

The default size of ArrayList's backing array is 10:
private static final int DEFAULT_CAPACITY = 10;
When an element is added, ensureCapacityInternal() is called to guarantee enough capacity; if there is not enough, grow() expands the array, and the new capacity is oldCapacity + (oldCapacity >> 1), i.e. 1.5 times the old capacity.
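For reference, here is grow() as I remember it from the JDK 8 ArrayList sources, lightly abridged; treat it as a sketch rather than the exact code:

private void grow(int minCapacity) {
    int oldCapacity = elementData.length;
    // new capacity = old capacity * 1.5
    int newCapacity = oldCapacity + (oldCapacity >> 1);
    if (newCapacity - minCapacity < 0)
        newCapacity = minCapacity;
    if (newCapacity - MAX_ARRAY_SIZE > 0)
        newCapacity = hugeCapacity(minCapacity);
    // the copy into a fresh array is what makes add() amortized O(1)
    elementData = Arrays.copyOf(elementData, newCapacity);
}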
Vector's backing array also has a default size of 10.
On expansion, Vector requests twice its current size, whereas ArrayList grows by a factor of 1.5.
Vector is synchronized, so it has higher overhead than ArrayList and slower access. Prefer ArrayList over Vector, since synchronization is something the programmer can fully control.
You can use Collections' synchronized-list wrapper instead:
List<String> list = new ArrayList<>();
List<String> synList = Collections.synchronizedList(list);
public boolean add(E e) {
    final ReentrantLock lock = this.lock;
    lock.lock();
    try {
        // copy the current array into a new array one slot larger
        Object[] elements = getArray();
        int len = elements.length;
        Object[] newElements = Arrays.copyOf(elements, len + 1);
        newElements[len] = e;
        // publish the new array; concurrent readers keep using the old one
        setArray(newElements);
        return true;
    } finally {
        lock.unlock();
    }
}

final void setArray(Object[] a) {
    array = a;
}
CopyOnWriteArrayList allows reads to proceed while a write is in progress, which greatly improves read performance, so it suits read-heavy, write-light workloads.
But CopyOnWriteArrayList has its drawbacks: every write copies the entire array, which costs memory and time, and a read may not see data that has just been written, so it is a poor fit for memory-sensitive or real-time scenarios.
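A quick demo of the stale-read drawback (my own toy example, not from the JDK docs): an iterator created before a write keeps operating on the old snapshot:

import java.util.Iterator;
import java.util.concurrent.CopyOnWriteArrayList;

public class CowSnapshotDemo {
    public static void main(String[] args) {
        CopyOnWriteArrayList<String> list = new CopyOnWriteArrayList<>();
        list.add("A");
        Iterator<String> it = list.iterator(); // snapshot of [A]
        list.add("B");                         // writes into a fresh copy
        while (it.hasNext())
            System.out.println(it.next());     // prints only A
        System.out.println(list);              // [A, B]
    }
}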
hash & (cap - 1) is equivalent to hash % cap when cap is a power of two, but the former is more efficient. When the table grows, cap doubles to 2 * cap, and rehashing only has to test hash & oldCap: if it is 0 the entry stays at table[oldIndex], otherwise it moves to table[oldIndex + oldCap].
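A small self-contained demo of both facts (the class and variable names here are mine, not HashMap's):

public class HashMapIndexDemo {
    public static void main(String[] args) {
        int oldCap = 16;              // table capacity, a power of two
        int newCap = oldCap << 1;     // doubled on resize

        int hash = "draymonder".hashCode() & 0x7fffffff; // non-negative hash
        // masking is equivalent to modulo for power-of-two capacities
        System.out.println((hash & (oldCap - 1)) == hash % oldCap); // true

        int oldIndex = hash & (oldCap - 1);
        // after doubling, only one extra bit of the hash matters
        int newIndex = (hash & oldCap) == 0 ? oldIndex : oldIndex + oldCap;
        System.out.println(newIndex == (hash & (newCap - 1)));      // true
    }
}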
Starting from mask = num = 10010000, smear the highest set bit to the right:

mask |= mask >> 1    11011000
mask |= mask >> 2    11111110
mask |= mask >> 4    11111111
mask + 1 is then the smallest power of two greater than the original number.
num       10010000
mask+1   100000000
Here is the HashMap code that computes the table capacity:
static final int tableSizeFor(int cap) {
    int n = cap - 1;
    n |= n >>> 1;
    n |= n >>> 2;
    n |= n >>> 4;
    n |= n >>> 8;
    n |= n >>> 16;
    return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
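As a sanity check, a hypothetical harness (the method body copied from above, MAXIMUM_CAPACITY inlined as 1 << 30, as in HashMap) run on the worked example:

public class TableSizeForDemo {
    static final int MAXIMUM_CAPACITY = 1 << 30;

    static int tableSizeFor(int cap) {
        int n = cap - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    public static void main(String[] args) {
        // 0b10010000 = 144, so the next power of two is 256 = 0b100000000
        System.out.println(Integer.toBinaryString(tableSizeFor(0b10010000)));
    }
}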
During a resize, JDK 7's HashMap uses head insertion, so a list that was A->B can break under concurrency: thread 1 has just taken out A and is about to rehash it in behind B, but the B->A link created by another thread's head insertion has not yet been severed, so we end up with exactly A->B->A, a cycle.
If the load factor is too small, space utilization is too low; if it is too large, hash collisions become frequent.
Let's look at the comment in the HashMap source:
Because TreeNodes are about twice the size of regular nodes, we use them only when bins contain enough nodes to warrant use (see TREEIFY_THRESHOLD). And when they become too small (due to removal or resizing) they are converted back to plain bins. In usages with well-distributed user hashCodes, tree bins are rarely used. Ideally, under random hashCodes, the frequency of nodes in bins follows a Poisson distribution (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of about 0.5 on average for the default resizing threshold of 0.75, although with a large variance because of resizing granularity. Ignoring variance, the expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The first values are:

0: 0.60653066
1: 0.30326533
2: 0.07581633
3: 0.01263606
4: 0.00157952
5: 0.00015795
6: 0.00001316
7: 0.00000094
8: 0.00000006
more: less than 1 in ten million
So bin occupancy follows a Poisson distribution, and a single bucket holding 8 nodes is an extremely low-probability event; when it does happen, we can switch that bin to a red-black tree to speed up get operations.
ConcurrentHashMap supports neither null keys nor null values. First, the JDK 7 implementation:
// default table size 16 (the HashMap-style array inside each Segment)
static final int DEFAULT_INITIAL_CAPACITY = 16;
// load factor 0.75
static final float DEFAULT_LOAD_FACTOR = 0.75f;
// the Segment array of the ConcurrentHashMap
final Segment<K,V>[] segments;
// default concurrency level 16
static final int DEFAULT_CONCURRENCY_LEVEL = 16;

// Segment is a ReentrantLock subclass, so it has lock operations
static final class Segment<K,V> extends ReentrantLock implements Serializable {
    // the usual HashMap suspects: table, entry count, threshold, load factor
    transient volatile HashEntry<K,V>[] table;
    transient int count;
    transient int threshold;
    final float loadFactor;

    Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
        this.loadFactor = lf;
        this.threshold = threshold;
        this.table = tab;
    }
}

// same old friend in a new disguise!!! HashEntry stores the key, value,
// hash, and the next node
static final class HashEntry<K,V> {
    final int hash;
    final K key;
    volatile V value;
    volatile HashEntry<K,V> next;
}

// minimum length of the HashEntry[] inside a Segment
static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
// used to locate a Segment within the segments array, explained below
final int segmentMask;
final int segmentShift;
public V put(K key, V value) {
    Segment<K,V> s;
    // step ① note that value must not be null!!!
    if (value == null)
        throw new NullPointerException();
    // compute the hash from the key; the key must not be null either,
    // otherwise hash(key) throws a NullPointerException
    int hash = hash(key);
    // step ② segmentShift/segmentMask come into play: locate the Segment
    int j = (hash >>> segmentShift) & segmentMask;
    // step ③ check whether the Segment at that position is null;
    // if so, create and initialize it first, then put; otherwise put directly
    if ((s = (Segment<K,V>)UNSAFE.getObject          // nonvolatile; recheck
         (segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment
        s = ensureSegment(j);
    return s.put(key, hash, value, false);
}
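For concreteness, here is how segmentShift and segmentMask come about for the default concurrencyLevel of 16; a sketch mirroring the JDK 7 constructor logic, with my own class and variable names:

public class SegmentLocateDemo {
    public static void main(String[] args) {
        int concurrencyLevel = 16;         // the default
        int sshift = 0, ssize = 1;
        while (ssize < concurrencyLevel) { // power of two >= concurrencyLevel
            ++sshift;
            ssize <<= 1;                   // ends with ssize = 16, sshift = 4
        }
        int segmentShift = 32 - sshift;    // 28: use the top 4 hash bits
        int segmentMask = ssize - 1;       // 15 = 0b1111
        int hash = 0xABCD1234;
        // step ② above: index into the segments array
        System.out.println((hash >>> segmentShift) & segmentMask); // 10 (0xA)
    }
}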
You can see that under JDK 7, ConcurrentHashMap creates segments copy-on-write style as well, using segment 0 as the prototype, and installs the copy with CAS:
private Segment<K,V> ensureSegment(int k) {
    // grab the segments array
    final Segment<K,V>[] ss = this.segments;
    long u = (k << SSHIFT) + SBASE; // raw offset
    Segment<K,V> seg;
    if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
        // copy the settings of segment 0
        Segment<K,V> proto = ss[0]; // use segment 0 as prototype
        // same capacity as segment 0: 2
        int cap = proto.table.length;
        // same load factor as segment 0: 0.75
        float lf = proto.loadFactor;
        // same threshold as segment 0: 1
        int threshold = (int)(cap * lf);
        // create the HashEntry array tab with that capacity
        HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
            == null) { // recheck: create the Segment with those settings
            Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
            while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
                   == null) {
                // CAS the new Segment into place; only one thread wins
                if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
                    break;
            }
        }
    }
    return seg;
}
Segment.put first acquires the lock, then locates tab[hash(key)] and does the actual work:
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
    // step ① try to acquire the lock; if that fails, scanAndLockForPut
    // spins/blocks for it and may pre-create the node
    HashEntry<K,V> node = tryLock() ? null :
        scanAndLockForPut(key, hash, value);
    V oldValue;
    try {
        // step ② get this Segment's HashEntry[]
        HashEntry<K,V>[] tab = table;
        // compute the index within HashEntry[]
        int index = (tab.length - 1) & hash;
        // first node at that position
        HashEntry<K,V> first = entryAt(tab, index);
        for (HashEntry<K,V> e = first;;) {
            // if non-null, walk the chain
            if (e != null) {
                K k;
                // case ① key already present: replace the old value
                if ((k = e.key) == key ||
                    (e.hash == hash && key.equals(k))) {
                    oldValue = e.value;
                    if (!onlyIfAbsent) {
                        e.value = value;
                        ++modCount;
                    }
                    break;
                }
                e = e.next;
            }
            else {
                // case ② a node was pre-created while waiting for the lock
                if (node != null)
                    // head insertion into the list
                    node.setNext(first);
                else
                    // case ③ slot empty: create a new node
                    // (again, head insertion)
                    node = new HashEntry<K,V>(hash, key, value, first);
                // entry count + 1
                int c = count + 1;
                // if the count exceeds the threshold
                if (c > threshold && tab.length < MAXIMUM_CAPACITY)
                    // resize
                    rehash(node);
                else
                    // otherwise just set the node at the computed index
                    setEntryAt(tab, index, node);
                ++modCount;
                count = c;
                // a fresh insert returns null
                oldValue = null;
                break;
            }
        }
    } finally {
        // step ③ unlock
        unlock();
    }
    // on replacement, return the old value
    return oldValue;
}
It first retries tryLock up to 64 times (MAX_SCAN_RETRIES); only when that fails does it fall back to blocking on the ReentrantLock:
private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
    // locate the matching HashEntry via the Segment and the hash
    HashEntry<K,V> first = entryForHash(this, hash);
    HashEntry<K,V> e = first;
    HashEntry<K,V> node = null;
    // retry counter
    int retries = -1; // negative while locating node
    // spin trying to acquire the lock
    while (!tryLock()) {
        HashEntry<K,V> f; // to recheck first below
        // step ①
        if (retries < 0) {
            // case ① not found: the key was not in the table before
            if (e == null) {
                if (node == null) // speculatively create node
                    // pre-create a HashEntry for later; retries becomes 0
                    node = new HashEntry<K,V>(hash, key, value, null);
                retries = 0;
            }
            // case ② found: the very first node matches; retries becomes 0
            else if (key.equals(e.key))
                retries = 0;
            // case ③ first node doesn't match: move on, retries stays -1
            else
                e = e.next;
        }
        // step ② tried MAX_SCAN_RETRIES times and still no lock, unbelievable!
        else if (++retries > MAX_SCAN_RETRIES) {
            // give up spinning and park on the lock
            lock();
            break;
        }
        // step ③ if the entry for this key changed during those
        // MAX_SCAN_RETRIES attempts, start over from the head
        else if ((retries & 1) == 0 &&
                 (f = entryForHash(this, hash)) != first) {
            e = first = f; // re-traverse if entry changed
            retries = -1;
        }
    }
    return node;
}
As for rehash, it works the same way as the JDK 8 rehash described above.
For size(), it first retries the count 2 times (RETRIES_BEFORE_LOCK) without locking; if the sums still disagree, it falls back to the ReentrantLock, locking every segment in turn, waiting for each unlock, and adding up the size of each tab.
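From memory of the JDK 7 sources (lightly abridged, so treat it as a sketch), size() looks like this:

public int size() {
    final Segment<K,V>[] segments = this.segments;
    int size;
    boolean overflow; // true if size overflows an int
    long sum;         // sum of modCounts
    long last = 0L;   // previous sum
    int retries = -1; // first iteration isn't a retry
    try {
        for (;;) {
            // already tried RETRIES_BEFORE_LOCK (2) times: lock every segment
            if (retries++ == RETRIES_BEFORE_LOCK) {
                for (int j = 0; j < segments.length; ++j)
                    ensureSegment(j).lock();
            }
            sum = 0L; size = 0; overflow = false;
            for (int j = 0; j < segments.length; ++j) {
                Segment<K,V> seg = segmentAt(segments, j);
                if (seg != null) {
                    sum += seg.modCount;
                    int c = seg.count;
                    if (c < 0 || (size += c) < 0)
                        overflow = true;
                }
            }
            // two consecutive identical modCount sums: the result is stable
            if (sum == last)
                break;
            last = sum;
        }
    } finally {
        if (retries > RETRIES_BEFORE_LOCK) {
            for (int j = 0; j < segments.length; ++j)
                segmentAt(segments, j).unlock();
        }
    }
    return overflow ? Integer.MAX_VALUE : size;
}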
Now the JDK 8 version of put:

public V put(K key, V value) {
    return putVal(key, value, false);
}

/** Implementation for put and putIfAbsent */
final V putVal(K key, V value, boolean onlyIfAbsent) {
    // key/value must not be null!!!
    if (key == null || value == null) throw new NullPointerException();
    // compute the hash
    int hash = spread(key.hashCode());
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        // note ① table is null: initialize it
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        // if the slot is null, create a new node and set it with CAS
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break;                   // no lock when adding to empty bin
        }
        // this bin is currently being moved by a resize
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        // the slot is non-empty
        else {
            V oldVal = null;
            // note ② lock the first node of the bin
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    // the bin is a linked list
                    if (fh >= 0) {
                        binCount = 1;
                        // walk the whole chain
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            // key already present: replace the old value
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            // new key: append at the tail of the list
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    // the bin is a red-black tree
                    else if (f instanceof TreeBin) {
                        Node<K,V> p;
                        binCount = 2;
                        // insert into the tree
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                              value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                    else if (f instanceof ReservationNode)
                        throw new IllegalStateException("Recursive update");
                }
            }
            if (binCount != 0) {
                // treeify the bin once it reaches TREEIFY_THRESHOLD (8) nodes
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    // note ③ bump the element count
    addCount(1L, binCount);
    return null;
}
In practice, hash collisions are fairly unlikely, so synchronized is actually invoked rather rarely; most operations go through the CAS path... and then there are the usual advantages of CAS over synchronized...
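To make that trailing thought concrete (my own toy example): CAS is optimistic and only retries under contention, while synchronized always pays for mutual exclusion:

import java.util.concurrent.atomic.AtomicInteger;

public class CasVsSync {
    private final AtomicInteger casCounter = new AtomicInteger();
    private int lockedCounter;

    // optimistic: no blocking; losers of a race simply retry
    int incrementCas() {
        for (;;) {
            int cur = casCounter.get();
            if (casCounter.compareAndSet(cur, cur + 1))
                return cur + 1;
        }
    }

    // pessimistic: every caller acquires the monitor, contended or not
    synchronized int incrementLocked() {
        return ++lockedCounter;
    }
}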
Every time a put finishes, addCount is called:
private final void addCount(long x, int check) {
    CounterCell[] as; long b, s;
    // first try a CAS on baseCount; if CounterCells already exist or the
    // CAS loses a race, fall back to the striped cells
    if ((as = counterCells) != null ||
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
        CounterCell a; long v; int m;
        boolean uncontended = true;
        if (as == null || (m = as.length - 1) < 0 ||
            (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
            !(uncontended =
              U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
            // contention on the cell as well: take the slow path
            fullAddCount(x, uncontended);
            return;
        }
        if (check <= 1)
            return;
        s = sumCount();
    }
    // a put (check >= 0) may need to trigger or help a resize
    if (check >= 0) {
        Node<K,V>[] tab, nt; int n, sc;
        while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
               (n = tab.length) < MAXIMUM_CAPACITY) {
            int rs = resizeStamp(n);
            if (sc < 0) {
                // a resize is already running: join it if possible
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0)
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                    transfer(tab, nt);
            }
            // otherwise start a new resize
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2))
                transfer(tab, null);
            s = sumCount();
        }
    }
}
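size() and mappingCount() then read the count back by summing baseCount and all the cells; from memory of the JDK 8 sources, sumCount() is simply:

final long sumCount() {
    CounterCell[] as = counterCells; CounterCell a;
    long sum = baseCount;
    if (as != null) {
        // add up every striped cell
        for (int i = 0; i < as.length; ++i) {
            if ((a = as[i]) != null)
                sum += a.value;
        }
    }
    return sum;
}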