Coverage Report - org.apache.myfaces.shared.util.BiLevelCacheMap
 
Classes in this File    Line Coverage    Branch Coverage    Complexity
BiLevelCacheMap         0% (0/88)        0% (0/30)          1.882
 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.myfaces.shared.util;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

/**
 * A bi-level cache based on HashMap for caching objects with minimal synchronization
 * overhead. The limitation is that <code>remove()</code> is very expensive.
 * <p>
 * Access to the L1 map is not synchronized; access to the L2 map is synchronized. New values
 * are first stored in L2. Once there have been more than a specified number of
 * misses on L1, the L1 and L2 maps are merged, the merged map is assigned to L1,
 * and L2 is cleared.
 * </p>
 * <p>
 * IMPORTANT: entrySet(), keySet(), and values() return unmodifiable snapshot collections.
 * </p>
 */
public abstract class BiLevelCacheMap implements Map
{
    //~ Instance fields ----------------------------------------------------------------------------

    private static final int INITIAL_SIZE_L1 = 32;

    /** To preinitialize <code>_cacheL1</code> with default values use an initialization block */
    protected Map       _cacheL1;

    /** Must be final because it is used for synchronization */
    private final Map   _cacheL2;
    private final int   _mergeThreshold;
    private int         _missCount;

    //~ Constructors -------------------------------------------------------------------------------

    public BiLevelCacheMap(int mergeThreshold)
    {
        _cacheL1            = new HashMap(INITIAL_SIZE_L1);
        _cacheL2            = new HashMap(HashMapUtils.calcCapacity(mergeThreshold));
        _mergeThreshold     = mergeThreshold;
    }

    //~ Methods ------------------------------------------------------------------------------------

    public boolean isEmpty()
    {
        synchronized (_cacheL2)
        {
            return _cacheL1.isEmpty() && _cacheL2.isEmpty();
        }
    }

    public void clear()
    {
        synchronized (_cacheL2)
        {
            _cacheL1 = new HashMap(); // default size
            _cacheL2.clear();
        }
    }

    public boolean containsKey(Object key)
    {
        synchronized (_cacheL2)
        {
            return _cacheL1.containsKey(key) || _cacheL2.containsKey(key);
        }
    }

    public boolean containsValue(Object value)
    {
        synchronized (_cacheL2)
        {
            return _cacheL1.containsValue(value) || _cacheL2.containsValue(value);
        }
    }

    public Set entrySet()
    {
        synchronized (_cacheL2)
        {
            mergeIfL2NotEmpty();
            return Collections.unmodifiableSet(_cacheL1.entrySet());
        }
    }

    public Object get(Object key)
    {
        Map    cacheL1 = _cacheL1;
        Object retval = cacheL1.get(key);
        if (retval != null)
        {
            return retval;
        }

        synchronized (_cacheL2)
        {
            // Has another thread merged caches while we were waiting on the mutex? Then check L1 again
            if (cacheL1 != _cacheL1)
            {
                retval = _cacheL1.get(key);
                if (retval != null)
                {
                    // do not update miss count (it is not a miss anymore)
                    return retval;
                }
            }

            retval = _cacheL2.get(key);
            if (retval == null)
            {
                retval = newInstance(key);
                if (retval != null)
                {
                    put(key, retval);
                    mergeIfNeeded();
                }
            }
            else
            {
                mergeIfNeeded();
            }
        }

        return retval;
    }

    public Set keySet()
    {
        synchronized (_cacheL2)
        {
            mergeIfL2NotEmpty();
            return Collections.unmodifiableSet(_cacheL1.keySet());
        }
    }

    /**
     * If the key is already in cacheL1, the new value will show up with a delay,
     * since the L2->L1 merge may not happen immediately. To force the merge sooner,
     * call <code>size()</code>.
     */
    public Object put(Object key, Object value)
    {
        synchronized (_cacheL2)
        {
            _cacheL2.put(key, value);

            // not really a miss, but merge to avoid a big increase in L2 size
            // (it cannot be reallocated, it is final)
            mergeIfNeeded();
        }

        return value;
    }

    public void putAll(Map map)
    {
        synchronized (_cacheL2)
        {
            mergeIfL2NotEmpty();

            // separate merge to avoid increasing L2 size too much
            // (it cannot be reallocated, it is final)
            merge(map);
        }
    }

    /** This operation is very expensive. A full copy of the Map is created */
    public Object remove(Object key)
    {
        synchronized (_cacheL2)
        {
            if (!_cacheL1.containsKey(key) && !_cacheL2.containsKey(key))
            {
                // nothing to remove
                return null;
            }

            Object retval;
            Map newMap;
            synchronized (_cacheL1)
            {
                // "dummy" synchronization to guarantee _cacheL1 will be assigned after fully initialized
                // at least until JVM 1.5 where this should be guaranteed by the volatile keyword
                newMap = HashMapUtils.merge(_cacheL1, _cacheL2);
                retval = newMap.remove(key);
            }

            _cacheL1 = newMap;
            _cacheL2.clear();
            _missCount = 0;
            return retval;
        }
    }

    public int size()
    {
        // Note: cannot simply return L1.size + L2.size
        //       because the key sets might overlap
        synchronized (_cacheL2)
        {
            mergeIfL2NotEmpty();
            return _cacheL1.size();
        }
    }

    public Collection values()
    {
        synchronized (_cacheL2)
        {
            mergeIfL2NotEmpty();
            return Collections.unmodifiableCollection(_cacheL1.values());
        }
    }

    private void mergeIfL2NotEmpty()
    {
        if (!_cacheL2.isEmpty())
        {
            merge(_cacheL2);
        }
    }

    private void mergeIfNeeded()
    {
        if (++_missCount >= _mergeThreshold)
        {
            merge(_cacheL2);
        }
    }

    private void merge(Map map)
    {
        Map newMap;
        synchronized (_cacheL1)
        {
            // "dummy" synchronization to guarantee _cacheL1 will be assigned after fully initialized
            // at least until JVM 1.5 where this should be guaranteed by the volatile keyword
            // But is this enough (in our particular case) to resolve the issues with DCL?
            newMap = HashMapUtils.merge(_cacheL1, map);
        }
        _cacheL1 = newMap;
        _cacheL2.clear();
        _missCount = 0;
    }

    /**
     * Subclasses must implement this method to have new instances created automatically,
     * or alternatively can use <code>put</code> to add new items to the cache.<br>
     *
     * Implementing this method is preferred, as it guarantees that only one instance
     * per key will ever be created. Calling put() to add items in a multi-threaded
     * situation requires external synchronization to prevent two instances for the
     * same key, which defeats the purpose of this cache
     * (put() is useful when initialization is done during startup and items
     * are not added during execution, or when (temporarily) having two
     * or more instances for the same key is not a concern).<br>
     *
     * @param key lookup key
     * @return new instance for the requested key
     */
    protected abstract Object newInstance(Object key);
}
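
The newInstance() contract described in the javadoc above is easiest to see with a concrete subclass. The sketch below is illustrative only and is not part of the reported source; the class name UpperCaseCache, its merge threshold, and its behavior are hypothetical, chosen to show how get() lazily creates and caches one value per key.

    import org.apache.myfaces.shared.util.BiLevelCacheMap;

    // Hypothetical subclass: caches the upper-cased form of each String key.
    // newInstance() is invoked (under the L2 lock) only on a cache miss, so each
    // key is computed at most once as long as callers go through get().
    public class UpperCaseCache extends BiLevelCacheMap
    {
        public UpperCaseCache()
        {
            super(64); // merge L2 into L1 after 64 misses (threshold chosen arbitrarily here)
        }

        protected Object newInstance(Object key)
        {
            return ((String) key).toUpperCase();
        }
    }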
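A short usage sketch, again assuming the hypothetical UpperCaseCache above, shows the L1/L2 behavior that the class and put() javadocs describe: new values land in L2 first, while size() (like entrySet(), keySet(), and values()) forces an L2-to-L1 merge.

    public class UpperCaseCacheDemo
    {
        public static void main(String[] args)
        {
            UpperCaseCache cache = new UpperCaseCache();

            // First access misses both L1 and L2, so newInstance("foo") runs and the
            // result is stored via put(); later accesses are served from L1 or L2.
            System.out.println(cache.get("foo"));   // FOO

            // put() writes to L2 only; the value is visible to get() immediately,
            // but may reach L1 only after the next merge.
            cache.put("bar", "BAR");

            // size() merges L2 into L1 before counting.
            System.out.println(cache.size());       // 2
        }
    }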