#!/usr/bin/env python # coding: utf-8 #

Counting Bits

#
# # #

Given a non-negative integer num, for every number i in the range 0 ≤ i ≤ num, calculate the number of 1's in its binary representation and return the results as an array.

# #

Example 1:

# #
Input: 2
# Output: [0,1,1]
# #

Example 2:

# #
Input: 5
# Output: [0,1,1,2,1,2]
# 
# # #

 

# Source #
#

Code

# In[9]:
def count_bits(num):
    """Naive answer. Really slow.

    For each i in [0, num], format i as a 64-bit binary string and sum
    its digit characters to count the 1s.

    :param num: non-negative upper bound of the range (inclusive)
    :return: list where index i holds the number of 1-bits in i
    """
    res = []
    for i in range(0, num + 1):
        # '{:064b}' zero-pads to 64 binary digits; summing the int value
        # of each character counts the set bits (zeros contribute 0).
        res.append(sum(int(char) for char in '{:064b}'.format(i)))
    return res
#

Check

# In[23]: count_bits(2) # In[24]: count_bits(5) # In[25]: count_bits(25) #
#

Follow up:

#

It is very easy to come up with a solution with run time O(n*sizeof(integer)). But can you do it with the following constraints:

# # # #

Code

# In[ ]:
def count_bits(num):
    """Dynamic Programming, O(num) time.

    Recurrence on the last binary digit:
      case 1: n is even --> ends in 0 --> removing the last binary digit
              doesn't change the number of 1s --> f(n) = f(n >> 1) = f(n // 2)
      case 2: n is odd  --> ends in 1 --> we can pluck it and add 1
              --> f(n) = 1 + f(n >> 1) = 1 + f(n // 2)

    :param num: non-negative upper bound of the range (inclusive)
    :return: list where index i holds the number of 1-bits in i
    """
    result = [0]
    for i in range(1, num + 1):
        # Option 1
        d, m = divmod(i, 2)
        result.append(result[d] + m)
        # Option 2:
        # result.append(result[i >> 1] + (i & 1))  # i&1 = 0 if even, 1 if odd
    return result


# In[ ]:
def count_bits(num):
    """Using the following bit manipulation trick (Brian Kernighan's Algorithm):

    n & (n-1) --> removes the rightmost set bit:
        37 & 36 = 36 --> 100101 & 100100 = 100100
        36 & 35 = 32 --> 100100 & 100000 = 100000
        32 & 31 = 0  --> 100000 & 011111 = 000000

    Subtracting 1 flips all the bits after the rightmost set bit
    (including the rightmost set bit itself):
        10: 000010|10    9: 0000100|1    8: 0000|1000    7: 0000011|1
         9: 000010|01    8: 0000100|0    7: 0000|0111    6: 0000011|0

    :param num: non-negative upper bound of the range (inclusive)
    :return: list where index i holds the number of 1-bits in i
    """
    def kernighan_algo(n):
        """Time complexity O(k) with k the number of set bits (as opposed to O(bits))."""
        count = 0
        while n:
            n = n & (n - 1)  # clear the rightmost set bit
            count = count + 1
        return count

    result = [0]
    for i in range(1, num + 1):
        nb_setbit = kernighan_algo(i)
        result.append(nb_setbit)
    return result


# In[ ]:
def count_bits(num):
    """One-liner version of the above solution.

    Combines Kernighan's trick with DP: i & (i-1) clears the lowest set
    bit of i, so bits(i) = bits(i & (i-1)) + 1 for every i >= 1.

    :param num: non-negative upper bound of the range (inclusive)
    :return: list where index i holds the number of 1-bits in i
    """
    result = [0]
    for i in range(1, num + 1):
        result.append(result[i & (i - 1)] + 1)
    return result